Dataset schema (one row per notebook file; dtype with min/max where reported):

| field | dtype | min | max |
| --- | --- | --- | --- |
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 6 | 14.9M |
| ext | stringclasses | 1 value | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 6 | 260 |
| max_stars_repo_name | stringlengths | 6 | 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 41 |
| max_stars_repo_licenses | list | | |
| max_stars_count | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 | 24 |
| max_issues_repo_path | stringlengths | 6 | 260 |
| max_issues_repo_name | stringlengths | 6 | 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 41 |
| max_issues_repo_licenses | list | | |
| max_issues_count | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 | 24 |
| max_forks_repo_path | stringlengths | 6 | 260 |
| max_forks_repo_name | stringlengths | 6 | 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 41 |
| max_forks_repo_licenses | list | | |
| max_forks_count | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 | 24 |
| avg_line_length | float64 | 2 | 1.04M |
| max_line_length | int64 | 2 | 11.2M |
| alphanum_fraction | float64 | 0 | 1 |
| cells | list | | |
| cell_types | list | | |
| cell_type_groups | list | | |
cb60952d5075cffa8082916813bab7127518ebcc
31,606
ipynb
Jupyter Notebook
results/.ipynb_checkpoints/Untitled-checkpoint.ipynb
Satriosadrakha/CapsNet-Tensorflow-Sunda-Kuno
87673b2c6a146672fcbbe2e5c0382c2b8d4acaaf
[ "Apache-2.0" ]
null
null
null
results/.ipynb_checkpoints/Untitled-checkpoint.ipynb
Satriosadrakha/CapsNet-Tensorflow-Sunda-Kuno
87673b2c6a146672fcbbe2e5c0382c2b8d4acaaf
[ "Apache-2.0" ]
null
null
null
results/.ipynb_checkpoints/Untitled-checkpoint.ipynb
Satriosadrakha/CapsNet-Tensorflow-Sunda-Kuno
87673b2c6a146672fcbbe2e5c0382c2b8d4acaaf
[ "Apache-2.0" ]
null
null
null
196.310559
14,388
0.914984
[ [ [ "#Hasil", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "data = {'GAN':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],\n 'Dataset':[1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3],\n 'Iteration_Routing':[1,1,2,2,3,3,1,1,2,2,3,3,1,1,2,2,3,3,1,1,2,2,3,3,1,1,2,2,3,3,1,1,2,2,3,3],\n 'Leaky_ReLu':[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1],\n 'Accuracy':[],\n 'Loss':[]\n }", "_____no_output_____" ], [ "df = pd.DataFrame(data,columns=['GAN','Dataset','Iteration_Routing','Leaky_ReLu'])\ncorrMatrix = df.corr()\nsn.heatmap(corrMatrix, annot=True)\nplt.show()", "_____no_output_____" ], [ "# reading csv file \ndf2 = pd.read_csv(\"Result.csv\")", "_____no_output_____" ], [ "corrMatrix = df2.corr()\nsn.heatmap(corrMatrix, annot=True)\nplt.show()", "_____no_output_____" ], [ "corr = df2.corr()\nax = sns.heatmap(\n corr, \n vmin=-1, vmax=1, center=0,\n cmap=sns.diverging_palette(20, 220, n=200),\n square=True\n)\nax.set_xticklabels(\n ax.get_xticklabels(),\n rotation=45,\n horizontalalignment='right'\n)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb60b22e04066f1c7bb43bac887fbb3cfcc36b98
1,847
ipynb
Jupyter Notebook
docs/source/auto_examples/CreatingDetectors/Coupling:LPMode.ipynb
paaube/PyMieSim
074e58cfe9b42ef51f3b03aaad6e56ca341099a5
[ "MIT" ]
null
null
null
docs/source/auto_examples/CreatingDetectors/Coupling:LPMode.ipynb
paaube/PyMieSim
074e58cfe9b42ef51f3b03aaad6e56ca341099a5
[ "MIT" ]
null
null
null
docs/source/auto_examples/CreatingDetectors/Coupling:LPMode.ipynb
paaube/PyMieSim
074e58cfe9b42ef51f3b03aaad6e56ca341099a5
[ "MIT" ]
null
null
null
34.203704
858
0.460747
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# LP Mode\n", "_____no_output_____" ] ], [ [ "# sphinx_gallery_thumbnail_path = '../images/Coupling:LPMode.png'\n\ndef run(Plot, Save):\n from PyMieSim.Source import PlaneWave\n from PyMieSim.Detector import LPmode\n\n\n Source = PlaneWave(Wavelength = 450e-9,\n Polarization = 0,\n E0 = 0)\n\n Detector = LPmode(Mode = (1, 1),\n Rotation = 0.,\n Sampling = 201,\n NA = 0.4,\n GammaOffset = 0,\n PhiOffset = 40,\n CouplingMode = 'Centered')\n\n if Plot:\n Detector.Plot()\n\n if Save:\n from pathlib import Path\n dir = f'docs/images/{Path(__file__).stem}'\n Detector.SaveFig(dir)\n\nif __name__ == '__main__':\n run(Plot=True, Save=False)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
cb60bc776eff746be8d43b800423d1c5d19ac255
25,835
ipynb
Jupyter Notebook
notebooks/tag_tests.ipynb
ephes/wagtail_srcset
1a00057a7a105071bb7e72c0eb1880fcf19aa265
[ "BSD-3-Clause" ]
11
2020-02-25T14:08:03.000Z
2021-01-13T17:44:19.000Z
notebooks/tag_tests.ipynb
ephes/wagtail_srcset
1a00057a7a105071bb7e72c0eb1880fcf19aa265
[ "BSD-3-Clause" ]
1
2020-05-19T18:05:21.000Z
2020-05-21T07:00:40.000Z
notebooks/tag_tests.ipynb
ephes/wagtail_srcset
1a00057a7a105071bb7e72c0eb1880fcf19aa265
[ "BSD-3-Clause" ]
2
2020-06-12T07:36:06.000Z
2020-07-16T16:31:49.000Z
49.970986
1,388
0.638475
[ [ [ "from django.template import Context\n\nfrom django.template.base import Token\nfrom django.template.base import Parser\nfrom django.template.base import Template\nfrom django.template.base import TokenType\n\nfrom django.core.management import call_command\n\nfrom wagtail_srcset.templatetags.wagtail_srcset_tags import srcset_image", "_____no_output_____" ], [ "from django.core.files.uploadedfile import SimpleUploadedFile\n\nfrom wagtail.images.models import Image as WagtailImage", "_____no_output_____" ] ], [ [ "# setup db", "_____no_output_____" ] ], [ [ "call_command(\"migrate\")", "Operations to perform:\n Apply all migrations: admin, auth, contenttypes, sessions, sites, taggit, wagtailadmin, wagtailcore, wagtaildocs, wagtailembeds, wagtailforms, wagtailimages, wagtailredirects, wagtailsearch, wagtailusers\nRunning migrations:\n Applying contenttypes.0001_initial... OK\n Applying auth.0001_initial... OK\n Applying admin.0001_initial... OK\n Applying admin.0002_logentry_remove_auto_add... OK\n Applying admin.0003_logentry_add_action_flag_choices... OK\n Applying contenttypes.0002_remove_content_type_name... OK\n Applying auth.0002_alter_permission_name_max_length... OK\n Applying auth.0003_alter_user_email_max_length... OK\n Applying auth.0004_alter_user_username_opts... OK\n Applying auth.0005_alter_user_last_login_null... OK\n Applying auth.0006_require_contenttypes_0002... OK\n Applying auth.0007_alter_validators_add_error_messages... OK\n Applying auth.0008_alter_user_username_max_length... OK\n Applying auth.0009_alter_user_last_name_max_length... OK\n Applying auth.0010_alter_group_name_max_length... OK\n Applying auth.0011_update_proxy_permissions... OK\n Applying sessions.0001_initial... OK\n Applying sites.0001_initial... OK\n Applying sites.0002_alter_domain_unique... OK\n Applying taggit.0001_initial... OK\n Applying taggit.0002_auto_20150616_2121... OK\n Applying taggit.0003_taggeditem_add_unique_index... OK\n Applying wagtailcore.0001_squashed_0016_change_page_url_path_to_text_field... OK\n Applying wagtailcore.0017_change_edit_page_permission_description... OK\n Applying wagtailcore.0018_pagerevision_submitted_for_moderation_index... OK\n Applying wagtailcore.0019_verbose_names_cleanup... OK\n Applying wagtailcore.0020_add_index_on_page_first_published_at... OK\n Applying wagtailcore.0021_capitalizeverbose... OK\n Applying wagtailcore.0022_add_site_name... OK\n Applying wagtailcore.0023_alter_page_revision_on_delete_behaviour... OK\n Applying wagtailcore.0024_collection... OK\n Applying wagtailcore.0025_collection_initial_data... OK\n Applying wagtailcore.0026_group_collection_permission... OK\n Applying wagtailadmin.0001_create_admin_access_permissions... OK\n Applying wagtailcore.0027_fix_collection_path_collation... OK\n Applying wagtailcore.0024_alter_page_content_type_on_delete_behaviour... OK\n Applying wagtailcore.0028_merge... OK\n Applying wagtailcore.0029_unicode_slugfield_dj19... OK\n Applying wagtailcore.0030_index_on_pagerevision_created_at... OK\n Applying wagtailcore.0031_add_page_view_restriction_types... OK\n Applying wagtailcore.0032_add_bulk_delete_page_permission... OK\n Applying wagtailcore.0033_remove_golive_expiry_help_text... OK\n Applying wagtailcore.0034_page_live_revision... OK\n Applying wagtailcore.0035_page_last_published_at... OK\n Applying wagtailcore.0036_populate_page_last_published_at... OK\n Applying wagtailcore.0037_set_page_owner_editable... OK\n Applying wagtailcore.0038_make_first_published_at_editable... 
OK\n Applying wagtailcore.0039_collectionviewrestriction... OK\n Applying wagtailcore.0040_page_draft_title... OK\n Applying wagtailcore.0041_group_collection_permissions_verbose_name_plural... OK\n Applying wagtailcore.0042_index_on_pagerevision_approved_go_live_at... OK\n Applying wagtailcore.0043_lock_fields... OK\n Applying wagtailcore.0044_add_unlock_grouppagepermission... OK\n Applying wagtailcore.0045_assign_unlock_grouppagepermission... OK\n Applying wagtaildocs.0001_initial... OK\n Applying wagtaildocs.0002_initial_data... OK\n Applying wagtaildocs.0003_add_verbose_names... OK\n Applying wagtaildocs.0004_capitalizeverbose... OK\n Applying wagtaildocs.0005_document_collection... OK\n Applying wagtaildocs.0006_copy_document_permissions_to_collections... OK\n Applying wagtaildocs.0005_alter_uploaded_by_user_on_delete_action... OK\n Applying wagtaildocs.0007_merge... OK\n Applying wagtaildocs.0008_document_file_size... OK\n Applying wagtaildocs.0009_document_verbose_name_plural... OK\n Applying wagtaildocs.0010_document_file_hash... OK\n Applying wagtailembeds.0001_initial... OK\n Applying wagtailembeds.0002_add_verbose_names... OK\n Applying wagtailembeds.0003_capitalizeverbose... OK\n Applying wagtailembeds.0004_embed_verbose_name_plural... OK\n Applying wagtailembeds.0005_specify_thumbnail_url_max_length... OK\n Applying wagtailforms.0001_initial... OK\n Applying wagtailforms.0002_add_verbose_names... OK\n Applying wagtailforms.0003_capitalizeverbose... OK\n Applying wagtailforms.0004_add_verbose_name_plural... OK\n Applying wagtailimages.0001_squashed_0021... OK\n Applying wagtailredirects.0001_initial... OK\n Applying wagtailredirects.0002_add_verbose_names... OK\n Applying wagtailredirects.0003_make_site_field_editable... OK\n Applying wagtailredirects.0004_set_unique_on_path_and_site... OK\n Applying wagtailredirects.0005_capitalizeverbose... OK\n Applying wagtailredirects.0006_redirect_increase_max_length... OK\n Applying wagtailsearch.0001_initial... OK\n Applying wagtailsearch.0002_add_verbose_names... OK\n Applying wagtailsearch.0003_remove_editors_pick... OK\n Applying wagtailsearch.0004_querydailyhits_verbose_name_plural... OK\n Applying wagtailusers.0001_initial... OK\n Applying wagtailusers.0002_add_verbose_name_on_userprofile... OK\n Applying wagtailusers.0003_add_verbose_names... OK\n Applying wagtailusers.0004_capitalizeverbose... OK\n Applying wagtailusers.0005_make_related_name_wagtail_specific... OK\n Applying wagtailusers.0006_userprofile_prefered_language... OK\n Applying wagtailusers.0007_userprofile_current_time_zone... OK\n Applying wagtailusers.0008_userprofile_avatar... OK\n Applying wagtailusers.0009_userprofile_verbose_name_plural... 
OK\n" ] ], [ [ "# create image", "_____no_output_____" ] ], [ [ "import io\n\nfrom PIL import Image\n\n\n\ndef create_small_rgb():\n # this is a small test jpeg\n img = Image.new('RGB', (200, 200), (255, 0, 0, 0))\n return img\n\n\ndef small_jpeg_io():\n rgb = create_small_rgb()\n im_io = io.BytesIO()\n rgb.save(im_io, format=\"JPEG\", quality=60, optimize=True, progressive=True)\n im_io.seek(0)\n im_io.name = \"testimage.jpg\"\n return im_io\n\n\ndef small_uploaded_file(small_jpeg_io):\n simple_png = SimpleUploadedFile(\n name=\"test.png\", content=small_jpeg_io.read(), content_type=\"image/png\"\n )\n small_jpeg_io.seek(0)\n return simple_png", "_____no_output_____" ], [ "simple_png = small_uploaded_file(small_jpeg_io())", "_____no_output_____" ], [ "from django.conf import settings\n\nprint(settings.DATABASES)", "{'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:', 'ATOMIC_REQUESTS': False, 'AUTOCOMMIT': True, 'CONN_MAX_AGE': 0, 'OPTIONS': {}, 'TIME_ZONE': None, 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', 'TEST': {'CHARSET': None, 'COLLATION': None, 'NAME': None, 'MIRROR': None}}}\n" ], [ "image = WagtailImage(file=simple_png)\nimage.save()", "_____no_output_____" ] ], [ [ "# render template", "_____no_output_____" ] ], [ [ "template_text = \"\"\"\n{% load wagtailimages_tags %}\n{% load wagtail_srcset_tags %}\n\n{% image img width-300 %}\n{% srcset_image img width-300 jpegquality-90 %}\n\"\"\"", "_____no_output_____" ], [ "t = Template(template_text)\nprint(t.render(Context({\"img\": image})))", "<wagtail.images.templatetags.wagtailimages_tags.ImageNode object at 0x10e843220>\n['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', 'attrs', 'child_nodelists', 'filter', 'filter_spec', 'get_nodes_by_type', 'image_expr', 'must_be_first', 'output_var_name', 'render', 'render_annotated', 'token']\n> \u001b[0;32m/Users/wersdoerfer/projects/wagtail_srcset/src/wagtail_srcset/templatetags/wagtail_srcset_tags.py\u001b[0m(23)\u001b[0;36m__init__\u001b[0;34m()\u001b[0m\n\u001b[0;32m 22 \u001b[0;31m \u001b[0mipdb\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_trace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m---> 23 \u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"image node: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimage_node\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 24 \u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"image node attrs: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimage_node\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mattrs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\nipdb> image_node.filter_spec\n'width-300|jpegquality-90'\nipdb> dir(image_node.filter)\n['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_registered_operations', 
'_search_for_operations', 'get_cache_key', 'operations', 'run', 'spec']\nipdb> iamge_node.filter._registered_operations\n*** NameError: name 'iamge_node' is not defined\nipdb> image_node.filter._registered_operations\nipdb> image_node.filter.operations\n[<wagtail.images.image_operations.WidthHeightOperation object at 0x10e843fd0>, <wagtail.images.image_operations.JPEGQualityOperation object at 0x10e8903a0>]\nipdb> wh_op = image_node.filter._registered_operations[0]\n*** KeyError: 0\nipdb> image_node.filter.operations[0]\n<wagtail.images.image_operations.WidthHeightOperation object at 0x10e843fd0>\nipdb> wh_op = image_node.filter.operations[0]\nipdb> jq_op = image_node.filter.operations[1]\nipdb> jq_op\n<wagtail.images.image_operations.JPEGQualityOperation object at 0x10e8903a0>\nipdb> jq_op.method\n'jpegquality'\n" ], [ "template_text = \"\"\"\n{% load wagtailimages_tags %}\n\n{% image img width-300 %}\n\"\"\"", "_____no_output_____" ], [ "t = Template(template_text)\nt.render(Context({\"img\": image}))", "_____no_output_____" ], [ "image_tag = \"{% image block.value width-300 %}\"\nimage_tag = \"block.value width-300}\"\n\ntoken = Token(TokenType.BLOCK, image_tag)\nparser = Parser(token.split_contents())", "_____no_output_____" ], [ "t = Template(template_text)\nt.render(Context({}))", "_____no_output_____" ] ], [ [ "# Get image size in tag", "_____no_output_____" ] ], [ [ "from django import template\nfrom django.conf import settings\n\nfrom wagtail.images.templatetags.wagtailimages_tags import image", "_____no_output_____" ], [ "register = template.Library()\n\n\[email protected](name=\"srcset_image2\")\ndef srcset_image(parser, token):\n image_node = image(parser, token)\n print(image_node)\n print(dir(image_node))\n image_node.attrs[\"srcset\"] = SrcSet(image_node)\n return image_node\n\n\nclass SrcSet:\n def __init__(self, image_node):\n self.image_node = image_node\n srcset = image_node.attrs.get(\"srcset\", None)\n print(\"image node attrs: \", image_node.attrs)\n print(\"image node width: \", image_node.attrs.get(\"width\"))\n print(\"image node filter: \", image_node.filter.operations)\n if srcset is None:\n self.renditions = self.default_renditions\n else:\n self.renditions = self.renditions_from_srcset(srcset.token)\n\n @property\n def default_renditions(self):\n if hasattr(settings, \"DEFAULT_SRCSET_RENDITIONS\"):\n return settings.DEFAULT_SRCSET_RENDITIONS\n else:\n return [\n \"width-2200|jpegquality-60\",\n \"width-1100|jpegquality-60\",\n \"width-768|jpegquality-60\",\n \"width-500|jpegquality-60\",\n \"width-300|jpegquality-60\",\n ]\n\n def renditions_from_srcset(self, srcset):\n srcset = srcset.strip('\"').strip(\"'\")\n return srcset.split(\" \")\n\n def resolve(self, context):\n image = self.image_node.image_expr.resolve(context)\n out_renditions = []\n for rendition in self.renditions:\n rendered_image = image.get_rendition(rendition)\n out_renditions.append(f\"{rendered_image.url} {rendered_image.width}w\")\n srcset_string = \", \".join(out_renditions)\n return srcset_string", "_____no_output_____" ], [ "template_text = \"\"\"\n{% load wagtailimages_tags %}\n{% load wagtail_srcset_tags %}\n\n\n{% image img width-300 %}\n{% srcset_image2 img width-300 %}\n\"\"\"", "_____no_output_____" ], [ "t = Template(template_text)\nt.render(Context({\"img\": image}))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb60c471b59ab822f0973679a17e389cdaef7224
9,583
ipynb
Jupyter Notebook
notebooks/hello_tensorflow.ipynb
kylehounslow/gdg_workshop
c92c392e2f91dae8693e4e996b61460a7a627d82
[ "MIT" ]
2
2018-11-17T22:22:22.000Z
2018-11-17T22:22:22.000Z
notebooks/hello_tensorflow.ipynb
kylehounslow/gdg_workshop
c92c392e2f91dae8693e4e996b61460a7a627d82
[ "MIT" ]
3
2020-11-13T17:26:37.000Z
2022-02-09T23:29:53.000Z
notebooks/hello_tensorflow.ipynb
kylehounslow/gdg_workshop
c92c392e2f91dae8693e4e996b61460a7a627d82
[ "MIT" ]
null
null
null
33.506993
250
0.486173
[ [ [ "<a href=\"https://colab.research.google.com/github/kylehounslow/gdg_workshop/blob/master/notebooks/hello_tensorflow.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Hello TensorFlow! \nThis notebook is a gentle introduction to TensorFlow. \nMostly taken from [here](https://github.com/aymericdamien/TensorFlow-Examples/tree/master/examples) \n___\nIn this notebook we will learn about: \n* How to run jupyter notebook cells \n* How to build and execute a computational graph in Tensorflow \n* How to visualize the computational graph in a notebook cell", "_____no_output_____" ] ], [ [ "import numpy as np\nimport tensorflow as tf\nfrom IPython.display import HTML", "_____no_output_____" ], [ "# Create a Constant op\n# The op is added as a node to the default graph.\nhello = tf.constant('Hello, TensorFlow!')\n\n# Start tf session\nwith tf.Session() as sess:\n # Run the op\n print(sess.run(hello))", "_____no_output_____" ], [ "# Basic constant operations\n# The value returned by the constructor represents the output\n# of the Constant op.\na = tf.constant(7)\nb = tf.constant(6)\n\n# Launch the default graph.\nwith tf.Session() as sess:\n print(\"Addition with constants: %i\" % sess.run(a+b))\n print(\"Multiplication with constants: %i\" % sess.run(a*b))", "_____no_output_____" ] ], [ [ "## Define some helper functions to render the computational graph in a notebook cell", "_____no_output_____" ] ], [ [ "def strip_consts(graph_def, max_const_size=32):\n \"\"\"Strip large constant values from graph_def.\"\"\"\n strip_def = tf.GraphDef()\n for n0 in graph_def.node:\n n = strip_def.node.add() \n n.MergeFrom(n0)\n if n.op == 'Const':\n tensor = n.attr['value'].tensor\n size = len(tensor.tensor_content)\n if size > max_const_size:\n tensor.tensor_content = tf.compat.as_bytes(\"<stripped %d bytes>\"%size)\n return strip_def\n \ndef rename_nodes(graph_def, rename_func):\n res_def = tf.GraphDef()\n for n0 in graph_def.node:\n n = res_def.node.add() \n n.MergeFrom(n0)\n n.name = rename_func(n.name)\n for i, s in enumerate(n.input):\n n.input[i] = rename_func(s) if s[0]!='^' else '^'+rename_func(s[1:])\n return res_def\n \ndef show_graph(graph_def, max_const_size=32):\n \"\"\"Visualize TensorFlow graph.\"\"\"\n if hasattr(graph_def, 'as_graph_def'):\n graph_def = graph_def.as_graph_def()\n strip_def = strip_consts(graph_def, max_const_size=max_const_size)\n code = \"\"\"\n <script>\n function load() {{\n document.getElementById(\"{id}\").pbtxt = {data};\n }}\n </script>\n <link rel=\"import\" href=\"https://tensorboard.appspot.com/tf-graph-basic.build.html\" onload=load()>\n <div style=\"height:600px\">\n <tf-graph-basic id=\"{id}\"></tf-graph-basic>\n </div>\n \"\"\".format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))\n \n iframe = \"\"\"\n <iframe seamless style=\"width:800px;height:620px;border:0\" srcdoc=\"{}\"></iframe>\n \"\"\".format(code.replace('\"', '&quot;'))\n display(HTML(iframe))\nshow_graph(tf.get_default_graph())", "_____no_output_____" ], [ "# Basic Operations with variable as graph input\n# The value returned by the constructor represents the output\n# of the Variable op. 
(define as input when running session)\n# tf Graph input\na = tf.placeholder(tf.int16)\nb = tf.placeholder(tf.int16)\n\n# Define some operations\nadd = tf.add(a, b)\nmul = tf.multiply(a, b)\n\n# Launch the default graph.\nwith tf.Session() as sess:\n # Run every operation with variable input\n print(\"Addition with variables: %i\" % sess.run(add, feed_dict={a: 2, b: 3}))\n print(\"Multiplication with variables: %i\" % sess.run(mul, feed_dict={a: 2, b: 3}))\nshow_graph(tf.get_default_graph())", "_____no_output_____" ], [ "# ----------------\n# More in details:\n# Matrix Multiplication from TensorFlow official tutorial\n\n# Create a Constant op that produces a 1x2 matrix. The op is\n# added as a node to the default graph.\n#\n# The value returned by the constructor represents the output\n# of the Constant op.\nmatrix1 = tf.constant([[3., 3.]])\n\n# Create another Constant that produces a 2x1 matrix.\nmatrix2 = tf.constant([[2.],[2.]])\n\n# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.\n# The returned value, 'product', represents the result of the matrix\n# multiplication.\nproduct = tf.matmul(matrix1, matrix2)\n\n# To run the matmul op we call the session 'run()' method, passing 'product'\n# which represents the output of the matmul op. This indicates to the call\n# that we want to get the output of the matmul op back.\n#\n# All inputs needed by the op are run automatically by the session. They\n# typically are run in parallel.\n#\n# The call 'run(product)' thus causes the execution of threes ops in the\n# graph: the two constants and matmul.\n#\n# The output of the op is returned in 'result' as a numpy `ndarray` object.\nwith tf.Session() as sess:\n result = sess.run(product)\n print(result)\n", "_____no_output_____" ] ], [ [ "## To reset the graph, use `tf.reset_default_graph()`", "_____no_output_____" ] ], [ [ "tf.reset_default_graph()\na = tf.constant(7)\nb = tf.constant(6)\nop = tf.add(a, b)\nshow_graph(tf.get_default_graph())", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb60c8c59f6de2b1b0bc57e302de46aca651560a
140,619
ipynb
Jupyter Notebook
Example_hysteresis_loop(PE).ipynb
trygvrad/Interdigitated-Electrodes
e366c391d38c68b481519f07dc300709f0a7b72e
[ "MIT" ]
null
null
null
Example_hysteresis_loop(PE).ipynb
trygvrad/Interdigitated-Electrodes
e366c391d38c68b481519f07dc300709f0a7b72e
[ "MIT" ]
null
null
null
Example_hysteresis_loop(PE).ipynb
trygvrad/Interdigitated-Electrodes
e366c391d38c68b481519f07dc300709f0a7b72e
[ "MIT" ]
null
null
null
343.811736
32,416
0.865893
[ [ [ "# Example PV curve\nThe purpose of this document is to showcase how a Q-V hysteresis loop can be transformed to a P-E hysteresis loop, as shown in the paper", "_____no_output_____" ] ], [ [ "import pair_conformal as pair_conformal\nimport infinite_fourier as infinite_fourier\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport tqdm\nimport scipy.optimize", "_____no_output_____" ], [ "# IDE geometry\nt = 0.230 # film thickness, µm\nb = 3.5526128590971275 # electrode width, µm\na = 10-b # electrode spacing, µm\nN = 100 # number of fingers\nL = 900 # finger overlap lenght, µm\neps_air = 1\neps_sub = 317 # relative dielectric constant of substrate [-]\neps_0 = 8.8541878128*10**-12 # dielectric permitivity of vacuum [F/m]\neta = b/(a+b) # cover fraction\ntau = t/(a+b) # normalized thickness", "_____no_output_____" ], [ "# charge in [µC]\nQ = [-0.002387185, -0.002369244, -0.002349509, -0.002327839, -0.00230595, -0.002283729, -0.002259291, -0.00223318, -0.002207021, -0.00217887, -0.002148065, -0.002116512, -0.002082226, -0.002045068, -0.002007333, -0.001967078, -0.001924073, -0.00187983, -0.001831545, -0.001776912, -0.001717819, -0.001652946, -0.00158003, -0.001499078, -0.001407161, -0.001296693, -0.001157247, -0.0009688821, -0.0007027968, -0.0003493804, 6.637685E-05, 0.0005045007, 0.0009297, 0.001327835, 0.001694989, 0.002024981, 0.002312988, 0.002544892, 0.002708135, 0.002813413, 0.002884158, 0.002935813, 0.002977405, 0.003014872, 0.003051124, 0.003085805, 0.003118195, 0.003150051, 0.003181595, 0.003211494, 0.003240727, 0.003271036, 0.003300532, 0.003329036, 0.003358051, 0.003386047, 0.003412343, 0.00343873, 0.003465813, 0.003491773, 0.003516818, 0.003542413, 0.003568233, 0.003592727, 0.003616624, 0.003641341, 0.003665666, 0.003689383, 0.00371402, 0.003738411, 0.003761646, 0.00378567, 0.00380854, 0.003830412, 0.003853858, 0.003876645, 0.00389871, 0.00392127, 0.0039428, 0.003964474, 0.003987001, 0.00400799, 0.004029234, 0.004051416, 0.004072585, 0.004094301, 0.004115343, 0.004136069, 0.004156884, 0.004176097, 0.004197029, 0.004218405, 0.004238687, 0.004260382, 0.004282224, 0.004302702, 0.004322752, 0.004343851, 0.004364848, 0.004384621, 0.004389469, 0.00437785, 0.00436301, 0.004347459, 0.004332854, 0.004317561, 0.004301789, 0.004286926, 0.004271545, 0.004255491, 0.004240051, 0.004224759, 0.00420843, 0.004191772, 0.004176366, 0.00416125, 0.004145099, 0.004128415, 0.004112476, 0.004096543, 0.004079259, 0.004062261, 0.004046589, 0.004030942, 0.004014314, 0.003997051, 0.003980191, 0.003963542, 0.003946654, 0.00392912, 0.003911167, 0.003893725, 0.00387713, 0.003860145, 0.003841765, 0.003823862, 0.003806447, 0.003788375, 0.003769961, 0.003752647, 0.003735448, 0.003717227, 0.003700021, 0.003682116, 0.003662905, 0.003644769, 0.003627122, 0.00360835, 0.003589583, 0.00357172, 0.003552353, 0.003532552, 0.003514034, 0.003494644, 0.003474555, 0.003455356, 0.003436633, 0.003416994, 0.003396764, 0.0033774, 0.003356971, 0.003335312, 0.003315078, 0.00329518, 0.003274194, 0.003253128, 0.003232777, 0.003211828, 0.003189375, 0.003167216, 0.003146184, 0.00312443, 0.00310179, 0.003080117, 0.00305787, 0.003034205, 0.003011555, 0.002989346, 0.002965535, 0.002941398, 0.002918004, 0.00289363, 0.002868321, 0.002843333, 0.002818347, 0.002792202, 0.002766602, 0.002741391, 0.002714635, 0.002687873, 0.002660486, 0.002632803, 0.00260405, 0.002574675, 0.002545929, 0.002515959, 0.002486143, 0.002455284, 0.00242318, 0.002390967, 0.002356995, 0.002322765, 0.002287607, 0.002251305, 0.002215339, 
0.002176916, 0.002137335, 0.002096899, 0.002053197, 0.002008466, 0.001963086, 0.001914314, 0.001864452, 0.001813566, 0.001757648, 0.001698819, 0.001635419, 0.001566245, 0.001493, 0.00141098, 0.001318391, 0.001211927, 0.00108212, 0.0009163766, 0.000690813, 0.0003883601, 1.692951E-05, -0.000392243, -0.0007981218, -0.001181059, -0.001535184, -0.001858449, -0.002146879, -0.002386551, -0.002571249, -0.002703419, -0.002794477, -0.002861993, -0.00291466, -0.002959006, -0.002999532, -0.003036268, -0.003071271, -0.0031041, -0.003136221, -0.00316746, -0.003198189, -0.003228038, -0.003256713, -0.003284778, -0.003312415, -0.003340448, -0.003366892, -0.003393282, -0.003418866, -0.003444512, -0.003470579, -0.003495327, -0.003520311, -0.003543997, -0.003567151, -0.003590962, -0.003613797, -0.003637049, -0.003659737, -0.003682343, -0.003704853, -0.003727164, -0.003749261, -0.00377138, -0.003793217, -0.003815038, -0.003836964, -0.003857836, -0.003879352, -0.003900119, -0.003921316, -0.003942682, -0.003962902, -0.003984258, -0.004005409, -0.004026261, -0.004047064, -0.004066859, -0.004086888, -0.004106357, -0.004126163, -0.004146, -0.004166117, -0.004186207, -0.004205392, -0.004225425, -0.004244706, -0.004263909, -0.004284097, -0.004303301, -0.004322754, -0.004342262, -0.004361021, -0.004380661, -0.004384621, -0.004372234, -0.004358755, -0.004344455, -0.00433072, -0.004316052, -0.004300078, -0.004284856, -0.00426906, -0.004253508, -0.00423859, -0.004222432, -0.004207311, -0.004192688, -0.004176455, -0.004160648, -0.004145491, -0.004128887, -0.004113142, -0.00409762, -0.004080968, -0.004065461, -0.004049503, -0.004032505, -0.004016477, -0.004000217, -0.003983392, -0.003967661, -0.003951622, -0.003934831, -0.003918328, -0.003901679, -0.003884285, -0.003867609, -0.003851268, -0.003833688, -0.003816898, -0.00380087, -0.003783329, -0.003765523, -0.003748579, -0.003730843, -0.003713503, -0.003696942, -0.003679079, -0.003661704, -0.003644798, -0.003626271, -0.003607254, -0.003589577, -0.003571913, -0.003553253, -0.003535882, -0.003518591, -0.003499268, -0.003479955, -0.003461624, -0.003442215, -0.003422801, -0.003404754, -0.003385643, -0.003366159, -0.003347591, -0.003328024, -0.003307475, -0.003288023, -0.003269004, -0.003248631, -0.003228407, -0.003209213, -0.003188924, -0.003167785, -0.003147349, -0.003126381, -0.003104725, -0.003084507, -0.003064405, -0.003042441, -0.003020605, -0.002998852, -0.002975501, -0.00295198, -0.002929463, -0.00290585, -0.002882403, -0.002859337, -0.002834672, -0.00281071, -0.002785981, -0.002760758, -0.002736262, -0.002710309, -0.002684624, -0.002659622, -0.002632805, -0.002605139, -0.002578091, -0.002550153, -0.002521417, -0.002493659, -0.002476145]\n# potential in [V]\nV = [0.04437793, 0.1739239, 0.2942994, 0.4361776, 0.5788627, 0.7110827, 0.8366968, 0.9845453, 1.117674, 1.243658, 1.387886, 1.532312, 1.646195, 1.802599, 1.932895, 2.052276, 2.199362, 2.343338, 2.461306, 2.60569, 2.752447, 2.871715, 3.013873, 3.157966, 3.280472, 3.412226, 3.563019, 3.692115, 3.812608, 3.970434, 4.090213, 4.22052, 4.3725, 4.489566, 4.643403, 4.778002, 4.898654, 5.04967, 5.196358, 5.317157, 5.449754, 5.593945, 5.732201, 5.865779, 5.994799, 6.144366, 6.277221, 6.401462, 6.539872, 6.688802, 6.810711, 6.945728, 7.098175, 7.219552, 7.351885, 7.499124, 7.632737, 7.759171, 7.896377, 8.041049, 8.173279, 8.299081, 8.447179, 8.586698, 8.708143, 8.841791, 8.989992, 9.126638, 9.250328, 9.403575, 9.52848, 9.654918, 9.808635, 9.931167, 10.06538, 10.21923, 10.33743, 10.47862, 10.62457, 10.7402, 10.8833, 11.03156, 
11.14934, 11.29342, 11.43369, 11.55816, 11.71303, 11.82484, 11.97619, 12.10787, 12.22994, 12.38982, 12.51661, 12.64386, 12.78661, 12.93372, 13.05551, 13.18304, 13.33218, 13.47126, 13.47057, 13.34165, 13.21203, 13.0657, 12.93242, 12.81065, 12.65633, 12.52214, 12.40226, 12.25602, 12.11738, 11.98418, 11.8587, 11.71189, 11.56643, 11.44239, 11.31596, 11.1702, 11.02897, 10.9072, 10.77088, 10.62157, 10.48739, 10.35589, 10.22745, 10.08808, 9.944459, 9.810282, 9.676345, 9.547437, 9.407411, 9.264067, 9.128718, 9.006959, 8.871855, 8.722055, 8.585446, 8.458357, 8.321081, 8.176833, 8.058867, 7.916413, 7.772325, 7.65478, 7.506218, 7.362387, 7.234265, 7.107719, 6.960752, 6.828286, 6.703134, 6.548166, 6.416759, 6.294292, 6.149382, 6.007489, 5.880206, 5.751837, 5.606753, 5.463816, 5.343571, 5.197211, 5.054191, 4.93077, 4.800648, 4.653707, 4.517845, 4.393469, 4.256721, 4.105326, 3.978059, 3.85098, 3.705955, 3.565578, 3.445307, 3.299403, 3.153569, 3.03095, 2.90015, 2.7503, 2.617978, 2.494177, 2.356169, 2.20776, 2.080731, 1.946691, 1.796866, 1.67866, 1.538039, 1.400089, 1.275009, 1.120759, 1.007585, 0.8491522, 0.7261425, 0.5891596, 0.4411228, 0.3306482, 0.172344, 0.04901001, -0.09255664, -0.2328214, -0.3526073, -0.5034429, -0.6367069, -0.7543792, -0.9117505, -1.035949, -1.170603, -1.326794, -1.441486, -1.573293, -1.729042, -1.848447, -1.980201, -2.137717, -2.246692, -2.402132, -2.534279, -2.65746, -2.81039, -2.930528, -3.068533, -3.212284, -3.334133, -3.485053, -3.603587, -3.74382, -3.889321, -4.005676, -4.159727, -4.286889, -4.427423, -4.575235, -4.691462, -4.843907, -4.97285, -5.10098, -5.254734, -5.36598, -5.526317, -5.650281, -5.787211, -5.934189, -6.050872, -6.204697, -6.323915, -6.477433, -6.594819, -6.752144, -6.865386, -7.01591, -7.147833, -7.274442, -7.425538, -7.542306, -7.697025, -7.829558, -7.955273, -8.108235, -8.217963, -8.370495, -8.502801, -8.631045, -8.784983, -8.899929, -9.058136, -9.170603, -9.326056, -9.439473, -9.599152, -9.713706, -9.874289, -9.988198, -10.13291, -10.2771, -10.3928, -10.54458, -10.67271, -10.79995, -10.95884, -11.07297, -11.2276, -11.34653, -11.4879, -11.62453, -11.75633, -11.89593, -12.02081, -12.17913, -12.297, -12.43557, -12.58356, -12.69842, -12.84797, -12.99229, -13.10664, -13.26098, -13.38555, -13.52003, -13.54577, -13.39104, -13.27562, -13.12593, -12.99292, -12.86961, -12.71928, -12.57959, -12.46102, -12.30327, -12.18371, -12.05147, -11.89865, -11.78261, -11.64637, -11.49334, -11.36798, -11.23715, -11.08353, -10.97168, -10.82847, -10.68037, -10.56139, -10.41533, -10.27377, -10.15177, -10.00852, -9.867024, -9.747101, -9.605703, -9.460669, -9.33259, -9.19675, -9.043613, -8.926188, -8.793416, -8.643863, -8.51504, -8.393796, -8.240279, -8.103074, -7.981934, -7.82956, -7.704981, -7.576974, -7.419369, -7.29804, -7.167288, -7.022652, -6.875559, -6.758023, -6.619517, -6.473402, -6.350123, -6.221588, -6.066411, -5.931273, -5.812472, -5.659986, -5.526576, -5.405522, -5.256872, -5.117758, -4.996664, -4.854961, -4.706422, -4.588032, -4.455454, -4.308749, -4.169603, -4.045834, -3.900617, -3.759233, -3.637866, -3.498157, -3.352002, -3.232586, -3.096836, -2.94648, -2.819279, -2.686219, -2.541309, -2.404865, -2.279498, -2.126449, -2.010972, -1.869708, -1.72437, -1.607475, -1.450987, -1.32675, -1.196558, -1.045765, -0.9183388, -0.7919357, -0.6417174, -0.504509, -0.3828973, -0.237842, -0.09474963, -0.04021149]\n", "_____no_output_____" ], [ "# plot Q-V curve\nfig,ax=plt.subplots()\nax.plot(V,Q,'k')\nax.set_ylabel(r'$Q$ [µC]')\nax.set_xlabel(r'$V$ [V]')", "_____no_output_____" ] ], 
[ [ "In order to subtract the dielectric contribution from the substrate and air, the dielectric constant of the film should be known as a function of the field applied to the electrodes.\nWe follow the aproach of the 'Example_CV_curve' notebook, and make the function V_to_eps()", "_____no_output_____" ] ], [ [ "# capacitance in [F]\nC=[1.428721E-10, 1.429671E-10, 1.456579E-10, 1.42468E-10, 1.460786E-10, 1.446451E-10, 1.461049E-10, 1.431395E-10, 1.463592E-10, 1.439421E-10, 1.471552E-10, 1.459837E-10, 1.470516E-10, 1.472527E-10, 1.468963E-10, 1.45112E-10, 1.470788E-10, 1.498311E-10, 1.475891E-10, 1.488992E-10, 1.482823E-10, 1.490999E-10, 1.490423E-10, 1.493171E-10, 1.487169E-10, 1.52244E-10, 1.49938E-10, 1.524042E-10, 1.503689E-10, 1.525724E-10, 1.507887E-10, 1.517274E-10, 1.51396E-10, 1.545277E-10, 1.523037E-10, 1.552043E-10, 1.537927E-10, 1.559966E-10, 1.534998E-10, 1.546678E-10, 1.56135E-10, 1.5839E-10, 1.553621E-10, 1.590284E-10, 1.577874E-10, 1.585799E-10, 1.58952E-10, 1.578791E-10, 1.60478E-10, 1.590341E-10, 1.621604E-10, 1.607127E-10, 1.619988E-10, 1.625879E-10, 1.632023E-10, 1.629058E-10, 1.670639E-10, 1.648712E-10, 1.680846E-10, 1.668316E-10, 1.669878E-10, 1.679075E-10, 1.66629E-10, 1.678708E-10, 1.665938E-10, 1.667221E-10, 1.665966E-10, 1.67136E-10, 1.644409E-10, 1.634124E-10, 1.62773E-10, 1.629638E-10, 1.604434E-10, 1.603101E-10, 1.580713E-10, 1.570457E-10, 1.551462E-10, 1.565918E-10, 1.540866E-10, 1.544135E-10, 1.516288E-10, 1.519453E-10, 1.486024E-10, 1.483967E-10, 1.459251E-10, 1.467474E-10, 1.446417E-10, 1.447932E-10, 1.442392E-10, 1.427798E-10, 1.407578E-10, 1.409972E-10, 1.405182E-10, 1.408034E-10, 1.384185E-10, 1.397462E-10, 1.385293E-10, 1.374531E-10, 1.368658E-10, 1.38535E-10, 1.364199E-10, 1.394704E-10, 1.34885E-10, 1.403326E-10, 1.374839E-10, 1.383247E-10, 1.346173E-10, 1.370586E-10, 1.359852E-10, 1.361504E-10, 1.349626E-10, 1.36789E-10, 1.346052E-10, 1.349358E-10, 1.332896E-10, 1.354997E-10, 1.344141E-10, 1.347519E-10, 1.342153E-10, 1.35133E-10, 1.321496E-10, 1.326097E-10, 1.321919E-10, 1.324373E-10, 1.321308E-10, 1.318906E-10, 1.306188E-10, 1.328086E-10, 1.298423E-10, 1.308546E-10, 1.304036E-10, 1.320288E-10, 1.307834E-10, 1.321233E-10, 1.299351E-10, 1.323872E-10, 1.282756E-10, 1.291338E-10, 1.288475E-10, 1.303299E-10, 1.293892E-10, 1.306845E-10, 1.272201E-10, 1.299654E-10, 1.28993E-10, 1.282188E-10, 1.301725E-10, 1.280745E-10, 1.300307E-10, 1.281592E-10, 1.276642E-10, 1.270934E-10, 1.277682E-10, 1.260331E-10, 1.283642E-10, 1.266525E-10, 1.274558E-10, 1.270772E-10, 1.269208E-10, 1.257932E-10, 1.251995E-10, 1.263143E-10, 1.258134E-10, 1.259102E-10, 1.282777E-10, 1.257423E-10, 1.265298E-10, 1.252837E-10, 1.248545E-10, 1.263677E-10, 1.232039E-10, 1.26069E-10, 1.251772E-10, 1.261557E-10, 1.233324E-10, 1.245177E-10, 1.23786E-10, 1.242947E-10, 1.242804E-10, 1.265923E-10, 1.244359E-10, 1.248464E-10, 1.234826E-10, 1.227252E-10, 1.242627E-10, 1.219141E-10, 1.232067E-10, 1.223098E-10, 1.237167E-10, 1.220581E-10, 1.22659E-10, 1.238581E-10, 1.239247E-10, 1.231712E-10, 1.234949E-10, 1.24836E-10, 1.22424E-10, 1.224423E-10, 1.215715E-10, 1.233904E-10, 1.207226E-10, 1.231654E-10, 1.217444E-10, 1.231121E-10, 1.211624E-10, 1.216655E-10, 1.223138E-10, 1.21632E-10, 1.217327E-10, 1.221195E-10, 1.215047E-10, 1.203513E-10, 1.219082E-10, 1.198786E-10, 1.22356E-10, 1.195481E-10, 1.214618E-10, 1.201914E-10, 1.211654E-10, 1.194529E-10, 1.208156E-10, 1.20112E-10, 1.206554E-10, 1.19269E-10, 1.204647E-10, 1.201762E-10, 1.210913E-10, 1.191995E-10, 1.191091E-10, 1.208559E-10, 1.192752E-10, 
1.194189E-10, 1.189132E-10, 1.201548E-10, 1.174888E-10, 1.194773E-10, 1.17665E-10, 1.188119E-10, 1.184596E-10, 1.194681E-10, 1.195105E-10, 1.199525E-10, 1.190634E-10, 1.170919E-10, 1.197311E-10, 1.173843E-10, 1.194264E-10, 1.179738E-10, 1.186101E-10, 1.1739E-10, 1.17134E-10, 1.172322E-10, 1.178816E-10, 1.185782E-10, 1.179067E-10, 1.175537E-10, 1.171477E-10, 1.184548E-10, 1.176971E-10, 1.19847E-10, 1.181765E-10, 1.186303E-10, 1.190044E-10, 1.181325E-10, 1.180539E-10, 1.180162E-10, 1.194967E-10, 1.178069E-10, 1.190385E-10, 1.183011E-10, 1.190183E-10, 1.187815E-10, 1.179626E-10, 1.194848E-10, 1.184054E-10, 1.201075E-10, 1.19958E-10, 1.194483E-10, 1.194521E-10, 1.183017E-10, 1.196058E-10, 1.189984E-10, 1.202177E-10, 1.184128E-10, 1.216556E-10, 1.194658E-10, 1.191103E-10, 1.204886E-10, 1.187929E-10, 1.199223E-10, 1.188856E-10, 1.220987E-10, 1.195576E-10, 1.192179E-10, 1.193729E-10, 1.197234E-10, 1.214019E-10, 1.202983E-10, 1.219464E-10, 1.201709E-10, 1.205171E-10, 1.202237E-10, 1.222552E-10, 1.203094E-10, 1.204452E-10, 1.222237E-10, 1.208789E-10, 1.222743E-10, 1.222312E-10, 1.214719E-10, 1.212069E-10, 1.196694E-10, 1.21599E-10, 1.221965E-10, 1.196936E-10, 1.223418E-10, 1.209638E-10, 1.21349E-10, 1.221708E-10, 1.21822E-10, 1.224692E-10, 1.216677E-10, 1.227133E-10, 1.215861E-10, 1.220879E-10, 1.231638E-10, 1.214378E-10, 1.222153E-10, 1.225601E-10, 1.23204E-10, 1.233956E-10, 1.219813E-10, 1.248303E-10, 1.217187E-10, 1.235352E-10, 1.223645E-10, 1.243875E-10, 1.236328E-10, 1.237825E-10, 1.236018E-10, 1.229477E-10, 1.241237E-10, 1.232171E-10, 1.231176E-10, 1.24429E-10, 1.228869E-10, 1.239139E-10, 1.237866E-10, 1.237431E-10, 1.229345E-10, 1.241784E-10, 1.247035E-10, 1.239413E-10, 1.260249E-10, 1.242357E-10, 1.255621E-10, 1.238913E-10, 1.250542E-10, 1.263224E-10, 1.246612E-10, 1.258194E-10, 1.252038E-10, 1.258805E-10, 1.244981E-10, 1.256105E-10, 1.273458E-10, 1.25373E-10, 1.270745E-10, 1.26058E-10, 1.260408E-10, 1.254987E-10, 1.253545E-10, 1.272783E-10, 1.259068E-10, 1.26717E-10, 1.26604E-10, 1.273636E-10, 1.24815E-10, 1.269945E-10, 1.265434E-10, 1.267167E-10, 1.272067E-10, 1.270353E-10, 1.278764E-10, 1.267783E-10, 1.264522E-10, 1.267218E-10, 1.288877E-10, 1.270128E-10, 1.290969E-10, 1.276366E-10, 1.277471E-10, 1.270148E-10, 1.27719E-10, 1.278782E-10, 1.291366E-10, 1.287927E-10, 1.294219E-10, 1.298759E-10, 1.282135E-10, 1.286388E-10, 1.281722E-10, 1.297523E-10, 1.290929E-10, 1.279392E-10, 1.293432E-10, 1.290771E-10, 1.302782E-10, 1.293932E-10, 1.291463E-10, 1.282145E-10, 1.29826E-10, 1.316245E-10, 1.301372E-10, 1.312541E-10, 1.302919E-10, 1.314303E-10, 1.297458E-10, 1.313611E-10, 1.322797E-10, 1.322087E-10, 1.312691E-10, 1.309969E-10, 1.318371E-10, 1.300058E-10, 1.319758E-10, 1.318336E-10, 1.343787E-10, 1.32808E-10, 1.337327E-10, 1.319999E-10, 1.319136E-10, 1.30856E-10, 1.323935E-10, 1.323159E-10, 1.324899E-10, 1.331841E-10, 1.328379E-10, 1.336677E-10, 1.328461E-10, 1.345795E-10, 1.330804E-10, 1.354537E-10, 1.340898E-10, 1.345113E-10, 1.34861E-10, 1.349088E-10, 1.351589E-10, 1.349915E-10, 1.36734E-10, 1.350067E-10, 1.349123E-10, 1.343106E-10, 1.369865E-10, 1.357753E-10, 1.362019E-10, 1.368768E-10, 1.364561E-10, 1.36423E-10, 1.352735E-10, 1.374813E-10, 1.352082E-10, 1.369885E-10, 1.37419E-10, 1.373838E-10, 1.389009E-10, 1.369029E-10, 1.385697E-10, 1.375682E-10, 1.385671E-10, 1.375384E-10, 1.398098E-10, 1.390579E-10, 1.39294E-10, 1.39684E-10, 1.402992E-10, 1.402546E-10, 1.382871E-10, 1.404137E-10, 1.4E-10, 1.404329E-10, 1.402902E-10, 1.41171E-10, 1.416159E-10, 1.399021E-10, 1.421314E-10, 1.411536E-10, 
1.424295E-10, 1.418465E-10, 1.445525E-10, 1.431039E-10, 1.408782E-10, 1.44329E-10, 1.434428E-10, 1.455031E-10, 1.434753E-10, 1.452175E-10, 1.447142E-10, 1.435076E-10, 1.442E-10, 1.449591E-10, 1.469557E-10, 1.450768E-10, 1.475219E-10, 1.461938E-10, 1.462057E-10, 1.456248E-10, 1.462486E-10, 1.479291E-10, 1.468286E-10, 1.489587E-10, 1.474222E-10, 1.486653E-10, 1.471242E-10, 1.486498E-10, 1.481523E-10, 1.482389E-10, 1.50632E-10, 1.498335E-10, 1.505008E-10, 1.488144E-10, 1.519302E-10, 1.508203E-10, 1.511956E-10, 1.513241E-10, 1.512755E-10, 1.534453E-10, 1.535899E-10, 1.529881E-10, 1.526742E-10, 1.548066E-10, 1.533232E-10, 1.552791E-10, 1.545258E-10, 1.579156E-10, 1.547743E-10, 1.572102E-10, 1.566178E-10, 1.571229E-10, 1.570229E-10, 1.582999E-10, 1.591891E-10, 1.578437E-10, 1.5837E-10, 1.591771E-10, 1.607237E-10, 1.579443E-10, 1.604031E-10, 1.634649E-10, 1.606308E-10, 1.63667E-10, 1.631409E-10, 1.637429E-10, 1.616804E-10, 1.638019E-10, 1.662795E-10, 1.633521E-10, 1.664587E-10, 1.665746E-10, 1.688786E-10, 1.659572E-10, 1.667493E-10, 1.69185E-10, 1.692778E-10, 1.700613E-10, 1.684758E-10, 1.691753E-10, 1.660604E-10, 1.64958E-10, 1.649965E-10, 1.643371E-10, 1.635227E-10, 1.607512E-10, 1.631345E-10, 1.587088E-10, 1.582055E-10, 1.576575E-10, 1.584195E-10, 1.550761E-10, 1.545665E-10, 1.548717E-10, 1.524997E-10, 1.513253E-10, 1.489942E-10, 1.491857E-10, 1.459719E-10, 1.466107E-10, 1.45585E-10, 1.450864E-10, 1.424654E-10, 1.424399E-10, 1.417381E-10, 1.39913E-10, 1.406248E-10, 1.397817E-10, 1.408002E-10, 1.393248E-10, 1.398172E-10, 1.384365E-10, 1.375876E-10, 1.379492E-10, 1.367683E-10, 1.375969E-10, 1.3668E-10, 1.373079E-10, 1.356807E-10, 1.366002E-10, 1.344991E-10, 1.357231E-10, 1.337806E-10, 1.372409E-10, 1.334823E-10, 1.345474E-10, 1.341379E-10, 1.328704E-10, 1.328098E-10, 1.31878E-10, 1.331996E-10, 1.324442E-10, 1.31565E-10, 1.319125E-10, 1.32202E-10, 1.326845E-10, 1.307521E-10, 1.326938E-10, 1.300787E-10, 1.309132E-10, 1.295855E-10, 1.304956E-10, 1.300937E-10, 1.292458E-10, 1.307986E-10, 1.289652E-10, 1.286655E-10, 1.290608E-10, 1.296629E-10, 1.287674E-10, 1.274861E-10, 1.299446E-10, 1.296159E-10, 1.279053E-10, 1.297014E-10, 1.278301E-10, 1.281703E-10, 1.266053E-10, 1.287248E-10, 1.264719E-10, 1.273514E-10, 1.274536E-10, 1.269692E-10, 1.273148E-10, 1.255489E-10, 1.279162E-10, 1.256964E-10, 1.261561E-10, 1.269629E-10, 1.250016E-10, 1.259307E-10, 1.241788E-10, 1.257906E-10, 1.250511E-10, 1.247705E-10, 1.266612E-10, 1.25039E-10, 1.244062E-10, 1.248842E-10, 1.247148E-10, 1.238486E-10, 1.228222E-10, 1.255463E-10, 1.247814E-10, 1.243626E-10, 1.237665E-10, 1.246724E-10, 1.238904E-10, 1.230388E-10, 1.23642E-10, 1.224003E-10, 1.222853E-10, 1.229146E-10, 1.219639E-10, 1.228983E-10, 1.213694E-10, 1.226758E-10, 1.232989E-10, 1.225779E-10, 1.224417E-10, 1.226791E-10, 1.226576E-10, 1.206459E-10, 1.219063E-10, 1.211855E-10, 1.228589E-10, 1.218584E-10, 1.216945E-10, 1.223193E-10, 1.190148E-10, 1.214944E-10, 1.21628E-10, 1.2092E-10, 1.214292E-10, 1.215809E-10, 1.214492E-10, 1.208093E-10, 1.201614E-10, 1.198732E-10, 1.190848E-10, 1.215263E-10, 1.206319E-10, 1.194802E-10, 1.19245E-10, 1.199772E-10, 1.199073E-10, 1.196559E-10, 1.191041E-10, 1.209232E-10, 1.18883E-10, 1.200479E-10, 1.20238E-10, 1.20357E-10, 1.178351E-10, 1.188417E-10, 1.197694E-10, 1.174261E-10, 1.191596E-10, 1.19546E-10, 1.18227E-10, 1.177878E-10, 1.178595E-10, 1.187553E-10, 1.182249E-10, 1.182545E-10, 1.196385E-10, 1.182275E-10, 1.173036E-10, 1.171271E-10, 1.176164E-10, 1.16336E-10, 1.172684E-10, 1.181993E-10, 1.158593E-10, 1.184176E-10, 1.167167E-10, 
1.175387E-10, 1.166047E-10, 1.179134E-10, 1.192259E-10, 1.157749E-10, 1.176932E-10, 1.175949E-10, 1.160609E-10, 1.178535E-10, 1.169975E-10, 1.186814E-10, 1.176641E-10, 1.17394E-10, 1.181469E-10, 1.17172E-10, 1.173988E-10, 1.176118E-10, 1.175405E-10, 1.17021E-10, 1.169642E-10, 1.177946E-10, 1.168386E-10, 1.17849E-10, 1.187442E-10, 1.181597E-10, 1.19315E-10, 1.178069E-10, 1.188536E-10, 1.173716E-10, 1.171542E-10, 1.183029E-10, 1.175457E-10, 1.177983E-10, 1.181589E-10, 1.183508E-10, 1.180423E-10, 1.178909E-10, 1.191793E-10, 1.190843E-10, 1.194647E-10, 1.192538E-10, 1.196658E-10, 1.190537E-10, 1.185755E-10, 1.19343E-10, 1.193129E-10, 1.19667E-10, 1.206632E-10, 1.191684E-10, 1.19486E-10, 1.184836E-10, 1.197159E-10, 1.180492E-10, 1.193817E-10, 1.197055E-10, 1.201422E-10, 1.199055E-10, 1.201559E-10, 1.190369E-10, 1.211554E-10, 1.181058E-10, 1.213385E-10, 1.209126E-10, 1.201931E-10, 1.215587E-10, 1.218929E-10, 1.199184E-10, 1.206235E-10, 1.20644E-10, 1.206732E-10, 1.204866E-10, 1.217333E-10, 1.20814E-10, 1.205608E-10, 1.205133E-10, 1.203666E-10, 1.213848E-10, 1.203342E-10, 1.208187E-10, 1.21343E-10, 1.203883E-10, 1.210015E-10, 1.216253E-10, 1.219758E-10, 1.216929E-10, 1.227504E-10, 1.232504E-10, 1.218303E-10, 1.220115E-10, 1.214376E-10, 1.220161E-10, 1.2192E-10, 1.230869E-10, 1.236932E-10, 1.220605E-10, 1.226611E-10, 1.229358E-10, 1.209405E-10, 1.230221E-10, 1.234145E-10, 1.232747E-10, 1.244998E-10, 1.229632E-10, 1.228223E-10, 1.22294E-10, 1.239279E-10, 1.226526E-10, 1.239677E-10, 1.234594E-10, 1.241096E-10, 1.243929E-10, 1.214138E-10, 1.249787E-10, 1.236943E-10, 1.223724E-10, 1.240914E-10, 1.247876E-10, 1.247921E-10, 1.241966E-10, 1.248835E-10, 1.241835E-10, 1.237608E-10, 1.251001E-10, 1.24361E-10, 1.255462E-10, 1.24335E-10, 1.2568E-10, 1.254916E-10, 1.241557E-10, 1.250553E-10, 1.254905E-10, 1.262875E-10, 1.262949E-10, 1.264549E-10, 1.264461E-10, 1.254883E-10, 1.270468E-10, 1.255024E-10, 1.267051E-10, 1.253347E-10, 1.268411E-10, 1.269329E-10, 1.256907E-10, 1.26421E-10, 1.267391E-10, 1.268496E-10, 1.256197E-10, 1.281011E-10, 1.269234E-10, 1.279229E-10, 1.265155E-10, 1.281218E-10, 1.2799E-10, 1.283278E-10, 1.283215E-10, 1.283213E-10, 1.289431E-10, 1.288066E-10, 1.278198E-10, 1.286094E-10, 1.297161E-10, 1.282801E-10, 1.274884E-10, 1.284096E-10, 1.302183E-10, 1.277342E-10, 1.271422E-10, 1.302213E-10, 1.284466E-10, 1.290159E-10, 1.299743E-10, 1.301856E-10, 1.28916E-10, 1.293691E-10, 1.307307E-10, 1.303285E-10, 1.300997E-10, 1.310647E-10, 1.321416E-10, 1.317851E-10, 1.309396E-10, 1.31486E-10, 1.312269E-10, 1.310693E-10, 1.321261E-10, 1.311203E-10, 1.304322E-10, 1.314821E-10, 1.322425E-10, 1.316547E-10, 1.324183E-10, 1.31749E-10, 1.336512E-10, 1.324706E-10, 1.336776E-10, 1.340733E-10, 1.325697E-10, 1.330598E-10, 1.332061E-10, 1.346693E-10, 1.33342E-10, 1.328168E-10, 1.338396E-10, 1.333693E-10, 1.349823E-10, 1.334637E-10, 1.363382E-10, 1.353431E-10, 1.347797E-10, 1.355181E-10, 1.351888E-10, 1.343639E-10, 1.349301E-10, 1.367382E-10, 1.356508E-10, 1.358091E-10, 1.372926E-10, 1.364726E-10, 1.355097E-10, 1.343606E-10, 1.385406E-10, 1.36499E-10, 1.357648E-10, 1.383952E-10, 1.364459E-10, 1.369492E-10, 1.381099E-10, 1.379858E-10, 1.388637E-10, 1.389333E-10, 1.411425E-10, 1.388296E-10, 1.38931E-10, 1.387482E-10, 1.39394E-10, 1.388384E-10, 1.395211E-10, 1.409691E-10, 1.408148E-10, 1.396364E-10, 1.415756E-10, 1.395477E-10, 1.392756E-10, 1.400201E-10, 1.426077E-10, 1.415948E-10, 1.414231E-10, 1.431624E-10, 1.417033E-10, 1.419081E-10, 1.423782E-10, 1.423188E-10, 1.438651E-10, 1.421889E-10, 1.457114E-10, 
1.451269E-10, 1.430276E-10]\n# potential in [V]\nV_for_C=[0.01191011, 0.06739906, 0.1201201, 0.1759969, 0.2295195, 0.2840591, 0.3374494, 0.3918903, 0.4462339, 0.5007762, 0.5535439, 0.6091848, 0.6625938, 0.7165433, 0.7704356, 0.8254675, 0.8797089, 0.9330992, 0.9881527, 1.042617, 1.096388, 1.150839, 1.204423, 1.260016, 1.313489, 1.368334, 1.421432, 1.475078, 1.530656, 1.584063, 1.638988, 1.69438, 1.747316, 1.8024, 1.85547, 1.910495, 1.964437, 2.018809, 2.073711, 2.127366, 2.180579, 2.235825, 2.28984, 2.344729, 2.398085, 2.453089, 2.507114, 2.560865, 2.615914, 2.669687, 2.724157, 2.77851, 2.832239, 2.887692, 2.939321, 2.995628, 3.049465, 3.103488, 3.158154, 3.212128, 3.266889, 3.320622, 3.374575, 3.42943, 3.48325, 3.53935, 3.59272, 3.646377, 3.70198, 3.755396, 3.80976, 3.864622, 3.919071, 3.973298, 4.026449, 4.082187, 4.135396, 4.190305, 4.244122, 4.297615, 4.353129, 4.404792, 4.460375, 4.514986, 4.569502, 4.623884, 4.677803, 4.733296, 4.786708, 4.840779, 4.894762, 4.94942, 5.0046, 5.057308, 5.110567, 5.166449, 5.220076, 5.274992, 5.328375, 5.383769, 5.437879, 5.491142, 5.545815, 5.599965, 5.654662, 5.708647, 5.762293, 5.817238, 5.870321, 5.925716, 5.980515, 6.034531, 6.088582, 6.142398, 6.197326, 6.251199, 6.30502, 6.359588, 6.414106, 6.468644, 6.522204, 6.575385, 6.631247, 6.684447, 6.740004, 6.793278, 6.848399, 6.902913, 6.95617, 7.0119, 7.065376, 7.119857, 7.174293, 7.228476, 7.282898, 7.334863, 7.390632, 7.444824, 7.498833, 7.553909, 7.607858, 7.661393, 7.716085, 7.77018, 7.825095, 7.878773, 7.933629, 7.987752, 8.039755, 8.095821, 8.149455, 8.204766, 8.258869, 8.312849, 8.367912, 8.421227, 8.475548, 8.529559, 8.583943, 8.639063, 8.692272, 8.747465, 8.799543, 8.855269, 8.910067, 8.962831, 9.017801, 9.072355, 9.126358, 9.180974, 9.234551, 9.289795, 9.343569, 9.398029, 9.452306, 9.50483, 9.56109, 9.614802, 9.668894, 9.723388, 9.777187, 9.831928, 9.885862, 9.939739, 9.994928, 10.04863, 10.10417, 10.15775, 10.21225, 10.26612, 10.32052, 10.3758, 10.42966, 10.48455, 10.53756, 10.59178, 10.6467, 10.69998, 10.75492, 10.80882, 10.8634, 10.91807, 10.97017, 11.02613, 11.08098, 11.13471, 11.18958, 11.24336, 11.29839, 11.35147, 11.40594, 11.46018, 11.51404, 11.56901, 11.62311, 11.67725, 11.73093, 11.78518, 11.83981, 11.89379, 11.9481, 12.0022, 12.05692, 12.11177, 12.16554, 12.21942, 12.27424, 12.3273, 12.38287, 12.43502, 12.49147, 12.5451, 12.59938, 12.65407, 12.70835, 12.76269, 12.81708, 12.87082, 12.92595, 12.97889, 13.03439, 13.0882, 13.141, 13.19476, 13.25067, 13.30513, 13.35884, 13.413, 13.46876, 13.52237, 13.52231, 13.468, 13.41405, 13.35868, 13.30473, 13.25048, 13.20038, 13.1414, 13.08778, 13.03315, 12.98025, 12.92488, 12.87143, 12.81651, 12.76204, 12.70817, 12.65345, 12.59898, 12.54486, 12.49072, 12.44081, 12.38174, 12.32798, 12.27312, 12.21972, 12.16557, 12.11056, 12.057, 12.00273, 11.94757, 11.89427, 11.83925, 11.7863, 11.73558, 11.67737, 11.62334, 11.56801, 11.51407, 11.46015, 11.40599, 11.35149, 11.29647, 11.24337, 11.18907, 11.13477, 11.08011, 11.02584, 10.97636, 10.91769, 10.86329, 10.80905, 10.75433, 10.70058, 10.64594, 10.59229, 10.53778, 10.48349, 10.43022, 10.37578, 10.32224, 10.27123, 10.21204, 10.15841, 10.10332, 10.0487, 9.994202, 9.940793, 9.886208, 9.831479, 9.777735, 9.723001, 9.66848, 9.615495, 9.560504, 9.51065, 9.451662, 9.397901, 9.343657, 9.288941, 9.235573, 9.180358, 9.126715, 9.07256, 9.017631, 8.964513, 8.909222, 8.855954, 8.805246, 8.746452, 8.692693, 8.637917, 8.584547, 8.529857, 8.475002, 8.421703, 8.365972, 8.313353, 8.258323, 8.203183, 8.149745, 8.095141, 8.045647, 
7.98688, 7.932303, 7.879701, 7.824194, 7.771094, 7.715513, 7.661452, 7.607806, 7.552743, 7.499367, 7.444696, 7.390461, 7.340446, 7.282408, 7.229108, 7.173337, 7.119409, 7.065888, 7.010467, 6.95742, 6.902437, 6.848364, 6.793663, 6.739047, 6.685345, 6.630303, 6.580764, 6.521909, 6.467827, 6.414277, 6.359096, 6.305518, 6.251251, 6.196578, 6.142967, 6.088358, 6.034603, 5.979641, 5.925657, 5.875819, 5.816795, 5.762847, 5.708324, 5.65356, 5.600811, 5.544879, 5.492324, 5.437263, 5.382861, 5.329098, 5.273652, 5.220762, 5.166106, 5.115547, 5.058087, 5.002563, 4.94906, 4.894761, 4.841048, 4.786169, 4.731363, 4.678175, 4.623295, 4.569555, 4.515054, 4.459955, 4.410748, 4.352152, 4.298113, 4.243972, 4.188936, 4.135714, 4.081049, 4.02706, 3.97232, 3.918532, 3.86402, 3.809766, 3.756411, 3.701824, 3.65126, 3.592932, 3.538192, 3.484104, 3.428653, 3.375171, 3.320953, 3.266038, 3.212028, 3.157657, 3.103818, 3.049781, 2.994385, 2.945776, 2.886736, 2.832584, 2.777942, 2.723931, 2.669719, 2.615535, 2.561099, 2.507411, 2.452189, 2.398422, 2.343653, 2.290297, 2.235892, 2.186057, 2.127272, 2.072822, 2.019279, 1.964853, 1.909774, 1.856421, 1.801484, 1.748288, 1.693451, 1.638645, 1.584336, 1.530479, 1.480425, 1.421528, 1.368038, 1.314231, 1.258353, 1.206551, 1.150405, 1.096059, 1.041813, 0.9876209, 0.9335759, 0.8793103, 0.8252564, 0.7711366, 0.7208143, 0.6634691, 0.60781, 0.5541749, 0.5001592, 0.4462102, 0.3922565, 0.3370365, 0.2835259, 0.2299835, 0.1748018, 0.1212871, 0.06608507, 0.01706269, -0.04278337, -0.09608474, -0.1501069, -0.2049531, -0.2591379, -0.3128958, -0.3676249, -0.4207624, -0.4761039, -0.529731, -0.5839917, -0.6380167, -0.6931398, -0.7432034, -0.8008682, -0.8557751, -0.9104832, -0.9638286, -1.018817, -1.072178, -1.127392, -1.180659, -1.23547, -1.289606, -1.342982, -1.398579, -1.452242, -1.502133, -1.561502, -1.614794, -1.669877, -1.723134, -1.778362, -1.832346, -1.886201, -1.94017, -1.995302, -2.048746, -2.103518, -2.156412, -2.208521, -2.265952, -2.320601, -2.374912, -2.428579, -2.482548, -2.536872, -2.59143, -2.645834, -2.699263, -2.754478, -2.807458, -2.86316, -2.916692, -2.967284, -3.025887, -3.080168, -3.134965, -3.189729, -3.242755, -3.298487, -3.35196, -3.407055, -3.46049, -3.515166, -3.568669, -3.622741, -3.672946, -3.731996, -3.785404, -3.840461, -3.893523, -3.94929, -4.002339, -4.057425, -4.112042, -4.166074, -4.220563, -4.273969, -4.328477, -4.383264, -4.432277, -4.491757, -4.544897, -4.60043, -4.65337, -4.708736, -4.762434, -4.816083, -4.870473, -4.926074, -4.978502, -5.034609, -5.087198, -5.138796, -5.196416, -5.25117, -5.305133, -5.359031, -5.41343, -5.468124, -5.521652, -5.577043, -5.630259, -5.685618, -5.738328, -5.793639, -5.847428, -5.897664, -5.956308, -6.010489, -6.063376, -6.119191, -6.171927, -6.227531, -6.28102, -6.33601, -6.390467, -6.445347, -6.498667, -6.553333, -6.603573, -6.662948, -6.714832, -6.771336, -6.822889, -6.879321, -6.931473, -6.98693, -7.041861, -7.096051, -7.148703, -7.204553, -7.257607, -7.31354, -7.361538, -7.421769, -7.474513, -7.529566, -7.583166, -7.638109, -7.691708, -7.747105, -7.79931, -7.855292, -7.90823, -7.964141, -8.016598, -8.067803, -8.12613, -8.180237, -8.234504, -8.289588, -8.34299, -8.398144, -8.451004, -8.506999, -8.559003, -8.614864, -8.668035, -8.723178, -8.776966, -8.827826, -8.885082, -8.940835, -8.993632, -9.04905, -9.101123, -9.157326, -9.210323, -9.266196, -9.319288, -9.373612, -9.427997, -9.482795, -9.531478, -9.591477, -9.645019, -9.701389, -9.753842, -9.809556, -9.862202, -9.917953, -9.971086, -10.02607, -10.0791, -10.13497, 
-10.18719, -10.24339, -10.29171, -10.35205, -10.40512, -10.46004, -10.51418, -10.56813, -10.62231, -10.67765, -10.72971, -10.78638, -10.83821, -10.8944, -10.94731, -10.99842, -11.056, -11.11083, -11.16441, -11.22007, -11.27263, -11.32843, -11.38045, -11.43691, -11.48973, -11.5456, -11.59879, -11.65404, -11.7072, -11.75799, -11.81588, -11.87118, -11.9232, -11.97988, -12.03242, -12.08778, -12.14132, -12.19609, -12.2494, -12.30484, -12.35747, -12.41355, -12.46147, -12.52225, -12.57434, -12.63016, -12.68405, -12.73881, -12.79273, -12.84797, -12.90115, -12.95661, -13.00959, -13.06567, -13.11733, -13.17368, -13.22221, -13.28209, -13.33549, -13.39013, -13.44395, -13.49904, -13.55129, -13.55324, -13.4979, -13.44533, -13.38949, -13.33604, -13.28165, -13.22841, -13.17224, -13.11943, -13.06367, -13.01129, -12.95458, -12.90261, -12.84679, -12.79346, -12.73795, -12.68386, -12.62943, -12.57543, -12.52081, -12.46986, -12.4119, -12.35925, -12.30331, -12.25058, -12.19547, -12.14141, -12.08664, -12.03287, -11.9784, -11.92449, -11.86913, -11.81673, -11.7612, -11.70816, -11.65281, -11.59945, -11.54431, -11.49011, -11.43538, -11.38229, -11.32663, -11.27402, -11.2183, -11.16587, -11.10984, -11.05647, -11.0028, -10.94824, -10.89295, -10.83986, -10.78483, -10.73164, -10.67554, -10.62317, -10.56761, -10.51436, -10.4589, -10.40613, -10.35052, -10.29787, -10.24177, -10.18928, -10.13295, -10.08132, -10.02464, -9.972691, -9.917088, -9.863487, -9.808712, -9.753768, -9.698977, -9.64609, -9.589997, -9.538164, -9.481107, -9.428586, -9.372593, -9.319863, -9.264706, -9.211174, -9.156712, -9.103079, -9.047662, -8.99439, -8.938172, -8.886223, -8.831287, -8.777548, -8.722864, -8.668149, -8.613771, -8.560018, -8.505346, -8.452108, -8.396232, -8.343747, -8.288088, -8.234856, -8.179289, -8.126564, -8.072516, -8.018468, -7.962571, -7.909718, -7.853374, -7.800959, -7.745126, -7.692129, -7.637188, -7.583608, -7.529106, -7.475137, -7.420242, -7.367808, -7.311427, -7.259215, -7.202894, -7.149843, -7.094233, -7.041565, -6.985809, -6.932612, -6.878032, -6.823979, -6.76939, -6.716991, -6.660836, -6.609323, -6.552857, -6.499479, -6.444643, -6.390263, -6.33493, -6.281358, -6.226657, -6.173764, -6.118687, -6.066292, -6.009394, -5.957428, -5.903566, -5.848761, -5.794306, -5.740449, -5.685512, -5.631788, -5.576566, -5.523378, -5.468136, -5.415892, -5.358974, -5.305958, -5.2508, -5.197234, -5.14308, -5.088435, -5.034035, -4.980464, -4.924985, -4.872462, -4.816075, -4.763245, -4.708172, -4.654146, -4.600249, -4.547088, -4.491176, -4.439456, -4.382683, -4.330057, -4.27449, -4.221737, -4.166123, -4.11238, -4.058031, -4.002587, -3.948179, -3.895012, -3.839002, -3.786421, -3.730602, -3.678894, -3.622616, -3.569306, -3.515458, -3.460159, -3.406107, -3.352739, -3.297656, -3.244501, -3.18857, -3.135115, -3.079687, -3.025592, -2.971762, -2.917168, -2.862685, -2.808732, -2.75387, -2.700411, -2.645385, -2.592348, -2.537074, -2.483268, -2.429199, -2.374538, -2.32028, -2.26636, -2.21259, -2.158089, -2.101698, -2.049733, -1.993665, -1.940466, -1.886327, -1.832216, -1.77829, -1.723934, -1.668955, -1.615565, -1.560224, -1.508332, -1.45194, -1.398924, -1.344095, -1.289378, -1.235677, -1.180901, -1.127134, -1.072838, -1.017269, -0.9652575, -0.9090891, -0.8562682, -0.8006728, -0.7484967, -0.6933431, -0.6383141, -0.5842198, -0.5306764, -0.4755589, -0.422482, -0.3669971, -0.3141344, -0.2585589, -0.2057641, -0.1508175, -0.096172, -0.04309347]", "_____no_output_____" ], [ "# function to get capacitance from model\ndef capacitance_of_IDEs(tau,eta,N,L,epss):\n 
inf_case = infinite_fourier.multiple_recursive_images([eta,0],[tau],epss,epss,8,180,accuracy_limit=10**-15,hybrid=True)\n c_I = inf_case.get_C() # [F/m]\n pair_case = pair_conformal.multiple_recursive_images([eta,0],[tau],epss,epss,8,20,accuracy_limit=10**-15)\n c_2 = pair_case.get_C() # [F/m]\n c_E=2*c_I*c_2/(c_I+c_2) # [F/m]\n C=L*10**-6*(2*c_E+(N-3)*c_I)\n return C\n\n# refactor of the input of the above function to work with scipy.optimize.curve_fit\ndef get_C_for_lsqfit(x,xi):\n tau = x[0]\n eta = x[1]\n N = x[2]\n L = x[3]\n epss = [x[4],xi,x[5]]\n return capacitance_of_IDEs(tau,eta,N,L,epss)\n\n# function to get dielectric constant of film from geometry and capacitance using scipy.optimize.curve_fit\ndef get_eps(tau,eta,N,L,eps_air,guess_eps_film,eps_sub,C):\n x=[tau,eta,N,L,eps_air,eps_sub]\n popt,pcov=scipy.optimize.curve_fit(get_C_for_lsqfit,x,C,p0=guess_eps_film)#,ftol=0.01,xtol=1,gtol=1)\n eps_film=popt[0]\n return eps_film\n\n# in this approach we only calculate the dielectric constant of the film at 11 points, and use these to create a polynomial\n# the polynomial describes the relationship capacitance->dielectric constant with high accuracy, \n# because this relationship is almost linear\neps_film_guess=1500\ntrail_C_step=(np.max(C)-np.min(C))/10\ntrail_C = np.arange(np.min(C),np.max(C)+0.5*trail_C_step,trail_C_step)\ntrail_eps = []\nfor c in tqdm.tqdm(trail_C):\n eps=get_eps(tau,eta,N,L,eps_air,eps_film_guess,eps_sub,c)\n eps_film_guess=eps\n trail_eps.append(eps)\nC_to_eps = np.poly1d(np.polyfit(trail_C, trail_eps, 4)) # <- function that gets the dielectric constant of the film given C as input\neps_film = C_to_eps(C)\nl=len(V_for_C)//4\nV_to_eps = np.poly1d(np.polyfit(V_for_C[l:2*l+1], C_to_eps(C[l:2*l+1]), 4))", " 0%| | 0/11 [00:00<?, ?it/s]/usr/local/lib/python3.6/site-packages/scipy/special/orthogonal.py:140: RuntimeWarning: invalid value encountered in multiply\n np.poly1d.__init__(self, poly.coeffs * float(kn))\n/usr/local/lib/python3.6/site-packages/scipy/special/orthogonal.py:1977: RuntimeWarning: overflow encountered in double_scalars\n kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n\n/usr/local/lib/python3.6/site-packages/scipy/special/orthogonal.py:1977: RuntimeWarning: invalid value encountered in double_scalars\n kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n\n/usr/local/lib/python3.6/site-packages/scipy/optimize/minpack.py:785: OptimizeWarning: Covariance of the parameters could not be estimated\n category=OptimizeWarning)\n100%|██████████| 11/11 [00:03<00:00, 3.07it/s]\n" ], [ "# plot eps-V curve\nfig,ax=plt.subplots()\nax.plot(V_for_C,eps_film,'k')\nax.plot(V_for_C[l:2*l+1], V_to_eps(V_for_C[l:2*l+1]),lw=4,color=[1,0.4,0])\nax.set_ylabel(r'$\\varepsilon_{film}$ [-]')\nax.set_xlabel(r'$V$ [V]')", "_____no_output_____" ] ], [ [ "The orange line above is the function V_to_eps(), which we will use to describe the dielectric constant of the film.\nWe choose the lower curve in the above plot since the upper curve includes contributions from the motion of domain walls, etc.\nWe therefore ignore these contributions to the dielectric constant.", "_____no_output_____" ], [ "## Calculate capacitive contribution from substrate", "_____no_output_____" ] ], [ [ "#function to get capacitive contribution of film\ndef getCintEx_film(case):\n g,error=scipy.integrate.quad(lambda y: case.get_Ex(0,y), 0,infinite.t[0])\n eps_0 = 8.8541878128*10**-12 # dielectric permittivity of vacuum [F/m]\n c=g*case.single[0].layers[1].eps_x*eps_0\n return c\n\n#function to get capacitive contribution from 
substrate and air\ndef capacitive_contribution_of_sub_and_air(tau,eta,N,L,epss):\n # calculate total capacitance of structure\n inf_case = infinite_fourier.multiple_recursive_images([eta,0],[tau],epss,epss,8,180,accuracy_limit=10**-15,hybrid=True)\n c_I = inf_case.get_C() # [F/m]\n pair_case = pair_conformal.multiple_recursive_images([eta,0],[tau],epss,epss,8,20,accuracy_limit=10**-15)\n c_2 = pair_case.get_C() # [F/m]\n c_E=2*c_I*c_2/(c_I+c_2) # [F/m]\n C=L*10**-6*(2*c_E+(N-3)*c_I)\n # calculate film contribution\n g,error=scipy.integrate.quad(lambda y: inf_case.get_Ex(0,y), 0,tau)\n eps_0 = 8.8541878128*10**-12 # dielectric permittivity of vacuum [F/m]\n film_contribution_to_C=(N-1)*L*10**-6*g*epss[1]*eps_0\n return C-film_contribution_to_C # capacitance in [F]\n", "_____no_output_____" ], [ "trail_V_step=(np.max(V)-np.min(V))/10\ntrail_V = np.arange(np.min(V),np.max(V)+0.5*trail_V_step,trail_V_step)\ntrail_air_sub_cont = []\nfor v in tqdm.tqdm(trail_V):\n epss = [eps_air,V_to_eps(v),eps_sub]\n contrib = capacitive_contribution_of_sub_and_air(tau,eta,N,L,epss)\n trail_air_sub_cont.append(contrib)\nV_to_capacitive_contrib = np.poly1d(np.polyfit(trail_V, trail_air_sub_cont, 4))", " 0%| | 0/11 [00:00<?, ?it/s]/usr/local/lib/python3.6/site-packages/scipy/special/orthogonal.py:1977: RuntimeWarning: overflow encountered in double_scalars\n kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n\n/usr/local/lib/python3.6/site-packages/scipy/special/orthogonal.py:1977: RuntimeWarning: invalid value encountered in double_scalars\n kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n\n100%|██████████| 11/11 [00:07<00:00, 1.48it/s]\n" ], [ "charge_contrib_from_sub_and_air = [0]\nfor i in range(len(V)-1):\n additional_charge=V_to_capacitive_contrib(np.abs(0.5*(V[i]+V[i+1])))*(V[i+1]-V[i]) # capacitance at the midpoint voltage times the voltage step\n charge_contrib_from_sub_and_air.append(additional_charge+charge_contrib_from_sub_and_air[-1])\ncharge_contrib_from_sub_and_air=np.array(charge_contrib_from_sub_and_air)", "_____no_output_____" ], [ "# plot Q-V curve\nfig,ax=plt.subplots()\nax.plot(V,Q,'k',label='total charge')\nax.plot(V,charge_contrib_from_sub_and_air*10**6,label='contribution from substrate and air')\nQ_corrected = Q-charge_contrib_from_sub_and_air*10**6\nax.plot(V,Q_corrected,label='charge minus contributions')\nax.legend()\nax.set_ylabel(r'$Q$ [µC]')\nax.set_xlabel(r'$V$ [V]')", "_____no_output_____" ] ], [ [ "## Transform to polarization vs electric field", "_____no_output_____" ] ], [ [ "# plot P-E curve\nP=Q_corrected/((N-1)*L*t*10**-8) # polarization, µC/cm2\ndelta_a = 4*np.log(2)*t/np.pi\nE = V/(a+delta_a)*10 #kV/cm\nfig,ax=plt.subplots()\nax.plot(E,P,'k')\nax.set_ylabel(r'$P$ [µC/cm$^2$]')\nax.set_xlabel(r'$E$ [kV/cm]')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
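The ferroelectric notebook above inverts a slow forward model (electrode geometry to capacitance) by evaluating it at only 11 points and fitting a 4th-order polynomial surrogate, which works because the capacitance-to-permittivity map is nearly linear. A minimal, self-contained sketch of that surrogate-inversion pattern; `forward_capacitance` is a hypothetical stand-in for the recursive-image solver and is not part of the notebook:

```python
import numpy as np

def forward_capacitance(eps_film):
    # hypothetical slow forward model: capacitance is near-linear in the film permittivity
    return 1e-12 * (0.03 * eps_film + 5.0)

# sample the forward model at a handful of trial permittivities
trial_eps = np.linspace(500.0, 2500.0, 11)
trial_C = np.array([forward_capacitance(e) for e in trial_eps])

# fit the inverse map C -> eps as a low-order polynomial surrogate
C_to_eps = np.poly1d(np.polyfit(trial_C, trial_eps, 4))

# the surrogate now inverts measured capacitances cheaply
print(C_to_eps(forward_capacitance(1234.0)))  # ~1234 if the surrogate is accurate
```

The same surrogate can then be evaluated on thousands of measured capacitances without re-running the expensive solver.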
cb60c9a8b9fc4304edecdca7f33765d268528267
94,639
ipynb
Jupyter Notebook
notebooks/duncanson_aj-12823819-week1_rforest01.ipynb
HninPwint/nba-career-prediction
ffce32507cad2c4dd020c62cee7f33cf97c886f7
[ "MIT" ]
1
2021-02-01T10:38:16.000Z
2021-02-01T10:38:16.000Z
notebooks/duncanson_aj-12823819-week1_rforest01.ipynb
HninPwint/nba-career-prediction
ffce32507cad2c4dd020c62cee7f33cf97c886f7
[ "MIT" ]
3
2021-02-02T11:06:16.000Z
2021-02-06T11:44:19.000Z
notebooks/duncanson_aj-12823819-week1_rforest01.ipynb
UTSdtay/nba-career-prediction
2af4cfc21e669e80e8df46dd327a9580686b2e7b
[ "MIT" ]
4
2021-01-31T10:57:23.000Z
2021-02-02T06:16:35.000Z
47.4857
18,300
0.563182
[ [ [ "## Experiment", "_____no_output_____" ] ], [ [ "experiment_label = 'rforest01'", "_____no_output_____" ] ], [ [ "### Aim:\n* compare basic random forest to best logreg", "_____no_output_____" ], [ "### Findings:\n\n* ROC on training hugs the top left; overfitting.\n* Next: increase min samples per leaf. ", "_____no_output_____" ], [ "## Set up", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nfrom joblib import dump, load # simpler than pickle!\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "## Data", "_____no_output_____" ] ], [ [ "#load data\n\ndata_path = '../data/raw/uts-advdsi-nba-career-prediction'\n\ntrain_raw = pd.read_csv(data_path + '/train.csv')\ntest_raw = pd.read_csv(data_path + '/test.csv')\n", "_____no_output_____" ], [ "#shapes & head\n\nprint(train_raw.shape)\nprint(test_raw.shape)\n\ntrain_raw.head()", "(8000, 22)\n(3799, 21)\n" ], [ "test_raw.head()", "_____no_output_____" ], [ "# info\ntrain_raw.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 8000 entries, 0 to 7999\nData columns (total 22 columns):\nId_old 8000 non-null int64\nId 8000 non-null int64\nGP 8000 non-null int64\nMIN 8000 non-null float64\nPTS 8000 non-null float64\nFGM 8000 non-null float64\nFGA 8000 non-null float64\nFG% 8000 non-null float64\n3P Made 8000 non-null float64\n3PA 8000 non-null float64\n3P% 8000 non-null float64\nFTM 8000 non-null float64\nFTA 8000 non-null float64\nFT% 8000 non-null float64\nOREB 8000 non-null float64\nDREB 8000 non-null float64\nREB 8000 non-null float64\nAST 8000 non-null float64\nSTL 8000 non-null float64\nBLK 8000 non-null float64\nTOV 8000 non-null float64\nTARGET_5Yrs 8000 non-null int64\ndtypes: float64(18), int64(4)\nmemory usage: 1.3 MB\n" ], [ "#variable descriptions\ntrain_raw.describe()", "_____no_output_____" ], [ "test_raw.describe()", "_____no_output_____" ] ], [ [ "## Cleaning", "_____no_output_____" ] ], [ [ "train = train_raw.copy()\ntest = test_raw.copy()", "_____no_output_____" ], [ "cols_drop = ['Id_old', 'Id'] #, 'MIN', 'FGM', 'FGA', 'TOV', '3PA', 'FTM', 'FTA', 'REB']\ntrain.drop(cols_drop, axis=1, inplace=True)\ntest.drop(cols_drop, axis=1, inplace=True)", "_____no_output_____" ], [ "train.head()", "_____no_output_____" ], [ "test.head()", "_____no_output_____" ], [ "train_target = train.pop('TARGET_5Yrs')", "_____no_output_____" ] ], [ [ "# Modelling", "_____no_output_____" ] ], [ [ "#transformations\n\n# fit scaler to training data\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\ntrain = scaler.fit_transform(train)\ndump(scaler, '../models/aj_' + experiment_label + '_scaler.joblib')\n\n# transform test data\ntest = scaler.transform(test)", "_____no_output_____" ], [ "#examine shapes\n\nprint('train:' + str(train.shape))\nprint('test:' + str(test.shape))", "train:(8000, 19)\ntest:(3799, 19)\n" ], [ "# split training into train & validation\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_val, y_train, y_val = train_test_split(train, train_target, test_size=0.2, random_state=8)\n\n# in this case we will use the Kaggle submission as our test\n#X_train, y_train = train, train_target", "_____no_output_____" ], [ "#import models\n\nfrom sklearn.model_selection import GridSearchCV\n\nfrom sklearn.linear_model import LogisticRegression \nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\n", "_____no_output_____" ], [ "# Define model\n\nmodel = 
RandomForestClassifier(class_weight='balanced', random_state=8)\n", "_____no_output_____" ], [ "#fit model to training data\nmodel.fit(X_train, y_train)", "_____no_output_____" ], [ "#save model to file\n\ndump(model, '../models/aj_' + experiment_label + '.joblib')", "_____no_output_____" ], [ "#predictions for test and validation sets\n\ny_train_preds = model.predict(X_train)\ny_val_preds = model.predict(X_val)", "_____no_output_____" ] ], [ [ "## Evaluation", "_____no_output_____" ] ], [ [ "import sys\nimport os\nsys.path.append(os.path.abspath('..'))\nfrom src.models.aj_metrics import confusion_matrix\n", "_____no_output_____" ], [ "print(\"Training:\")\nprint(confusion_matrix(y_train, y_train_preds))\nprint('')\nprint(\"Validation:\")\nprint(confusion_matrix(y_val, y_val_preds))", "_____no_output_____" ], [ "from sklearn import metrics\n\nprint(\"Training:\")\nprint(metrics.classification_report(y_train, y_train_preds))\nprint('')\nprint(\"Validation:\")\nprint(metrics.classification_report(y_val, y_val_preds))\n", "Training:\n precision recall f1-score support\n\n 0 1.00 1.00 1.00 1074\n 1 1.00 1.00 1.00 5326\n\n accuracy 1.00 6400\n macro avg 1.00 1.00 1.00 6400\nweighted avg 1.00 1.00 1.00 6400\n\n\nValidation:\n precision recall f1-score support\n\n 0 0.35 0.03 0.05 257\n 1 0.84 0.99 0.91 1343\n\n accuracy 0.84 1600\n macro avg 0.60 0.51 0.48 1600\nweighted avg 0.76 0.84 0.77 1600\n\n" ], [ "# RandomForestClassifier has no decision_function; use positive-class probabilities for ROC AUC\nprint(\"Training:\")\nprint(metrics.roc_auc_score(y_train, model.predict_proba(X_train)[:, 1]))\nprint('')\nprint(\"Validation:\")\nprint(metrics.roc_auc_score(y_val, model.predict_proba(X_val)[:, 1]))", "Training:\n" ], [ "import matplotlib.pyplot as plt \nfrom sklearn import metrics\nmetrics.plot_roc_curve(model, X_train, y_train)\nplt.show() \nmetrics.plot_roc_curve(model, X_val, y_val)\nplt.show() ", "_____no_output_____" ] ], [ [ "# Apply to test data for submission", "_____no_output_____" ] ], [ [ "y_test_preds = model.predict(test)\n", "_____no_output_____" ], [ "y_test_preds", "_____no_output_____" ], [ "y_test_probs = model.predict_proba(test)", "_____no_output_____" ], [ "y_test_probs", "_____no_output_____" ], [ "len(y_test_probs)", "_____no_output_____" ], [ "test_raw.shape", "_____no_output_____" ], [ "test_raw['Id'].shape", "_____no_output_____" ], [ "submission = pd.DataFrame({'Id': range(0,3799), 'TARGET_5Yrs': [p[1] for p in y_test_probs]})", "_____no_output_____" ], [ "submission.head()", "_____no_output_____" ], [ "submission.to_csv('../reports/aj_' + experiment_label + 'submission.csv',\n index=False,\n )", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
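The findings in the rforest01 notebook above flag overfitting and name the next step: increase the minimum samples per leaf. A hedged sketch of that follow-up; the grid values are illustrative, and it assumes the `X_train`/`y_train` split from the notebook is in scope:

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

# force larger leaves to regularize the forest; candidate values are illustrative
param_grid = {'min_samples_leaf': [1, 5, 20, 50]}
search = GridSearchCV(
    RandomForestClassifier(class_weight='balanced', random_state=8),
    param_grid,
    scoring='roc_auc',
    cv=5,
)
search.fit(X_train, y_train)  # assumes the training split defined in the notebook above
print(search.best_params_, search.best_score_)
```

Larger leaves cap how finely each tree can partition the training data, which is the usual first lever against a training ROC curve that hugs the top-left corner.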
cb60f7044e986dff00df5c5aad8aa320a3a1a249
4,475
ipynb
Jupyter Notebook
03_fractals_recursion/Fractals_and_Recursion.ipynb
Shiao-Computing-Volumes/project-based-learning-in-python
52e0b02cf085de97c3b5d9aa44bf8786d8a9ad19
[ "Apache-2.0" ]
1
2021-08-17T23:53:46.000Z
2021-08-17T23:53:46.000Z
03_fractals_recursion/Fractals_and_Recursion.ipynb
Shiao-Computing-Volumes/project-based-learning-in-python
52e0b02cf085de97c3b5d9aa44bf8786d8a9ad19
[ "Apache-2.0" ]
null
null
null
03_fractals_recursion/Fractals_and_Recursion.ipynb
Shiao-Computing-Volumes/project-based-learning-in-python
52e0b02cf085de97c3b5d9aa44bf8786d8a9ad19
[ "Apache-2.0" ]
null
null
null
18.723849
88
0.472179
[ [ [ "# Fractals and Recursion", "_____no_output_____" ], [ "# Recursion", "_____no_output_____" ] ], [ [ "def recursive_factorial(n):\n if n == 1:\n return 1\n else:\n return n * recursive_factorial(n-1)", "_____no_output_____" ], [ "recursive_factorial(4)", "_____no_output_____" ] ], [ [ "<img src=\"https://natureofcode.com/book/imgs/chapter08/ch08_09.png\" />\n\nImage Source: [The Nature of Code](https://natureofcode.com/book/chapter-8-fractals/)", "_____no_output_____" ] ], [ [ "def while_factorial(n):\n if n == 1:\n return 1\n \n product = 1\n while (n > 0):\n product *= n\n n = n - 1\n return product\n\n\ndef for_factorial(n):\n if n == 1:\n return 1\n \n product = 1\n for i in range(n):\n product *= i+1\n return product", "_____no_output_____" ], [ "n = 4\nproduct_while = while_factorial(n)\nproduct_for = for_factorial(n)\nprint(product_while, product_for)", "24 24\n" ], [ "n = 10\nrecursive_factorial(n) == while_factorial(n) == for_factorial(n)", "_____no_output_____" ] ], [ [ "# Fractals", "_____no_output_____" ] ], [ [ "# pip install ipyturtle\n# jupyter nbextension enable --py --sys-prefix ipyturtle", "_____no_output_____" ], [ "from ipyturtle import Turtle\nhead = Turtle(fixed=False, width=300, height=300)\nhead", "_____no_output_____" ], [ "head.reset()\ndef square(size):\n for i in range(4):\n head.forward(size)\n head.right(90)\nsquare(100)", "_____no_output_____" ], [ "import turtle\nhead = turtle.Turtle()\nhead.circle(10)", "_____no_output_____" ] ], [ [ "# References", "_____no_output_____" ], [ "- [The Nature of Code](https://natureofcode.com/book/chapter-8-fractals/)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
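The fractals notebook above introduces recursion with factorials and turtle drawing with a square; the natural combination of the two is a recursive fractal. A minimal sketch of a Koch curve using the standard-library `turtle` module already imported in its last code cell (depth and segment length are arbitrary choices):

```python
import turtle

def koch(t, length, depth):
    # base case: a plain segment
    if depth == 0:
        t.forward(length)
        return
    # recursive case: replace the segment with four segments of a third the length
    for angle in (60, -120, 60, 0):
        koch(t, length / 3, depth - 1)
        t.left(angle)

t = turtle.Turtle()
t.speed(0)
koch(t, 300, 3)
turtle.done()
```

Each extra depth level multiplies the number of drawn segments by four, mirroring how `recursive_factorial` unwinds one call per level.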
cb60fcea7b3de3a4a0ebade7e9e0afeeb1bd9223
16,129
ipynb
Jupyter Notebook
notebook/ParameterEstimation/FitzHughNagumoParameterEstimation.ipynb
HH3117/DiffEqBenchmarks.jl
c0f4b9fa600103ea70d31d5cf4ddec1bdc799ff9
[ "MIT" ]
1
2021-09-01T00:02:56.000Z
2021-09-01T00:02:56.000Z
notebook/ParameterEstimation/FitzHughNagumoParameterEstimation.ipynb
HH3117/DiffEqBenchmarks.jl
c0f4b9fa600103ea70d31d5cf4ddec1bdc799ff9
[ "MIT" ]
null
null
null
notebook/ParameterEstimation/FitzHughNagumoParameterEstimation.ipynb
HH3117/DiffEqBenchmarks.jl
c0f4b9fa600103ea70d31d5cf4ddec1bdc799ff9
[ "MIT" ]
null
null
null
38.771635
860
0.572013
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb61003efad21de5e2151c2167877406cac060eb
7,971
ipynb
Jupyter Notebook
TrainTheModel_with_celery.ipynb
kotfic/Classification-of-Hyperspectral-Image
cb38a7986045f3189b0feb8df794732f0d86e3bf
[ "MIT" ]
null
null
null
TrainTheModel_with_celery.ipynb
kotfic/Classification-of-Hyperspectral-Image
cb38a7986045f3189b0feb8df794732f0d86e3bf
[ "MIT" ]
null
null
null
TrainTheModel_with_celery.ipynb
kotfic/Classification-of-Hyperspectral-Image
cb38a7986045f3189b0feb8df794732f0d86e3bf
[ "MIT" ]
null
null
null
36.732719
278
0.598168
[ [ [ "from indian_pines_cnn_classification.train import train_model", "/home/kotfic/.venvs/hpcmp_demo_04162018/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ], [ "a = train_model.delay(data_path='split', model_path='model.h5')", "_____no_output_____" ], [ "a.ready()", "_____no_output_____" ], [ "a.get()", "_____no_output_____" ], [ "import itertools\nfrom celery.canvas import group", "_____no_output_____" ], [ "sweep = [dict(m) for m in itertools.product(\n [('numPCAcomponents', v) for v in (15, 30, 60)],\n [('windowSize', v) for v in (5, 15)],\n [('testRatio', v) for v in (0.10, 0.25)]\n)]\nsweep", "_____no_output_____" ], [ "def model_name(**kwargs):\n return \"model_n{numPCAcomponents}t{testRatio}w{windowSize}.h5\".format(**kwargs)", "_____no_output_____" ], [ "a = group([train_model.s(\n data_path='split', model_path=model_name(**kwargs))\n for kwargs in sweep]).delay()", "[2018-04-02 19:00:51,556] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,557] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,557] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,560] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,561] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,561] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,564] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,564] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,564] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,567] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,568] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,568] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,571] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,571] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,571] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,573] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,573] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,574] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,575] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,575] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,575] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,577] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,577] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,577] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,579] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,579] WARNING: Could not get girder_api_url from parent task: Parent 
task is None\n[2018-04-02 19:00:51,579] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,581] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,581] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,581] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,583] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,583] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,583] WARNING: Could not get token from parent task: Parent task is None\n[2018-04-02 19:00:51,584] WARNING: Girder job not created: Parent task is None\n[2018-04-02 19:00:51,585] WARNING: Could not get girder_api_url from parent task: Parent task is None\n[2018-04-02 19:00:51,585] WARNING: Could not get token from parent task: Parent task is None\n" ], [ "print(a.ready())\na.completed_count()", "True\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
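Because a celery `group` preserves the order of its signatures, the sweep results in the notebook above can be matched back to their parameter dictionaries once every task finishes. A short sketch assuming `a`, `sweep`, and `model_name` are still in scope from the cells above:

```python
# block until all group members finish, then pair each result with its sweep params
results = a.get()  # list ordered like the signatures passed to group()
for kwargs, result in zip(sweep, results):
    print(model_name(**kwargs), '->', result)
```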
cb6112b83dcaf64c7831b2cc7266f4ae468e3b9a
111,999
ipynb
Jupyter Notebook
src/training_object_detector.ipynb
whn09/mini-amazon-go
f46d9147289c5c7b497c7bf1829b701fe56b506f
[ "Apache-2.0" ]
3
2020-04-20T15:04:29.000Z
2021-06-28T06:01:43.000Z
src/training_object_detector.ipynb
whn09/mini-amazon-go
f46d9147289c5c7b497c7bf1829b701fe56b506f
[ "Apache-2.0" ]
null
null
null
src/training_object_detector.ipynb
whn09/mini-amazon-go
f46d9147289c5c7b497c7bf1829b701fe56b506f
[ "Apache-2.0" ]
3
2019-12-02T08:16:28.000Z
2021-02-24T07:10:55.000Z
306.846575
97,168
0.914142
[ [ [ "# Train your object detector", "_____no_output_____" ] ], [ [ "!pip install gluoncv", "_____no_output_____" ], [ "import gluoncv as gcv\nimport mxnet as mx", "_____no_output_____" ] ], [ [ "# Prepare the training set", "_____no_output_____" ] ], [ [ "import os\n\nclass DetectionDataset(gcv.data.VOCDetection):\n CLASSES = ['cocacola', 'noodles', 'hand']\n def __init__(self, root):\n self._im_shapes = {}\n self._root = os.path.expanduser(root)\n self._transform = None\n self._items = [(self._root, x[:-len('.jpg')]) for x in os.listdir(self._root) if x.endswith('.jpg')] # slice off the suffix; strip('.jpg') would eat characters from both ends\n self._anno_path = os.path.join('{}', '{}.xml')\n self._image_path = os.path.join('{}', '{}.jpg')\n self.index_map = dict(zip(self.classes, range(self.num_class)))\n self._label_cache = self._preload_labels()\n \n def __str__(self):\n detail = self._root\n return self.__class__.__name__ + '(' + detail + ')'\n \n @property\n def classes(self):\n return self.CLASSES\n \n @property\n def num_class(self):\n return len(self.classes)\n \ntrain_dataset = DetectionDataset('../images/shenzhen_v1')\nprint('class_names:', train_dataset.classes)\nprint('num_images:', len(train_dataset))", "class_names: ['cocacola', 'noodles', 'hand']\nnum_images: 149\n" ] ], [ [ "# Visualize the data", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\nfrom gluoncv.utils import viz\n\nsample = train_dataset[0]\ntrain_image = sample[0]\ntrain_label = sample[1]\n\nax = viz.plot_bbox(\n train_image.asnumpy(),\n train_label[:, :4],\n labels=train_label[:, 4:5],\n class_names=train_dataset.classes)\nplt.show()\n\n# for i in range(len(train_dataset)):\n# sample = train_dataset[i]\n# train_image = sample[0]\n# train_label = sample[1]\n\n# ax = viz.plot_bbox(\n# train_image.asnumpy(),\n# train_label[:, :4],\n# labels=train_label[:, 4:5],\n# class_names=train_dataset.classes)\n# plt.show()", "_____no_output_____" ] ], [ [ "# Define the training procedure", "_____no_output_____" ] ], [ [ "import time\nfrom datetime import datetime\nfrom mxnet import autograd\nfrom gluoncv.data.batchify import Tuple, Stack, Pad\n\ndef train_model(train_dataset, epochs=50):\n ctx = mx.gpu(0)\n# ctx = mx.cpu(0)\n net = gcv.model_zoo.get_model('ssd_512_resnet50_v1_custom', classes=train_dataset.classes, transfer='coco')\n# net.load_parameters('object_detector_epoch200_10_22_2019_20_28_41.params') # TODO continue training\n net.collect_params().reset_ctx(ctx)\n width, height = 512, 512 # suppose we use 512 as base training size\n train_transform = gcv.data.transforms.presets.ssd.SSDDefaultTrainTransform(width, height)\n gcv.utils.random.seed(233)\n \n# batch_size = 4\n batch_size = 32 # 32 for p3.2xlarge, 16 for p2.2xlarge\n # you can make it larger (if your CPU has more cores) to accelerate data loading\n num_workers = 4\n\n with autograd.train_mode():\n _, _, anchors = net(mx.nd.zeros((1, 3, height, width), ctx))\n anchors = anchors.as_in_context(mx.cpu())\n train_transform = gcv.data.transforms.presets.ssd.SSDDefaultTrainTransform(width, height, anchors)\n batchify_fn = Tuple(Stack(), Stack(), Stack())\n train_loader = mx.gluon.data.DataLoader(\n train_dataset.transform(train_transform),\n batch_size,\n shuffle=True,\n batchify_fn=batchify_fn,\n last_batch='rollover',\n num_workers=num_workers)\n \n mbox_loss = gcv.loss.SSDMultiBoxLoss()\n ce_metric = mx.metric.Loss('CrossEntropy')\n smoothl1_metric = mx.metric.Loss('SmoothL1')\n for k, v in net.collect_params().items():\n if 'convpredictor' not in k:\n # freeze upper layers\n v.grad_req = 'null'\n trainer = mx.gluon.Trainer(\n net.collect_params(), 'sgd',\n {'learning_rate': 
0.001, 'wd': 0.0005, 'momentum': 0.9})\n \n net.hybridize(static_alloc=True, static_shape=True)\n \n for epoch in range(epochs):\n tic = time.time()\n btic = time.time()\n \n for i, batch in enumerate(train_loader):\n data = mx.gluon.utils.split_and_load(batch[0], ctx_list=[ctx], batch_axis=0)\n cls_targets = mx.gluon.utils.split_and_load(batch[1], ctx_list=[ctx], batch_axis=0)\n box_targets = mx.gluon.utils.split_and_load(batch[2], ctx_list=[ctx], batch_axis=0)\n \n with autograd.record():\n cls_preds = []\n box_preds = []\n for x in data:\n cls_pred, box_pred, _ = net(x)\n cls_preds.append(cls_pred)\n box_preds.append(box_pred)\n sum_loss, cls_loss, box_loss = mbox_loss(\n cls_preds, box_preds, cls_targets, box_targets)\n autograd.backward(sum_loss)\n # since we have already normalized the loss, we don't want to normalize\n # by batch-size anymore\n trainer.step(1)\n ce_metric.update(0, [l * batch_size for l in cls_loss])\n smoothl1_metric.update(0, [l * batch_size for l in box_loss])\n name1, loss1 = ce_metric.get()\n name2, loss2 = smoothl1_metric.get()\n print('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}'.format(\n epoch, i, batch_size/(time.time()-btic), name1, loss1, name2, loss2))\n btic = time.time()\n return net\n \n", "_____no_output_____" ] ], [ [ "# Start training", "_____no_output_____" ] ], [ [ "epochs = 300\nnet = train_model(train_dataset, epochs=epochs)\nsave_file = 'object_detector_epoch{}_{}.params'.format(epochs, datetime.now().strftime(\"%m_%d_%Y_%H_%M_%S\"))\nnet.save_parameters(save_file)\nprint('Saved model to disk: ' + save_file)", "/home/ec2-user/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/mxnet/gluon/block.py:1454: UserWarning: Cannot decide type for the following arguments. Consider providing them as input:\n\tdata: None\n input_sym_arg_type = in_param.infer_type()[0]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
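After the training notebook above saves its parameters, the same custom SSD architecture can be rebuilt for inference. A hedged sketch; the parameter file and image path are placeholders, and it assumes the GluonCV presets API already used above:

```python
import gluoncv as gcv
from gluoncv.utils import viz
from matplotlib import pyplot as plt

classes = ['cocacola', 'noodles', 'hand']
net = gcv.model_zoo.get_model('ssd_512_resnet50_v1_custom', classes=classes,
                              pretrained_base=False)
net.load_parameters('object_detector_epoch300_example.params')  # placeholder file name

# load_test resizes the image and returns the network input plus a plottable array
x, img = gcv.data.transforms.presets.ssd.load_test('../images/test.jpg', short=512)  # placeholder path
class_ids, scores, bboxes = net(x)
viz.plot_bbox(img, bboxes[0], scores[0], class_ids[0], class_names=classes)
plt.show()
```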
cb611a9cf329397690e97b9560414a526ab8d629
8,103
ipynb
Jupyter Notebook
Interactive_Example.ipynb
diamaris324/see-segment
47b44d1d108f055f12c167863acb1a7df2f3f000
[ "MIT" ]
null
null
null
Interactive_Example.ipynb
diamaris324/see-segment
47b44d1d108f055f12c167863acb1a7df2f3f000
[ "MIT" ]
null
null
null
Interactive_Example.ipynb
diamaris324/see-segment
47b44d1d108f055f12c167863acb1a7df2f3f000
[ "MIT" ]
null
null
null
24.480363
324
0.584969
[ [ [ "# Simple Evolutionary Exploration Walkthrough\n\nThis notebook contains instructions on how to use the SEE module, along with several examples. These instructions will cover the following parts: \n* [Import Image Files](#Import_Image_Files)\n* [Manual Search](#Manual_Search)\n* [Genetic Algorithm Search](#Genetic_Algorithm_Search)\n* [Reading the Results](#Reading_the_Results)\n\nThese examples use Jupyter widgets to make the module interactive and easier to use and learn.", "_____no_output_____" ], [ "----\n<a name=\"Import_Image_Files\"></a>\n\n## Import Image Files", "_____no_output_____" ], [ "First import the following packages:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pylab as plt\nimport imageio\nfrom see import Segmentors\nfrom see import JupyterGUI", "_____no_output_____" ] ], [ [ "Pick an image from the example folder.", "_____no_output_____" ] ], [ [ "data = JupyterGUI.pickimage('Image_data/Examples/')", "_____no_output_____" ] ], [ [ "# Select ColorSpace", "_____no_output_____" ] ], [ [ "colorspace = JupyterGUI.colorwidget(data.img, paramlist=None)", "_____no_output_____" ] ], [ [ "----\n<a name=\"Manual_Search\"></a>\n\n## Manual Search", "_____no_output_____" ], [ "Manual searching of parameters can easily be done using the provided GUI. Pre-established parameters can be put into the widget, or the parameter values can be changed using the sliders. To change the algorithm, simply change the `alg` input. For a list of available inputs, print `Segmentors.algorithmspace`.\n", "_____no_output_____" ] ], [ [ "from see.Segmentors import segmentor\n\nalg = JupyterGUI.picksegment(list(segmentor.algorithmspace.keys()))", "_____no_output_____" ], [ "### Example of input for params\nparams = JupyterGUI.segmentwidget(data.img, params = None, alg = alg.value)", "_____no_output_____" ] ], [ [ "----\n<a name=\"Genetic_Algorithm_Search\"></a>\n\n## Genetic Algorithm Search", "_____no_output_____" ], [ "First import image files, as well as the following packages:", "_____no_output_____" ] ], [ [ "from see.Segmentors import segmentor\nfrom see.ColorSpace import colorspace\nfrom see.Workflow import workflow\nfrom see.Segment_Fitness import segment_fitness\nfrom see import base_classes, GeneticSearch\n\nworkflow.addalgos([colorspace, segmentor, segment_fitness])", "_____no_output_____" ] ], [ [ "To run the genetic algorithm, we need to initialize an instance of an evolver. The original image and ground truth segmentation image are inputs to it, along with an integer value for population size. This value sets how many individuals are in our population. For this example, we'll set this number to be equal to 10.", "_____no_output_____" ] ], [ [ "mydata = base_classes.pipedata()\nmydata.img = data.img\nmydata.gmask = data.gmask\nmy_evolver = GeneticSearch.Evolver(workflow, mydata, pop_size=10)", "_____no_output_____" ] ], [ [ "Now that the evolver has been initialized, we can run the genetic algorithm for a specified number of generations (or iterations). 
Here we will set this number equal to 5.", "_____no_output_____" ] ], [ [ "# warnings may appear when this runs\npopulation = my_evolver.run(ngen=5)", "_____no_output_____" ] ], [ [ "----\n<a name=\"Reading_the_Results\"></a>\n\n## Reading the Results", "_____no_output_____" ], [ "After the genetic algorithm is complete, we can retrieve the individuals that resulted in the lowest (best) fitness values by printing `my_evolver.hof`. These individuals are sorted according to fitness value, so to get the overall best individual, we can simply look at the first individual in the list. ", "_____no_output_____" ] ], [ [ "params = my_evolver.hof[0]\n\nprint('Best Individual:\\n', params)", "_____no_output_____" ] ], [ [ "We can see the mask this individual generates by evaluating it, then plotting the result:", "_____no_output_____" ] ], [ [ "seg = Segmentors.algoFromParams(params)\nmask = seg.evaluate(data.img)\n\nplt.figure(figsize=(20, 10))\nplt.subplot(121)\nplt.imshow(data.img)\nplt.title(\"Original Image\")\nplt.axis('off')\n\nplt.subplot(122)\nplt.imshow(mask)\nplt.title(\"Segmentation\")\nplt.axis('off')\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "We can also use `FitnessFunction` to calculate the final fitness value for this algorithm:", "_____no_output_____" ] ], [ [ "print('Fitness Value: ', Segmentors.FitnessFunction(mask, data.gmask)[0])", "_____no_output_____" ] ], [ [ "If this value is satisfactory, we can then get usable code to run this algorithm anywhere, including outside this notebook. The `print_best_algorithm_code` function does this using the given individual:", "_____no_output_____" ] ], [ [ "ex = Segmentors.print_best_algorithm_code(my_evolver.hof[0])", "_____no_output_____" ] ], [ [ "With this code, make sure to import skimage, along with any input images this algorithm will be applied to.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
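Since `my_evolver.hof` in the walkthrough above is sorted by fitness, the top few candidates can be compared before committing to one. A short sketch assuming the objects from that notebook (`my_evolver`, `data`, `Segmentors`) are still in scope:

```python
# evaluate the best few hall-of-fame individuals side by side
for rank, individual in enumerate(my_evolver.hof[:3]):
    seg = Segmentors.algoFromParams(individual)
    mask = seg.evaluate(data.img)
    fitness = Segmentors.FitnessFunction(mask, data.gmask)[0]
    print(rank, fitness, individual)
```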
cb611b48ecfc5fda44d21a86d1fdeda1e351713b
87,248
ipynb
Jupyter Notebook
examples/Magics in IPython Kernel.ipynb
fbalicchia/sparkmagic
bfabbb39a0249197c2c05c8efe681710fff9151b
[ "RSA-MD" ]
null
null
null
examples/Magics in IPython Kernel.ipynb
fbalicchia/sparkmagic
bfabbb39a0249197c2c05c8efe681710fff9151b
[ "RSA-MD" ]
null
null
null
examples/Magics in IPython Kernel.ipynb
fbalicchia/sparkmagic
bfabbb39a0249197c2c05c8efe681710fff9151b
[ "RSA-MD" ]
null
null
null
32.111888
12,768
0.596403
[ [ [ "# Demonstrating sparkmagic", "_____no_output_____" ], [ "## This notebook will demonstrate how we can use the spark magic to intersperse our Python code with code that is running against a Spark cluster", "_____no_output_____" ], [ "Let’s say we’re working in an IPython notebook and we want to use Spark to analyze some data. So, we'll load `sparkmagic` in order to be able to talk to Spark from our Python notebook.", "_____no_output_____" ] ], [ [ "%load_ext sparkmagic.magics", "_____no_output_____" ] ], [ [ "With it, the `%manage_spark` line magic and the `%%spark` magic are available.\n\nThe %manage_spark line magic lets you manage Livy endpoints and Spark sessions.", "_____no_output_____" ], [ "Let's start by adding an Endpoint.\n\nAn Endpoint is a [Livy](https://github.com/cloudera/livy) installation running on a Spark cluster. \n\n`sparkmagic` allows us to specify the Livy endpoint along with a username and password to authenticate to it. If the Livy endpoint is on your local machine or has no password, simply leave the text fields for username and password blank.", "_____no_output_____" ] ], [ [ "%manage_spark", "_____no_output_____" ] ], [ [ "![add_endpoint](images/addendpoint.PNG)", "_____no_output_____" ], [ "Now, add a session to the endpoint you added. The name you give to the session will be used with the `%%spark` magic to run Spark code. You can also specify the configuration you want to start the session with. You can create either Python (PySpark) or Scala (Spark) sessions.\n\nCreating a session will create a `SparkContext` with the name `sc` and a `HiveContext` with the name `sqlContext`.\n\nWe'll start by adding a PySpark session.", "_____no_output_____" ], [ "![add_session](images/addsession.PNG)", "_____no_output_____" ], [ "You can now run Spark code against your Livy session. For information on the available commands, run %spark?", "_____no_output_____" ] ], [ [ "%spark?", "_____no_output_____" ] ], [ [ "## Pyspark", "_____no_output_____" ], [ "You can run code against your Spark session by adding `%%spark` at the beginning of the cell. Since we’ve only created a single session, we don’t need to specify the session name.\n\nIn the following cell, I'll create a Resilient Distributed Dataset (RDD) called numbers, and print its first element.", "_____no_output_____" ] ], [ [ "%%spark\nnumbers = sc.parallelize([1, 2, 3, 4])\nprint('First element of numbers is {} and its description is:\\n{}'.format(numbers.first(), numbers.toDebugString()))", "First element of numbers is 1 and its description is:\nb'(1) ParallelCollectionRDD[0] at parallelize at PythonRDD.scala:194 []'" ] ], [ [ "Now, you've created your session and executed some statements. If you want to look at the Livy logs for this session, simply run a cell like so:", "_____no_output_____" ] ], [ [ "%spark logs", "16/06/09 23:43:28 WARN SparkConf: The configuration key 'spark.yarn.applicationMaster.waitTries' has been deprecated as of Spark 1.3 and and may be removed in the future. Please use the new key 'spark.yarn.am.waitTime' instead.\n16/06/09 23:43:28 WARN SparkConf: The configuration key 'spark.yarn.applicationMaster.waitTries' has been deprecated as of Spark 1.3 and and may be removed in the future. Please use the new key 'spark.yarn.am.waitTime' instead.\n16/06/09 23:43:29 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable\n16/06/09 23:43:30 INFO TimelineClientImpl: Timeline service address: http://localhost:8188/ws/v1/timeline/\n16/06/09 23:43:30 INFO Client: Requesting a new application from cluster with 10 NodeManagers\n16/06/09 23:43:30 INFO Client: Verifying our application has not requested more than the maximum memory capability of the cluster (25600 MB per container)\n16/06/09 23:43:30 INFO Client: Will allocate AM container, with 1408 MB memory including 384 MB overhead\n16/06/09 23:43:30 INFO Client: Setting up container launch context for our AM\n16/06/09 23:43:30 INFO Client: Setting up the launch environment for our AM container\n16/06/09 23:43:30 INFO Client: Preparing resources for our AM container\n16/06/09 23:43:31 INFO MetricsConfig: loaded properties from hadoop-metrics2-azure-file-system.properties\n16/06/09 23:43:31 INFO WasbAzureIaasSink: Init starting.\n16/06/09 23:43:31 INFO AzureIaasSink: Init starting. Initializing MdsLogger.\n16/06/09 23:43:31 INFO AzureIaasSink: Init completed.\n16/06/09 23:43:31 INFO WasbAzureIaasSink: Init completed.\n16/06/09 23:43:31 INFO MetricsSinkAdapter: Sink azurefs2 started\n16/06/09 23:43:31 INFO MetricsSystemImpl: Scheduled snapshot period at 60 second(s).\n16/06/09 23:43:31 INFO MetricsSystemImpl: azure-file-system metrics system started\n16/06/09 23:43:31 INFO Client: Uploading resource file:/usr/hdp/current/spark-client/python/lib/pyspark.zip -> wasb://[email protected]/user/spark/.sparkStaging/application_1464100251524_0001/pyspark.zip\n16/06/09 23:43:32 INFO Client: Uploading resource file:/usr/hdp/current/spark-client/python/lib/py4j-0.8.2.1-src.zip -> wasb://[email protected]/user/spark/.sparkStaging/application_1464100251524_0001/py4j-0.8.2.1-src.zip\n16/06/09 23:43:32 INFO Client: Uploading resource file:/usr/hdp/current/spark-client/conf/hive-site.xml -> wasb://[email protected]/user/spark/.sparkStaging/application_1464100251524_0001/hive-site.xml\n16/06/09 23:43:33 INFO Client: Uploading resource file:/usr/hdp/2.3.3.1-7/spark/python/lib/pyspark.zip -> wasb://[email protected]/user/spark/.sparkStaging/application_1464100251524_0001/pyspark.zip\n16/06/09 23:43:33 INFO Client: Uploading resource file:/usr/hdp/2.3.3.1-7/spark/python/lib/py4j-0.8.2.1-src.zip -> wasb://[email protected]/user/spark/.sparkStaging/application_1464100251524_0001/py4j-0.8.2.1-src.zip\n16/06/09 23:43:34 INFO Client: Uploading resource file:/tmp/spark-1c0a5560-2256-4f63-ba1f-c0dfd25dc24b/__spark_conf__324896021270563710.zip -> wasb://[email protected]/user/spark/.sparkStaging/application_1464100251524_0001/__spark_conf__324896021270563710.zip\n16/06/09 23:43:34 WARN Client: spark.yarn.am.extraJavaOptions will not take effect in cluster mode\n16/06/09 23:43:34 INFO SecurityManager: Changing view acls to: spark\n16/06/09 23:43:34 INFO SecurityManager: Changing modify acls to: spark\n16/06/09 23:43:34 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(spark); users with modify permissions: Set(spark)\n16/06/09 23:43:34 INFO Client: Submitting application 1 to ResourceManager\n16/06/09 23:43:35 INFO YarnClientImpl: Submitted application application_1464100251524_0001\n16/06/09 23:43:36 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:36 INFO Client: \n\t client token: N/A\n\t diagnostics: N/A\n\t ApplicationMaster host: N/A\n\t ApplicationMaster RPC port: -1\n\t queue: default\n\t start time: 1465515814875\n\t final 
status: UNDEFINED\n\t tracking URL: http://localhost:8088/proxy/application_1464100251524_0001/\n\t user: spark\n16/06/09 23:43:37 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:38 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:39 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:40 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:41 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:42 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:43 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:44 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:45 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:46 INFO Client: Application report for application_1464100251524_0001 (state: ACCEPTED)\n16/06/09 23:43:47 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:47 INFO Client: \n\t client token: N/A\n\t diagnostics: N/A\n\t ApplicationMaster host: 10.0.0.4\n\t ApplicationMaster RPC port: 0\n\t queue: default\n\t start time: 1465515814875\n\t final status: UNDEFINED\n\t tracking URL: http://localhost:8088/proxy/application_1464100251524_0001/\n\t user: spark\n16/06/09 23:43:48 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:49 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:50 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:51 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:52 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:53 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:54 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:55 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:56 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:57 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:58 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:43:59 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:00 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:01 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:02 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:03 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:04 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:05 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:06 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:07 INFO 
Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:08 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:09 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:10 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:11 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:12 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:13 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:14 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:15 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:16 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:17 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:18 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:19 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:20 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:21 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:22 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:23 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:24 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)\n16/06/09 23:44:25 INFO Client: Application report for application_1464100251524_0001 (state: RUNNING)" ] ], [ [ "## SparkSQL", "_____no_output_____" ], [ "You can run SQL queries by passing the arguments `-c sql` to the %%spark magic.\n\nFirst, let's create a table:", "_____no_output_____" ] ], [ [ "%%spark\ndf = spark.read.json(\"/apps/spark-2.3.3/examples/src/main/resources/people.json\")\ndf.createOrReplaceTempView(\"people\")", "_____no_output_____" ] ], [ [ "Now we can see what tables we have:", "_____no_output_____" ] ], [ [ "%%spark -c sql\nSHOW TABLES", "_____no_output_____" ] ], [ [ "Now, let's query one of the available tables.\n\nNotice that we are passing the `--output` or `-o` parameter with a value of `df_people` so that the output of our SQL query is saved in the `df_people` variable in the IPython kernel context as a [Pandas](http://pandas.pydata.org/) DataFrame.", "_____no_output_____" ] ], [ [ "%%spark -c sql -o df_people --maxrows 10\nSELECT * FROM people", "_____no_output_____" ] ], [ [ ">SQL queries also have other parameters you can pass in, like `--samplemethod`, `--maxrows`, `--samplefraction`, and `--quiet`.", "_____no_output_____" ], [ "We can now simply use the Pandas dataframe from the IPython notebook.", "_____no_output_____" ] ], [ [ "df_people.head()", "_____no_output_____" ] ], [ [ "If you want to visualize the data in the Pandas dataframe, you can write your own code to do so, or you can use our autovisualization widget:", "_____no_output_____" ] ], [ [ "from autovizwidget.widget.utils import display_dataframe\ndisplay_dataframe(df_people)", "/home/itamarst/Devel/sparkmagic/autovizwidget/autovizwidget/widget/utils.py:50: FutureWarning:\n\nA future version of pandas 
will default to `skipna=True`. To silence this warning, pass `skipna=True|False` explicitly.\n\n" ] ], [ [ ">You could also choose to have this widget display by default for *all* Pandas dataframes from here on by running this piece of code:\n\n```\nip = get_ipython()\nip.display_formatter.ipython_display_formatter.for_type_by_name('pandas.core.frame', 'DataFrame', display_dataframe)\n```", "_____no_output_____" ], [ "### Server-side rendering\n\nYou can also have images rendered on the server, and then display them locally. This prevents the need to ship large amounts of data locally to do visualizations. First, we render a PNG, in this case using matplotlib:", "_____no_output_____" ] ], [ [ "%%spark\nimport matplotlib.pyplot as plt\nax = df.toPandas().plot.bar(x='name',y='age')", "_____no_output_____" ] ], [ [ "And now we can view the resulting image using the `%matplot` magic:", "_____no_output_____" ] ], [ [ "%%spark\n%matplot plt", "_____no_output_____" ] ], [ [ "## Scala support", "_____no_output_____" ], [ "If you want to write your Spark code in Scala, you can easily do that.", "_____no_output_____" ], [ "Let's add a Scala session:", "_____no_output_____" ] ], [ [ "%manage_spark", "Creating SparkContext as 'sc'\nCreating HiveContext as 'sqlContext'\n" ] ], [ [ "![add_session](images/addsession_s.PNG)", "_____no_output_____" ], [ "And just run some Spark code. Notice that we now specify the session we want to use, `-s my_spark`.", "_____no_output_____" ] ], [ [ "%%spark -s my_spark\nval hvacText = sc.parallelize(Array(1, 2, 3, 4))\nhvacText.first()", "res0: Int = 1" ] ], [ [ "Now, we can query the table with **SparkSQL** too:", "_____no_output_____" ] ], [ [ "%%spark -s my_spark -c sql -o my_df_from_scala --maxrows 10\nSELECT * FROM hivesampletable", "_____no_output_____" ] ], [ [ "And we can still access the result of the Spark query from Scala as a Pandas dataframe!", "_____no_output_____" ] ], [ [ "my_df_from_scala.head()", "_____no_output_____" ] ], [ [ "# Cleaning up\n\nNow that you’re done with your Livy sessions, you should clean them up.\n\nSimply click on the `Delete` buttons!", "_____no_output_____" ] ], [ [ "%manage_spark", "_____no_output_____" ] ], [ [ "![clean_up](images/cleanup.PNG)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
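The sparkmagic notebook above mentions `--samplemethod`, `--maxrows`, `--samplefraction`, and `--quiet` without demonstrating them together. A hedged example cell combining the flags exactly as the notebook names them (the fraction value is an arbitrary choice):

```python
%%spark -c sql -o df_sample --samplemethod sample --samplefraction 0.1 --quiet
SELECT * FROM people
```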
cb6122c059e476de640d17e302a2b2ac2ab73feb
13,458
ipynb
Jupyter Notebook
Hugging Face/Hugging_Face_Ask_boolean_question_to_T5.ipynb
dineshh912/awesome-notebooks
97c8d341bb0951f31d8a6a45dc7691c194365991
[ "BSD-3-Clause" ]
1
2021-10-02T07:01:08.000Z
2021-10-02T07:01:08.000Z
Hugging Face/Hugging_Face_Ask_boolean_question_to_T5.ipynb
dineshh912/awesome-notebooks
97c8d341bb0951f31d8a6a45dc7691c194365991
[ "BSD-3-Clause" ]
null
null
null
Hugging Face/Hugging_Face_Ask_boolean_question_to_T5.ipynb
dineshh912/awesome-notebooks
97c8d341bb0951f31d8a6a45dc7691c194365991
[ "BSD-3-Clause" ]
null
null
null
38.672414
1,019
0.601278
[ [ [ "<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>", "_____no_output_____" ], [ "# Hugging Face - Ask boolean question to T5\n<a href=\"https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Hugging%20Face/Hugging_Face_Ask_boolean_question_to_T5.ipynb\" target=\"_parent\"><img src=\"https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHRleHQgaWQ9InN0cmluZyIgdHJhbnNmb3JtPSJtYXRyaXgoMS4wIDAuMCAwLjAgMS4wIDIyOC4wIDU0LjUpIiBmb250LWZhbWlseT0iQ29tZm9ydGFhLVJlZ3VsYXIsIENvbWZvcnRhYSIgZm9udC1zaXplPSI4MDAiIHRleHQtZGVjb3JhdGlvbj0ibm9uZSIgZmlsbD0iI2ZmZmZmZiIgeD0iMS4xOTk5OTk5OTk5OTk5ODg2IiB5PSI3MDUuMCI+bjwvdGV4dD4KIDwvZGVmcz4KIDx1c2UgaWQ9Im4iIHhsaW5rOmhyZWY9IiNzdHJpbmciLz4KPC9zdmc+Cg==\"/></a>", "_____no_output_____" ], [ "## T5-base finetuned on BoolQ (superglue task)\nThis notebook is for demonstrating the training and use of the text-to-text-transfer-transformer (better known as T5) on boolean questions (BoolQ). The example use case is a validator indicating if an idea is environmentally friendly. Nearly any question can be passed into the `query` function (see below) as long as a context to a question is given.\n\nAuthor: Maximilian Frank ([script4all.com](//script4all.com)) - Copyleft license\n\nNotes:\n- The model from [huggingface.co/mrm8488/t5-base-finetuned-boolq](//huggingface.co/mrm8488/t5-base-finetuned-boolq) is used in this example as it is an already trained t5-base model on boolean questions (BoolQ task of superglue).\n- Documentation references on [huggingface.co/transformers/model_doc/t5.html#training](//huggingface.co/transformers/model_doc/t5.html#training), template script on [programming-review.com/machine-learning/t5](//programming-review.com/machine-learning/t5)\n- The greater the model, the higher the accuracy on BoolQ (see [arxiv.org/pdf/1910.10683.pdf](//arxiv.org/pdf/1910.10683.pdf)):\n t5-small|t5-base|t5-large|t5-3B|t5-11B\n -|-|-|-|-\n 76.4%|81.4%|85.4%|89.9%|91.2%", "_____no_output_____" ], [ "## Loading the model\nIf here comes an error, install the packages via `python3 -m pip install … --user`.\n\nYou can also load a T5 plain model (not finetuned). 
Just replace the following code\n```python\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM\ntokenizer = AutoTokenizer.from_pretrained('mrm8488/t5-base-finetuned-boolq')\nmodel = AutoModelForSeq2SeqLM.from_pretrained('mrm8488/t5-base-finetuned-boolq')…\n```\nwith\n```python\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\ntokenizer = T5Tokenizer.from_pretrained('t5-small')\nmodel = T5ForConditionalGeneration.from_pretrained('t5-small')\n```\nwhere `t5-small` is one of the names in the table above.", "_____no_output_____" ] ], [ [ "!pip install transformers\n!pip install sentencepiece", "_____no_output_____" ], [ "import json\nimport torch\nfrom operator import itemgetter\nfrom distutils.util import strtobool\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM", "_____no_output_____" ], [ "# load model\ntokenizer = AutoTokenizer.from_pretrained('mrm8488/t5-base-finetuned-boolq')\nmodel = AutoModelForSeq2SeqLM.from_pretrained('mrm8488/t5-base-finetuned-boolq').to(torch.device('cuda' if torch.cuda.is_available() else 'cpu'))\ntry:model.parallelize()\nexcept:pass", "_____no_output_____" ] ], [ [ "## Training\n> **Optional:** You can leave the following out, if you don't have custom datasets. By default the number of training epochs equals 0, so nothing is trained.\n\n> **Warning:** This option consumes a lot of runtime and thus *naas.ai* credits. Make sure to have enough credits on your account.\n\nFor each dataset a stream-opener has to be provided which is readable line by line (e.g. file, database). In the array with key `keys` are all dictionary keys which exist in the jsonl-line. So in this example the first training dataset has the keys `question` for the questions (string),`passage` for the contexts (string) and `answer` for the answers (boolean). 
Adjust these keys to your dataset.\n\nAt last you have to adjust the number of epochs to be trained (see comment `# epochs`).", "_____no_output_____" ] ], [ [ "srcs = [\n { 'stream': lambda:open('boolq/train.jsonl', 'r'),\n 'keys': ['question', 'passage', 'answer'] },\n { 'stream': lambda:open('boolq/dev.jsonl', 'r'),\n 'keys': ['question', 'passage', 'answer'] },\n { 'stream': lambda:open('boolq-nat-perturb/train.jsonl', 'r'),\n 'keys': ['question', 'passage', 'roberta_hard'] }\n]\nmodel.train()\nfor _ in range(0): # epochs\n for src in srcs:\n with src['stream']() as s:\n for d in s:\n q, p, a = itemgetter(src['keys'][0], src['keys'][1], src['keys'][2])(json.loads(d))\n tokens = tokenizer('question:'+q+'\\ncontext:'+p, return_tensors='pt')\n if len(tokens.input_ids[0]) > model.config.n_positions:\n continue\n model(input_ids=tokens.input_ids,\n labels=tokenizer(str(a), return_tensors='pt').input_ids,\n attention_mask=tokens.attention_mask,\n use_cache=True\n ).loss.backward()\nmodel.eval(); # ; suppresses long output on jupyter", "_____no_output_____" ] ], [ [ "## Define query function\nAs the model is ready, define the querying function.", "_____no_output_____" ] ], [ [ "def query(q='question', c='context'):\n return strtobool(\n tokenizer.decode(\n token_ids=model.generate(\n input_ids=tokenizer.encode('question:'+q+'\\ncontext:'+c, return_tensors='pt')\n )[0],\n skip_special_tokens=True,\n max_length=3)\n )", "_____no_output_____" ] ], [ [ "## Querying on the task\nNow the actual task begins: Query the model with your ideas (see list `ideas`).", "_____no_output_____" ] ], [ [ "if __name__ == '__main__':\n ideas = [ 'The idea is to pollute the air instead of riding the bike.', # should be false\n 'The idea is to go cycling instead of driving the car.', # should be true\n 'The idea is to put your trash everywhere.', # should be false\n 'The idea is to reduce transport distances.', # should be true\n 'The idea is to put plants on all the roofs.', # should be true\n 'The idea is to forbid opensource vaccines.', # should be true\n 'The idea is to go buy an Iphone every five years.', # should be false \n 'The idea is to walk once every week in the nature.', # should be true \n 'The idea is to go buy Green bonds.', # should be true \n 'The idea is to go buy fast fashion.', # should be false\n 'The idea is to buy single-use items.', # should be false\n 'The idea is to drink plastic bottled water.', # should be false\n 'The idea is to use import goods.', # should be false\n 'The idea is to use buy more food than you need.', # should be false\n 'The idea is to eat a lot of meat.', # should be false\n 'The idea is to eat less meat.', # should be false\n 'The idea is to always travel by plane.', # should be false\n 'The idea is to opensource vaccines.' # should be false\n \n ]\n for idea in ideas:\n print('🌏 Idea:', idea)\n print('\\t✅ Good idea' if query('Is the idea environmentally friendly?', idea) else '\\t❌ Bad idea' )", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
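The record above ends with a `query` helper that maps T5's generated text to a boolean. As a quick illustration, here is a minimal, self-contained sketch of that decode step. It assumes the `mrm8488/t5-base-finetuned-boolq` checkpoint named in the notebook is downloadable; the `ask` helper and the example question are written for this note and are not part of the original record.

```python
# Minimal sketch of the boolean-QA query step, assuming the
# mrm8488/t5-base-finetuned-boolq checkpoint from the record above.
# The ask() helper is illustrative, not taken from the notebook.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-boolq")
model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-boolq")

def ask(question: str, context: str) -> bool:
    # T5 is text-to-text: this checkpoint generates the literal
    # strings "True" or "False" for BoolQ-style prompts.
    inputs = tokenizer("question:" + question + "\ncontext:" + context,
                       return_tensors="pt")
    output_ids = model.generate(**inputs, max_length=3)
    answer = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return answer.strip().lower() == "true"

print(ask("Is the idea environmentally friendly?",
          "The idea is to go cycling instead of driving the car."))
```

Note that, unlike the notebook, the length cap is passed to `generate` rather than `decode`, which is where it takes effect.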
cb613d4296dc7483631bdab22a91e521c3790c64
30186
ipynb
Jupyter Notebook
notebooks/modeling/modeling_base.ipynb
bartleyintel/model-for-predicting-RRT-events
258df64dd053f5c07deb3cefb61e08137a3c895f
[ "Apache-2.0" ]
6
2017-12-14T19:40:20.000Z
2021-05-13T15:47:55.000Z
notebooks/modeling/modeling_base.ipynb
bartleyintel/model-for-predicting-RRT-events
258df64dd053f5c07deb3cefb61e08137a3c895f
[ "Apache-2.0" ]
null
null
null
notebooks/modeling/modeling_base.ipynb
bartleyintel/model-for-predicting-RRT-events
258df64dd053f5c07deb3cefb61e08137a3c895f
[ "Apache-2.0" ]
7
2017-10-26T19:02:26.000Z
2021-05-13T15:48:02.000Z
26.619048
289
0.559266
[ [ [ "## Goes over modeling, starting from modeling tables.\n### We're using modeling tables which were prepared based on 12 hours worth of vital sign data from each patient, as well as medication history during the stay, and patient characteristics.\n### The model predicts the probability of having a rapid response team event in 1 hour's time from the time of prediction. A RRT event is called after personnel identify that a patient has an urgent need for medical service.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy as sp\n# import datetime as datetime\nimport cPickle as pickle\n%matplotlib inline\nplt.style.use('ggplot')", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler\nfrom sklearn.cross_validation import train_test_split, KFold\nfrom sklearn.metrics import confusion_matrix, roc_auc_score, precision_score, recall_score, classification_report\nfrom sklearn.ensemble import GradientBoostingClassifier #, RandomForestClassifier, \nfrom sklearn.ensemble.partial_dependence import plot_partial_dependence, partial_dependence\nfrom sklearn.grid_search import GridSearchCV", "_____no_output_____" ] ], [ [ "### function definitions", "_____no_output_____" ] ], [ [ "def score_printout(X_test, y_test, fittedModel):\n print \"AUC-ROC Score of model: \", roc_auc_score(y_test, fittedModel.predict_proba(X_test)[:,1])\n print \"Precision Score of model: \", precision_score(y_test, fittedModel.predict(X_test))\n print \"Recall Score of model: \", recall_score(y_test, fittedModel.predict(X_test))", "_____no_output_____" ], [ "def make_feature_importance_plot(featuresAndImportances, numFeatures):\n topN = featuresAndImportances[:numFeatures]\n labels = [pair[0] for pair in topN]\n values = [pair[1] for pair in topN]\n ind = np.arange(len(values)+2)\n width = 0.35 \n plt.barh(range(numFeatures),values)\n ax = plt.subplot(111)\n ax.set_yticks(ind+width)\n ax.set_yticklabels(labels, rotation=0, size=12)\n plt.ylabel('Feature', size=20)\n plt.xlabel('Importance', size=20)\n plt.show()\n", "_____no_output_____" ] ], [ [ "### Read in data\n\nWe did not share our modeling data, so you will have to create your own. The pipeline tool can help you do this. If you save the results to a csv, `masterdf_rrt` and `masterdf_nonrrt` are dataframes with the modeling data for each of the positive and negative classes, respectively. ", "_____no_output_____" ] ], [ [ "masterdf_rrt = pd.read_csv('RRT_modeling_table_13hr_raw.csv')\nmasterdf_nonrrt = pd.read_csv('NonRRT_modeling_table_13hr_raw.csv')", "_____no_output_____" ] ], [ [ "### Look at summary statistics for numeric columns for rrt & non-rrt tables (35 cols)", "_____no_output_____" ] ], [ [ "masterdf_rrt.columns", "_____no_output_____" ], [ "masterdf_rrt.describe().T", "_____no_output_____" ], [ "masterdf_nonrrt.describe().T", "_____no_output_____" ] ], [ [ "### We have a good amount of nan values in some columns. 
Lets plot the nan values to get a sense of how many there are", "_____no_output_____" ] ], [ [ "def show_df_nans(masterdf, collist=None):\n '''\n Create a data frame for features which may be nan.\n Make nan values be 1, numeric values be 0\n A heat map where dark squares/lines show where data is missing.\n '''\n if not collist:\n plot_cols = ['obese','DBP_mean', 'DBP_recent', 'SBP_mean', 'SBP_recent', 'HR_mean', 'HR_recent',\n 'MAP_mean', 'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',\n 'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent',\n 'CO2_mean', 'CO2_recent', 'GCS_mean', 'GCS_recent']\n else:\n plot_cols = collist \n \n df_viznan = pd.DataFrame(data = 1,index=masterdf.index,columns=plot_cols)\n df_viznan[~pd.isnull(masterdf[plot_cols])] = 0\n plt.figure(figsize=(10,8))\n plt.title('Dark values are nans')\n return sns.heatmap(df_viznan.astype(float))", "_____no_output_____" ], [ "# subset of numeric columns we'll use in modeling (sufficient data available)\nplot_cols_good = ['obese','DBP_mean', 'DBP_recent', 'SBP_mean', 'SBP_recent', \n 'MAP_mean', 'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',\n 'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent']", "_____no_output_____" ], [ "show_df_nans(masterdf_nonrrt) # show all columns that may have nans\n# show_df_nans(masterdf_nonrrt, plot_cols_good) # show the columns whch we plan to use for modeling", "_____no_output_____" ], [ "show_df_nans(masterdf_rrt)\n# show_df_nans(masterdf_rrt, plot_cols_good)", "_____no_output_____" ] ], [ [ "### Let's not use those columns where there are significant nans: drop HR (heart rate; we have pulse rate instead), CO2, and GCS, which leaves us with 28 features.", "_____no_output_____" ] ], [ [ "col_use = ['age', 'sex', 'obese', 'smoker', 'prev_rrt', 'on_iv', 'bu-nal', 'DBP_mean',\n 'DBP_recent', 'SBP_mean', 'SBP_recent',\n 'MAP_mean', 'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',\n 'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent',\n 'anticoagulants', 'narcotics', 'narc-ans', 'antipsychotics',\n 'chemo', 'dialysis', 'race']", "_____no_output_____" ], [ "X_rrt = masterdf_rrt[col_use]\nX_notrrt = masterdf_nonrrt[col_use]", "_____no_output_____" ] ], [ [ "### We need to deal with these nans before we can start modeling. (There should not be any nans in the modeling table)", "_____no_output_____" ] ], [ [ "# let's look at getting rid of the data rows where vitals signs are all nans\nvitals_cols = ['DBP_mean', 'DBP_recent', # take the mean of all the measurements & the most recently observed point\n 'SBP_mean', 'SBP_recent',\n 'MAP_mean', 'MAP_recent', # mean arterial pressure\n 'temp_mean', 'temp_recent',# temperature\n 'SPO2_mean', 'SPO2_recent',\n 'RR_mean', 'RR_recent', # respiratory rate\n 'pulse_mean', 'pulse_recent']", "_____no_output_____" ], [ "# Write out rows that are not all 0/NaNs across. 
(if all nans, remove this sample)\nX_rrt = X_rrt.loc[np.where(X_rrt.ix[:, vitals_cols].sum(axis=1, skipna=True)!=0)[0]]\nX_rrt = X_rrt.reset_index(drop=True)\nX_notrrt = X_notrrt.loc[np.where(X_notrrt.ix[:, vitals_cols].sum(axis=1, skipna=True)!=0)[0]]\nX_notrrt = X_notrrt.reset_index(drop=True)", "_____no_output_____" ], [ "# if 'obese' is Nan, then set the patient to be not obese.\nX_rrt.loc[np.where(pd.isnull(X_rrt['obese']))[0], 'obese'] = 0\nX_notrrt.loc[np.where(pd.isnull(X_notrrt['obese']))[0], 'obese'] = 0", "_____no_output_____" ] ], [ [ "### Let's see how X_rrt & X_notrrt look", "_____no_output_____" ] ], [ [ "show_df_nans(X_rrt, vitals_cols)", "_____no_output_____" ], [ "show_df_nans(X_notrrt, vitals_cols)", "_____no_output_____" ] ], [ [ "### Some columns have significant missing values.", "_____no_output_____" ] ], [ [ "print X_rrt[['pulse_mean', 'pulse_recent']].describe().T\nprint \"size of X_rrt: \"+str(len(X_rrt))\nprint\nprint X_notrrt[['pulse_mean', 'pulse_recent']].describe().T\nprint \"size of X_notrrt: \" + str(len(X_notrrt))", "_____no_output_____" ] ], [ [ "### We have plenty of samples for the non-RRT case. We can delete off rows with values that are missing without concern that we'll lose negtive examples for RRT events for modeling.", "_____no_output_____" ] ], [ [ "# DROP THE ROWS WHERE PULSE IS NAN\nX_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['pulse_mean'])!=True)[0]]\nX_notrrt = X_notrrt.reset_index(drop=True)\n# And similarly for all rows with significant nans:\nX_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['RR_mean'])!=True)[0]]\nX_notrrt = X_notrrt.reset_index(drop=True)\nX_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['MAP_mean'])!=True)[0]]\nX_notrrt = X_notrrt.reset_index(drop=True)\nX_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['temp_mean'])!=True)[0]]\nX_notrrt = X_notrrt.reset_index(drop=True)\nX_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['SPO2_mean'])!=True)[0]]\nX_notrrt = X_notrrt.reset_index(drop=True)\n", "_____no_output_____" ], [ "all_cols = ['age', 'sex', 'obese', 'smoker', 'prev_rrt', 'on_iv', 'bu-nal',\n 'DBP_mean', 'DBP_recent', 'SBP_mean', 'SBP_recent', 'MAP_mean',\n 'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',\n 'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent',\n 'anticoagulants', 'narcotics', 'narc-ans', 'antipsychotics',\n 'chemo', 'dialysis', 'race']", "_____no_output_____" ], [ "show_df_nans(X_notrrt, all_cols)", "_____no_output_____" ] ], [ [ "### Still need to deal with nans in X_rrt. 
Temp & pulse are the most of concern", "_____no_output_____" ] ], [ [ "X_rrt[['temp_mean', 'pulse_mean']].describe().T", "_____no_output_____" ] ], [ [ "### We'll impute missing values in X_rrt after combining that data with X_notrrt, and use the mean from each column after merging to fill the values.", "_____no_output_____" ] ], [ [ "# add labels to indicate positive or negative class\nX_rrt['label'] = 1\nX_notrrt['label'] = 0\n\n# Combine the tables\nXY = pd.concat([X_rrt, X_notrrt])\nXY = XY.reset_index(drop=True)\ny = XY.pop('label')\nX = XY\n\n# Fill nans with mean of columns\nX = X.fillna(X.mean())", "_____no_output_____" ], [ "# map genders to 1/0\nX['is_male'] = X['sex'].map({'M': 1, 'F': 0})\nX.pop('sex')", "_____no_output_____" ], [ "X.race.value_counts()", "_____no_output_____" ], [ "# we won't use race in modeling\nX.pop('race')", "_____no_output_____" ], [ "show_df_nans(X, vitals_cols)", "_____no_output_____" ], [ "X.columns", "_____no_output_____" ], [ "X.describe().T", "_____no_output_____" ] ], [ [ "# Modeling", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)", "_____no_output_____" ], [ "print len(y_train)\nprint len(y_train[y_train]==1)", "_____no_output_____" ], [ "len(y_test[y_test==1])", "_____no_output_____" ], [ "Xscaled = StandardScaler().fit_transform(X)\nXs_train, Xs_test, ys_train, ys_test = train_test_split(Xscaled, y, test_size=0.3)", "_____no_output_____" ] ], [ [ "## Gradient Boosting Classifier - Unscaled (with partial dependence plots below)\n", "_____no_output_____" ] ], [ [ "paramGrid = {'n_estimators': [100, 200, 300],\n 'learning_rate': [0.1, 0.05, 0.01, 0.2],\n 'max_depth': [3, 4, 5, 6],\n 'min_samples_leaf': [1, 2],\n 'subsample': [0.75, 1.0, 0.85],\n 'loss': ['deviance'],\n 'max_features': [None, 'auto']\n }\n\ngs = GridSearchCV(GradientBoostingClassifier(), \n param_grid=paramGrid, \n scoring='roc_auc', \n n_jobs=-1, \n cv=5, \n verbose=10)\n\ngs.fit(X_train, y_train)\n\n# Result:\n# GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance',\n# max_depth=3, max_features=None, max_leaf_nodes=None,\n# min_samples_leaf=2, min_samples_split=2,\n# min_weight_fraction_leaf=0.0, n_estimators=300,\n# presort='auto', random_state=None, subsample=0.75, verbose=0,\n# warm_start=False)", "_____no_output_____" ] ], [ [ "## Grid search for best GBC - Scaled (with partial dependece plots below)", "_____no_output_____" ] ], [ [ "paramGrid = {'n_estimators': [100, 200, 300],\n 'learning_rate': [0.1, 0.05, 0.01, 0.2],\n 'max_depth': [3, 4, 5, 6],\n 'min_samples_leaf': [1, 2],\n 'subsample': [0.75, 1.0, 0.85],\n 'loss': ['deviance'],\n 'max_features': [None, 'auto']\n }\n\ngss = GridSearchCV(GradientBoostingClassifier(), \n param_grid=paramGrid, \n scoring='roc_auc', \n n_jobs=-1, \n cv=5, \n verbose=10)\n\ngss.fit(Xs_train, ys_train)\n\n# Result:\n# GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance',\n# max_depth=3, max_features='auto', max_leaf_nodes=None,\n# min_samples_leaf=1, min_samples_split=2,\n# min_weight_fraction_leaf=0.0, n_estimators=300,\n# presort='auto', random_state=None, subsample=0.75, verbose=0,\n# warm_start=False)\n", "_____no_output_____" ] ], [ [ "## How different are best estimators for scaled & unscaled data?", "_____no_output_____" ] ], [ [ "gbc = GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance',\n max_depth=3, max_features=None, max_leaf_nodes=None,\n min_samples_leaf=2, min_samples_split=2,\n 
min_weight_fraction_leaf=0.0, n_estimators=300,\n presort='auto', random_state=None, subsample=0.75, verbose=0,\n warm_start=False)\ngbc.fit(X_train, y_train)\nscore_printout(X_test, y_test, gbc)\nprint classification_report(y_test, gbc.predict(X_test))\nconfusion_matrix(y_test, gbc.predict(X_test))", "_____no_output_____" ], [ "# gbcs = gss.best_estimator_\n# gbcs.fit(Xs_train, ys_train)\n# score_printout(Xs_test, ys_test, gbc)\n# print classification_report(ys_test, gbcs.predict(Xs_test))\n# confusion_matrix(ys_test, gbcs.predict(Xs_test))", "_____no_output_____" ] ], [ [ "### Use unscaled data -- better results & easier interpretability", "_____no_output_____" ] ], [ [ "# Let's plot the confusion matrix so it's a little clearer\nplt.figure()\nsns.set(font_scale=1.5)\nsns.heatmap(confusion_matrix(y_test, gbc.predict(X_test)), annot=True, fmt='d')", "_____no_output_____" ] ], [ [ "## Let's look at the most important features in this model", "_____no_output_____" ] ], [ [ "gbcRankedFeatures = sorted(zip(X.columns, gbc.feature_importances_), \n key=lambda pair: pair[1], \n reverse=False)", "_____no_output_____" ], [ "plt.figure()\nmake_feature_importance_plot(gbcRankedFeatures, 27) # note - we have 27 features currently\n", "_____no_output_____" ] ], [ [ "### Let's look a partial dependence plots\n#### If the partial dependence is high, then the model for that given value of that given feature is more likely to predict an rrt result.\n#### Will not show more complex interactions -- if importance is high but partial dependence is marginal, this may be due to interactions", "_____no_output_____" ] ], [ [ "fig, axs = plot_partial_dependence(gbc, X_train, range(0, 6, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)\nplt.subplots_adjust(top=0.9)", "_____no_output_____" ], [ "fig, axs = plot_partial_dependence(gbc, X_train, range(6, 12, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)\nplt.subplots_adjust(top=0.9)", "_____no_output_____" ], [ "fig, axs = plot_partial_dependence(gbc, X_train, range(12, 18, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)\nplt.subplots_adjust(top=0.9)", "_____no_output_____" ], [ "fig, axs = plot_partial_dependence(gbc, X_train, range(18, 24, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)\nplt.subplots_adjust(top=0.9)", "_____no_output_____" ], [ "fig, axs = plot_partial_dependence(gbc, X_train, range(24, 27, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)\nplt.subplots_adjust(top=0.9)", "_____no_output_____" ] ], [ [ "## Use 3-D plot to investigate feature interactions for weak partial dependence plots... 
(weak effect may be masked by stronger interaction with other features)", "_____no_output_____" ] ], [ [ "names = X_train.columns\nzip(range(len(names)), names)", "_____no_output_____" ], [ "from mpl_toolkits.mplot3d import Axes3D", "_____no_output_____" ], [ "# not all features may work for this viz\nfig = plt.figure(figsize=(10,8))\ntarget_feature = (16, 18) # <-- change the two numbers here to determine what to plot up\npdp, (x_axis, y_axis) = partial_dependence(gbc, target_feature, X=X_train, grid_resolution=50)\nXX, YY = np.meshgrid(x_axis, y_axis)\nZ = pdp.T.reshape(XX.shape).T\nax = Axes3D(fig)\nsurf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)\nax.set_xlabel(names[target_feature[0]])\nax.set_ylabel(names[target_feature[1]])\nax.set_zlabel('Partial dependence')\n# pretty init view\nax.view_init(elev=22, azim=122)\nplt.colorbar(surf)\nplt.suptitle('')\nplt.subplots_adjust(top=0.9)\n\nplt.show()", "_____no_output_____" ] ], [ [ "## From Model to Risk Score\n", "_____no_output_____" ] ], [ [ "# Return probabilities from the model, rather than predictions\ny_proba = gbc.predict_proba(X_test)", "_____no_output_____" ], [ "# note - y_proba contains probabilities for class 0 in column 0 & probabilities for class 1 in column 1.\n# we're only interested in the probability for class 1\ny_proba", "_____no_output_____" ], [ "pred_probs = pd.DataFrame(data=y_proba[:,1], columns =[\"model_probability_of_rrt\"], index = X_test.index)", "_____no_output_____" ], [ "X_test.head()", "_____no_output_____" ], [ "y_test.head()", "_____no_output_____" ], [ "pred_probs['model_probability_of_rrt'] = pd.to_numeric(pred_probs.model_probability_of_rrt)", "_____no_output_____" ], [ "pred_probs.hist(bins = 20, xlabelsize = 16, ylabelsize=16)\nplt.tick_params(labelsize=14)\nplt.title(\"Model output probabilities\")\nplt.ylabel('Count', fontsize=14)", "_____no_output_____" ] ], [ [ "### We see that although we see more values close to 0 and 1, we also see that the model outputs a full range of probabilities, which would translate well into risk scores.\n", "_____no_output_____" ], [ "### Patient Risk Score = model probability * 10\nThe score should be rounded to whole values to give the sense that this is not an exact measure.", "_____no_output_____" ] ], [ [ "pred_probs['score'] = pred_probs['model_probability_of_rrt'].apply(lambda x: int(round(x*10.0, 0)))", "_____no_output_____" ], [ "pred_probs.head()", "_____no_output_____" ], [ "pred_probs.score.value_counts()", "_____no_output_____" ] ], [ [ "### Save model", "_____no_output_____" ] ], [ [ "from sklearn.externals import joblib\n# joblib.dump(gbc, 'gbc_base.pkl') # note - if left uncompressed, this writes a whole lot of supporting numpy files.\njoblib.dump(gbc, 'my_trained_model.compressed', compress=True) \n\n# to unpack: joblib.load(filename)", "_____no_output_____" ] ], [ [ "### Save modeling table", "_____no_output_____" ] ], [ [ "# Create combined data frame including modeling table, rrt label, and proability associated with result\ndf = pd.concat([X_test, pred_probs, y_test],axis=1, join_axes=[X_test.index])", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "# May need to rename columns to get rid of dash in name...\ndf.rename(columns={'bu-nal': 'bu_nal', 'narc-ans': 'narc_ans'}, inplace=True)\ndf.to_csv('ModelingTable_with_results.csv')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
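The modeling record above turns classifier probabilities into a 0-10 risk score. Since its data is not shareable, the following sketch reproduces just that step on synthetic data: `make_classification` stands in for the RRT modeling table, and the hyperparameters echo the grid-search result in the record. The record's `sklearn.cross_validation` import is long deprecated; `model_selection` is the current location of `train_test_split`.

```python
# Self-contained sketch of the probability-to-risk-score step from the
# record above, run on synthetic data (the dataset here is invented
# for illustration; only the scoring convention follows the notebook).
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=2000, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)

gbc = GradientBoostingClassifier(learning_rate=0.05, n_estimators=300,
                                 subsample=0.75, random_state=0)
gbc.fit(X_train, y_train)

# Risk score = class-1 probability scaled to 0-10 and rounded,
# matching the record's convention.
proba = gbc.predict_proba(X_test)[:, 1]
score = np.rint(proba * 10).astype(int)
print(score[:10])
```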
cb614abe6d0f21ec2f8d82c58a31f36e053b7fbb
68204
ipynb
Jupyter Notebook
doc/source/Example 1.ipynb
sadraddini/pypolycontain
807846634899fa3de543fe09178ff7bda4386541
[ "AFL-1.1" ]
10
2019-01-09T20:09:27.000Z
2021-12-02T15:16:40.000Z
doc/source/Example 1.ipynb
sadraddini/pypolycontain
807846634899fa3de543fe09178ff7bda4386541
[ "AFL-1.1" ]
3
2019-03-16T01:56:35.000Z
2020-07-31T20:32:06.000Z
doc/source/Example 1.ipynb
sadraddini/pypolycontain
807846634899fa3de543fe09178ff7bda4386541
[ "AFL-1.1" ]
1
2020-07-30T23:16:11.000Z
2020-07-30T23:16:11.000Z
323.241706
21632
0.930635
[ [ [ "# Example 1: The Necessity Gap for Minkowski Sums", "_____no_output_____" ], [ "In this example, we look for an instance of the following problem. We are given two H-polytopes $\mathbb{A}, \mathbb{B}$. We manually find the H-polytope form of $\mathbb{C}:=\mathbb{A} \oplus \mathbb{B}$. Then we check $\mathbb{C} \subseteq \mathbb{A} \oplus \mathbb{B} \subseteq \mathbb{C}$ using containment arguments. \nLet $\mathbb{A}$ be a triangle.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pypolycontain as pp\n\nH=np.array([[1,1],[-1,1],[0,-1]])\nh=np.array([[1,1,0]]).reshape(3,1)\nA=pp.H_polytope(H,h)\n\npp.visualize([A],title=r'$\mathbb{A}$')", "_____no_output_____" ] ], [ [ "And let $\mathbb{B}$ be a tiny rectangle at the bottom of $\mathbb{A}$ as follows.", "_____no_output_____" ] ], [ [ "e=1/3\nH=np.array([[1,0],[-1,0],[0,-1],[0,1]])\nh=np.array([[e,e,1,0]]).reshape(4,1)\nB=pp.H_polytope(H,h,color='green')\npp.visualize([A,B],title=r'$\mathbb{A}$ (top) and $\mathbb{B}$ (bottom)')", "_____no_output_____" ] ], [ [ "The H-polytope form of the Minkowski sum $\mathbb{A} \oplus \mathbb{B}$ can be easily found. We call this H-polytope $C_H$.", "_____no_output_____" ] ], [ [ "H=np.array([[1,0],[-1,0],[0,-1],[1,1],[-1,1],[0,1]])\nh=np.array([[1+e,1+e,1,1+e,1+e,1]]).reshape(6,1)\np_sum=pp.H_polytope(H,h,color='purple')\npp.visualize([p_sum],title=r\"$\mathbb{A} \oplus \mathbb{B}$\")", "_____no_output_____" ] ], [ [ "We can also call the AH-polytope form of $\mathbb{A}\oplus \mathbb{B}$.", "_____no_output_____" ] ], [ [ "C=pp.minkowski_sum(A,B)\npp.visualize([C],title=r\"$\mathbb{A} \oplus \mathbb{B}$\")", "_____no_output_____" ] ], [ [ "Now we run the following experiment. We find the largest\n$$\n\begin{array}{lll}\n\alpha^*_N = & \max. & \alpha \\\n& \text{subject to} & \alpha C \subseteq_N (A \oplus B)\n\end{array}\n$$\nwhere for $N=-1$ the condition is necessary and sufficient, and for $N=0$ it is only sufficient. \n\nWhat we expect is that for the necessary and sufficient condition, we obtain the largest possible $\alpha^*$, which is 1. However, as we drop necessity, we are going to observe conservativeness, in the sense that $$\alpha_i \le 1, i \ge 0.$$ ", "_____no_output_____" ], [ "### Maximizing $\alpha$ with subset encoding: linear program", "_____no_output_____" ], [ "We import the `mathematicalprogram` module from `pydrake`. As the optimization solver, we import the Gurobi bindings of pydrake, but other solvers may also be used - they are often slower. ", "_____no_output_____" ] ], [ [ "pp.necessity_gap_k(p_sum,C,[0,1,2])", "\n \n ================================================== \n ================================================== \n\t \t Computing Necessity Gaps\n================================================== \n ==================================================\nk \t Theta.shape \t delta(X,Y,C)\n0 \t (7, 9) \t 0.0\n1 \t (7, 12) \t 0.18557078904567903\n2 \t (7, 7) \t 0.2142857142857152\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
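For readers who want to reproduce the pypolycontain example above without stepping through every cell, the following condenses it into one script. It only re-uses calls that appear in the record (`pp.H_polytope`, `pp.minkowski_sum`, `pp.visualize`), so nothing here is new API.

```python
# Condensed re-run of the construction in the record above.
import numpy as np
import pypolycontain as pp

# Triangle A, as the intersection H x <= h
H = np.array([[1, 1], [-1, 1], [0, -1]])
h = np.array([[1, 1, 0]]).reshape(3, 1)
A = pp.H_polytope(H, h)

# Small box B at the bottom of A
e = 1 / 3
H = np.array([[1, 0], [-1, 0], [0, -1], [0, 1]])
h = np.array([[e, e, 1, 0]]).reshape(4, 1)
B = pp.H_polytope(H, h, color='green')

# AH-polytope form of the Minkowski sum A + B
C = pp.minkowski_sum(A, B)
pp.visualize([C], title=r"$\mathbb{A} \oplus \mathbb{B}$")
```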
cb6160f81f96bcc76084f83a4e367dce6e3f76d6
29118
ipynb
Jupyter Notebook
examples/grain_bound/tutorial.ipynb
neka-nat/combo3
666a116dfece71e6236291e89ea2ab4d6db0ead9
[ "MIT" ]
21
2019-07-04T17:30:27.000Z
2022-03-26T14:27:32.000Z
examples/grain_bound/tutorial.ipynb
neka-nat/combo3
666a116dfece71e6236291e89ea2ab4d6db0ead9
[ "MIT" ]
2
2020-03-01T01:42:25.000Z
2020-03-01T02:59:37.000Z
examples/grain_bound/tutorial.ipynb
neka-nat/combo3
666a116dfece71e6236291e89ea2ab4d6db0ead9
[ "MIT" ]
13
2019-08-07T14:08:04.000Z
2022-03-16T00:51:58.000Z
40.554318
147
0.549248
[ [ [ "import numpy as np\nimport pickle\nimport scipy\nimport combo\nimport os\nimport urllib\nimport ssl\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "ssl._create_default_https_context = ssl._create_unverified_context", "_____no_output_____" ], [ "def download():\n    if not os.path.exists('data/s5-210.csv'):\n\n        if not os.path.exists('data'):\n            os.mkdir('data')\n\n        print('Downloading...')\n        with urllib.request.urlopen(\"http://www.tsudalab.org/files/s5-210.csv\") as response, open('data/s5-210.csv', 'wb') as out_file:\n            out_file.write(response.read())\n        print('Done')", "_____no_output_____" ], [ "def load_data():\n    download()\n    A = np.asarray(np.loadtxt('data/s5-210.csv', skiprows=1, delimiter=','))\n    X = A[:, 0:3]\n    t = -A[:, 3]\n    return X, t", "_____no_output_____" ], [ "# Load the data.\n# X is the N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.\n# t is the N-dimensional vector that represents the corresponding negative energy of the search candidates.\n# (It is of course unknown in practice.)\nX, t = load_data()\n\n# Normalize the mean and standard deviation along each column of X to 0 and 1, respectively\nX = combo.misc.centering(X)", "_____no_output_____" ], [ "# Declare the class for calling the simulator.\n# In this tutorial, we simply refer to the value of t.\n# If you want to apply combo to other problems, you have to customize this class.\nclass simulator:\n    def __init__(self):\n        _, self.t = load_data()\n\n    def __call__(self, action):\n        return self.t[action]", "_____no_output_____" ], [ "# Design of the policy\n\n# Declare the policy by\npolicy = combo.search.discrete.policy(test_X=X)\n# test_X is the set of candidates, represented by a numpy.array.\n# Each row vector represents the feature vector of the corresponding candidate\n\n# set the seed parameter\npolicy.set_seed(0)", "_____no_output_____" ], [ "# If you want to perform an initial random search before starting the Bayesian optimization,\n# the random sampling is performed by\n\nres = policy.random_search(max_num_probes=20, simulator=simulator())\n# Input:\n# max_num_probes: number of random search steps\n# simulator = simulator\n# output: combo.search.discrete.results (class)\n\n\n# single query Bayesian search\n# The single query version of COMBO is performed by\nres = policy.bayes_search(max_num_probes=80, simulator=simulator(), score='TS',\n                          interval=20, num_rand_basis=5000)\n\n# Input\n# max_num_probes: number of search steps by Bayesian optimization\n# simulator: the class of simulator which is defined above\n# score: the type of acquisition function. TS, EI and PI are available\n# interval: the timing for learning the hyper parameters.\n# In this case, the hyper parameters are learned every 20 steps\n# If you set a negative value for interval, the hyper parameter learning is not performed\n# If you set interval to zero, the hyper parameter learning is performed only at the first step\n# num_rand_basis: the number of basis functions.
If you choose 0, ordinary Gaussian process runs", "0001-th step: f(x) = -1.070602 (action=15673)\n current best f(x) = -1.070602 (best action=15673) \n\n0002-th step: f(x) = -1.153410 (action=16489)\n current best f(x) = -1.070602 (best action=15673) \n\n0003-th step: f(x) = -0.981899 (action=7792)\n current best f(x) = -0.981899 (best action=7792) \n\n0004-th step: f(x) = -1.066080 (action=13752)\n current best f(x) = -0.981899 (best action=7792) \n\n0005-th step: f(x) = -1.043272 (action=9023)\n current best f(x) = -0.981899 (best action=7792) \n\n0006-th step: f(x) = -1.125822 (action=1470)\n current best f(x) = -0.981899 (best action=7792) \n\n0007-th step: f(x) = -1.070720 (action=14404)\n current best f(x) = -0.981899 (best action=7792) \n\n0008-th step: f(x) = -1.091624 (action=14031)\n current best f(x) = -0.981899 (best action=7792) \n\n0009-th step: f(x) = -0.963795 (action=5734)\n current best f(x) = -0.963795 (best action=5734) \n\n0010-th step: f(x) = -0.989538 (action=3111)\n current best f(x) = -0.963795 (best action=5734) \n\n0011-th step: f(x) = -1.135007 (action=1192)\n current best f(x) = -0.963795 (best action=5734) \n\n0012-th step: f(x) = -1.003954 (action=8912)\n current best f(x) = -0.963795 (best action=5734) \n\n0013-th step: f(x) = -0.994601 (action=1941)\n current best f(x) = -0.963795 (best action=5734) \n\n0014-th step: f(x) = -0.971108 (action=4051)\n current best f(x) = -0.963795 (best action=5734) \n\n0015-th step: f(x) = -1.096091 (action=14468)\n current best f(x) = -0.963795 (best action=5734) \n\n0016-th step: f(x) = -0.982784 (action=8562)\n current best f(x) = -0.963795 (best action=5734) \n\n0017-th step: f(x) = -1.052590 (action=16185)\n current best f(x) = -0.963795 (best action=5734) \n\n0018-th step: f(x) = -1.079737 (action=17654)\n current best f(x) = -0.963795 (best action=5734) \n\n0019-th step: f(x) = -1.025116 (action=4470)\n current best f(x) = -0.963795 (best action=5734) \n\n0020-th step: f(x) = -1.048733 (action=1022)\n current best f(x) = -0.963795 (best action=5734) \n\nStart the initial hyper parameter searching ...\nDone\n\nStart the hyper parameter learning ...\n0-th epoch, marginal likelihood -26.925099547547248\n50-th epoch, marginal likelihood -28.11690014926311\n100-th epoch, marginal likelihood -28.717048280838075\n150-th epoch, marginal likelihood -29.037240644509914\n200-th epoch, marginal likelihood -29.243814308007245\n250-th epoch, marginal likelihood -29.40499052748489\n300-th epoch, marginal likelihood -29.543375062782722\n350-th epoch, marginal likelihood -29.66433788563112\n400-th epoch, marginal likelihood -29.76863116401298\n450-th epoch, marginal likelihood -29.856696784555368\n500-th epoch, marginal likelihood -29.929612908422662\nDone\n\n Parameters of Gaussian kernel \n \n width = [3.]\n scale = 1.0\n scale2 = 1.0\n \n\n0021-th step: f(x) = -1.071398 (action=399)\n current best f(x) = -0.963795 (best action=5734) \n\n0022-th step: f(x) = -1.014235 (action=10613)\n current best f(x) = -0.963795 (best action=5734) \n\n0023-th step: f(x) = -0.964418 (action=5068)\n current best f(x) = -0.963795 (best action=5734) \n\n0024-th step: f(x) = -1.121763 (action=9)\n current best f(x) = -0.963795 (best action=5734) \n\n0025-th step: f(x) = -1.011912 (action=4944)\n current best f(x) = -0.963795 (best action=5734) \n\n0026-th step: f(x) = -1.099298 (action=15616)\n current best f(x) = -0.963795 (best action=5734) \n\n0027-th step: f(x) = -2.832830 (action=5994)\n current best f(x) = -0.963795 (best action=5734) \n\n0028-th 
step: f(x) = -0.997543 (action=187)\n current best f(x) = -0.963795 (best action=5734) \n\n0029-th step: f(x) = -1.039733 (action=3374)\n current best f(x) = -0.963795 (best action=5734) \n\n0030-th step: f(x) = -1.037882 (action=10372)\n current best f(x) = -0.963795 (best action=5734) \n\n0031-th step: f(x) = -0.973614 (action=9434)\n current best f(x) = -0.963795 (best action=5734) \n\n0032-th step: f(x) = -1.045038 (action=2584)\n current best f(x) = -0.963795 (best action=5734) \n\n0033-th step: f(x) = -1.024711 (action=3907)\n current best f(x) = -0.963795 (best action=5734) \n\n0034-th step: f(x) = -0.977174 (action=11967)\n current best f(x) = -0.963795 (best action=5734) \n\n0035-th step: f(x) = -1.016829 (action=12521)\n current best f(x) = -0.963795 (best action=5734) \n\n0036-th step: f(x) = -1.080353 (action=17450)\n current best f(x) = -0.963795 (best action=5734) \n\n0037-th step: f(x) = -1.104096 (action=8370)\n current best f(x) = -0.963795 (best action=5734) \n\n0038-th step: f(x) = -1.024638 (action=4748)\n current best f(x) = -0.963795 (best action=5734) \n\n0039-th step: f(x) = -1.031309 (action=8424)\n current best f(x) = -0.963795 (best action=5734) \n\n0040-th step: f(x) = -1.029702 (action=3)\n current best f(x) = -0.963795 (best action=5734) \n\nStart the initial hyper parameter searching ...\nDone\n\nStart the hyper parameter learning ...\n0-th epoch, marginal likelihood 59.88367175417103\n50-th epoch, marginal likelihood 44.165465329968285\n100-th epoch, marginal likelihood 34.025681547881035\n150-th epoch, marginal likelihood 27.430721608506595\n200-th epoch, marginal likelihood 23.01519307357777\n250-th epoch, marginal likelihood 19.974841352169747\n300-th epoch, marginal likelihood 17.826810155440313\n350-th epoch, marginal likelihood 16.275342790401538\n400-th epoch, marginal likelihood 15.134111641472504\n450-th epoch, marginal likelihood 14.28137203301964\n500-th epoch, marginal likelihood 13.634591693133501\nDone\n\n Parameters of Gaussian kernel \n \n width = [0.5934452]\n scale = 0.05915335807789646\n scale2 = 0.003499119771891839\n \n\n0041-th step: f(x) = -1.043198 (action=14947)\n current best f(x) = -0.963795 (best action=5734) \n\n0042-th step: f(x) = -1.070070 (action=17276)\n current best f(x) = -0.963795 (best action=5734) \n\n0043-th step: f(x) = -1.201556 (action=15533)\n current best f(x) = -0.963795 (best action=5734) \n\n0044-th step: f(x) = -1.034992 (action=9798)\n current best f(x) = -0.963795 (best action=5734) \n\n0045-th step: f(x) = -1.423841 (action=3025)\n current best f(x) = -0.963795 (best action=5734) \n\n0046-th step: f(x) = -1.083331 (action=13248)\n current best f(x) = -0.963795 (best action=5734) \n\n0047-th step: f(x) = -0.997708 (action=3033)\n current best f(x) = -0.963795 (best action=5734) \n\n0048-th step: f(x) = -1.057578 (action=14948)\n current best f(x) = -0.963795 (best action=5734) \n\n0049-th step: f(x) = -1.004524 (action=7770)\n current best f(x) = -0.963795 (best action=5734) \n\n0050-th step: f(x) = -1.005654 (action=2960)\n current best f(x) = -0.963795 (best action=5734) \n\n0051-th step: f(x) = -0.965032 (action=3811)\n current best f(x) = -0.963795 (best action=5734) \n\n0052-th step: f(x) = -1.314750 (action=15000)\n current best f(x) = -0.963795 (best action=5734) \n\n0053-th step: f(x) = -0.963975 (action=8974)\n current best f(x) = -0.963795 (best action=5734) \n\n0054-th step: f(x) = -0.997418 (action=2111)\n current best f(x) = -0.963795 (best action=5734) \n\n0055-th step: f(x) = -1.062388 
(action=5983)\n current best f(x) = -0.963795 (best action=5734) \n\n0056-th step: f(x) = -1.371617 (action=15009)\n current best f(x) = -0.963795 (best action=5734) \n\n0057-th step: f(x) = -1.052755 (action=9973)\n current best f(x) = -0.963795 (best action=5734) \n\n0058-th step: f(x) = -1.087914 (action=13203)\n current best f(x) = -0.963795 (best action=5734) \n\n0059-th step: f(x) = -1.018920 (action=11788)\n current best f(x) = -0.963795 (best action=5734) \n\n0060-th step: f(x) = -1.082635 (action=7501)\n current best f(x) = -0.963795 (best action=5734) \n\nStart the initial hyper parameter searching ...\nDone\n\nStart the hyper parameter learning ...\n0-th epoch, marginal likelihood 135.8139753967975\n50-th epoch, marginal likelihood 96.99846825120406\n100-th epoch, marginal likelihood 71.02220850124107\n150-th epoch, marginal likelihood 53.538680461205495\n200-th epoch, marginal likelihood 41.48374887405472\n250-th epoch, marginal likelihood 32.97974523373494\n300-th epoch, marginal likelihood 26.854483541078444\n350-th epoch, marginal likelihood 22.362422854481103\n400-th epoch, marginal likelihood 19.019177116277014\n450-th epoch, marginal likelihood 16.50173465568532\n500-th epoch, marginal likelihood 14.588553962913224\nDone\n\n Parameters of Gaussian kernel \n \n width = [0.74096603]\n scale = 0.3934211376942155\n scale2 = 0.15478019158461087\n \n\n0061-th step: f(x) = -1.142549 (action=13034)\n current best f(x) = -0.963795 (best action=5734) \n\n0062-th step: f(x) = -1.081112 (action=16835)\n current best f(x) = -0.963795 (best action=5734) \n\n0063-th step: f(x) = -0.990657 (action=999)\n current best f(x) = -0.963795 (best action=5734) \n\n0064-th step: f(x) = -0.999815 (action=702)\n current best f(x) = -0.963795 (best action=5734) \n\n0065-th step: f(x) = -1.067548 (action=17967)\n current best f(x) = -0.963795 (best action=5734) \n\n0066-th step: f(x) = -1.013053 (action=36)\n current best f(x) = -0.963795 (best action=5734) \n\n0067-th step: f(x) = -0.967039 (action=7897)\n current best f(x) = -0.963795 (best action=5734) \n\n0068-th step: f(x) = -1.044369 (action=11370)\n current best f(x) = -0.963795 (best action=5734) \n\n0069-th step: f(x) = -1.133438 (action=17955)\n current best f(x) = -0.963795 (best action=5734) \n\n0070-th step: f(x) = -0.985278 (action=10656)\n current best f(x) = -0.963795 (best action=5734) \n\n0071-th step: f(x) = -1.011144 (action=11395)\n current best f(x) = -0.963795 (best action=5734) \n\n0072-th step: f(x) = -0.979780 (action=4292)\n current best f(x) = -0.963795 (best action=5734) \n\n0073-th step: f(x) = -1.005694 (action=2996)\n current best f(x) = -0.963795 (best action=5734) \n\n0074-th step: f(x) = -1.074548 (action=2761)\n current best f(x) = -0.963795 (best action=5734) \n\n0075-th step: f(x) = -0.991785 (action=7292)\n current best f(x) = -0.963795 (best action=5734) \n\n0076-th step: f(x) = -0.994784 (action=7214)\n current best f(x) = -0.963795 (best action=5734) \n\n0077-th step: f(x) = -1.000595 (action=2479)\n current best f(x) = -0.963795 (best action=5734) \n\n0078-th step: f(x) = -1.071346 (action=12024)\n current best f(x) = -0.963795 (best action=5734) \n\n0079-th step: f(x) = -1.037927 (action=14091)\n current best f(x) = -0.963795 (best action=5734) \n\n0080-th step: f(x) = -1.012435 (action=12601)\n current best f(x) = -0.963795 (best action=5734) \n\nStart the initial hyper parameter searching ...\nDone\n\nStart the hyper parameter learning ...\n0-th epoch, marginal likelihood 192.77078380638835\n50-th epoch, 
marginal likelihood 137.49859768163117\n100-th epoch, marginal likelihood 101.54305913688671\n150-th epoch, marginal likelihood 78.08998966259122\n200-th epoch, marginal likelihood 57.89444281720614\n250-th epoch, marginal likelihood 42.279301984799616\n300-th epoch, marginal likelihood 31.73915917546688\n350-th epoch, marginal likelihood 24.91227589966323\n400-th epoch, marginal likelihood 18.267204960871567\n450-th epoch, marginal likelihood 12.485394386101888\n500-th epoch, marginal likelihood 8.355749043954702\nDone\n\n Parameters of Gaussian kernel \n \n width = [0.84606992]\n scale = 0.3471300920987517\n scale2 = 0.12049930084048782\n \n\n0081-th step: f(x) = -1.123209 (action=17976)\n current best f(x) = -0.963795 (best action=5734) \n\n0082-th step: f(x) = -1.137505 (action=15992)\n current best f(x) = -0.963795 (best action=5734) \n\n0083-th step: f(x) = -1.081899 (action=17907)\n current best f(x) = -0.963795 (best action=5734) \n\n0084-th step: f(x) = -1.000543 (action=13541)\n current best f(x) = -0.963795 (best action=5734) \n\n0085-th step: f(x) = -0.996815 (action=8990)\n current best f(x) = -0.963795 (best action=5734) \n\n0086-th step: f(x) = -0.991430 (action=10322)\n current best f(x) = -0.963795 (best action=5734) \n\n0087-th step: f(x) = -1.018718 (action=2382)\n current best f(x) = -0.963795 (best action=5734) \n\n0088-th step: f(x) = -0.999809 (action=10877)\n current best f(x) = -0.963795 (best action=5734) \n\n0089-th step: f(x) = -1.384209 (action=3006)\n current best f(x) = -0.963795 (best action=5734) \n\n0090-th step: f(x) = -1.009738 (action=3779)\n current best f(x) = -0.963795 (best action=5734) \n\n0091-th step: f(x) = -1.009358 (action=1849)\n current best f(x) = -0.963795 (best action=5734) \n\n0092-th step: f(x) = -1.034998 (action=8965)\n current best f(x) = -0.963795 (best action=5734) \n\n0093-th step: f(x) = -0.963759 (action=5698)\n current best f(x) = -0.963759 (best action=5698) \n\n0094-th step: f(x) = -0.968503 (action=4069)\n current best f(x) = -0.963759 (best action=5698) \n\n0095-th step: f(x) = -1.010393 (action=3588)\n current best f(x) = -0.963759 (best action=5698) \n\n0096-th step: f(x) = -0.996779 (action=8954)\n current best f(x) = -0.963759 (best action=5698) \n\n0097-th step: f(x) = -0.992269 (action=8398)\n current best f(x) = -0.963759 (best action=5698) \n\n0098-th step: f(x) = -1.110322 (action=16206)\n current best f(x) = -0.963759 (best action=5698) \n\n0099-th step: f(x) = -0.988023 (action=4381)\n current best f(x) = -0.963759 (best action=5698) \n\n0100-th step: f(x) = -1.003655 (action=1183)\n current best f(x) = -0.963759 (best action=5698) \n\n" ], [ "# The result of searching is summarized in the class combo.search.discrete.results.history()\n# res.fx: observed negative energy at each step\n# res.chosed_actions: history of choosed actions\n# fbest, best_action= res.export_all_sequence_best_fx(): current best fx and current best action \n# that has been observed until each step\n# res.total_num_search: total number of search\nprint('f(x)=')\nprint(res.fx[0:res.total_num_search])\nbest_fx, best_action = res.export_all_sequence_best_fx()\nprint('current best')\nprint (best_fx)\nprint ('current best action=')\nprint (best_action)\nprint ('history of chosed actions=')\nprint (res.chosed_actions[0:res.total_num_search])", "f(x)=\n[-1.07060214 -1.15340978 -0.98189856 -1.0660804 -1.04327156 -1.12582192\n -1.07071983 -1.09162401 -0.96379539 -0.98953771 -1.13500669 -1.00395435\n -0.99460129 -0.97110762 -1.09609145 -0.98278381 
-1.05258966 -1.07973688\n -1.02511608 -1.04873284 -1.07139841 -1.0142353 -0.96441796 -1.12176327\n -1.01191189 -1.09929812 -2.83283038 -0.99754301 -1.03973342 -1.0378822\n -0.97361382 -1.04503835 -1.02471098 -0.97717442 -1.01682878 -1.08035291\n -1.10409646 -1.02463792 -1.03130881 -1.02970235 -1.04319822 -1.07006969\n -1.20155612 -1.03499165 -1.4238407 -1.08333102 -0.99770846 -1.05757818\n -1.00452376 -1.00565377 -0.96503172 -1.31475034 -0.96397534 -0.99741849\n -1.0623879 -1.37161666 -1.05275454 -1.0879136 -1.01891965 -1.08263454\n -1.1425489 -1.08111165 -0.99065748 -0.99981525 -1.06754785 -1.0130527\n -0.9670393 -1.04436888 -1.13343804 -0.9852775 -1.01114434 -0.97978012\n -1.00569357 -1.07454822 -0.99178493 -0.99478408 -1.00059502 -1.07134639\n -1.03792654 -1.01243496 -1.12320939 -1.13750522 -1.08189911 -1.000543\n -0.99681497 -0.99142987 -1.01871781 -0.99980871 -1.38420908 -1.00973801\n -1.00935793 -1.03499762 -0.96375929 -0.96850305 -1.0103927 -0.99677943\n -0.99226906 -1.11032161 -0.98802336 -1.00365529]\ncurrent best\n[-1.07060214 -1.07060214 -0.98189856 -0.98189856 -0.98189856 -0.98189856\n -0.98189856 -0.98189856 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539 -0.96379539\n -0.96379539 -0.96379539 -0.96375929 -0.96375929 -0.96375929 -0.96375929\n -0.96375929 -0.96375929 -0.96375929 -0.96375929]\ncurrent best action=\n[15673. 15673. 7792. 7792. 7792. 7792. 7792. 7792. 5734. 5734.\n 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734.\n 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734.\n 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734.\n 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734.\n 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734.\n 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734.\n 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734.\n 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734. 5734.\n 5734. 5734. 5698. 5698. 5698. 5698. 5698. 5698. 5698. 
5698.]\nhistory of chosed actions=\n[15673 16489 7792 13752 9023 1470 14404 14031 5734 3111 1192 8912\n 1941 4051 14468 8562 16185 17654 4470 1022 399 10613 5068 9\n 4944 15616 5994 187 3374 10372 9434 2584 3907 11967 12521 17450\n 8370 4748 8424 3 14947 17276 15533 9798 3025 13248 3033 14948\n 7770 2960 3811 15000 8974 2111 5983 15009 9973 13203 11788 7501\n 13034 16835 999 702 17967 36 7897 11370 17955 10656 11395 4292\n 2996 2761 7292 7214 2479 12024 14091 12601 17976 15992 17907 13541\n 8990 10322 2382 10877 3006 3779 1849 8965 5698 4069 3588 8954\n 8398 16206 4381 1183]\n" ], [ "# save the results\nres.save('test.npz')", "_____no_output_____" ], [ "del res", "_____no_output_____" ], [ "# load the results\nres = combo.search.discrete.results.history()\nres.load('test.npz')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
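The COMBO tutorial above depends on downloading `s5-210.csv`. The sketch below runs the same search loop on a toy quadratic objective so the policy API can be tried offline. The 1-D candidate grid and the `Simulator` class are invented for illustration; the `policy`, `random_search`, `bayes_search`, and `export_all_sequence_best_fx` calls and their keyword arguments mirror the record.

```python
# Toy stand-in for the grain-boundary data: Bayesian optimization over a
# 1-D candidate grid with COMBO. The objective is invented; the policy
# calls follow the record above.
import numpy as np
import combo

X = np.linspace(-2.0, 2.0, 401).reshape(-1, 1)  # candidate feature vectors
X = combo.misc.centering(X)

class Simulator:
    """Returns the (negative) energy of the chosen candidate index."""
    def __init__(self):
        self.t = -(X[:, 0] ** 2)

    def __call__(self, action):
        return self.t[action]

policy = combo.search.discrete.policy(test_X=X)
policy.set_seed(0)

# A few random probes first, then Thompson-sampling Bayesian search.
policy.random_search(max_num_probes=5, simulator=Simulator())
res = policy.bayes_search(max_num_probes=20, simulator=Simulator(),
                          score='TS', interval=5, num_rand_basis=500)

best_fx, best_action = res.export_all_sequence_best_fx()
print(best_fx[-1], best_action[-1])
```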
cb61814c032dc5c26bfbcb0929a0567bdd017e58
256753
ipynb
Jupyter Notebook
Notebooks/Full_pipeline.ipynb
AndOleAnd/Capstone_N_A_P
f619ed31171d83ebdc080776ce06055b580c6705
[ "MIT" ]
null
null
null
Notebooks/Full_pipeline.ipynb
AndOleAnd/Capstone_N_A_P
f619ed31171d83ebdc080776ce06055b580c6705
[ "MIT" ]
38
2020-12-11T19:35:25.000Z
2021-06-16T08:34:09.000Z
Notebooks/Full_pipeline.ipynb
AndOleAnd/Capstone_N_A_P
f619ed31171d83ebdc080776ce06055b580c6705
[ "MIT" ]
null
null
null
703.432877
41540
0.949438
[ [ [ "import math\nimport pandas as pd \nimport geopandas as gpd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport h3 # h3 bins from uber", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures", "_____no_output_____" ], [ "import sys\nsys.path.append('../Scripts')\nimport capstone_functions as cf", "_____no_output_____" ] ], [ [ "# Exploring Model Complexity vs Scores\n### In this workbook we slowly add complexity to the partitioning model across a number of dimensions. \nWe use the predicted values for first half (h1) 2019 as the train values and the actual h1 2019 calues as the test set. \nFinally we submit to zindi to get a score against the actual h2 2019 accident data.", "_____no_output_____" ], [ "## Baseline_model\nUses simple grid based on quatiles to place ambulances around the city\nZindi score = 68.9760227569434", "_____no_output_____" ] ], [ [ "cf.full_pipeline(predict_period='2019_h1', frequency_cutoff=0, outlier_filter=0.00, test_period_date_start='2019-01-01', test_period_date_end='2020-07-01',\n tw_cluster_strategy='baseline', placement_method='baseline', verbose=2)", "file created ../Inputs/predictions_for_clustering_c.csv\n1 clusters created\nusing star grid for placement\n1 placement sets created\nTotal size of test set: 1922\nTotal size of train set: 3227\nScore on test set: 0.07503016990729658\nScore on train set: 0.05597306216241996 (avg distance per accident)\n20201217_prediction_0.0_baseline_baseline.csv saved in ../Outputs/\n" ] ], [ [ "## Adding complexity 1\nUse Partioning algorithm k_means to find optimal location for ambulances that minimizes the euclidean distance between ambulances and points", "_____no_output_____" ] ], [ [ "cf.full_pipeline(predict_period='2019_h1', frequency_cutoff=0, outlier_filter=0.00, test_period_date_start='2019-01-01', test_period_date_end='2020-07-01',\n tw_cluster_strategy='baseline', placement_method='k_means', verbose=2)", "file created ../Inputs/predictions_for_clustering_c.csv\n1 clusters created\nusing k-means clustering\n1 placement sets created\nTotal size of test set: 1922\nTotal size of train set: 3227\nScore on test set: 0.05897960880522698\nScore on train set: 0.05054601342218318 (avg distance per accident)\n20201217_prediction_0.0_baseline_k_means.csv saved in ../Outputs/\n" ] ], [ [ "## Adding Complexity 2\nChoose different algorithm that is not so influenced by outliers. Picks a median point as the cluster center. 
\nzindi score = 49.9372135333768", "_____no_output_____" ] ], [ [ "cf.full_pipeline(predict_period='2019_h1', frequency_cutoff=0, outlier_filter=0.00, test_period_date_start='2019-01-01', test_period_date_end='2020-07-01',\n tw_cluster_strategy='baseline', placement_method='k_medoids', verbose=2)", "file created ../Inputs/predictions_for_clustering_c.csv\n1 clusters created\nusing k-medoids clustering\n1 placement sets created\nTotal size of test set: 1922\nTotal size of train set: 3227\nScore on test set: 0.05519941541897019\nScore on train set: 0.040379031163907037 (avg distance per accident)\n20201217_prediction_0.0_baseline_k_medoids.csv saved in ../Outputs/\n" ] ], [ [ "## Adding Complexity 3 \nFilter outliers to reduce overfitting for rare events out side of the center of the city\n\nzindi score = 44.4289573474198", "_____no_output_____" ] ], [ [ "cf.full_pipeline(predict_period='2019_h1', frequency_cutoff=0, outlier_filter=0.003, test_period_date_start='2019-01-01', test_period_date_end='2020-07-01',\n tw_cluster_strategy='baseline', placement_method='k_means', verbose=2)", "file created ../Inputs/predictions_for_clustering_c.csv\n1 clusters created\nusing k-means clustering\n1 placement sets created\nTotal size of test set: 1922\nTotal size of train set: 3227\nScore on test set: 0.05021612166332715\nScore on train set: 0.03475499111996446 (avg distance per accident)\n20201217_prediction_0.003_baseline_k_means.csv saved in ../Outputs/\n" ] ], [ [ "## Adding Complexity 4\nUsing gradient descent to optimize placement by reducing loss funtion that is euclidean distance between centroids and points. \nzindi score = 56.49581082745", "_____no_output_____" ] ], [ [ "cf.full_pipeline(predict_period='2019_h1', frequency_cutoff=0, outlier_filter=0.003, test_period_date_start='2019-01-01', test_period_date_end='2020-07-01',\n tw_cluster_strategy='baseline', placement_method='gradient_descent', verbose=2,\n lr=8e-3, n_epochs=50, batch_size=2)", "file created ../Inputs/predictions_for_clustering_c.csv\n1 clusters created\nusing gradient descent clustering\n1 placement sets created\nTotal size of test set: 1922\nTotal size of train set: 3227\nScore on test set: 0.06169501641428757\nScore on train set: 0.029772715695369757 (avg distance per accident)\n20201217_prediction_0.003_baseline_gradient_descent.csv saved in ../Outputs/\n" ] ], [ [ "## Adding Complexity 5\nCreating different placement sets for different time and day combinations\nzindi score = 43.9846518426706", "_____no_output_____" ] ], [ [ "cf.full_pipeline(predict_period='2019_h1', frequency_cutoff=0, outlier_filter=0.004, test_period_date_start='2019-01-01', test_period_date_end='2020-07-01',\n tw_cluster_strategy='holiday_simple', placement_method='k_means', verbose=2)", "file created ../Inputs/predictions_for_clustering_c.csv\n5 clusters created\nusing k-means clustering\n5 placement sets created\nTotal size of test set: 1922\nTotal size of train set: 3227\nScore on test set: 0.05199322505912626\nScore on train set: 0.03587050084914429 (avg distance per accident)\n20201217_prediction_0.004_holiday_simple_k_means.csv saved in ../Outputs/\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb6181563e5c69fc45b0099577149c2910e17a5d
2,002
ipynb
Jupyter Notebook
_build/html/_sources/ipynb/capstone-temas.ipynb
gcpeixoto/ICD
bae7d02cd467240649c89b0ba4440966fba18cc7
[ "CC0-1.0" ]
2
2021-09-09T01:56:40.000Z
2021-11-10T01:56:56.000Z
_build/html/_sources/ipynb/capstone-temas.ipynb
gcpeixoto/ICD
bae7d02cd467240649c89b0ba4440966fba18cc7
[ "CC0-1.0" ]
null
null
null
_build/html/_sources/ipynb/capstone-temas.ipynb
gcpeixoto/ICD
bae7d02cd467240649c89b0ba4440966fba18cc7
[ "CC0-1.0" ]
1
2021-11-23T14:24:03.000Z
2021-11-23T14:24:03.000Z
29.441176
129
0.61039
[ [ [ "# Temas sugeridos para Projeto Final\n\n## ODS Agenda 2030\n\n- https://sdgs.un.org/goals\n\n## Proposições\n\n- Artes, entretenimento (cinema, teatro, música)\n- Clima, meio ambiente, recursos naturais\n\t- Produção de resíduos sólidos urbanos\n- Saúde, esportes, qualidade de vida\n- Agricultura, pecuária, pesca\n\t- Região do Vale do São Francisco; produção de uvas\n- Energias, transição energética\n- Indústria 4.0, transformação digital, IoT\n\t- avaliação de ciclo de vida de produtos (inventário de dados :: https://www.ecoinvent.org)\n- Economia, renda, desigualdade social, finanças, mercados\n\t- Projeto de webscrapping para puxar valor de ativos (FII, ações) por tickers e focar em rebalanceamento de carteira. \n- Direito, legislação, justiça, segurança pública, computação forense\n- Política, ética, cidadania\n- Infraestrutura, cidades inteligentes\n- Turismo, lazer\n- Geopolítica, relações internacionais, comércio exterior\n- Biotecnologia, materiais avançados\n- Gestão, administração, pessoas\n- Educação, ciência, tecnologia, inovação", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
cb6189a1cae2c03932079b7e3a1111a2f8308c95
77,691
ipynb
Jupyter Notebook
examples/multipitch_tracking.ipynb
TUD-STKS/PyRCN
26fb7f0d55e8c8925f692191c56db2ea32e3f630
[ "BSD-3-Clause" ]
35
2020-07-21T18:11:01.000Z
2022-03-28T01:31:11.000Z
examples/multipitch_tracking.ipynb
TUD-STKS/PyRCN
26fb7f0d55e8c8925f692191c56db2ea32e3f630
[ "BSD-3-Clause" ]
21
2020-12-30T14:25:26.000Z
2021-12-02T10:34:43.000Z
examples/multipitch_tracking.ipynb
TUD-STKS/PyRCN
26fb7f0d55e8c8925f692191c56db2ea32e3f630
[ "BSD-3-Clause" ]
10
2020-07-15T11:22:21.000Z
2022-03-18T10:27:47.000Z
143.606285
30,152
0.87588
[ [ [ "# Multipitch tracking using Echo State Networks\n\n## Introduction\n\nIn this notebook, we demonstrate how the ESN can deal with multipitch tracking, a challenging multilabel classification problem in music analysis.\n\nAs this is a computationally expensive task, we have pre-trained models to serve as an entry point.\n\nAt first, we import all packages required for this task. You can find the import statements below.", "_____no_output_____" ] ], [ [ "import time\nimport numpy as np\nimport os\nimport csv\nfrom sklearn.base import clone\nfrom sklearn.metrics import make_scorer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom joblib import dump, load\n\nimport librosa\nfrom madmom.processors import SequentialProcessor, ParallelProcessor\nfrom madmom.audio import SignalProcessor, FramedSignalProcessor\nfrom madmom.audio.stft import ShortTimeFourierTransformProcessor\nfrom madmom.audio.filters import LogarithmicFilterbank\nfrom madmom.audio.spectrogram import FilteredSpectrogramProcessor, LogarithmicSpectrogramProcessor, SpectrogramDifferenceProcessor\n\nfrom pyrcn.util import FeatureExtractor\nfrom pyrcn.echo_state_network import SeqToSeqESNClassifier\nfrom pyrcn.datasets import fetch_maps_piano_dataset\nfrom pyrcn.metrics import accuracy_score\nfrom pyrcn.model_selection import SequentialSearchCV\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import ticker\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\nplt.rcParams[\"font.size\"] = 10\n%matplotlib inline\n\nimport pandas as pd\nimport seaborn as sns\nfrom mir_eval import multipitch", "_____no_output_____" ] ], [ [ "## Feature extraction\n\nThe acoustic features extracted from the input signal are obtained by filtering short-term spectra (window length 4096 samples and hop size 10 ms) with a bank of triangular filters in the frequency domain with log-spaced frequencies. The frequency range was 30 Hz to 17 000 Hz and we used 12 filters per octave. We used logarithmic magnitudes and added 1 inside the logarithm to ensure a minimum value of 0 for a frame without energy. The first derivative between adjacent frames was added in order to enrich the features by temporal information. Binary labels indicating absent (value 0) or present (value 1) pitches for each frame are assigned to each frame. Note that this task is a multilabel classification. 
Each MIDI pitch is a separate class, and multiple or no classes can be active at a discrete frame index.\n\nFor a more detailed description, please have a look in our repository ([https://github.com/TUD-STKS/Automatic-Music-Transcription](https://github.com/TUD-STKS/Automatic-Music-Transcription)) with several detailed examples for music analysis tasks.", "_____no_output_____" ] ], [ [ "def create_feature_extraction_pipeline(sr=44100, frame_sizes=[1024, 2048, 4096], fps_hz=100.):\n audio_loading = Pipeline([(\"load_audio\", FeatureExtractor(librosa.load, sr=sr, mono=True)),\n (\"normalize\", FeatureExtractor(librosa.util.normalize, norm=np.inf))])\n\n sig = SignalProcessor(num_channels=1, sample_rate=sr)\n multi = ParallelProcessor([])\n for frame_size in frame_sizes:\n frames = FramedSignalProcessor(frame_size=frame_size, fps=fps_hz)\n stft = ShortTimeFourierTransformProcessor() # caching FFT window\n filt = FilteredSpectrogramProcessor(filterbank=LogarithmicFilterbank, num_bands=12, fmin=30, fmax=17000,\n norm_filters=True, unique_filters=True)\n spec = LogarithmicSpectrogramProcessor(log=np.log10, mul=5, add=1)\n diff = SpectrogramDifferenceProcessor(diff_ratio=0.5, positive_diffs=True, stack_diffs=np.hstack)\n # process each frame size with spec and diff sequentially\n multi.append(SequentialProcessor([frames, stft, filt, spec, diff]))\n feature_extractor = FeatureExtractor(SequentialProcessor([sig, multi, np.hstack]))\n\n feature_extraction_pipeline = Pipeline([(\"audio_loading\", audio_loading),\n (\"feature_extractor\", feature_extractor)])\n return feature_extraction_pipeline", "_____no_output_____" ] ], [ [ "## Load and preprocess the dataset\n\nThis might require a large amount of time and memory. ", "_____no_output_____" ] ], [ [ "# Load and preprocess the dataset\nfeature_extraction_pipeline = create_feature_extraction_pipeline(sr=44100, frame_sizes=[2048], fps_hz=100)\n# New object -> PyTorch dataloader / Matlab datastore\nX_train, X_test, y_train, y_test = fetch_maps_piano_dataset(data_origin=\"/projects/p_transcriber/MAPS\", \n data_home=None, preprocessor=feature_extraction_pipeline,\n force_preprocessing=False, label_type=\"pitch\")", "_____no_output_____" ], [ "def tsplot(ax, data,**kw):\n x = np.arange(data.shape[1])\n est = np.mean(data, axis=0)\n sd = np.std(data, axis=0)\n cis = (est - sd, est + sd)\n ax.fill_between(x,cis[0],cis[1],alpha=0.2, **kw)\n ax.plot(x,est,**kw)\n ax.margins(x=0)\n\nfig, ax = plt.subplots()\nfig.set_size_inches(4, 1.25)\ntsplot(ax, np.concatenate(np.hstack((X_train, X_test))))\nax.set_xlabel('Feature Index')\nax.set_ylabel('Magnitude')\nplt.grid()\nplt.savefig('features_statistics.pdf', bbox_inches='tight', pad_inches=0)", "_____no_output_____" ] ], [ [ "## Set up a ESN\n\nTo develop an ESN model for multipitch tracking, we need to tune several hyper-parameters, e.g., input_scaling, spectral_radius, bias_scaling and leaky integration.\n\nWe follow the way proposed in the paper for multipitch tracking and for acoustic modeling of piano music to optimize hyper-parameters sequentially.\n\nWe define the search spaces for each step together with the type of search (a grid search in this context).\n\nAt last, we initialize a SeqToSeqESNClassifier with the desired output strategy and with the initially fixed parameters.", "_____no_output_____" ] ], [ [ "initially_fixed_params = {'hidden_layer_size': 500,\n 'input_activation': 'identity',\n 'k_in': 10,\n 'bias_scaling': 0.0,\n 'reservoir_activation': 'tanh',\n 'leakage': 1.0,\n 'bi_directional': 
False,\n 'k_rec': 10,\n 'wash_out': 0,\n 'continuation': False,\n 'alpha': 1e-5,\n 'random_state': 42}\n\nstep1_esn_params = {'leakage': np.linspace(0.1, 1.0, 10)}\nkwargs_1 = {'random_state': 42, 'verbose': 2, 'n_jobs': 70, 'pre_dispatch': 70, 'n_iter': 14,\n 'scoring': make_scorer(accuracy_score)}\nstep2_esn_params = {'input_scaling': np.linspace(0.1, 1.0, 10),\n 'spectral_radius': np.linspace(0.0, 1.5, 16)}\n\nstep3_esn_params = {'bias_scaling': np.linspace(0.0, 2.0, 21)}\n\nkwargs_2_3 = {'verbose': 2, 'pre_dispatch': 70, 'n_jobs': 70, \n 'scoring': make_scorer(accuracy_score)}\n\n# The searches are defined similarly to the steps of a sklearn.pipeline.Pipeline:\nsearches = [('step1', GridSearchCV, step1_esn_params, kwargs_1),\n ('step2', GridSearchCV, step2_esn_params, kwargs_2_3),\n ('step3', GridSearchCV, step3_esn_params, kwargs_2_3)]\n\nbase_esn = SeqToSeqESNClassifier(**initially_fixed_params)", "_____no_output_____" ] ], [ [ "## Optimization\n\nWe provide a SequentialSearchCV that basically iterates through the list of searches that we have defined before. It can be combined with any model selection tool from scikit-learn.", "_____no_output_____" ] ], [ [ "try: \n sequential_search = load(\"sequential_search_ll.joblib\")\nexcept FileNotFoundError:\n print(FileNotFoundError)\n sequential_search = SequentialSearchCV(base_esn, searches=searches).fit(X_train, y_train)\n dump(sequential_search, \"sequential_search_ll.joblib\")", "C:\\Users\\Steiner\\AppData\\Roaming\\Python\\Python37\\site-packages\\sklearn\\base.py:334: UserWarning: Trying to unpickle estimator StandardScaler from version 0.24.1 when using version 0.23.1. This might lead to breaking code or invalid results. Use at your own risk.\n UserWarning)\nC:\\Users\\Steiner\\AppData\\Roaming\\Python\\Python37\\site-packages\\sklearn\\base.py:334: UserWarning: Trying to unpickle estimator LabelBinarizer from version 0.24.1 when using version 0.23.1. This might lead to breaking code or invalid results. 
Use at your own risk.\n UserWarning)\n" ] ], [ [ "## Visualize hyper-parameter optimization", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(sequential_search.all_cv_results_[\"step1\"])\nfig = plt.figure()\nfig.set_size_inches(2, 1.25)\nax = sns.lineplot(data=df, x=\"param_leakage\", y=\"mean_test_score\")\nplt.xlabel(\"Leakage\")\nplt.ylabel(\"Score\")\n# plt.xlim((0, 1))\ntick_locator = ticker.MaxNLocator(5)\nax.xaxis.set_major_locator(tick_locator)\nax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.4f'))\nplt.grid()\nplt.savefig('optimize_leakage.pdf', bbox_inches='tight', pad_inches=0)", "_____no_output_____" ], [ "df = pd.DataFrame(sequential_search.all_cv_results_[\"step2\"])\npvt = pd.pivot_table(df,\n values='mean_test_score', index='param_input_scaling', columns='param_spectral_radius')\n\npvt.columns = pvt.columns.astype(float)\npvt2 = pd.DataFrame(pvt.loc[pd.IndexSlice[0:1], pd.IndexSlice[0.0:1.0]])\n\nfig = plt.figure()\nax = sns.heatmap(pvt2, xticklabels=pvt2.columns.values.round(2), yticklabels=pvt2.index.values.round(2), cbar_kws={'label': 'Score'})\nax.invert_yaxis()\nplt.xlabel(\"Spectral Radius\")\nplt.ylabel(\"Input Scaling\")\nfig.set_size_inches(4, 2.5)\ntick_locator = ticker.MaxNLocator(10)\nax.yaxis.set_major_locator(tick_locator)\nax.xaxis.set_major_locator(tick_locator)\nplt.savefig('optimize_is_sr.pdf', bbox_inches='tight', pad_inches=0)", "_____no_output_____" ], [ "df = pd.DataFrame(sequential_search.all_cv_results_[\"step3\"])\nfig = plt.figure()\nfig.set_size_inches(2, 1.25)\nax = sns.lineplot(data=df, x=\"param_bias_scaling\", y=\"mean_test_score\")\nplt.xlabel(\"Bias Scaling\")\nplt.ylabel(\"Score\")\nplt.xlim((0, 2))\ntick_locator = ticker.MaxNLocator(5)\nax.xaxis.set_major_locator(tick_locator)\nax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.5f'))\nplt.grid()\nplt.savefig('optimize_bias_scaling.pdf', bbox_inches='tight', pad_inches=0)", "_____no_output_____" ] ], [ [ "## Test the ESN\n\nFinally, we test the ESN on unseen data.", "_____no_output_____" ] ], [ [ "def _midi_to_frequency(p):\n return 440. 
* (2 ** ((p-69)/12))\n\n\ndef get_mir_eval_rows(y, fps=100.):\n time_t = np.arange(len(y)) / fps\n freq_hz = [_midi_to_frequency(np.asarray(np.nonzero(row))).ravel() for row in y]\n return time_t, freq_hz", "_____no_output_____" ], [ "esn = sequential_search.best_estimator_\ny_test_pred = esn.predict_proba(X=X_test)\nscores = np.zeros(shape=(10, 14))\nfor k, thr in enumerate(np.linspace(0.1, 0.9, 9)):\n res = []\n for y_true, y_pred in zip(y_test, y_test_pred):\n times_res, freqs_hz_res = get_mir_eval_rows(y_pred[:, 1:]>thr, fps=100.)\n times_ref, freqs_hz_ref = get_mir_eval_rows(y_true[:, 1:]>thr, fps=100.)\n res.append(multipitch.metrics(ref_time=times_ref, ref_freqs=freqs_hz_ref, est_time=times_res, est_freqs=freqs_hz_res))\n scores[k, :] = np.mean(res, axis=0)", "_____no_output_____" ], [ "plt.plot(np.linspace(0.1, 1, 10), scores[:, :3])\nplt.plot(np.linspace(0.1, 1, 10), 2*scores[:, 0]*scores[:, 1] / (scores[:, 0] + scores[:, 1]))\nplt.xlabel(\"Threshold\")\nplt.ylabel(\"Scores\")\nplt.xlim((0.1, 0.9))\nplt.legend((\"Precision\", \"Recall\", \"Accuracy\", \"F1-Score\"))", "C:\\Users\\Steiner\\AppData\\Roaming\\Python\\Python37\\site-packages\\ipykernel_launcher.py:2: RuntimeWarning: invalid value encountered in true_divide\n \n" ], [ "np.mean(list(sequential_search.all_refit_time_.values()))", "_____no_output_____" ], [ "t1 = time.time()\nesn = clone(sequential_search.best_estimator_).fit(X_train, y_train, n_jobs=8)\nprint(\"Fitted in {0} seconds\".format(time.time() - t1))", "[Parallel(n_jobs=8)]: Using backend LokyBackend with 8 concurrent workers.\n[Parallel(n_jobs=8)]: Done 3 out of 8 | elapsed: 2.2min remaining: 3.6min\n[Parallel(n_jobs=8)]: Done 8 out of 8 | elapsed: 3.1min remaining: 0.0s\n[Parallel(n_jobs=8)]: Done 8 out of 8 | elapsed: 3.1min finished\n" ], [ "t1 = time.time()\nesn = clone(sequential_search.best_estimator_).fit(X_train, y_train)\nprint(\"Fitted in {0} seconds\".format(time.time() - t1))", "Fitted in 398.84272718429565 seconds\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb61910eaf97dc0584eb9c8fb5255c07a7289eb8
43,685
ipynb
Jupyter Notebook
5_characterize_iModulons/4_motif_search.ipynb
kevin-rychel/modulome-workflow
865c8e6039382750f792ad21231f26ef36c4f2e8
[ "MIT" ]
1
2021-11-16T12:34:14.000Z
2021-11-16T12:34:14.000Z
5_characterize_iModulons/4_motif_search.ipynb
kevin-rychel/modulome-workflow
865c8e6039382750f792ad21231f26ef36c4f2e8
[ "MIT" ]
8
2021-06-14T19:06:30.000Z
2022-03-17T23:29:52.000Z
5_characterize_iModulons/4_motif_search.ipynb
kevin-rychel/modulome-workflow
865c8e6039382750f792ad21231f26ef36c4f2e8
[ "MIT" ]
2
2022-01-28T22:37:28.000Z
2022-03-31T22:26:44.000Z
37.401541
91
0.410301
[ [ [ "from pymodulon.io import *\nfrom pymodulon.motif import *", "_____no_output_____" ] ], [ [ "# Load Data", "_____no_output_____" ] ], [ [ "ica_data = load_json_model(os.path.join('..','data','processed_data','bsu.json.gz'))", "_____no_output_____" ], [ "fasta_file = os.path.join('..','data','external','genome.fasta')", "_____no_output_____" ] ], [ [ "# Search for motifs", "_____no_output_____" ] ], [ [ "for k in ica_data.imodulon_names:\n find_motifs(ica_data, k, fasta_file, force=True)", "Finding motifs for 16 sequences\nFound 5 motifs across 35 sites\nFinding motifs for 82 sequences\nFound 3 motifs across 188 sites\nFinding motifs for 13 sequences\nFound 5 motifs across 32 sites\nFinding motifs for 149 sequences\nFound 2 motifs across 161 sites\nFinding motifs for 3 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 4 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 5 sequences\nFound 2 motifs across 9 sites\nFinding motifs for 5 sequences\nFound 4 motifs across 18 sites\nFinding motifs for 82 sequences\nFound 5 motifs across 142 sites\nFinding motifs for 164 sequences\nFound 3 motifs across 239 sites\nFinding motifs for 16 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 3 sequences\nFound 3 motifs across 6 sites\nFinding motifs for 48 sequences\nFound 4 motifs across 139 sites\nLess than two sequences found for iModulon: PyrR\nFinding motifs for 17 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 37 sequences\nFound 5 motifs across 105 sites\nFinding motifs for 2 sequences\nNo motif found with E-value < 1.0e-03\nLess than two sequences found for iModulon: SG_1\nFinding motifs for 14 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 108 sequences\nFound 5 motifs across 301 sites\nFinding motifs for 2 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 6 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 34 sequences\nFound 5 motifs across 115 sites\nFinding motifs for 6 sequences\nFound 4 motifs across 17 sites\nFinding motifs for 5 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 50 sequences\nFound 5 motifs across 138 sites\nFinding motifs for 41 sequences\nFound 5 motifs across 88 sites\nFinding motifs for 20 sequences\nFound 2 motifs across 23 sites\nFinding motifs for 9 sequences\nNo motif found with E-value < 1.0e-03\nLess than two sequences found for iModulon: MtlR\nFinding motifs for 17 sequences\nFound 5 motifs across 49 sites\nFinding motifs for 52 sequences\nFound 3 motifs across 68 sites\nFinding motifs for 18 sequences\nFound 5 motifs across 35 sites\nLess than two sequences found for iModulon: SwrA\nFinding motifs for 7 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 21 sequences\nFound 5 motifs across 40 sites\nFinding motifs for 98 sequences\nFound 5 motifs across 244 sites\nFinding motifs for 5 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 10 sequences\nFound 3 motifs across 15 sites\nFinding motifs for 14 sequences\nFound 1 motif across 9 sites\nFinding motifs for 101 sequences\nFound 3 motifs across 187 sites\nFinding motifs for 54 sequences\nFound 5 motifs across 111 sites\nFinding motifs for 69 sequences\nFound 3 motifs across 189 sites\nLess than two sequences found for iModulon: FruR\nLess than two sequences found for iModulon: AcoR\nFinding motifs for 8 sequences\nFound 1 motif across 7 sites\nFinding motifs for 19 sequences\nFound 5 motifs across 33 sites\nFinding 
motifs for 39 sequences\nFound 5 motifs across 71 sites\nFinding motifs for 24 sequences\nFound 4 motifs across 47 sites\nFinding motifs for 53 sequences\nFound 4 motifs across 152 sites\nFinding motifs for 4 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 4 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 10 sequences\nFound 3 motifs across 14 sites\nFinding motifs for 4 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 3 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 35 sequences\nFound 2 motifs across 49 sites\nFinding motifs for 33 sequences\nFound 2 motifs across 36 sites\nFinding motifs for 74 sequences\nFound 4 motifs across 195 sites\nFinding motifs for 80 sequences\nFound 5 motifs across 146 sites\nFinding motifs for 8 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 12 sequences\nFound 2 motifs across 9 sites\nFinding motifs for 23 sequences\nFound 2 motifs across 24 sites\nFinding motifs for 5 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 2 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 6 sequences\nFound 1 motif across 5 sites\nFinding motifs for 15 sequences\nFound 5 motifs across 33 sites\nFinding motifs for 17 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 2 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 54 sequences\nFound 2 motifs across 81 sites\nFinding motifs for 4 sequences\nNo motif found with E-value < 1.0e-03\nFinding motifs for 71 sequences\nFound 2 motifs across 112 sites\nFinding motifs for 11 sequences\nFound 5 motifs across 37 sites\n" ], [ "ica_data.motif_info", "_____no_output_____" ] ], [ [ "# Compare motifs to known databases using TOMTOM", "_____no_output_____" ] ], [ [ "for info in ica_data.motif_info.values():\n compare_motifs(info)", "_____no_output_____" ], [ "for k in ica_data.motif_info.keys():\n if not ica_data.motif_info[k].matches.empty:\n print(k)", "1\n3\nCsoR/Fnr\n9\nSigB-1\nCodY\nRhgR\nstringent_response\nSigB-2\nSigK\nNusA\nSigB-3\nFur\nCcpA-1\nSigW\nSigE\nRok\n" ], [ "ica_data.motif_info[1].matches", "_____no_output_____" ], [ "ica_data.imodulon_table.loc[['SigB-1','CodY','SigW','CcpA-1','Fur']]", "_____no_output_____" ], [ "ica_data.motif_info['CodY'].sites.loc['MEME-1']", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb61997bd33617c9cdf57e2f3386e7d583dc86fc
254,883
ipynb
Jupyter Notebook
Examples/Mouse Cell Atlas brain Astrocyte vs Microglia DE.ipynb
Teichlab/NaiveDE
34899669cc210fad62f28243432dc5ecf8697764
[ "MIT" ]
5
2018-03-05T16:40:43.000Z
2020-04-06T00:07:12.000Z
Examples/Mouse Cell Atlas brain Astrocyte vs Microglia DE.ipynb
Teichlab/NaiveDE
34899669cc210fad62f28243432dc5ecf8697764
[ "MIT" ]
null
null
null
Examples/Mouse Cell Atlas brain Astrocyte vs Microglia DE.ipynb
Teichlab/NaiveDE
34899669cc210fad62f28243432dc5ecf8697764
[ "MIT" ]
2
2018-08-01T03:33:46.000Z
2019-09-09T20:36:43.000Z
387.949772
65,416
0.932753
[ [ [ "%pylab inline\nimport pandas as pd\nimport plotnine as p\np.theme_set(p.theme_classic())\n\nplt.rcParams['axes.spines.top'] = False\nplt.rcParams['axes.spines.right'] = False", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "counts = pd.read_parquet('mca_brain_counts.parquet')", "_____no_output_____" ], [ "sample_info = pd.read_parquet('mca_brain_cell_info.parquet')", "_____no_output_____" ] ], [ [ "### Differential expression\n\nNow let us investigate how this count depth effect plays in to a differential expression analysis. With all published large scale experiments cataloging cell types, it is getting increasingly easy to simply fetch some data and do quick comparisons. We will use data from the recent [single cell Mouse Cell Atlas][paper link]. To get something easy to compare, we use the samples called \"Brain\" and focus on the cells annotated as \"Microglia\" and \"Astrocyte\". Out of the ~400,000 cells in the study, these two cell types have 338 and 199 representative cells. On average they have about 700 total UMI counts each, so while the entire study is a pretty large scale, the individual cell types and cells are on a relatively small scale. The final table has 537 cells and 21,979 genes.\n\n[paper link]: http://www.cell.com/cell/abstract/S0092-8674(18)30116-8", "_____no_output_____" ] ], [ [ "sample_info['super_cell_type'].value_counts()", "_____no_output_____" ], [ "sub_samples = sample_info.query('super_cell_type in [\"Microglia\", \"Astrocyte\"]').copy()", "_____no_output_____" ], [ "sub_counts = counts.reindex(index=sub_samples.index)", "_____no_output_____" ], [ "sub_counts.shape", "_____no_output_____" ], [ "sub_samples['is_astrocyte'] = sub_samples['super_cell_type'] == 'Astrocyte'", "_____no_output_____" ], [ "import NaiveDE", "_____no_output_____" ], [ "sub_samples['total_count'] = sub_counts.sum(1)", "_____no_output_____" ], [ "figsize(11, 3)\nsub_samples.total_count.hist(grid=False, fc='w', ec='k')", "_____no_output_____" ], [ "sub_samples.total_count.median(), sub_samples.total_count.mean()", "_____no_output_____" ], [ "print(sub_samples.head())", " ClusterID Tissue Batch Cell Barcode \\\nCell name \nBrain_1.AAAACGCGAGTAGAATTA Brain_3 Brain Brain_1 AAAACGCGAGTAGAATTA \nBrain_1.AAAACGGAGGAGATTTGC Brain_3 Brain Brain_1 AAAACGGAGGAGATTTGC \nBrain_1.AAAACGGGCTGCGACACT Brain_2 Brain Brain_1 AAAACGGGCTGCGACACT \nBrain_1.AAAACGGTGGTAGCTCAA Brain_3 Brain Brain_1 AAAACGGTGGTAGCTCAA \nBrain_1.AAAACGGTTGCCATACAG Brain_3 Brain Brain_1 AAAACGGTTGCCATACAG \n\n cell_type super_cell_type is_astrocyte \\\nCell name \nBrain_1.AAAACGCGAGTAGAATTA Astrocyte_Mfe8 high Astrocyte True \nBrain_1.AAAACGGAGGAGATTTGC Astrocyte_Mfe8 high Astrocyte True \nBrain_1.AAAACGGGCTGCGACACT Microglia Microglia False \nBrain_1.AAAACGGTGGTAGCTCAA Astrocyte_Mfe8 high Astrocyte True \nBrain_1.AAAACGGTTGCCATACAG Astrocyte_Mfe8 high Astrocyte True \n\n total_count gene \nCell name \nBrain_1.AAAACGCGAGTAGAATTA 1088.0 0 \nBrain_1.AAAACGGAGGAGATTTGC 967.0 0 \nBrain_1.AAAACGGGCTGCGACACT 543.0 0 \nBrain_1.AAAACGGTGGTAGCTCAA 679.0 0 \nBrain_1.AAAACGGTTGCCATACAG 957.0 0 \n" ] ], [ [ "In a differential expression test you simply include a covariate in the design matrix that informs the linear model about the different conditions you want to compare. 
Here we are comparing microglia and astrocytes.", "_____no_output_____" ] ], [ [ "%%time\nlr_results = NaiveDE.lr_tests(sub_samples, np.log1p(sub_counts.T),\n alt_model='C(is_astrocyte) + np.log(total_count) + 1',\n null_model='np.log(total_count) + 1')", "CPU times: user 705 ms, sys: 136 ms, total: 841 ms\nWall time: 707 ms\n" ], [ "lr_results.pval = lr_results.pval.clip_lower(lr_results.query('pval != 0')['pval'].min())\nlr_results.qval = lr_results.qval.clip_lower(lr_results.query('qval != 0')['qval'].min())", "_____no_output_____" ], [ "print(lr_results.sort_values('pval').head())", " Intercept C(is_astrocyte)[T.True] np.log(total_count) \\\nAtp1a2 -1.925596 1.840452 0.318532 \nSparcl1 -1.008002 1.742278 0.179123 \nTmsb4x -3.680027 -2.044908 0.948016 \nHexb -2.165802 -2.032087 0.646263 \nCtss -1.665139 -1.937761 0.553429 \n\n pval qval \nAtp1a2 3.058918e-162 1.642639e-159 \nSparcl1 3.548817e-158 1.905715e-155 \nTmsb4x 2.742131e-153 1.472524e-150 \nHexb 3.671724e-145 1.971716e-142 \nCtss 8.167943e-144 4.386185e-141 \n" ], [ "example_genes = ['Apoe', 'Sparcl1', 'Tmsb4x', 'C1qa']\nexamples = lr_results.loc[example_genes]", "_____no_output_____" ], [ "img = \\\np.qplot('C(is_astrocyte)[T.True]', '-np.log10(pval)', lr_results) \\\n + p.annotate('text',\n x=examples['C(is_astrocyte)[T.True]'] + 0.33,\n y=-np.log10(examples['pval']),\n label=examples.index) \\\n + p.labs(title='Brain cell data')\n \nimg.save('4.png', verbose=False)\nimg", "_____no_output_____" ], [ "img = \\\np.qplot('C(is_astrocyte)[T.True]', 'np.log(total_count)', lr_results) \\\n + p.annotate('text',\n x=examples['C(is_astrocyte)[T.True]'] + 0.33,\n y=examples['np.log(total_count)'],\n label=examples.index) \\\n + p.labs(title='Brain cell data')\n\nimg.save('5.png', verbose=False)\nimg", "_____no_output_____" ], [ "print(lr_results.sort_values('C(is_astrocyte)[T.True]').head())", " Intercept C(is_astrocyte)[T.True] np.log(total_count) \\\nTmsb4x -3.680027 -2.044908 0.948016 \nHexb -2.165802 -2.032087 0.646263 \nCtss -1.665139 -1.937761 0.553429 \nC1qa -0.995722 -1.749257 0.423667 \nC1qc -2.215866 -1.619052 0.584999 \n\n pval qval \nTmsb4x 2.742131e-153 1.472524e-150 \nHexb 3.671724e-145 1.971716e-142 \nCtss 8.167943e-144 4.386185e-141 \nC1qa 1.826933e-136 9.810631e-134 \nC1qc 2.119271e-130 1.138049e-127 \n" ], [ "print(lr_results.sort_values('C(is_astrocyte)[T.True]').tail())", " Intercept C(is_astrocyte)[T.True] np.log(total_count) \\\nAldoc -2.687079 1.417820 0.435424 \nClu -1.888573 1.539004 0.317413 \nSparcl1 -1.008002 1.742278 0.179123 \nAtp1a2 -1.925596 1.840452 0.318532 \nApoe -3.426031 1.907639 0.615229 \n\n pval qval \nAldoc 5.683797e-122 3.052199e-119 \nClu 9.768731e-122 5.245808e-119 \nSparcl1 3.548817e-158 1.905715e-155 \nAtp1a2 3.058918e-162 1.642639e-159 \nApoe 1.250247e-123 6.713825e-121 \n" ] ], [ [ "Also in this case we can see that the count depth weights are deflated for lowly abundant genes.", "_____no_output_____" ] ], [ [ "img = \\\np.qplot(sub_counts.sum(0).clip_lower(1), lr_results['np.log(total_count)'],\n log='x') \\\n + p.labs(x='Gene count across dataset', y='np.log(total_count)', \n title='Brain cell data')\n \nimg.save('6.png', verbose=False)\nimg", "_____no_output_____" ], [ "xx = np.linspace(np.log(sub_samples.total_count.min()),\n np.log(sub_samples.total_count.max()))\n\ndef linres(gene):\n yy = \\\n lr_results.loc[gene, 'np.log(total_count)'] * xx \\\n + lr_results.loc[gene, 'Intercept']\n\n yy1 = np.exp(yy)\n yy2 = np.exp(yy + lr_results.loc[gene, 'C(is_astrocyte)[T.True]'])\n \n return 
yy1, yy2", "_____no_output_____" ] ], [ [ "Similar to above, we can look at the relation between count depth and observed counts for a few genes, but we can also make sure to plot the stratifiction into the two cell types and how the regression models are predicting the counts.", "_____no_output_____" ] ], [ [ "figsize(11, 3)\n\n\nax = plt.gca()\nfor i, gene in enumerate(['Apoe', 'Sparcl1', 'Tmsb4x', 'C1qa']):\n sub_samples['gene'] = counts[gene]\n \n plt.subplot(1, 4, i + 1, sharey=ax)\n if i == 0:\n plt.ylabel('Counts + 1')\n \n plt.loglog()\n\n plt.scatter(sub_samples.loc[~sub_samples.is_astrocyte]['total_count'],\n sub_samples.loc[~sub_samples.is_astrocyte]['gene'] + 1,\n c='grey', marker='o', label='Microglia')\n \n \n plt.scatter(sub_samples.loc[sub_samples.is_astrocyte]['total_count'],\n sub_samples.loc[sub_samples.is_astrocyte]['gene'] + 1,\n c='k', marker='x', label='Astrocyte')\n\n yy1, yy2 = linres(gene)\n\n plt.plot(np.exp(xx), yy1, c='w', lw=5)\n plt.plot(np.exp(xx), yy1, c='r', lw=3, ls=':')\n\n plt.plot(np.exp(xx), yy2, c='w', lw=5)\n plt.plot(np.exp(xx), yy2, c='r', lw=3)\n \n plt.title(gene)\n plt.xlabel('Total counts')\n\nplt.legend(scatterpoints=3);\n \nplt.tight_layout()\nplt.savefig('7.png', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "Again we can see the overall abundance is related to the slope of the lines. Another thing which seem to pop out in these plots is an interaction between cell type and slope. For example looking at C1qa the slope for the microglia seem underestimated. This makes sense, if this is an effect of count noise at low abundances.\n\nMy takeaway from this is that OLS regression might be OK if counts are large, but at lower levels model parameters are not estimated correctly due to the count nature of the data.\n\nNotebooks of the analysis in this post are available [here](https://github.com/vals/Blog/tree/master/180226-count-offsets).", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb619b4b0a27af7d3a534e3cb46c316b641a73df
19,787
ipynb
Jupyter Notebook
bronze/B92_Grovers_Search_Implementation_Solutions.ipynb
kanishkmittal/qbronze
5b1bcf4e4f48ab35f2e47753e303af2aef24b9b8
[ "Apache-2.0", "CC-BY-4.0" ]
1
2021-04-08T16:12:21.000Z
2021-04-08T16:12:21.000Z
bronze/B92_Grovers_Search_Implementation_Solutions.ipynb
institute-for-advanced-physical-studies/QWorld
acdd209fd3b94e53f77245f67c02b2e7ae52be03
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
bronze/B92_Grovers_Search_Implementation_Solutions.ipynb
institute-for-advanced-physical-studies/QWorld
acdd209fd3b94e53f77245f67c02b2e7ae52be03
[ "Apache-2.0", "CC-BY-4.0" ]
3
2021-02-05T14:13:48.000Z
2021-09-14T09:13:51.000Z
30.868955
505
0.523778
[ [ [ "<table> <tr>\n <td style=\"background-color:#ffffff;\">\n <a href=\"http://qworld.lu.lv\" target=\"_blank\"><img src=\"../images/qworld.jpg\" width=\"50%\" align=\"left\"> </a></td>\n <td width=\"70%\" style=\"background-color:#ffffff;vertical-align:bottom;text-align:right;\">\n prepared by Maksim Dimitrijev(<a href=\"http://qworld.lu.lv/index.php/qlatvia/\">QLatvia</a>)\n and Özlem Salehi (<a href=\"http://qworld.lu.lv/index.php/qturkey/\">QTurkey</a>)\n </td> \n</tr></table>", "_____no_output_____" ], [ "<table width=\"100%\"><tr><td style=\"color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;\">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\stateplus}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\stateminus}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\I}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & 1} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $\n$ \\newcommand{\\pstate}[1]{ \\lceil \\mspace{-1mu} #1 \\mspace{-1.5mu} \\rfloor } $", "_____no_output_____" ], [ "<h2> <font color=\"blue\"> Solutions for </font>Grover's Search: Implementation</h2>", "_____no_output_____" ], [ "<a id=\"task2\"></a>\n\n<h3>Task 2</h3>\n\n\nLet $N=4$. Implement the query phase and check the unitary matrix for the query operator. Note that we are interested in the top-left $4 \\times 4$ part of the matrix since the remaining parts are due to the ancilla qubit.\n\nYou are given a function $f$ and its corresponding quantum operator $U_f$. First run the following cell to load operator $U_f$. 
Then you can make queries to $f$ by applying the operator $U_f$ via the following command:\n\n<pre>Uf(circuit,qreg).", "_____no_output_____" ] ], [ [ "%run ../include/quantum.py", "_____no_output_____" ] ], [ [ "Now use phase kickback to flip the sign of the marked element:\n\n<ul>\n <li>Set output qubit (qreg[2]) to $\\ket{-}$ by applying X and H.</li>\n <li>Apply operator $U_f$\n <li>Set output qubit (qreg[2]) back.</li>\n</ul>\n\n(Can you guess the marked element by looking at the unitary matrix?)", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n\nqreg = QuantumRegister(3)\n#No need to define classical register as we are not measuring\n\nmycircuit = QuantumCircuit(qreg)\n\n\n#set ancilla\nmycircuit.x(qreg[2])\nmycircuit.h(qreg[2])\n\nUf(mycircuit,qreg) \n \n#set ancilla back\nmycircuit.h(qreg[2])\nmycircuit.x(qreg[2]) \n\n\njob = execute(mycircuit,Aer.get_backend('unitary_simulator'))\nu=job.result().get_unitary(mycircuit,decimals=3)\n\n#We are interested in the top-left 4x4 part\nfor i in range(4):\n s=\"\"\n for j in range(4):\n val = str(u[i][j].real)\n while(len(val)<5): val = \" \"+val\n s = s + val\n print(s)\n \n\nmycircuit.draw(output='mpl')", "_____no_output_____" ] ], [ [ "<a id=\"task3\"></a>\n<h3>Task 3</h3>\n\n\nLet $N=4$. Implement the inversion operator and check whether you obtain the following matrix:\n\n$\\mymatrix{cccc}{-0.5 & 0.5 & 0.5 & 0.5 \\\\ 0.5 & -0.5 & 0.5 & 0.5 \\\\ 0.5 & 0.5 & -0.5 & 0.5 \\\\ 0.5 & 0.5 & 0.5 & -0.5}$.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "def inversion(circuit,quantum_reg):\n \n \n #step 1\n circuit.h(quantum_reg[1])\n circuit.h(quantum_reg[0])\n \n #step 2\n circuit.x(quantum_reg[1])\n circuit.x(quantum_reg[0])\n\n #step 3\n circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[2])\n\n #step 4\n circuit.x(quantum_reg[1])\n circuit.x(quantum_reg[0])\n \n #step 5\n circuit.x(quantum_reg[2])\n \n #step 6\n circuit.h(quantum_reg[1])\n circuit.h(quantum_reg[0])", "_____no_output_____" ] ], [ [ "Below you can check the matrix of your inversion operator and how the circuit looks like. We are interested in top-left $4 \\times 4$ part of the matrix, the remaining parts are because we used ancilla qubit.", "_____no_output_____" ] ], [ [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n\nqreg1 = QuantumRegister(3)\n\nmycircuit1 = QuantumCircuit(qreg1)\n\n#set ancilla qubit\nmycircuit1.x(qreg1[2])\nmycircuit1.h(qreg1[2])\n \ninversion(mycircuit1,qreg1)\n\n#set ancilla qubit back\nmycircuit1.h(qreg1[2])\nmycircuit1.x(qreg1[2])\n\n\njob = execute(mycircuit1,Aer.get_backend('unitary_simulator'))\nu=job.result().get_unitary(mycircuit1,decimals=3)\nfor i in range(4):\n s=\"\"\n for j in range(4):\n val = str(u[i][j].real)\n while(len(val)<5): val = \" \"+val\n s = s + val\n print(s)\n \nmycircuit1.draw(output='mpl')", "_____no_output_____" ] ], [ [ "<a id=\"task4\"></a>\n\n<h3>Task 4: Testing Grover's search</h3>\n\nNow we are ready to test our operations and run Grover's search. Suppose that there are 4 elements in the list and try to find the marked element.\n\nYou are given the operator $U_f$. First run the following cell to load it. You can access it via <pre>Uf(circuit,qreg).</pre>\nqreg[2] is the ancilla qubit and it is shared by the query and the inversion operators.\n \nWhich state do you observe the most? 
", "_____no_output_____" ] ], [ [ "%run ..\\include\\quantum.py", "_____no_output_____" ] ], [ [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n\nqreg = QuantumRegister(3)\ncreg = ClassicalRegister(2)\n\nmycircuit = QuantumCircuit(qreg,creg)\n\n#Grover\n\n\n#initial step - equal superposition\nfor i in range(2):\n mycircuit.h(qreg[i])\n\n#set ancilla\nmycircuit.x(qreg[2])\nmycircuit.h(qreg[2])\n\nmycircuit.barrier()\n\n#change the number of iterations\niterations=1\n\n#Grover's iterations.\nfor i in range(iterations):\n #query\n Uf(mycircuit,qreg)\n \n mycircuit.barrier()\n #inversion\n inversion(mycircuit,qreg)\n mycircuit.barrier()\n \n \n#set ancilla back\nmycircuit.h(qreg[2])\nmycircuit.x(qreg[2]) \n \nmycircuit.measure(qreg[0],creg[0])\nmycircuit.measure(qreg[1],creg[1])\n\njob = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=10000)\ncounts = job.result().get_counts(mycircuit)\n\n# print the outcome\nfor outcome in counts:\n print(outcome,\"is observed\",counts[outcome],\"times\")\n\nmycircuit.draw(output='mpl')", "_____no_output_____" ] ], [ [ "<a id=\"task5\"></a>\n<h3>Task 5 (Optional, challenging)</h3>\n\nImplement the inversion operation for $n=3$ ($N=8$). This time you will need 5 qubits - 3 for the operation, 1 for ancilla, and one more qubit to implement not gate controlled by three qubits.\n\nIn the implementation the ancilla qubit will be qubit 3, while qubits for control are 0, 1 and 2; qubit 4 is used for the multiple control operation. As a result you should obtain the following values in the top-left $8 \\times 8$ entries:\n\n$\\mymatrix{cccccccc}{-0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\\\ 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\\\ 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\\\ 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 \\\\ 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 \\\\ 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 \\\\ 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 \\\\ 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75}$.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "def big_inversion(circuit,quantum_reg):\n\n for i in range(3):\n circuit.h(quantum_reg[i])\n circuit.x(quantum_reg[i])\n\n circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[4])\n circuit.ccx(quantum_reg[2],quantum_reg[4],quantum_reg[3])\n circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[4])\n\n for i in range(3):\n circuit.x(quantum_reg[i])\n circuit.h(quantum_reg[i])\n \n circuit.x(quantum_reg[3])", "_____no_output_____" ] ], [ [ "Below you can check the matrix of your inversion operator. 
We are interested in the top-left $8 \\times 8$ part of the matrix, the remaining parts are because of additional qubits.", "_____no_output_____" ] ], [ [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n\nbig_qreg2 = QuantumRegister(5)\n\nbig_mycircuit2 = QuantumCircuit(big_qreg2)\n\n#set ancilla\nbig_mycircuit2.x(big_qreg2[3])\nbig_mycircuit2.h(big_qreg2[3])\n \nbig_inversion(big_mycircuit2,big_qreg2)\n\n#set ancilla back\nbig_mycircuit2.h(big_qreg2[3])\nbig_mycircuit2.x(big_qreg2[3])\n\njob = execute(big_mycircuit2,Aer.get_backend('unitary_simulator'))\nu=job.result().get_unitary(big_mycircuit2,decimals=3)\nfor i in range(8):\n s=\"\"\n for j in range(8):\n val = str(u[i][j].real)\n while(len(val)<6): val = \" \"+val\n s = s + val\n print(s)", "_____no_output_____" ] ], [ [ "<a id=\"task6\"></a>\n<h3>Task 6: Testing Grover's search for 8 elements (Optional, challenging)</h3>\n\nNow we will test Grover's search on 8 elements.\n\nYou are given the operator $U_{f_8}$. First run the following cell to load it. You can access it via:\n\n<pre>Uf_8(circuit,qreg)</pre>\n \nWhich state do you observe the most?", "_____no_output_____" ] ], [ [ "%run ..\\include\\quantum.py", "_____no_output_____" ] ], [ [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "def big_inversion(circuit,quantum_reg):\n\n for i in range(3):\n circuit.h(quantum_reg[i])\n circuit.x(quantum_reg[i])\n\n circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[4])\n circuit.ccx(quantum_reg[2],quantum_reg[4],quantum_reg[3])\n circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[4])\n\n for i in range(3):\n circuit.x(quantum_reg[i])\n circuit.h(quantum_reg[i])\n \n circuit.x(quantum_reg[3])", "_____no_output_____" ], [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n\nqreg8 = QuantumRegister(5)\ncreg8 = ClassicalRegister(3)\n\nmycircuit8 = QuantumCircuit(qreg8,creg8)\n\n#set ancilla\nmycircuit8.x(qreg8[3])\nmycircuit8.h(qreg8[3])\n\n#Grover\nfor i in range(3):\n mycircuit8.h(qreg8[i])\n\nmycircuit8.barrier()\n\n#Try 1,2,6,12 8iterations of Grover\nfor i in range(2):\n Uf_8(mycircuit8,qreg8)\n mycircuit8.barrier()\n big_inversion(mycircuit8,qreg8)\n mycircuit8.barrier()\n\n#set ancilla back\n\nmycircuit8.h(qreg8[3])\nmycircuit8.x(qreg8[3])\n\nfor i in range(3):\n mycircuit8.measure(qreg8[i],creg8[i])\n\njob = execute(mycircuit8,Aer.get_backend('qasm_simulator'),shots=10000)\ncounts8 = job.result().get_counts(mycircuit8)\n# print the reverse of the outcome\nfor outcome in counts8:\n print(outcome,\"is observed\",counts8[outcome],\"times\")\n\nmycircuit8.draw(output='mpl')", "_____no_output_____" ] ], [ [ "<a id=\"task8\"></a>\n\n<h3>Task 8</h3>\n\nImplement an oracle function which marks the element 00. Run Grover's search with the oracle you have implemented. 
", "_____no_output_____" ] ], [ [ "def oracle_00(circuit,qreg):\n ", "_____no_output_____" ] ], [ [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "def oracle_00(circuit,qreg):\n circuit.x(qreg[0])\n circuit.x(qreg[1])\n circuit.ccx(qreg[0],qreg[1],qreg[2])\n circuit.x(qreg[0])\n circuit.x(qreg[1])", "_____no_output_____" ], [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n\nqreg = QuantumRegister(3)\ncreg = ClassicalRegister(2)\n\nmycircuit = QuantumCircuit(qreg,creg)\n\n#Grover\n\n\n#initial step - equal superposition\nfor i in range(2):\n mycircuit.h(qreg[i])\n\n#set ancilla\nmycircuit.x(qreg[2])\nmycircuit.h(qreg[2])\n\nmycircuit.barrier()\n\n#change the number of iterations\niterations=1\n\n#Grover's iterations.\nfor i in range(iterations):\n #query\n oracle_00(mycircuit,qreg)\n \n mycircuit.barrier()\n #inversion\n inversion(mycircuit,qreg)\n mycircuit.barrier()\n \n \n#set ancilla back\nmycircuit.h(qreg[2])\nmycircuit.x(qreg[2]) \n \nmycircuit.measure(qreg[0],creg[0])\nmycircuit.measure(qreg[1],creg[1])\n\njob = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=10000)\ncounts = job.result().get_counts(mycircuit)\n\n# print the reverse of the outcome\nfor outcome in counts:\n reverse_outcome = ''\n for i in outcome:\n reverse_outcome = i + reverse_outcome\n print(reverse_outcome,\"is observed\",counts[outcome],\"times\")\n\nmycircuit.draw(output='mpl')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb61a44c01245b710792e602130503a69e5caba7
1,020,907
ipynb
Jupyter Notebook
Example_Notebooks/sidelobe_tutorial.ipynb
tjgalvin/pyink
176177b87b10e215cc38c0d38b5993f5753ce746
[ "MIT" ]
null
null
null
Example_Notebooks/sidelobe_tutorial.ipynb
tjgalvin/pyink
176177b87b10e215cc38c0d38b5993f5753ce746
[ "MIT" ]
15
2020-05-12T11:23:39.000Z
2020-11-04T01:48:31.000Z
Example_Notebooks/sidelobe_tutorial.ipynb
tjgalvin/pyink
176177b87b10e215cc38c0d38b5993f5753ce746
[ "MIT" ]
1
2020-09-08T16:50:46.000Z
2020-09-08T16:50:46.000Z
526.783798
655,340
0.927952
[ [ [ "# SOM Tutorial\n\n**NOTE:** This tutorial uses supplemental data that can be obtained from [cirada.ca](cirada.ca). It can be unpacked any run from any directory, and contains another copy of this same notebook.\n\nIn this tutorial we will demonstrate the process of training and analyzing a SOM. As an example we will follow the process used to quantify the probability that a component in the VLASS Component Catalogue is a false positive originating from a sidelobe.\n\nThe process is divided into these steps:\n1. Create the sample\n1. Preprocess the images\n1. Train the SOM\n1. Inspect the SOM\n1. Map a sample onto the SOM\n1. Annotate the SOM\n1. Update the catalogue", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport pyink as pu", "_____no_output_____" ] ], [ [ "# 1. Creating the sample\n\nFor this exercise we have provided a 10,000 row subset of the catalogue used for the actual analysis. This is done to reduce download sizes and save processing time. The full catalogue (the VLASS Component Catalogue) is available at [cirada.ca](cirada.ca), and the subset is `catalogue.csv`.", "_____no_output_____" ], [ "The initial training sample was selected to include only components with a `Quality_flag` of either 0 or 1 and a `Peak_to_ring` < 3. For the main analysis we trained the SOM on a sample of 100,000 radio components\n\n sample = pd.read_csv(cirada_catalogue)\n sample = sample[sample.Quality_flag.isin([0,1])]\n sample = sample[\"Peak_to_ring\"] < 3\n sample = sample.sample(100000) # choose 100k components at random\n sample = sample.reset_index(drop=True) # Reset the DataFrame indexing.", "_____no_output_____" ] ], [ [ "sample = pd.read_csv(\"catalogue.csv\")\nsample", "_____no_output_____" ] ], [ [ "# 2. Preprocess the images\n\nBefore preprocessing the images (which creates an Image Binary), first make sure that there is an image cutout for each component in the catalogue. These are already provided in the `cutouts` directory.\n\nThe steps involved in preprocessing the images will depend on the science at hand. 
The only common requirement is that all images are normalized onto the same scale (generally 0 to 1) so that the SOM training is well-behaved.", "_____no_output_____" ] ], [ [ "import os\nfrom tqdm import tqdm\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\n\ndef load_fits(filename, ext=0):\n hdulist = fits.open(filename)\n d = hdulist[ext]\n return d\n\n\ndef load_radio_fits(filename, ext=0):\n \"\"\"Load the data from a single extension of a fits file.\"\"\"\n hdu = load_fits(filename, ext=ext)\n wcs = WCS(hdu.header).celestial\n hdu.data = np.squeeze(hdu.data)\n hdu.header = wcs.to_header()\n return hdu\n\n\ndef scale_data(data, log=False, minsnr=None):\n \"\"\"Scale the data so that the SOM behaves appropriately.\"\"\"\n img = np.zeros_like(data)\n noise = pu.rms_estimate(data[data != 0], mode=\"mad\", clip_rounds=2)\n # data - np.median(remove_zeros)\n\n if minsnr is not None:\n mask = data >= minsnr * noise\n else:\n mask = np.ones_like(data, dtype=bool)\n data = data[mask]\n\n if log:\n data = np.log10(data)\n img[mask] = pu.minmax(data)\n return img.astype(np.float32)", "_____no_output_____" ], [ "def radio_preprocess(idx, sample, path=\"images\", img_size=(150, 150), **kwargs):\n \"\"\"Preprocess a VLASS image.\n \"\"\"\n try:\n radio_comp = sample.iloc[idx]\n radio_file = radio_comp[\"filename\"]\n radio_file = os.path.join(path, radio_file)\n radio_hdu = load_radio_fits(radio_file)\n radio_data = radio_hdu.data\n return idx, scale_data(radio_data, **kwargs)\n \n except Exception as e:\n print(f\"Failed on index {idx}: {e}\")\n return None\n\n\ndef run_prepro_seq(sample, outfile, shape=(150, 150), **kwargs):\n \"\"\"Non-parallelized preprocessing for all VLASS images.\n \"\"\"\n with pu.ImageWriter(outfile, 0, shape, clobber=True) as pk_img:\n for idx in tqdm(sample.index):\n out = radio_preprocess(idx, sample, img_size=shape, **kwargs)\n if out is not None:\n pk_img.add(out[1], attributes=out[0])", "_____no_output_____" ] ], [ [ "In order to preprocess the image cutouts for the sidelobe analysis, we adopt a simple algorithm.\n1. Estimate the rms in an image cutout\n1. Mask out all values below a signal-to-noise ratio of 2\n1. Apply a log scaling to the remaining data\n1. Scale the data on a 0 to 1 scale\n\nEach image is processed sequentially after the creation of an `pu.ImageWriter` object.\n\nIn addition, the index of the catalogue entry is recorded using the `attributes` keyword in order to track any images that have failed the preprocessing step. This occurs for one image in this sample, which we use to demonstrate how to handle missing entries.\n\nThe output image binary contains 9999 entries, which is a different size from the original catalogue. 
The `IMG_catalogue.bin.records.pkl` file is (automatically) used to keep track of the differences.", "_____no_output_____" ] ], [ [ "imbin_file = \"IMG_catalogue.bin\"\nrun_prepro_seq(sample, imbin_file, shape=(150, 150), path=\"cutouts\", log=True, minsnr=2)", " 62%|██████▏ | 6220/10000 [00:37<00:22, 164.54it/s]/home/adrian/.local/lib/python3.6/site-packages/numpy/core/fromnumeric.py:3373: RuntimeWarning: Mean of empty slice.\n out=out, **kwargs)\n/home/adrian/.local/lib/python3.6/site-packages/numpy/core/_methods.py:170: RuntimeWarning: invalid value encountered in true_divide\n ret = ret.dtype.type(ret / rcount)\n/home/adrian/Projects/pyink/pyink/preprocessing.py:76: RuntimeWarning: invalid value encountered in true_divide\n p = np.polyfit(binc[mask], np.log10(counts[mask] / np.max(counts)), 2)\n 63%|██████▎ | 6254/10000 [00:37<00:22, 164.75it/s]" ] ], [ [ "# 3. Train the SOM\n\nThe SOM training was performed using the `train_som.sh` script on the main sample of 100,000 components. The resulting SOM is provided here as training on the smaller sample will provide poorer results.\n\nA single training stage is run as follows\n\n Pink --train $IMG $OUT3 --init $OUT2 --numthreads 10 \\\n --som-width $WIDTH --som-height $HEIGHT --num-iter 10 \\\n --dist-func unitygaussian 0.7 0.05 \\\n --inter-store keep -p 10 -n 360 \\\n --euclidean-distance-shape circular\n\nThe parameters involved are as follows:\n* \\$IMG: The Image Binary\n* \\$OUT2: The output SOM file from the previous training stage.\n* \\$OUT3: The output SOM file for the current training stage.\n* \\$HEIGHT: An integer corresponding to the height of the SOM.\n* \\$WIDTH: An integer corresponding to the width of the SOM.\n* --dist-func unitygaussian: The distribution function is a gaussian whose amplitude is normalized to 1. The two following numbers are the dispersion (sigma) of the gaussian function and then the damping factor, which reduces the magnitude to the update to neurons that are distant from the best-matching neuron.\n* --euclidean-distance-shape circular: A circular aperture is imposed when computing the Euclidean distance in order to prevent large differences caused by bright components near the image edge being rotated out of the frame.\n* -n 360: The number of rotations. With n=360 each will be 1 degree.\n* -num-iter 10: The dataset will be iterated through 10 times during training.", "_____no_output_____" ], [ "# 4. Inspect the SOM\n\nOnce a SOM has been trained, the first step is to visually assess the neurons to qualitatively determine whether the training has been successful. This visual inspection is best for spotting large errors in the training process. One should look for the morphologies being identified by the SOM, ensuring they are indicative of the structures of interest in the training sample.\n\nHere we show the methods in `pyink` that are helpful when plotting the SOM or one of its neurons. 
In addition, `som.explore` can be used with the interactive matplotlib interface from the command line in order to step through the neurons one by one, with all channels plotted together.", "_____no_output_____" ] ], [ [ "som = pu.SOM(\"SOM_B3_h10_w10_vlass.bin\")\n\nprint(som.som_rank)\nprint(som.som_shape)\nprint(som.neuron_rank)\nprint(som.neuron_shape)\nprint(som.neuron_size)", "2\n(10, 10, 1)\n2\n(1, 213, 213)\n45369\n" ], [ "# This SOM contains only one channel, but when multiple are present\n# the user should select one at a time.\nsom.plot(channel=0)", "_____no_output_____" ], [ "neuron = (4,6)\nsom.plot_neuron(neuron)\n# Equivalent to\n# plt.imshow(som[neuron][0])\n# The `0` corresponds to the channel of interest.\n\n# som.explore() can also be used to navigate the SOM,\n# but this works best in the matplotlib interactive window.", "_____no_output_____" ] ], [ [ "# 5. Map a sample onto the SOM\n\nA quantitative exploration of the SOM requires that a sample be mapped onto it. During the inspection stage this should be the training sample, while the final results are obtained by mapping the entire catalogue onto the SOM.\n\nA sample can be mapped onto the SOM as follows. This is provided as the final step in `train_som.sh`.\n\n Pink --map IMG_catalogue.bin MAP_catalogue.bin SOM_B3_h10_w10_vlass.bin \\\n --numthreads 8 --som-width 10 --som-height 10 \\\n --store-rot-flip TRANSFORM_catalogue.bin \\\n --euclidean-distance-shape circular -n 360 | tee mapping_step.log\n\n* MAP_catalogue.bin: The output MAP binary file.\n* TRANSFORM_catalogue.bin: The output TRANSFORM binary file.", "_____no_output_____" ] ], [ [ "import subprocess\n\ndef map_imbin(imbin_file, som_file, map_file, trans_file, \n som_width, som_height, numthreads=8, cpu=False, nrot=360, log=True):\n \"\"\"Map an image binary onto a SOM using Pink.\"\"\"\n commands = [\n \"Pink\",\n \"--map\",\n imbin_file,\n map_file,\n som_file,\n \"--numthreads\",\n f\"{numthreads}\",\n \"--som-width\",\n f\"{som_width}\",\n \"--som-height\",\n f\"{som_height}\",\n \"--store-rot-flip\",\n trans_file,\n \"--euclidean-distance-shape\",\n \"circular\",\n \"-n\",\n str(nrot),\n ]\n if cpu:\n commands += [\"--cuda-off\"]\n\n if log:\n map_logfile = map_file.replace(\".bin\", \".log\")\n with open(map_logfile, \"w\") as log:\n subprocess.run(commands, stdout=log)\n else:\n subprocess.run(commands)\n\nmap_imbin(\"IMG_catalogue.bin\", \"SOM_B3_h10_w10_vlass.bin\", \n \"MAP_catalogue.bin\", \"TRANSFORM_catalogue.bin\", \n 10, 10, numthreads=8, cpu=False, log=True)", "_____no_output_____" ] ], [ [ "In order to explore the mapped dataset, one should load:\n* The catalogue\n* The image binary\n* A SOMSet containing the SOM, Map, and Transform binaries\n\nAs mentioned previously, the catalogue and image binary are not the same size as one image failed during preprocessing. Provided the `IMG_catalogue.bin.records.pkl` file is in the same dirctory as the main image binary, the `records` property of the image binary will be created. This allows us to trim the initial catalogue used the indices of the images that were preprocessed successfully. 
", "_____no_output_____" ] ], [ [ "# Need to exclude the failed preprocessing entries from the sample.\nimgs = pu.ImageReader(\"IMG_catalogue.bin\")\nprint(imgs.data.shape)\nsample = sample.iloc[imgs.records].reset_index()\nsample", "(9999, 1, 150, 150)\n" ], [ "somset = pu.SOMSet(som, \"MAP_catalogue.bin\", \"TRANSFORM_catalogue.bin\")\n# Note that these can be initialized either from the file name (string) or \n# from the corresponding pu.SOM/pu.Mapping/pu.Transform objects.", "_____no_output_____" ] ], [ [ "We now show several helpful tools for visualizing either the preprocessed images or mapping statistics.", "_____no_output_____" ], [ "Plotting a preprocessed image can be done using `pu.plot_image`. This will plot a random index unless the `idx` argument is specified.\n\nFor multi-channel image binaries each channel will be shown along the horizontal axis.", "_____no_output_____" ] ], [ [ "# Plot a random preprocessed image.\n# Can select a specific index using the `idx` keyword.\npu.plot_image(imgs, idx=None)", "_____no_output_____" ] ], [ [ "One can also plot the best-matching neuron alongside any individual image by setting `show_bmu=True` and including a `pu.SOMSet` in the `somset` keyword. It is also recommended to set `apply_transform=True` so that the neuron will be transformed and trimmed to match the frame of the input image.", "_____no_output_____" ] ], [ [ "pu.plot_image(imgs, somset=somset, apply_transform=True, show_bmu=True)", "_____no_output_____" ], [ "# The coherence is a useful quantity to track during the training process\nprint(somset.mapping.coherence())", "8847\n" ] ], [ [ "In order to measure the number of matches to each neuron, use the `bmu_counts` function within the `pu.Mapping` object. This is an array with the same shape as the SOM (so normally 2 dimensions).", "_____no_output_____" ] ], [ [ "plt.imshow(somset.mapping.bmu_counts())\nplt.colorbar()", "_____no_output_____" ] ], [ [ "If each entry in the catalogue is associated with a label, these can also be mapped onto the SOM. This is done using the `pu.Mapping.map_labels` function. We demonstrate this by randomly assigning labels to each entry.", "_____no_output_____" ] ], [ [ "# Here we demonstrate how to map a series of labels onto the SOM\nlabels = [\"A\", \"B\", \"C\"]\nrandom_labels = np.random.choice(labels, somset.mapping.data.shape[0])\nlabel_counts = somset.mapping.map_labels(random_labels)\n\nfig, axes = plt.subplots(1, 3, figsize=(16,4))\nfor key, ax in zip(label_counts, axes):\n im = ax.imshow(label_counts[key])\n ax.set_title(f\"Label: {key}\")\n plt.colorbar(im, ax=ax)", "_____no_output_____" ] ], [ [ "# 6. Annotate the SOM\n\nThe annotation step is where ones attaches some kind of annotation -- whether it be a text label, image-based annotation, etc. -- to each neuron. A text-based annotation can be kept track of in a text file or spreadsheet while examining each neuron using `pu.SOM.explore` or another plotting tool. A script for creating image-based annotations is provided in `pyink/Example_Scripts`.\n\nFor the project covered in this tutorial our goal was to assign a probability that a component corresponding to a given neuron is a sidelobe. There are a variety of ways to accomplish this. We have done this by plotting, for each neuron, a random sample of 100 images that have been matched to the neuron. 
A group of radio astronomers then counted the number of false positives for each grid.", "_____no_output_____" ] ], [ [ "from itertools import product\n\ndef grid_plot_prepro(sample: pd.DataFrame, imgs: pu.ImageReader):\n \"\"\"Create a 10x10 grid of preprocessed images. See example below.\n\n Arguments:\n sample (pd.DataFrame): The table of 100 images to be plotted.\n imgs (pu.ImageReader): The ImageReader binary\n \"\"\"\n inds = sample.index\n data = imgs.data[inds, 0]\n if len(inds) < 100:\n blank = np.zeros((100 - len(inds), 150, 150))\n data = np.vstack([data, blank])\n data = data.reshape((10, 10, 150, 150))\n data = np.moveaxis(data, 2, 1).reshape((1500, 1500))\n\n plt.figure(figsize=(20, 20), constrained_layout=True)\n plt.imshow(data)\n plt.xticks([])\n plt.yticks([])\n\n for i, j in product(range(10), range(10)):\n if j * 10 + i >= len(inds):\n continue\n plt.scatter(150 * i + 75, 150 * j + 75, marker=\"o\", facecolors=\"none\", edgecolors=\"r\", s=200)\n\n for i in range(1, 10):\n plt.axvline(150 * i, c=\"w\", ls=\"-\", lw=0.5)\n plt.axhline(150 * i, c=\"w\", ls=\"-\", lw=0.5)", "_____no_output_____" ], [ "neuron = (5, 2)\nidxs = somset.mapping.images_with_bmu(neuron)\nsubset = sample.iloc[idxs].sample(min(100, len(idxs)))\ngrid_plot_prepro(subset, imgs)", "_____no_output_____" ] ], [ [ "In the above neuron the consensus number of spurious components was 55/100. This refers specifically to the component indicated by the red circle. This process was performed for each neuron and the results entered into a csv file.\n\nThe annotations can then be added as new columns to the original catalogue. In this case each radio component inherits the probability that its neuron corresponds to a sidelobe, so in each of the images presented in the grid the components will have a sidelobe probability of 0.55.", "_____no_output_____" ], [ "More complex approaches would include\n* Create labels for a sample of images, classifying each as either real or sidelobe. Map these labels onto the SOM and then measure the fraction of sidelobes that are matched to each neuron.\n* Using the previous sample of classifications, train another machine learning model (likely a neural network) that takes as input the Euclidean distance to _all_ neurons for a single image and is trained to predict whether an image is real or a sidelobe.", "_____no_output_____" ], [ "# 7. Update the catalogue\n\nThe sidelobe probabilities that we estimated for this SOM are provided in `neuron_info.csv`. This is a csv of a neuron's row, column, and sidelobe probability. 
We will now show how this information can be appended onto the original catalogue.", "_____no_output_____" ] ], [ [ "# First add the best-matching neuron and Euclidean distance info to the catalogue\nbmu = somset.mapping.bmu()\nsample[\"Best_neuron_y\"] = bmu[:, 0]\nsample[\"Best_neuron_x\"] = bmu[:, 1]\nsample[\"Neuron_dist\"] = somset.mapping.bmu_ed()\n\n# Read in the table with sidelobe probabilities\nneuron_table = pd.read_csv(\"neuron_info.csv\")\n# Set all values to -1 (null)\nPsidelobe = -np.ones((neuron_table.bmu_y.max() + 1, neuron_table.bmu_x.max() + 1))\n# Update values for each neuron using the corresponding number from the table\nPsidelobe[neuron_table.bmu_y, neuron_table.bmu_x] = neuron_table.P_sidelobe\n\n# Only components with low Peak_to_ring and S_Code != \"E\" should inherit P_sidelobe\nsample[\"P_sidelobe\"] = -np.ones(len(sample))\nlowPtR = (sample.Peak_to_ring < 3) & (sample.S_Code != \"E\")\nsample.loc[lowPtR, \"P_sidelobe\"] = 0.01 * Psidelobe[bmu[:, 0], bmu[:, 1]][lowPtR]", "_____no_output_____" ] ], [ [ "Since one component failed preprocessing, the catalogue does not currently match its original size. To recover the original catalogue we load it once more before merging in the new columns.", "_____no_output_____" ] ], [ [ "# First trim the table to just the new columns and a unique component identifier\n# This saves on computing time and makes the join cleaner.\nneuron_cols = [\"Best_neuron_y\", \"Best_neuron_x\", \"Neuron_dist\", \"P_sidelobe\"]\nupdate_cols = sample[[\"Component_name\"] + neuron_cols]\n\n# Use a left join to merge the colums\noriginal_cat = pd.read_csv(\"catalogue.csv\")\nfinal_cat = pd.merge(original_cat, update_cols, how=\"left\")\nfinal_cat", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb61a83b05c438addb0ffafb72b0f62271fb4ac3
35,792
ipynb
Jupyter Notebook
homework07.ipynb
Selen34/tvms
db6ffad71883a3ae360d427331e764d558ba8045
[ "Unlicense" ]
null
null
null
homework07.ipynb
Selen34/tvms
db6ffad71883a3ae360d427331e764d558ba8045
[ "Unlicense" ]
null
null
null
homework07.ipynb
Selen34/tvms
db6ffad71883a3ae360d427331e764d558ba8045
[ "Unlicense" ]
null
null
null
105.581121
15,576
0.860053
[ [ [ "### Задача 1", "_____no_output_____" ], [ "Даны значения величины заработной платы заемщиков банка (zp) и значения их поведенческого кредитного скоринга (ks): zp = [35, 45, 190, 200, 40, 70, 54, 150, 120, 110], ks = [401, 574, 874, 919, 459, 739, 653, 902, 746, 832]. Используя математические операции, посчитать коэффициенты линейной регрессии, приняв за X заработную плату (то есть, zp - признак), а за y - значения скорингового балла (то есть, ks - целевая переменная). Произвести расчет как с использованием intercept, так и без.", "_____no_output_____" ] ], [ [ "zp = [35, 45, 190, 200, 40, 70, 54, 150, 120, 110]\nks = [401, 574, 874, 919, 459, 739, 653, 902, 746, 832]", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "x = np.array(zp)\ny = np.array(ks)", "_____no_output_____" ], [ "b = (np.mean(x*y) - np.mean(x)*np.mean(y))/(np.mean(x**2)-np.mean(x)**2)\nb", "_____no_output_____" ], [ "a = np.mean(y)-b*np.mean(x)\na", "_____no_output_____" ], [ "y_hat = a + b*x", "_____no_output_____" ], [ "# кажется b должен рассчитываться по-другому и быть в районе 6. но что-то пошло не так... :(\ny_bx = b*x", "_____no_output_____" ], [ "plt.scatter(x, y)\nplt.xlim(0, 220)\nplt.plot(x, y_hat)\nplt.plot(x, y_bx)\nplt.plot(x, x*6)\nplt.show()", "_____no_output_____" ], [ "mse = ((y - y_hat)**2).sum()/y.size\nmse", "_____no_output_____" ] ], [ [ "### Задача 2", "_____no_output_____" ], [ "Посчитать коэффициент линейной регрессии при заработной плате (zp), используя градиентный спуск (без intercept).", "_____no_output_____" ] ], [ [ "def mse(b1, y, x, n=10):\n return np.sum(b1*x - y)**2/n\n\ndef mse_p(b1, y, x, n=10):\n return (2/n)*(np.sum((b1*x - y)*x))", "_____no_output_____" ], [ "alpha = 1e-6\nb1 = 1.1\nn = x.size\ncount = 1300\nfor i in range(count):\n b1 -=alpha*mse_p(b1, y, x)\n if i%100 == 0:\n print(f'Iter: {i} b1={b1} mse={mse(b1,y,x)}')", "Iter: 0 b1=1.23197488 mse=3421989.6468174844\nIter: 100 b1=5.604869386365851 mse=200410.0150326528\nIter: 200 b1=5.872388092043406 mse=130964.78688224145\nIter: 300 b1=5.888753970018103 mse=127194.06768724191\nIter: 400 b1=5.889755178368017 mse=126965.17583883367\nIter: 500 b1=5.889816428865025 mse=126951.1797107649\nIter: 600 b1=5.889820175960607 mse=126950.32350063794\nIter: 700 b1=5.8898204051950795 mse=126950.27112072892\nIter: 800 b1=5.889820419218856 mse=126950.26791630655\nIter: 900 b1=5.889820420076783 mse=126950.26772027086\nIter: 1000 b1=5.889820420129267 mse=126950.26770827845\nIter: 1100 b1=5.889820420132479 mse=126950.26770754442\nIter: 1200 b1=5.889820420132673 mse=126950.26770750014\n" ] ], [ [ "### Задача 3", "_____no_output_____" ], [ "В каких случаях для вычисления доверительных интервалов и проверки статистических гипотез используется таблица значений функции Лапласа, а в каких - таблица критических точек распределения Стьюдента?", "_____no_output_____" ], [ "**Распределение стьюдента используется когда небольшое количество данных (меньше 100), когда достаточно большое количество данных распределение Стьюдента приближается к стандартному**", "_____no_output_____" ], [ "### Задача 4", "_____no_output_____" ], [ "*4. Произвести вычисления как в пункте 2, но с вычислением intercept. 
Учесть, что изменение коэффициентов должно производиться на каждом шаге одновременно (то есть изменение одного коэффициента не должно влиять на изменение другого во время одной итерации).", "_____no_output_____" ] ], [ [ "def mse_p0(b0, b1, y, x, n=10):\n return (2/n)*(np.sum((b0 + b1*x - y)))\n\ndef mse_p1(b0, b1, y, x, n=10):\n return (2/n)*(np.sum((b0 + b1*x - y)*x))", "_____no_output_____" ], [ "alpha = 1e-6\nb1 = 1.1\nb0 = 0.9\nn = x.size\ncount = 1300\nfor i in range(count):\n b0 -=alpha*mse_p0(b0, b1, y, x)\n b1 -=alpha*mse_p1(b0, b1, y, x)\n if i%100 == 0:\n print(f'Iter: {i} b1={b0} b1={b1} mse={mse(b1,y,x)}')", "Iter: 0 b1=0.90119492 b1=1.231792117670224 mse=3422206.4675768376\nIter: 100 b1=0.9558210427846052 b1=5.59832616737201 mse=202292.9513681165\nIter: 200 b1=0.9802711135112138 b1=5.865260521163687 mse=132624.20558702468\nIter: 300 b1=1.002874321463162 b1=5.881432851448954 mse=128874.05427610718\nIter: 400 b1=1.0253634907700953 b1=5.882266717795455 mse=128682.14967954632\nIter: 500 b1=1.0478446139077957 b1=5.882162377401726 mse=128706.154575498\nIter: 600 b1=1.0703241742193168 b1=5.882000657181084 mse=128743.36488819688\nIter: 700 b1=1.0928025683284897 b1=5.881835435063288 mse=128781.38650885197\nIter: 800 b1=1.1152798205500916 b1=5.881670006623144 mse=128819.46123341825\nIter: 900 b1=1.1377559324257027 b1=5.881504573442136 mse=128857.54267701507\nIter: 1000 b1=1.160230904103923 b1=5.881339147850087 mse=128895.62800126639\nIter: 1100 b1=1.182704735648145 b1=5.881173730600784 mse=128933.71703181812\nIter: 1200 b1=1.2051774271165463 b1=5.881008321739933 mse=128971.80975729197\n" ], [ "plt.scatter(x, y)\nplt.xlim(0, 220)\nplt.plot(x, b0 + b1*x)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ] ]
cb61b95fe4447608395a640ee3b594cdc1aac096
72,171
ipynb
Jupyter Notebook
Section-2-Machine-Learning-Pipeline-Overview/Machine-Learning-Pipeline-WrapUp-for-Model-Deployment.ipynb
Wilmar3752/dmlm-research-and-production
a57f734268addbdefbfaf94d0f9ec3dcf8047eea
[ "BSD-3-Clause" ]
null
null
null
Section-2-Machine-Learning-Pipeline-Overview/Machine-Learning-Pipeline-WrapUp-for-Model-Deployment.ipynb
Wilmar3752/dmlm-research-and-production
a57f734268addbdefbfaf94d0f9ec3dcf8047eea
[ "BSD-3-Clause" ]
null
null
null
Section-2-Machine-Learning-Pipeline-Overview/Machine-Learning-Pipeline-WrapUp-for-Model-Deployment.ipynb
Wilmar3752/dmlm-research-and-production
a57f734268addbdefbfaf94d0f9ec3dcf8047eea
[ "BSD-3-Clause" ]
null
null
null
33.289207
348
0.367031
[ [ [ "## Machine Learning Pipeline: Wrapping up for Deployment\n\n\nIn the previous notebooks, we worked through the typical Machine Learning pipeline steps to build a regression model that allows us to predict house prices. Briefly, we transformed variables in the dataset to make them suitable for use in a Regression model, then we selected the most predictive variables and finally we trained our model.\n\nNow, we want to deploy our model. We want to create an API, which we can call with new data, with new characteristics about houses, to get an estimate of the SalePrice. In order to do so, we need to write code in a very specific way. We will show you how to write production code in the next sections.\n\nHere, we will summarise the key pieces of code, that we need to take forward for this particular project, to put our model in production.\n\nLet's go ahead and get started.", "_____no_output_____" ], [ "### Setting the seed\n\nIt is important to note, that we are engineering variables and pre-processing data with the idea of deploying the model. Therefore, from now on, for each step that includes some element of randomness, it is extremely important that we **set the seed**. This way, we can obtain reproducibility between our research and our development code.\n\nThis is perhaps one of the most important lessons that you need to take away from this course: **Always set the seeds**.\n\nLet's go ahead and load the dataset.", "_____no_output_____" ] ], [ [ "# to handle datasets\nimport pandas as pd\nimport numpy as np\n\n# to divide train and test set\nfrom sklearn.model_selection import train_test_split\n\n# feature scaling\nfrom sklearn.preprocessing import MinMaxScaler\n\n# to build the models\nfrom sklearn.linear_model import Lasso\n\n# to evaluate the models\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom math import sqrt\n\n# to persist the model and the scaler\nimport joblib\n\n# to visualise al the columns in the dataframe\npd.pandas.set_option('display.max_columns', None)\n\nimport warnings\nwarnings.simplefilter(action='ignore')", "_____no_output_____" ] ], [ [ "## Load data\n\nWe need the training data to train our model in the production environment. 
", "_____no_output_____" ] ], [ [ "# load dataset\ndata = pd.read_csv('houseprice.csv')\nprint(data.shape)\ndata.head()", "(1460, 81)\n" ] ], [ [ "## Separate dataset into train and test", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train, y_test = train_test_split(\n data,\n data['SalePrice'],\n test_size=0.1,\n # we are setting the seed here\n random_state=0)\n\nX_train.shape, X_test.shape", "_____no_output_____" ], [ "X_train.head()", "_____no_output_____" ] ], [ [ "## Selected features", "_____no_output_____" ] ], [ [ "# load selected features\nfeatures = pd.read_csv('selected_features.csv')\n\n# Added the extra feature, LotFrontage\nfeatures = features['0'].to_list() + ['LotFrontage']\n\nprint('Number of features: ', len(features))", "Number of features: 23\n" ] ], [ [ "## Engineer missing values\n\n### Categorical variables\n\nFor categorical variables, we will replace missing values with the string \"missing\".", "_____no_output_____" ] ], [ [ "# make a list of the categorical variables that contain missing values\n\nvars_with_na = [\n var for var in features\n if X_train[var].isnull().sum() > 0 and X_train[var].dtypes == 'O'\n]\n\n# display categorical variables that we will engineer:\nvars_with_na", "_____no_output_____" ] ], [ [ "Note that we have much less categorical variables with missing values than in our original dataset. But we still use categorical variables with NA for the final model, so we need to include this piece of feature engineering logic in the deployment pipeline. ", "_____no_output_____" ] ], [ [ "# I bring forward the code used in the feature engineering notebook:\n# (step 2)\n\nX_train[vars_with_na] = X_train[vars_with_na].fillna('Missing')\nX_test[vars_with_na] = X_test[vars_with_na].fillna('Missing')\n\n# check that we have no missing information in the engineered variables\nX_train[vars_with_na].isnull().sum()", "_____no_output_____" ] ], [ [ "### Numerical variables\n\nTo engineer missing values in numerical variables, we will:\n\n- add a binary missing value indicator variable\n- and then replace the missing values in the original variable with the mode\n", "_____no_output_____" ] ], [ [ "# make a list of the numerical variables that contain missing values:\n\nvars_with_na = [\n var for var in features\n if X_train[var].isnull().sum() > 0 and X_train[var].dtypes != 'O'\n]\n\n# display numerical variables with NA\nvars_with_na", "_____no_output_____" ], [ "# I bring forward the code used in the feature engineering notebook\n# with minor adjustments (step 2):\n\nvar = 'LotFrontage'\n\n# calculate the mode\nmode_val = X_train[var].mode()[0]\nprint('mode of LotFrontage: {}'.format(mode_val))\n\n# replace missing values by the mode\n# (in train and test)\nX_train[var] = X_train[var].fillna(mode_val)\nX_test[var] = X_test[var].fillna(mode_val)", "mode of LotFrontage: 60.0\n" ] ], [ [ "## Temporal variables\n\nOne of our temporal variables was selected to be used in the final model: 'YearRemodAdd'\n\nSo we need to deploy the bit of code that creates it.", "_____no_output_____" ] ], [ [ "# create the temporal var \"elapsed years\"\n\n# I bring this bit of code forward from the notebook on feature\n# engineering (step 2)\n\ndef elapsed_years(df, var):\n # capture difference between year variable\n # and year in which the house was sold\n \n df[var] = df['YrSold'] - df[var]\n \n return df", "_____no_output_____" ], [ "X_train = elapsed_years(X_train, 'YearRemodAdd')\nX_test = elapsed_years(X_test, 'YearRemodAdd')", "_____no_output_____" ] ], [ [ "### 
Numerical variable transformation", "_____no_output_____" ] ], [ [ "# we apply the logarithmic function to the variables that\n# were selected (and the target):\n\nfor var in ['LotFrontage', '1stFlrSF', 'GrLivArea', 'SalePrice']:\n X_train[var] = np.log(X_train[var])\n X_test[var] = np.log(X_test[var])", "_____no_output_____" ] ], [ [ "## Categorical variables\n\n### Group rare labels", "_____no_output_____" ] ], [ [ "# let's capture the categorical variables first\n\ncat_vars = [var for var in features if X_train[var].dtype == 'O']\n\ncat_vars", "_____no_output_____" ], [ "# bringing thise from the notebook on feature engineering (step 2):\n\ndef find_frequent_labels(df, var, rare_perc):\n \n # function finds the labels that are shared by more than\n # a certain % of the houses in the dataset\n\n df = df.copy()\n\n tmp = df.groupby(var)['SalePrice'].count() / len(df)\n\n return tmp[tmp > rare_perc].index\n\n\nfor var in cat_vars:\n \n # find the frequent categories\n frequent_ls = find_frequent_labels(X_train, var, 0.01)\n print(var)\n print(frequent_ls)\n print()\n \n # replace rare categories by the string \"Rare\"\n X_train[var] = np.where(X_train[var].isin(\n frequent_ls), X_train[var], 'Rare')\n \n X_test[var] = np.where(X_test[var].isin(\n frequent_ls), X_test[var], 'Rare')", "MSZoning\nIndex(['FV', 'RH', 'RL', 'RM'], dtype='object', name='MSZoning')\n\nNeighborhood\nIndex(['Blmngtn', 'BrDale', 'BrkSide', 'ClearCr', 'CollgCr', 'Crawfor',\n 'Edwards', 'Gilbert', 'IDOTRR', 'MeadowV', 'Mitchel', 'NAmes', 'NWAmes',\n 'NoRidge', 'NridgHt', 'OldTown', 'SWISU', 'Sawyer', 'SawyerW',\n 'Somerst', 'StoneBr', 'Timber'],\n dtype='object', name='Neighborhood')\n\nRoofStyle\nIndex(['Gable', 'Hip'], dtype='object', name='RoofStyle')\n\nMasVnrType\nIndex(['BrkFace', 'None', 'Stone'], dtype='object', name='MasVnrType')\n\nBsmtQual\nIndex(['Ex', 'Fa', 'Gd', 'Missing', 'TA'], dtype='object', name='BsmtQual')\n\nBsmtExposure\nIndex(['Av', 'Gd', 'Missing', 'Mn', 'No'], dtype='object', name='BsmtExposure')\n\nHeatingQC\nIndex(['Ex', 'Fa', 'Gd', 'TA'], dtype='object', name='HeatingQC')\n\nCentralAir\nIndex(['N', 'Y'], dtype='object', name='CentralAir')\n\nKitchenQual\nIndex(['Ex', 'Fa', 'Gd', 'TA'], dtype='object', name='KitchenQual')\n\nFireplaceQu\nIndex(['Ex', 'Fa', 'Gd', 'Missing', 'Po', 'TA'], dtype='object', name='FireplaceQu')\n\nGarageType\nIndex(['Attchd', 'Basment', 'BuiltIn', 'Detchd', 'Missing'], dtype='object', name='GarageType')\n\nGarageFinish\nIndex(['Fin', 'Missing', 'RFn', 'Unf'], dtype='object', name='GarageFinish')\n\nPavedDrive\nIndex(['N', 'P', 'Y'], dtype='object', name='PavedDrive')\n\n" ] ], [ [ "### Encoding of categorical variables\n", "_____no_output_____" ] ], [ [ "# this function will assign discrete values to the strings of the variables,\n# so that the smaller value corresponds to the category that shows the smaller\n# mean house sale price\n\n\ndef replace_categories(train, test, var, target):\n\n # order the categories in a variable from that with the lowest\n # house sale price, to that with the highest\n ordered_labels = train.groupby([var])[target].mean().sort_values().index\n\n # create a dictionary of ordered categories to integer values\n ordinal_label = {k: i for i, k in enumerate(ordered_labels, 0)}\n\n # use the dictionary to replace the categorical strings by integers\n train[var] = train[var].map(ordinal_label)\n test[var] = test[var].map(ordinal_label)\n \n print(var)\n print(ordinal_label)\n print()", "_____no_output_____" ], [ "for var in cat_vars:\n 
replace_categories(X_train, X_test, var, 'SalePrice')", "MSZoning\n{'Rare': 0, 'RM': 1, 'RH': 2, 'RL': 3, 'FV': 4}\n\nNeighborhood\n{'IDOTRR': 0, 'MeadowV': 1, 'BrDale': 2, 'Edwards': 3, 'BrkSide': 4, 'OldTown': 5, 'Sawyer': 6, 'SWISU': 7, 'NAmes': 8, 'Mitchel': 9, 'SawyerW': 10, 'Rare': 11, 'NWAmes': 12, 'Gilbert': 13, 'Blmngtn': 14, 'CollgCr': 15, 'Crawfor': 16, 'ClearCr': 17, 'Somerst': 18, 'Timber': 19, 'StoneBr': 20, 'NridgHt': 21, 'NoRidge': 22}\n\nRoofStyle\n{'Gable': 0, 'Rare': 1, 'Hip': 2}\n\nMasVnrType\n{'None': 0, 'Rare': 1, 'BrkFace': 2, 'Stone': 3}\n\nBsmtQual\n{'Missing': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}\n\nBsmtExposure\n{'Missing': 0, 'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4}\n\nHeatingQC\n{'Rare': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}\n\nCentralAir\n{'N': 0, 'Y': 1}\n\nKitchenQual\n{'Fa': 0, 'TA': 1, 'Gd': 2, 'Ex': 3}\n\nFireplaceQu\n{'Po': 0, 'Missing': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}\n\nGarageType\n{'Missing': 0, 'Rare': 1, 'Detchd': 2, 'Basment': 3, 'Attchd': 4, 'BuiltIn': 5}\n\nGarageFinish\n{'Missing': 0, 'Unf': 1, 'RFn': 2, 'Fin': 3}\n\nPavedDrive\n{'N': 0, 'P': 1, 'Y': 2}\n\n" ], [ "# check absence of na\n[var for var in features if X_train[var].isnull().sum() > 0]", "_____no_output_____" ], [ "# check absence of na\n[var for var in features if X_test[var].isnull().sum() > 0]", "_____no_output_____" ] ], [ [ "### Feature Scaling\n\nFor use in linear models, features need to be either scaled or normalised. In the next section, I will scale features between the min and max values:", "_____no_output_____" ] ], [ [ "# capture the target\ny_train = X_train['SalePrice']\ny_test = X_test['SalePrice']", "_____no_output_____" ], [ "# set up scaler\nscaler = MinMaxScaler()\n\n# train scaler\nscaler.fit(X_train[features])", "_____no_output_____" ], [ "# explore maximum values of variables\nscaler.data_max_", "_____no_output_____" ], [ "# explore minimum values of variables\nscaler.data_min_", "_____no_output_____" ], [ "# transform the train and test set, and add on the Id and SalePrice variables\nX_train = scaler.transform(X_train[features])\nX_test = scaler.transform(X_test[features])", "_____no_output_____" ] ], [ [ "## Train the Linear Regression: Lasso", "_____no_output_____" ] ], [ [ "# set up the model\n# remember to set the random_state / seed\n\nlin_model = Lasso(alpha=0.005, random_state=0)\n\n# train the model\nlin_model.fit(X_train, y_train)\n\n# we persist the model for future use\njoblib.dump(lin_model, 'lasso_regression.pkl')", "_____no_output_____" ], [ "# evaluate the model:\n# ====================\n\n# remember that we log transformed the output (SalePrice)\n# in our feature engineering notebook (step 2).\n\n# In order to get the true performance of the Lasso\n# we need to transform both the target and the predictions\n# back to the original house prices values.\n\n# We will evaluate performance using the mean squared error and\n# the root of the mean squared error and r2\n\n# make predictions for train set\npred = lin_model.predict(X_train)\n\n# determine mse and rmse\nprint('train mse: {}'.format(int(\n mean_squared_error(np.exp(y_train), np.exp(pred)))))\nprint('train rmse: {}'.format(int(\n sqrt(mean_squared_error(np.exp(y_train), np.exp(pred))))))\nprint('train r2: {}'.format(\n r2_score(np.exp(y_train), np.exp(pred))))\nprint()\n\n# make predictions for test set\npred = lin_model.predict(X_test)\n\n# determine mse and rmse\nprint('test mse: {}'.format(int(\n mean_squared_error(np.exp(y_test), np.exp(pred)))))\nprint('test rmse: {}'.format(int(\n 
sqrt(mean_squared_error(np.exp(y_test), np.exp(pred))))))\nprint('test r2: {}'.format(\n r2_score(np.exp(y_test), np.exp(pred))))\nprint()\n\nprint('Average house price: ', int(np.exp(y_train).median()))", "train mse: 1087435415\ntrain rmse: 32976\ntrain r2: 0.8258384535431164\n\ntest mse: 1405259552\ntest rmse: 37486\ntest r2: 0.7955128088451114\n\nAverage house price: 163000\n" ] ], [ [ "That is all for this notebook. And that is all for this section too.\n\n**In the next section, we will show you how to productionise this code for model deployment**.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb61d1e3c51ea081c4de1a15d963f0b482929623
23,081
ipynb
Jupyter Notebook
notebook/10D_Foreign_Language_Interface.ipynb
cliburn/sta-663-2017
89e059dfff25a4aa427cdec5ded755ab456fbc16
[ "MIT" ]
52
2017-01-11T03:16:00.000Z
2021-01-15T05:28:48.000Z
notebook/10D_Foreign_Language_Interface.ipynb
slimdt/Duke_Stat633_2017
89e059dfff25a4aa427cdec5ded755ab456fbc16
[ "MIT" ]
1
2017-04-16T17:10:49.000Z
2017-04-16T19:13:03.000Z
notebook/10D_Foreign_Language_Interface.ipynb
slimdt/Duke_Stat633_2017
89e059dfff25a4aa427cdec5ded755ab456fbc16
[ "MIT" ]
47
2017-01-13T04:50:54.000Z
2021-06-23T11:48:33.000Z
21.312096
532
0.491053
[ [ [ "Foreign Function Interface\n====", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ] ], [ [ "Wrapping functions written in C\n----\n\n### Steps \n\n- Write the C header and implementation files\n- Write the Cython `.pxd` file to declare C function signatures\n- Write the Cython `.pyx` file to wrap the C functions for Python\n- Write `setup.py` to automate buiding of the Python extension module\n- Run `python setup.py build_ext --inplace` to build the module\n- Import module in Python like any other Python module", "_____no_output_____" ], [ "### C header file", "_____no_output_____" ] ], [ [ "%%file c_math.h\n\n#pragma once\ndouble plus(double a, double b);\ndouble mult(double a, double b);\ndouble square(double a);\ndouble acc(double *xs, int size);", "Writing c_math.h\n" ] ], [ [ "### C implementation file", "_____no_output_____" ] ], [ [ "%%file c_math.c\n#include <math.h>\n#include \"c_math.h\"\n\ndouble plus(double a, double b) {\n return a + b;\n};\n\ndouble mult(double a, double b) {\n return a * b;\n};\n\ndouble square(double a) {\n return pow(a, 2);\n};\n\ndouble acc(double *xs, int size) {\n double s = 0;\n for (int i=0; i<size; i++) {\n s += xs[i];\n }\n return s;\n};", "Writing c_math.c\n" ] ], [ [ "### Cython \"header\" file\n\nThe `.pxd` file is similar to a header file for Cython. In other words, we can `cimport <filename>.pxd` in the regular Cython `.pyx` files to get access to functions declared in the `.pxd` files.", "_____no_output_____" ] ], [ [ "%%file cy_math.pxd\n\ncdef extern from \"c_math.h\":\n double plus(double a, double b)\n double mult(double a, double b)\n double square(double a)\n double acc(double *xs, int size)", "Writing cy_math.pxd\n" ] ], [ [ "### Cython \"implementation\" file\n\nHere is whhere we actually wrap the C code for use in Python. Note especially how we handle passing in of arrays to a C function expecting a pointer to double using `typed memoryviews`.", "_____no_output_____" ] ], [ [ "%%file cy_math.pyx\n\ncimport cy_math\n\ndef py_plus(double a, double b):\n return cy_math.plus(a, b)\n\ndef py_mult(double a, double b):\n return cy_math.mult(a, b)\n\ndef py_square(double a):\n return cy_math.square(a)\n\ndef py_sum(double[::1] xs):\n cdef int size = len(xs)\n return cy_math.acc(&xs[0], size)", "Writing cy_math.pyx\n" ] ], [ [ "### Build script `setup.py`\n\nThis is a build script for Python, similar to a Makefile", "_____no_output_____" ] ], [ [ "%%file setup.py\n\nfrom distutils.core import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy as np\n\next = Extension(\"cy_math\",\n sources=[\"cy_math.pyx\", \"c_math.c\"],\n libraries=[\"m\"],\n extra_compile_args=[\"-w\", \"-std=c99\"])\n\nsetup(name = \"Math Funcs\",\n ext_modules = cythonize(ext))", "Writing setup.py\n" ] ], [ [ "### Building an extension module", "_____no_output_____" ] ], [ [ "! python setup.py clean\n! python setup.py -q build_ext --inplace", "Compiling cy_math.pyx because it changed.\n[1/1] Cythonizing cy_math.pyx\nrunning clean\n" ], [ "! 
ls cy_math*", "cy_math.c cy_math.pxd\r\n\u001b[31mcy_math.cpython-35m-darwin.so\u001b[m\u001b[m cy_math.pyx\r\n" ] ], [ [ "### Using the extension module in Python", "_____no_output_____" ] ], [ [ "import cy_math\nimport numpy as np\n\nprint(cy_math.py_plus(3, 4))\nprint(cy_math.py_mult(3, 4))\nprint(cy_math.py_square(3))\n\nxs = np.arange(10, dtype='float')\nprint(cy_math.py_sum(xs))", "7.0\n12.0\n9.0\n45.0\n" ] ], [ [ "### Confirm that we are getting C speedups by comparing with pure Python accumulator", "_____no_output_____" ] ], [ [ "def acc(xs):\n s = 0\n for x in xs:\n s += x\n return s", "_____no_output_____" ], [ "import cy_math\n\nxs = np.arange(1000000, dtype='float')\n%timeit -r3 -n3 acc(xs)\n%timeit -r3 -n3 cy_math.py_sum(xs)", "3 loops, best of 3: 197 ms per loop\n3 loops, best of 3: 1.95 ms per loop\n" ] ], [ [ "C++\n----\n\nThis is similar to C. We will use Cython to wrap a simple funciton. ", "_____no_output_____" ] ], [ [ "%%file add.hpp \n#pragma once\nint add(int a, int b);", "Writing add.hpp\n" ], [ "%%file add.cpp\nint add(int a, int b) {\n return a+b;\n}", "Writing add.cpp\n" ], [ "%%file plus.pyx\n\ncdef extern from 'add.cpp':\n int add(int a, int b)\n \ndef plus(a, b):\n return add(a, b)", "Writing plus.pyx\n" ] ], [ [ "#### Note that essentially the only difference from C is `language=\"C++\"` and the flag `-std=c++11`", "_____no_output_____" ] ], [ [ "%%file setup.py \nfrom distutils.core import setup, Extension\nfrom Cython.Build import cythonize\n\next = Extension(\"plus\",\n sources=[\"plus.pyx\", \"add.cpp\"],\n extra_compile_args=[\"-w\", \"-std=c++11\"])\n\nsetup(\n ext_modules = cythonize(\n ext,\n language=\"c++\", \n ))", "Overwriting setup.py\n" ], [ "%%bash\npython setup.py -q build_ext --inplace", "Please put \"# distutils: language=c++\" in your .pyx or .pxd file(s)\nCompiling plus.pyx because it changed.\n[1/1] Cythonizing plus.pyx\n" ], [ "import plus\n\nplus.plus(3, 4)", "_____no_output_____" ] ], [ [ "Wrap an R function from libRmath using `ctypes`\n----\n\nR comes with a standalone C library of special functions and distributions, as described in the official documentation. These functions can be wrapped for use in Python.", "_____no_output_____" ], [ "### Building the Rmath standalone library\n\n```bash\ngit clone https://github.com/JuliaLang/Rmath-julia.git\ncd Rmath-julia/src\nmake\ncd ../..\n```", "_____no_output_____" ], [ "#### Functions to wrap", "_____no_output_____" ] ], [ [ "! 
grep \"\\s.norm(\" Rmath-julia/include/Rmath.h", "_____no_output_____" ], [ "from ctypes import CDLL, c_int, c_double", "_____no_output_____" ], [ "%%bash\nls Rmath-julia/src/*so", "_____no_output_____" ], [ "lib = CDLL('Rmath-julia/src/libRmath-julia.so')\n\ndef rnorm(mu=0, sigma=1):\n lib.rnorm.argtypes = [c_double, c_double]\n lib.rnorm.restype = c_double\n return lib.rnorm(mu, sigma)\n\ndef dnorm(x, mean=0, sd=1, log=0):\n lib.dnorm4.argtypes = [c_double, c_double, c_double, c_int]\n lib.dnorm4.restype = c_double\n return lib.dnorm4(x, mean, sd, log)\n\ndef pnorm(q, mu=0, sd=1, lower_tail=1, log_p=0):\n lib.pnorm5.argtypes = [c_double, c_double, c_double, c_int, c_int]\n lib.pnorm5.restype = c_double\n return lib.pnorm5(q, mu, sd, lower_tail, log_p)\n\ndef qnorm(p, mu=0, sd=1, lower_tail=1, log_p=0):\n lib.qnorm5.argtypes = [c_double, c_double, c_double, c_int, c_int]\n lib.qnorm5.restype = c_double\n return lib.qnorm5(p, mu, sd, lower_tail, log_p)", "_____no_output_____" ], [ "pnorm(0, mu=2)", "_____no_output_____" ], [ "qnorm(0.022750131948179212, mu=2)", "_____no_output_____" ], [ "plt.hist([rnorm() for i in range(100)])\npass", "_____no_output_____" ], [ "xs = np.linspace(-3,3,100)\nplt.plot(xs, list(map(dnorm, xs)))\npass", "_____no_output_____" ] ], [ [ "### Using Cython to wrap standalone library", "_____no_output_____" ] ], [ [ "%%file rmath.pxd\n\ncdef extern from \"Rmath-julia/include/Rmath.h\":\n double dnorm(double, double, double, int)\n double pnorm(double, double, double, int, int)\n double qnorm(double, double, double, int, int)\n double rnorm(double, double)", "_____no_output_____" ], [ "%%file rmath.pyx\n\ncimport rmath\n\ndef rnorm_(mu=0, sigma=1):\n return rmath.rnorm(mu, sigma)\n\ndef dnorm_(x, mean=0, sd=1, log=0):\n return rmath.dnorm(x, mean, sd, log)\n\ndef pnorm_(q, mu=0, sd=1, lower_tail=1, log_p=0):\n return rmath.pnorm(q, mu, sd, lower_tail, log_p)\n\ndef qnorm_(p, mu=0, sd=1, lower_tail=1, log_p=0):\n return rmath.qnorm(p, mu, sd, lower_tail, log_p)", "_____no_output_____" ], [ "%%file setup.py \nfrom distutils.core import setup, Extension\nfrom Cython.Build import cythonize\n\next = Extension(\"rmath\",\n sources=[\"rmath.pyx\"],\n include_dirs=[\"Rmath-julia/include\"],\n library_dirs=[\"Rmath-julia/src\"],\n libraries=[\"Rmath-julia\"],\n runtime_library_dirs=[\"Rmath-julia/src\"],\n extra_compile_args=[\"-w\", \"-std=c99\", \"-DMATHLIB_STANDALONE\"],\n extra_link_args=[],\n )\n\nsetup(\n ext_modules = cythonize(\n ext\n ))", "_____no_output_____" ], [ "! python setup.py build_ext --inplace", "_____no_output_____" ], [ "import rmath\n\nplt.hist([rmath.rnorm_() for i in range(100)])\npass", "_____no_output_____" ], [ "xs = np.linspace(-3,3,100)\nplt.plot(xs, list(map(rmath.dnorm_, xs)))\npass", "_____no_output_____" ] ], [ [ "### `Cython` wrappers are faster than `ctypes`", "_____no_output_____" ] ], [ [ "%timeit pnorm(0, mu=2)\n%timeit rmath.pnorm_(0, mu=2)", "_____no_output_____" ] ], [ [ "Fortran\n----", "_____no_output_____" ] ], [ [ "! 
pip install fortran-magic", "_____no_output_____" ], [ "%load_ext fortranmagic", "_____no_output_____" ], [ "%%fortran\n\nsubroutine fort_sum(N, s)\n integer*8, intent(in) :: N\n integer*8, intent(out) :: s\n integer*8 i\n s = 0\n do i = 1, N\n s = s + i*i\n end do\nend ", "_____no_output_____" ], [ "fort_sum(10)", "_____no_output_____" ] ], [ [ "#### Another example from the [documentation](http://nbviewer.ipython.org/github/mgaitan/fortran_magic/blob/master/documentation.ipynb)", "_____no_output_____" ] ], [ [ "%%fortran --link lapack\n\nsubroutine solve(A, b, x, n)\n ! solve the matrix equation A*x=b using LAPACK\n implicit none\n\n real*8, dimension(n,n), intent(in) :: A\n real*8, dimension(n), intent(in) :: b\n real*8, dimension(n), intent(out) :: x\n\n integer :: pivot(n), ok\n\n integer, intent(in) :: n\n x = b\n\n ! find the solution using the LAPACK routine SGESV\n call DGESV(n, 1, A, n, pivot, x, n, ok)\n \nend subroutine", "_____no_output_____" ], [ "A = np.array([[1, 2.5], [-3, 4]])\nb = np.array([1, 2.5])\n\nsolve(A, b)", "_____no_output_____" ] ], [ [ "Interfacing with R\n----", "_____no_output_____" ] ], [ [ "%load_ext rpy2.ipython", "_____no_output_____" ], [ "%%R\nlibrary(ggplot2)\nsuppressPackageStartupMessages(\n ggplot(mtcars, aes(x=wt, y=mpg)) + geom_point() + geom_smooth(method=loess)\n)", "_____no_output_____" ] ], [ [ "#### Converting between Python and R", "_____no_output_____" ] ], [ [ "%R -o mtcars", "_____no_output_____" ] ], [ [ "#### `mtcars` is now a Python dataframe", "_____no_output_____" ] ], [ [ "mtcars.head(n=3)", "_____no_output_____" ] ], [ [ "#### We can also pass data from Python to R", "_____no_output_____" ] ], [ [ "x = np.linspace(0, 2*np.pi, 100)\ny = np.sin(x)", "_____no_output_____" ], [ "%%R -i x,y\nplot(x, y, main=\"Sine curve in R base graphics\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb61d57dc0f6f87aa5fb4201fd4e390e4b8a216c
47,302
ipynb
Jupyter Notebook
src/datacleaning/Chapter 10/2_taking_measure.ipynb
vidyabhandary/DataScienceNotebooks
1a9a423ffe6bdcd803ed195da97a751e44567813
[ "MIT" ]
null
null
null
src/datacleaning/Chapter 10/2_taking_measure.ipynb
vidyabhandary/DataScienceNotebooks
1a9a423ffe6bdcd803ed195da97a751e44567813
[ "MIT" ]
null
null
null
src/datacleaning/Chapter 10/2_taking_measure.ipynb
vidyabhandary/DataScienceNotebooks
1a9a423ffe6bdcd803ed195da97a751e44567813
[ "MIT" ]
null
null
null
33.763026
2,192
0.40307
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Import-the-pandas,-os,-and-sys-libraries\" data-toc-modified-id=\"Import-the-pandas,-os,-and-sys-libraries-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Import the pandas, os, and sys libraries</a></span></li><li><span><a href=\"#Import-the-basicdescriptives-module\" data-toc-modified-id=\"Import-the-basicdescriptives-module-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Import the basicdescriptives module</a></span></li><li><span><a href=\"#Show-summary-statistics-for-continuous-variables\" data-toc-modified-id=\"Show-summary-statistics-for-continuous-variables-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Show summary statistics for continuous variables</a></span></li><li><span><a href=\"#Create-a-function-to-count-missing-values-by-columns-and-rows\" data-toc-modified-id=\"Create-a-function-to-count-missing-values-by-columns-and-rows-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>Create a function to count missing values by columns and rows</a></span></li><li><span><a href=\"#Call-the-getmissings-function\" data-toc-modified-id=\"Call-the-getmissings-function-5\"><span class=\"toc-item-num\">5&nbsp;&nbsp;</span>Call the getmissings function</a></span></li><li><span><a href=\"#Call-the-makefreqs-function\" data-toc-modified-id=\"Call-the-makefreqs-function-6\"><span class=\"toc-item-num\">6&nbsp;&nbsp;</span>Call the makefreqs function</a></span></li><li><span><a href=\"#Pass-the-marital-status,-gender,-and-college-enrollment-columns-to-the-getcnts-function\" data-toc-modified-id=\"Pass-the-marital-status,-gender,-and-college-enrollment-columns-to-the-getcnts-function-7\"><span class=\"toc-item-num\">7&nbsp;&nbsp;</span>Pass the marital status, gender, and college enrollment columns to the getcnts function</a></span></li><li><span><a href=\"#Use-the-rowsel-parameter-of-getcnts-to-limit-the-output-to-specific-rows\" data-toc-modified-id=\"Use-the-rowsel-parameter-of-getcnts-to-limit-the-output-to-specific-rows-8\"><span class=\"toc-item-num\">8&nbsp;&nbsp;</span>Use the rowsel parameter of getcnts to limit the output to specific rows</a></span></li></ul></div>", "_____no_output_____" ], [ "# Import the pandas, os, and sys libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport os\nimport sys", "_____no_output_____" ], [ "import watermark\n%load_ext watermark\n\n%watermark -n -i -iv", "watermark: 2.1.0\npandas : 1.2.1\nsys : 3.7.9 (default, Aug 31 2020, 17:10:11) [MSC v.1916 64 bit (AMD64)]\njson : 2.0.9\n\n" ], [ "nls97 = pd.read_csv('data/nls97f.csv')\nnls97.set_index('personid', inplace=True)", "_____no_output_____" ] ], [ [ "# Import the basicdescriptives module", "_____no_output_____" ] ], [ [ "sys.path.append(os.getcwd() + '\\helperfunctions')\n# print(sys.path)", "_____no_output_____" ], [ "import basicdescriptives as bd", "_____no_output_____" ] ], [ [ "# Show summary statistics for continuous variables", "_____no_output_____" ] ], [ [ "bd.gettots(nls97[['satverbal', 'satmath']]).T", "_____no_output_____" ], [ "bd.gettots(nls97.filter(like='weeksworked'))", "_____no_output_____" ] ], [ [ "# Create a function to count missing values by columns and rows", "_____no_output_____" ] ], [ [ "missingsbycols, missingsbyrows = bd.getmissings(\n nls97[['weeksworked16', 'weeksworked17']], True)", "_____no_output_____" ], [ "missingsbycols", "_____no_output_____" ], [ "# the missingbyrows value shows that 73.9% of rows have 0 
missing values for\n# weeksworked16 and weeksworked17\n\nmissingsbyrows", "_____no_output_____" ], [ "nls97.shape", "_____no_output_____" ] ], [ [ "# Call the getmissings function", "_____no_output_____" ] ], [ [ "missingsbycols, missingsbyrows = bd.getmissings(\n nls97[['weeksworked16', 'weeksworked17']])", "_____no_output_____" ], [ "missingsbyrows", "_____no_output_____" ], [ "# Create a function to calculate frequencies for all categorical variables", "_____no_output_____" ] ], [ [ "# Call the makefreqs function", "_____no_output_____" ] ], [ [ "# change data type of each object column to category\nnls97.loc[:, nls97.dtypes == 'object'] = nls97.select_dtypes(\n ['object']).apply(lambda x: x.astype('category'))", "_____no_output_____" ], [ "nls97.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 8984 entries, 100061 to 999963\nData columns (total 89 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 gender 8984 non-null category\n 1 birthmonth 8984 non-null int64 \n 2 birthyear 8984 non-null int64 \n 3 highestgradecompleted 6663 non-null float64 \n 4 maritalstatus 6672 non-null category\n 5 childathome 4791 non-null float64 \n 6 childnotathome 4791 non-null float64 \n 7 wageincome 5091 non-null float64 \n 8 weeklyhrscomputer 6710 non-null category\n 9 weeklyhrstv 6711 non-null category\n 10 nightlyhrssleep 6706 non-null float64 \n 11 satverbal 1406 non-null float64 \n 12 satmath 1407 non-null float64 \n 13 gpaoverall 6004 non-null float64 \n 14 gpaenglish 5798 non-null float64 \n 15 gpamath 5766 non-null float64 \n 16 gpascience 5684 non-null float64 \n 17 highestdegree 8953 non-null category\n 18 govprovidejobs 1833 non-null category\n 19 govpricecontrols 1859 non-null category\n 20 govhealthcare 1874 non-null category\n 21 govelderliving 1872 non-null category\n 22 govindhelp 1815 non-null category\n 23 govunemp 1811 non-null category\n 24 govincomediff 1775 non-null category\n 25 govcollegefinance 1875 non-null category\n 26 govdecenthousing 1847 non-null category\n 27 govprotectenvironment 1860 non-null category\n 28 weeksworked00 8603 non-null float64 \n 29 weeksworked01 8564 non-null float64 \n 30 weeksworked02 8556 non-null float64 \n 31 weeksworked03 8490 non-null float64 \n 32 weeksworked04 8458 non-null float64 \n 33 weeksworked05 8403 non-null float64 \n 34 weeksworked06 8340 non-null float64 \n 35 weeksworked07 8272 non-null float64 \n 36 weeksworked08 8186 non-null float64 \n 37 weeksworked09 8146 non-null float64 \n 38 weeksworked10 8054 non-null float64 \n 39 weeksworked11 7968 non-null float64 \n 40 weeksworked12 7747 non-null float64 \n 41 weeksworked13 7680 non-null float64 \n 42 weeksworked14 7612 non-null float64 \n 43 weeksworked15 7389 non-null float64 \n 44 weeksworked16 7068 non-null float64 \n 45 weeksworked17 6670 non-null float64 \n 46 colenrfeb97 1250 non-null category\n 47 colenroct97 8501 non-null category\n 48 colenrfeb98 8501 non-null category\n 49 colenroct98 8888 non-null category\n 50 colenrfeb99 8865 non-null category\n 51 colenroct99 8851 non-null category\n 52 colenrfeb00 8820 non-null category\n 53 colenroct00 8805 non-null category\n 54 colenrfeb01 8786 non-null category\n 55 colenroct01 8758 non-null category\n 56 colenrfeb02 8732 non-null category\n 57 colenroct02 8698 non-null category\n 58 colenrfeb03 8658 non-null category\n 59 colenroct03 8622 non-null category\n 60 colenrfeb04 8578 non-null category\n 61 colenroct04 8546 non-null category\n 62 colenrfeb05 8508 non-null category\n 63 colenroct05 8471 
non-null category\n 64 colenrfeb06 8426 non-null category\n 65 colenroct06 8407 non-null category\n 66 colenrfeb07 8352 non-null category\n 67 colenroct07 8331 non-null category\n 68 colenrfeb08 8289 non-null category\n 69 colenroct08 8280 non-null category\n 70 colenrfeb09 8236 non-null category\n 71 colenroct09 8193 non-null category\n 72 colenrfeb10 8116 non-null category\n 73 colenroct10 8091 non-null category\n 74 colenrfeb11 8037 non-null category\n 75 colenroct11 7972 non-null category\n 76 colenrfeb12 7794 non-null category\n 77 colenroct12 7754 non-null category\n 78 colenrfeb13 7754 non-null category\n 79 colenroct13 7754 non-null category\n 80 colenrfeb14 7624 non-null category\n 81 colenroct14 7469 non-null category\n 82 colenrfeb15 7469 non-null category\n 83 colenroct15 7469 non-null category\n 84 colenrfeb16 7036 non-null category\n 85 colenroct16 6733 non-null category\n 86 colenrfeb17 6733 non-null category\n 87 colenroct17 6734 non-null category\n 88 originalid 8984 non-null int64 \ndtypes: category(57), float64(29), int64(3)\nmemory usage: 2.8 MB\n" ], [ "bd.makefreqs(nls97, 'views/nlsfreqs.txt')", "_____no_output_____" ] ], [ [ "# Pass the marital status, gender, and college enrollment columns to the getcnts function", "_____no_output_____" ] ], [ [ "# group counts and percentages for subgroups within groups\nbd.get_counts(nls97, ['maritalstatus', 'gender', 'colenroct00'])", "_____no_output_____" ] ], [ [ "# Use the rowsel parameter of getcnts to limit the output to specific rows", "_____no_output_____" ] ], [ [ "bd.get_counts(nls97, ['maritalstatus', 'gender', 'colenroct00'],\n 'colenroct00.str[0:1] == \"1\"')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb61e66a9d8315547637ac746111dbd6037f9e16
37,163
ipynb
Jupyter Notebook
.ipynb_checkpoints/iris-regu0.001-checkpoint.ipynb
danhtaihoang/multiclass-classification
41244b77e6cc0537b1ed1750492d2d296f014416
[ "MIT" ]
null
null
null
.ipynb_checkpoints/iris-regu0.001-checkpoint.ipynb
danhtaihoang/multiclass-classification
41244b77e6cc0537b1ed1750492d2d296f014416
[ "MIT" ]
null
null
null
.ipynb_checkpoints/iris-regu0.001-checkpoint.ipynb
danhtaihoang/multiclass-classification
41244b77e6cc0537b1ed1750492d2d296f014416
[ "MIT" ]
null
null
null
94.803571
24,232
0.762667
[ [ [ "import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import accuracy_score\n\nfrom synthesize_data_multiclass import synthesize_data\nimport ER_multiclass as ER\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "np.random.seed(1)", "_____no_output_____" ], [ "def inference(X_train,y_train,X_test,y_test,method='expectation_reflection'):\n if method == 'expectation_reflection':\n h0,w = ER.fit(X_train,y_train,niter_max=100,regu=0.001)\n y_pred = ER.predict(X_test,h0,w)\n\n else:\n if method == 'logistic_regression':\n model = LogisticRegression(multi_class='multinomial',solver='saga')\n\n if method == 'naive_bayes': \n model = GaussianNB()\n\n if method == 'random_forest':\n model = RandomForestClassifier(criterion = \"gini\", random_state = 1,\n max_depth=3, min_samples_leaf=5,n_estimators=100) \n \n if method == 'decision_tree':\n model = DecisionTreeClassifier() \n\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n \n accuracy = accuracy_score(y_test,y_pred) \n \n return accuracy", "_____no_output_____" ], [ "list_methods=['logistic_regression','naive_bayes','random_forest','decision_tree','expectation_reflection']\n#list_methods=['logistic_regression','expectation_reflection']\ndef compare_inference(X,y,train_size):\n npred = 500\n accuracy = np.zeros((len(list_methods),npred))\n precision = np.zeros((len(list_methods),npred))\n recall = np.zeros((len(list_methods),npred))\n accuracy_train = np.zeros((len(list_methods),npred))\n for ipred in range(npred):\n #X, y = shuffle(X, y)\n X_train0,X_test,y_train0,y_test = train_test_split(X,y,test_size=0.2,random_state = ipred)\n\n idx_train = np.random.choice(len(y_train0),size=int(train_size*len(y)),replace=False)\n X_train,y_train = X_train0[idx_train],y_train0[idx_train]\n\n for i,method in enumerate(list_methods):\n accuracy[i,ipred] = inference(X_train,y_train,X_test,y_test,method)\n \n return accuracy.mean(axis=1)", "_____no_output_____" ], [ "from sklearn.datasets import load_iris\n\nX, y = load_iris(return_X_y=True)\nprint(X.shape,y.shape)\nprint(np.unique(y,return_counts=True))", "(150, 4) (150,)\n(array([0, 1, 2]), array([50, 50, 50]))\n" ], [ "X", "_____no_output_____" ], [ "from sklearn.preprocessing import MinMaxScaler\nX = MinMaxScaler().fit_transform(X)\nX, y = shuffle(X, y)", "_____no_output_____" ], [ "list_train_size = [0.8,0.6,0.4,0.2]\nacc = np.zeros((len(list_train_size),len(list_methods)))\nfor i,train_size in enumerate(list_train_size):\n acc[i,:] = compare_inference(X,y,train_size)\n print(train_size,acc[i,:])", "0.8 [0.92086667 0.9526 0.94893333 0.94486667 0.9388 ]\n0.6 [0.9066 0.95106667 0.94773333 0.94426667 0.9332 ]\n0.4 [0.8796 0.95033333 0.94826667 0.94553333 0.92426667]\n0.2 [0.8108 0.93593333 0.93213333 0.9336 0.90153333]\n" ], [ "plt.figure(figsize=(4,3)) \nplt.plot(list_train_size,acc[:,0],'k--',marker='o',mfc='none',label='Logistic Regression')\nplt.plot(list_train_size,acc[:,1],'b--',marker='s',mfc='none',label='Naive Bayes')\nplt.plot(list_train_size,acc[:,2],'r--',marker='^',mfc='none',label='Random Forest')\n#plt.plot(list_train_size,acc[:,3],'b--',label='Decision 
Tree')\nplt.plot(list_train_size,acc[:,-1],'k-',marker='o',label='Expectation Reflection')\nplt.xlabel('train size')\nplt.ylabel('accuracy')\nplt.legend()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb61ece24deedef13cd801899860bc86a2510b40
183,324
ipynb
Jupyter Notebook
AAAI/Learnability/CIN/Linear/MNIST/MNIST_CIN_1k_Linear_m_5.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
2
2019-08-24T07:20:35.000Z
2020-03-27T08:16:59.000Z
AAAI/Learnability/CIN/Linear/MNIST/MNIST_CIN_1k_Linear_m_5.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
null
null
null
AAAI/Learnability/CIN/Linear/MNIST/MNIST_CIN_1k_Linear_m_5.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
3
2019-06-21T09:34:32.000Z
2019-09-19T10:43:07.000Z
60.027505
11,202
0.691257
[ [ [ "# from google.colab import drive\n# drive.mount('/content/drive')", "Mounted at /content/drive\n" ], [ "# path = \"/content/drive/MyDrive/Research/cods_comad_plots/sdc_task/mnist/\"", "_____no_output_____" ], [ "import torch.nn as nn\nimport torch.nn.functional as F\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\nfrom matplotlib import pyplot as plt\n\nimport copy\n\n# Ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n", "_____no_output_____" ], [ "transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5), (0.5))])\n\ntrainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)\n\n\ntestset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)", "Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to ./data/MNIST/raw/train-images-idx3-ubyte.gz\n" ], [ "classes = ('zero','one','two','three','four','five','six','seven','eight','nine')\n\nforeground_classes = {'zero','one'}\nfg_used = '01'\nfg1, fg2 = 0,1\n\n\nall_classes = {'zero','one','two','three','four','five','six','seven','eight','nine'}\nbackground_classes = all_classes - foreground_classes\nbackground_classes", "_____no_output_____" ], [ "trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle = False)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle = False)", "_____no_output_____" ], [ "dataiter = iter(trainloader)\nbackground_data=[]\nbackground_label=[]\nforeground_data=[]\nforeground_label=[]\nbatch_size=10\n\nfor i in range(6000):\n images, labels = dataiter.next()\n for j in range(batch_size):\n if(classes[labels[j]] in background_classes):\n img = images[j].tolist()\n background_data.append(img)\n background_label.append(labels[j])\n else:\n img = images[j].tolist()\n foreground_data.append(img)\n foreground_label.append(labels[j])\n \nforeground_data = torch.tensor(foreground_data)\nforeground_label = torch.tensor(foreground_label)\nbackground_data = torch.tensor(background_data)\nbackground_label = torch.tensor(background_label)", "_____no_output_____" ], [ "def imshow(img):\n img = img / 2 + 0.5 # unnormalize\n npimg = img#.numpy()\n plt.imshow(np.reshape(npimg, (28,28)))\n plt.show()", "_____no_output_____" ], [ "foreground_data.shape, foreground_label.shape, background_data.shape, background_label.shape", "_____no_output_____" ], [ "val, idx = torch.max(background_data, dim=0, keepdims= True,)\n# torch.abs(val)", "_____no_output_____" ], [ "mean_bg = torch.mean(background_data, dim=0, keepdims= True)\nstd_bg, _ = torch.max(background_data, dim=0, keepdims= True)", "_____no_output_____" ], [ "mean_bg.shape, std_bg.shape", "_____no_output_____" ], [ "foreground_data = (foreground_data - mean_bg) / std_bg\nbackground_data = (background_data - mean_bg) / torch.abs(std_bg)", "_____no_output_____" ], [ "foreground_data.shape, foreground_label.shape, background_data.shape, background_label.shape", "_____no_output_____" ], [ "torch.sum(torch.isnan(foreground_data)), torch.sum(torch.isnan(background_data))", "_____no_output_____" ], [ "imshow(foreground_data[0])", 
"_____no_output_____" ], [ "imshow(background_data[0])", "_____no_output_____" ] ], [ [ "## generating CIN train and test data", "_____no_output_____" ] ], [ [ "m = 5\ndesired_num = 1000", "_____no_output_____" ], [ "np.random.seed(0)\nbg_idx = np.random.randint(0,47335,m-1)\nfg_idx = np.random.randint(0,12665)\nbg_idx, fg_idx", "_____no_output_____" ], [ "for i in background_data[bg_idx]:\n imshow(i)", "_____no_output_____" ], [ "imshow(torch.sum(background_data[bg_idx], axis = 0))", "_____no_output_____" ], [ "imshow(foreground_data[fg_idx])", "_____no_output_____" ], [ "tr_data = ( torch.sum(background_data[bg_idx], axis = 0) + foreground_data[fg_idx] )/m\ntr_data.shape", "_____no_output_____" ], [ "imshow(tr_data)", "_____no_output_____" ], [ "foreground_label[fg_idx]", "_____no_output_____" ], [ "train_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images \ntrain_label=[] # label of mosaic image = foreground class present in that mosaic\n\nfor i in range(desired_num):\n np.random.seed(i)\n bg_idx = np.random.randint(0,47335,m-1)\n fg_idx = np.random.randint(0,12665)\n\n tr_data = ( torch.sum(background_data[bg_idx], axis = 0) + foreground_data[fg_idx] ) / m\n label = (foreground_label[fg_idx].item())\n\n train_images.append(tr_data)\n train_label.append(label)\n", "_____no_output_____" ], [ "train_images = torch.stack(train_images)\ntrain_images.shape, len(train_label)", "_____no_output_____" ], [ "imshow(train_images[0])", "_____no_output_____" ], [ "test_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images \ntest_label=[] # label of mosaic image = foreground class present in that mosaic\n\nfor i in range(10000):\n np.random.seed(i)\n fg_idx = np.random.randint(0,12665)\n\n tr_data = ( foreground_data[fg_idx] ) / m\n label = (foreground_label[fg_idx].item())\n\n test_images.append(tr_data)\n test_label.append(label)\n", "_____no_output_____" ], [ "test_images = torch.stack(test_images)\ntest_images.shape, len(test_label)", "_____no_output_____" ], [ "imshow(test_images[0])", "_____no_output_____" ], [ "torch.sum(torch.isnan(train_images)), torch.sum(torch.isnan(test_images))", "_____no_output_____" ], [ "np.unique(train_label), np.unique(test_label)", "_____no_output_____" ] ], [ [ "## creating dataloader", "_____no_output_____" ] ], [ [ "class CIN_Dataset(Dataset):\n \"\"\"CIN_Dataset dataset.\"\"\"\n\n def __init__(self, list_of_images, labels):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.image = list_of_images\n self.label = labels\n\n def __len__(self):\n return len(self.label)\n\n def __getitem__(self, idx):\n return self.image[idx] , self.label[idx]\n\n", "_____no_output_____" ], [ "batch = 250\ntrain_data = CIN_Dataset(train_images, train_label)\ntrain_loader = DataLoader( train_data, batch_size= batch , shuffle=True)\n\ntest_data = CIN_Dataset( test_images , test_label)\ntest_loader = DataLoader( test_data, batch_size= batch , shuffle=False)", "_____no_output_____" ], [ "train_loader.dataset.image.shape, test_loader.dataset.image.shape", "_____no_output_____" ] ], [ [ "## model", "_____no_output_____" ] ], [ [ "class Classification(nn.Module):\n def __init__(self):\n super(Classification, self).__init__()\n self.fc1 = nn.Linear(28*28, 2)\n \n torch.nn.init.xavier_normal_(self.fc1.weight)\n torch.nn.init.zeros_(self.fc1.bias)\n\n\n def 
forward(self, x):\n x = x.view(-1, 28*28)\n x = self.fc1(x)\n return x", "_____no_output_____" ] ], [ [ "## training", "_____no_output_____" ] ], [ [ "torch.manual_seed(12)\nclassify = Classification().double()\nclassify = classify.to(\"cuda\")", "_____no_output_____" ], [ "import torch.optim as optim\ncriterion = nn.CrossEntropyLoss()\noptimizer_classify = optim.Adam(classify.parameters(), lr=0.001 ) #, momentum=0.9)", "_____no_output_____" ], [ "correct = 0\ntotal = 0\ncount = 0\nflag = 1\n\nwith torch.no_grad():\n for data in train_loader:\n inputs, labels = data\n inputs = inputs.double()\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\n\n outputs = classify(inputs)\n\n _, predicted = torch.max(outputs.data, 1)\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the %d train images: %f %%' % ( desired_num , 100 * correct / total))\nprint(\"total correct\", correct)\nprint(\"total train set images\", total)", "Accuracy of the network on the 1000 train images: 39.500000 %\ntotal correct 395\ntotal train set images 1000\n" ], [ "correct = 0\ntotal = 0\ncount = 0\nflag = 1\n\nwith torch.no_grad():\n for data in test_loader:\n inputs, labels = data\n inputs = inputs.double()\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\n\n outputs = classify(inputs)\n\n _, predicted = torch.max(outputs.data, 1)\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the %d train images: %f %%' % ( 10000 , 100 * correct / total))\nprint(\"total correct\", correct)\nprint(\"total train set images\", total)", "Accuracy of the network on the 10000 train images: 27.110000 %\ntotal correct 2711\ntotal train set images 10000\n" ], [ "nos_epochs = 200\ntr_loss = []\nfor epoch in range(nos_epochs): # loop over the dataset multiple times\n\n epoch_loss = []\n cnt=0\n iteration = desired_num // batch\n running_loss = 0\n \n #training data set \n for i, data in enumerate(train_loader):\n inputs, labels = data\n inputs = inputs.double()\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\n inputs = inputs.double()\n # zero the parameter gradients\n \n optimizer_classify.zero_grad()\n\n outputs = classify(inputs)\n\n _, predicted = torch.max(outputs.data, 1)\n# print(outputs)\n# print(outputs.shape,labels.shape , torch.argmax(outputs, dim=1))\n\n loss = criterion(outputs, labels) \n loss.backward()\n\n optimizer_classify.step()\n\n running_loss += loss.item()\n mini = 1\n if cnt % mini == mini-1: # print every 40 mini-batches\n # print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / mini))\n epoch_loss.append(running_loss/mini)\n running_loss = 0.0\n cnt=cnt+1\n\n tr_loss.append(np.mean(epoch_loss))\n if(np.mean(epoch_loss) <= 0.001):\n break;\n else:\n print('[Epoch : %d] loss: %.3f' %(epoch + 1, np.mean(epoch_loss) ))\n \nprint('Finished Training')", "[Epoch : 1] loss: 0.726\n[Epoch : 2] loss: 0.639\n[Epoch : 3] loss: 0.571\n[Epoch : 4] loss: 0.515\n[Epoch : 5] loss: 0.473\n[Epoch : 6] loss: 0.438\n[Epoch : 7] loss: 0.412\n[Epoch : 8] loss: 0.390\n[Epoch : 9] loss: 0.372\n[Epoch : 10] loss: 0.356\n[Epoch : 11] loss: 0.343\n[Epoch : 12] loss: 0.331\n[Epoch : 13] loss: 0.321\n[Epoch : 14] loss: 0.312\n[Epoch : 15] loss: 0.304\n[Epoch : 16] loss: 0.296\n[Epoch : 17] loss: 0.289\n[Epoch : 18] loss: 0.282\n[Epoch : 19] loss: 0.276\n[Epoch : 20] loss: 0.271\n[Epoch : 21] loss: 0.265\n[Epoch : 22] loss: 0.260\n[Epoch : 23] loss: 0.255\n[Epoch : 24] loss: 
0.251\n[Epoch : 25] loss: 0.247\n[Epoch : 26] loss: 0.243\n[Epoch : 27] loss: 0.239\n[Epoch : 28] loss: 0.235\n[Epoch : 29] loss: 0.231\n[Epoch : 30] loss: 0.228\n[Epoch : 31] loss: 0.225\n[Epoch : 32] loss: 0.222\n[Epoch : 33] loss: 0.219\n[Epoch : 34] loss: 0.216\n[Epoch : 35] loss: 0.213\n[Epoch : 36] loss: 0.210\n[Epoch : 37] loss: 0.208\n[Epoch : 38] loss: 0.205\n[Epoch : 39] loss: 0.203\n[Epoch : 40] loss: 0.200\n[Epoch : 41] loss: 0.198\n[Epoch : 42] loss: 0.196\n[Epoch : 43] loss: 0.194\n[Epoch : 44] loss: 0.192\n[Epoch : 45] loss: 0.190\n[Epoch : 46] loss: 0.188\n[Epoch : 47] loss: 0.186\n[Epoch : 48] loss: 0.184\n[Epoch : 49] loss: 0.182\n[Epoch : 50] loss: 0.181\n[Epoch : 51] loss: 0.179\n[Epoch : 52] loss: 0.177\n[Epoch : 53] loss: 0.176\n[Epoch : 54] loss: 0.174\n[Epoch : 55] loss: 0.173\n[Epoch : 56] loss: 0.171\n[Epoch : 57] loss: 0.170\n[Epoch : 58] loss: 0.169\n[Epoch : 59] loss: 0.167\n[Epoch : 60] loss: 0.166\n[Epoch : 61] loss: 0.164\n[Epoch : 62] loss: 0.163\n[Epoch : 63] loss: 0.162\n[Epoch : 64] loss: 0.161\n[Epoch : 65] loss: 0.160\n[Epoch : 66] loss: 0.158\n[Epoch : 67] loss: 0.157\n[Epoch : 68] loss: 0.156\n[Epoch : 69] loss: 0.155\n[Epoch : 70] loss: 0.154\n[Epoch : 71] loss: 0.153\n[Epoch : 72] loss: 0.152\n[Epoch : 73] loss: 0.151\n[Epoch : 74] loss: 0.150\n[Epoch : 75] loss: 0.149\n[Epoch : 76] loss: 0.148\n[Epoch : 77] loss: 0.147\n[Epoch : 78] loss: 0.146\n[Epoch : 79] loss: 0.145\n[Epoch : 80] loss: 0.144\n[Epoch : 81] loss: 0.143\n[Epoch : 82] loss: 0.142\n[Epoch : 83] loss: 0.141\n[Epoch : 84] loss: 0.141\n[Epoch : 85] loss: 0.140\n[Epoch : 86] loss: 0.139\n[Epoch : 87] loss: 0.138\n[Epoch : 88] loss: 0.137\n[Epoch : 89] loss: 0.137\n[Epoch : 90] loss: 0.136\n[Epoch : 91] loss: 0.135\n[Epoch : 92] loss: 0.134\n[Epoch : 93] loss: 0.134\n[Epoch : 94] loss: 0.133\n[Epoch : 95] loss: 0.132\n[Epoch : 96] loss: 0.131\n[Epoch : 97] loss: 0.131\n[Epoch : 98] loss: 0.130\n[Epoch : 99] loss: 0.129\n[Epoch : 100] loss: 0.129\n[Epoch : 101] loss: 0.128\n[Epoch : 102] loss: 0.127\n[Epoch : 103] loss: 0.127\n[Epoch : 104] loss: 0.126\n[Epoch : 105] loss: 0.125\n[Epoch : 106] loss: 0.125\n[Epoch : 107] loss: 0.124\n[Epoch : 108] loss: 0.124\n[Epoch : 109] loss: 0.123\n[Epoch : 110] loss: 0.122\n[Epoch : 111] loss: 0.122\n[Epoch : 112] loss: 0.121\n[Epoch : 113] loss: 0.121\n[Epoch : 114] loss: 0.120\n[Epoch : 115] loss: 0.119\n[Epoch : 116] loss: 0.119\n[Epoch : 117] loss: 0.118\n[Epoch : 118] loss: 0.118\n[Epoch : 119] loss: 0.117\n[Epoch : 120] loss: 0.117\n[Epoch : 121] loss: 0.116\n[Epoch : 122] loss: 0.116\n[Epoch : 123] loss: 0.115\n[Epoch : 124] loss: 0.115\n[Epoch : 125] loss: 0.114\n[Epoch : 126] loss: 0.114\n[Epoch : 127] loss: 0.113\n[Epoch : 128] loss: 0.113\n[Epoch : 129] loss: 0.112\n[Epoch : 130] loss: 0.112\n[Epoch : 131] loss: 0.111\n[Epoch : 132] loss: 0.111\n[Epoch : 133] loss: 0.110\n[Epoch : 134] loss: 0.110\n[Epoch : 135] loss: 0.109\n[Epoch : 136] loss: 0.109\n[Epoch : 137] loss: 0.109\n[Epoch : 138] loss: 0.108\n[Epoch : 139] loss: 0.108\n[Epoch : 140] loss: 0.107\n[Epoch : 141] loss: 0.107\n[Epoch : 142] loss: 0.106\n[Epoch : 143] loss: 0.106\n[Epoch : 144] loss: 0.105\n[Epoch : 145] loss: 0.105\n[Epoch : 146] loss: 0.105\n[Epoch : 147] loss: 0.104\n[Epoch : 148] loss: 0.104\n[Epoch : 149] loss: 0.103\n[Epoch : 150] loss: 0.103\n[Epoch : 151] loss: 0.103\n[Epoch : 152] loss: 0.102\n[Epoch : 153] loss: 0.102\n[Epoch : 154] loss: 0.101\n[Epoch : 155] loss: 0.101\n[Epoch : 156] loss: 0.101\n[Epoch : 157] loss: 0.100\n[Epoch : 158] loss: 
0.100\n[Epoch : 159] loss: 0.100\n[Epoch : 160] loss: 0.099\n[Epoch : 161] loss: 0.099\n[Epoch : 162] loss: 0.098\n[Epoch : 163] loss: 0.098\n[Epoch : 164] loss: 0.098\n[Epoch : 165] loss: 0.097\n[Epoch : 166] loss: 0.097\n[Epoch : 167] loss: 0.097\n[Epoch : 168] loss: 0.096\n[Epoch : 169] loss: 0.096\n[Epoch : 170] loss: 0.096\n[Epoch : 171] loss: 0.095\n[Epoch : 172] loss: 0.095\n[Epoch : 173] loss: 0.095\n[Epoch : 174] loss: 0.094\n[Epoch : 175] loss: 0.094\n[Epoch : 176] loss: 0.094\n[Epoch : 177] loss: 0.093\n[Epoch : 178] loss: 0.093\n[Epoch : 179] loss: 0.093\n[Epoch : 180] loss: 0.092\n[Epoch : 181] loss: 0.092\n[Epoch : 182] loss: 0.092\n[Epoch : 183] loss: 0.091\n[Epoch : 184] loss: 0.091\n[Epoch : 185] loss: 0.091\n[Epoch : 186] loss: 0.090\n[Epoch : 187] loss: 0.090\n[Epoch : 188] loss: 0.090\n[Epoch : 189] loss: 0.089\n[Epoch : 190] loss: 0.089\n[Epoch : 191] loss: 0.089\n[Epoch : 192] loss: 0.089\n[Epoch : 193] loss: 0.088\n[Epoch : 194] loss: 0.088\n[Epoch : 195] loss: 0.088\n[Epoch : 196] loss: 0.087\n[Epoch : 197] loss: 0.087\n[Epoch : 198] loss: 0.087\n[Epoch : 199] loss: 0.087\n[Epoch : 200] loss: 0.086\nFinished Training\n" ], [ "plt.plot(tr_loss)", "_____no_output_____" ], [ "correct = 0\ntotal = 0\ncount = 0\nflag = 1\n\nwith torch.no_grad():\n for data in train_loader:\n inputs, labels = data\n inputs = inputs.double()\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\n\n outputs = classify(inputs)\n\n _, predicted = torch.max(outputs.data, 1)\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the %d train images: %f %%' % ( desired_num , 100 * correct / total))\nprint(\"total correct\", correct)\nprint(\"total train set images\", total)", "Accuracy of the network on the 1000 train images: 97.600000 %\ntotal correct 976\ntotal train set images 1000\n" ], [ "correct = 0\ntotal = 0\ncount = 0\nflag = 1\n\nwith torch.no_grad():\n for data in test_loader:\n inputs, labels = data\n inputs = inputs.double()\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\n\n outputs = classify(inputs)\n\n _, predicted = torch.max(outputs.data, 1)\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the %d train images: %f %%' % ( 10000 , 100 * correct / total))\nprint(\"total correct\", correct)\nprint(\"total train set images\", total)", "Accuracy of the network on the 10000 train images: 99.550000 %\ntotal correct 9955\ntotal train set images 10000\n" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb6205209e4158084719bad017c6d7bafe59fbe6
3,225
ipynb
Jupyter Notebook
notes/.ipynb_checkpoints/Untitled0-checkpoint.ipynb
q8groups/soap
9ea60b40795976c4ca2de3a03f242282df6be152
[ "MIT" ]
null
null
null
notes/.ipynb_checkpoints/Untitled0-checkpoint.ipynb
q8groups/soap
9ea60b40795976c4ca2de3a03f242282df6be152
[ "MIT" ]
null
null
null
notes/.ipynb_checkpoints/Untitled0-checkpoint.ipynb
q8groups/soap
9ea60b40795976c4ca2de3a03f242282df6be152
[ "MIT" ]
null
null
null
26.652893
151
0.48124
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb6229a293b4543186392f5595e829f5eb421b59
12,576
ipynb
Jupyter Notebook
example.ipynb
meduz/tikzmagic
e66f24f039e406aeed5cb245ee3ff7dcd27bfe52
[ "MIT" ]
1
2020-12-17T14:20:16.000Z
2020-12-17T14:20:16.000Z
example.ipynb
meduz/tikzmagic
e66f24f039e406aeed5cb245ee3ff7dcd27bfe52
[ "MIT" ]
null
null
null
example.ipynb
meduz/tikzmagic
e66f24f039e406aeed5cb245ee3ff7dcd27bfe52
[ "MIT" ]
1
2021-07-02T07:30:42.000Z
2021-07-02T07:30:42.000Z
96
6,918
0.865458
[ [ [ "import tikzmagic", "_____no_output_____" ], [ "%%tikz\n\\tikzset{every node/.style={font=\\sffamily,white}}\n\n\\node[fill=red] at (0,0) (a) {This};\n\\node[fill=blue] at (2,0) (b) {That};\n\\draw[->] (a) -- (b);", "_____no_output_____" ], [ "%%tikz --scale=0.5 --border=10\n\\tikzset{every node/.style={font=\\sffamily,white}}\n\n\\node[fill=red] at (0,0) (a) {This};\n\\node[fill=blue] at (2,0) (b) {That};\n\\draw[->] (a) -- (b);", "_____no_output_____" ], [ "%%tikz -l 'bending,arrows' -p 'fontspec' -x '\\setmainfont{Arial}'\n\n\\draw[-triangle 45] (1,0) arc(0:355:1);\n\\node {Cycle};", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb622bbc8f2f05f34bf76914971ebf82f14b1c17
8,799
ipynb
Jupyter Notebook
completed/08. Working with APIs (Part 1).ipynb
cjwinchester/cfj-2017
db14686b0269303eb1db5942dd30b3a28775fb0b
[ "MIT" ]
4
2017-10-20T02:56:21.000Z
2019-04-10T14:59:31.000Z
completed/08. Working with APIs (Part 1).ipynb
cjwinchester/cfj-2017
db14686b0269303eb1db5942dd30b3a28775fb0b
[ "MIT" ]
5
2020-03-24T15:29:43.000Z
2021-06-01T21:50:07.000Z
completed/08. Working with APIs (Part 1).ipynb
cjwinchester/cfj-2017
db14686b0269303eb1db5942dd30b3a28775fb0b
[ "MIT" ]
2
2020-08-18T19:21:49.000Z
2020-12-15T04:28:34.000Z
36.6625
425
0.622457
[ [ [ "# Let's post a message to Slack\n\nIn this session, we're going to use Python to post a message to Slack. I set up [a team for us](https://ire-cfj-2017.slack.com/) so we can mess around with the [Slack API](https://api.slack.com/).\n\nWe're going to use a simple [_incoming webhook_](https://api.slack.com/incoming-webhooks) to accomplish this.", "_____no_output_____" ], [ "### Hello API\n\nAPI stands for \"Application Programming Interface.\" An API is a way to interact programmatically with a software application.\n\nIf you want to post a message to Slack, you could open a browser and navigate to your URL and sign in with your username and password (or open the app), click on the channel you want, and start typing.\n\nOR ... you could post your Slack message with a Python script.", "_____no_output_____" ], [ "### Hello environmental variables\n\nThe code for this boot camp [is on the public internet](https://github.com/ireapps/cfj-2017). We don't want anyone on the internet to be able to post messages to our Slack channels, so we're going to use an [environmental variable](https://en.wikipedia.org/wiki/Environment_variable) to store our webhook.\n\nThe environmental variable we're going to use -- `IRE_CFJ_2017_SLACK_HOOK` -- should already be stored on your computer.\n\nPython has a standard library module for working with the operating system called [`os`](https://docs.python.org/3/library/os.html). The `os` module has a data attribute called `environ`, a dictionary of environmental variables stored on your computer.\n\n(Here is a new thing: Instead of using brackets to access items in a dictionary, you can use the `get()` method. The advantage to doing it this way: If the item you're trying to get doesn't exist in your dictionary, it'll return `None` instead of throwing an exception, which is sometimes a desired behavior.)", "_____no_output_____" ] ], [ [ "from os import environ\n\nslack_hook = environ.get('IRE_CFJ_2017_SLACK_HOOK', None)", "_____no_output_____" ] ], [ [ "### Hello JSON\n\nSo far we've been working with tabular data -- CSVs with columns and rows. Most modern web APIs prefer to shake hands with a data structure called [JSON](http://www.json.org/) (**J**ava**S**cript **O**bject **N**otation), which is more like a matryoshka doll.\n\n![](https://media.giphy.com/media/Ud5r7tzmG4De0/giphy.gif \"russian nesting dolls\")\n\nPython has a standard library module for working with JSON data called [`json`](https://docs.python.org/3/library/json.html). Let's import it.", "_____no_output_____" ] ], [ [ "import json", "_____no_output_____" ] ], [ [ "### Using `requests` to post data\n\nWe're also going to use the `requests` library again, except this time, instead of using the `get()` method to get something off the web, we're going to use the `post()` method to send data _to_ the web.", "_____no_output_____" ] ], [ [ "import requests", "_____no_output_____" ] ], [ [ "### Formatting the data correctly\n\nThe JSON data we're going to send to the Slack webhook will start its life as a Python dictionary. 
Then we'll use the `json` module's `dumps()` method to turn it into a string of JSON.", "_____no_output_____" ] ], [ [ "# build a dictionary of payload data\npayload = {\n    'channel': '#general',\n    'username': 'IRE Python Bot',\n    'icon_emoji': ':ire:',\n    'text': 'helllllllo!'\n}\n\n# turn it into a string of JSON\npayload_as_json = json.dumps(payload)", "_____no_output_____" ] ], [ [ "### Send it off to Slack", "_____no_output_____" ] ], [ [ "# check to see if you have the webhook URL\nif slack_hook:\n\n    # send it to slack!\n    requests.post(slack_hook, data=payload_as_json)\n\nelse:\n\n    # if you don't have the webhook env var, print a message to the terminal\n    print(\"You don't have the IRE_CFJ_2017_SLACK_HOOK\"\n          \" environmental variable\")", "_____no_output_____" ] ], [ [ "### _Exercise_\n\nRead through the [Slack documentation](https://api.slack.com/incoming-webhooks) and post a message to a Slack channel ...\n\n- with a different emoji\n- with an image URL instead of an emoji\n- with a link in it\n- with an attachment\n- with other kinds of fancy formatting", "_____no_output_____" ], [ "### _Extra credit: Slack alert_\n\nScenario: You cover the Fort Calhoun Nuclear Power Station outside of Omaha, Nebraska. Every day, you'd like to check [an NRC website](https://www.nrc.gov/reading-rm/doc-collections/event-status/event/) to see if your plant had any \"Event Notifications\" in the agency's most recent report. You decide to write a Slack script to do this for you. (Ignore, for now, the problem of setting up the script to run daily.)\n\nBreaking down your problem, you need to:\n\n- Fetch [the page with the latest reports](https://www.nrc.gov/reading-rm/doc-collections/event-status/event/en.html) using `requests`\n- Look through the text and see if your reactor's name appears in the page text (you could just use an `if` statement with `in`)\n- If it's there, use `requests` to send a message to Slack\n\nNotice that we don't need to parse the page with BeautifulSoup -- we're basically just checking for the presence of a string inside a bigger string.\n\n### _Extra, extra credit_\n\nLet's extend the script you just wrote with a function that would allow you to check for the presence of _any string_ on an NRC page for _any date_ of reports -- most days have their own page, though I think weekends are grouped together.\n\nLet's break it down. Inside our function, we need to:\n\n- Figure out the URL pattern for each day's report page. [Here's the page for Sept. 29, 2017](https://www.nrc.gov/reading-rm/doc-collections/event-status/event/2017/20170929en.html)\n- Decide how you want to accept the two arguments in your function -- one for the date and one for the string to search for (me, I'd use a date object for the default date argument to keep things explicit, but you could also pass a string)\n- Fill in the URL using `format()` and the date being passed to the function\n- Fetch the page using `requests`\n- Not every day has a page, so you'll need to check to see if the request was successful (hint: use the requests [`status_code` attribute](http://docs.python-requests.org/en/master/user/quickstart/#response-status-codes) -- 200 means success)\n- If the request was successful, check for the presence of the string in the page text\n- If the text we're looking for is there, send a message to Slack", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb622c1bd0ac4f2650340eb345a9e828bc4046a4
60,758
ipynb
Jupyter Notebook
4. Coursera - Convolutional Neural Networks/week1/Convolution model - Step by Step - v2.ipynb
lordzsolt/Data-Science
02241b541108f3b8e6031f260ddde0dd733988d0
[ "MIT" ]
null
null
null
4. Coursera - Convolutional Neural Networks/week1/Convolution model - Step by Step - v2.ipynb
lordzsolt/Data-Science
02241b541108f3b8e6031f260ddde0dd733988d0
[ "MIT" ]
null
null
null
4. Coursera - Convolutional Neural Networks/week1/Convolution model - Step by Step - v2.ipynb
lordzsolt/Data-Science
02241b541108f3b8e6031f260ddde0dd733988d0
[ "MIT" ]
null
null
null
41.360109
5,442
0.562181
[ [ [ "# Convolutional Neural Networks: Step by Step\n\nWelcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation. \n\n**Notation**:\n- Superscript $[l]$ denotes an object of the $l^{th}$ layer. \n - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.\n\n\n- Superscript $(i)$ denotes an object from the $i^{th}$ example. \n - Example: $x^{(i)}$ is the $i^{th}$ training example input.\n \n \n- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.\n - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.\n \n \n- $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$. \n- $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$. \n\nWe assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!", "_____no_output_____" ], [ "## 1 - Packages\n\nLet's first import all the packages that you will need during this assignment. \n- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.\n- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.\n- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n%load_ext autoreload\n%autoreload 2\n\nnp.random.seed(1)", "_____no_output_____" ] ], [ [ "## 2 - Outline of the Assignment\n\nYou will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:\n\n- Convolution functions, including:\n - Zero Padding\n - Convolve window \n - Convolution forward\n - Convolution backward (optional)\n- Pooling functions, including:\n - Pooling forward\n - Create mask \n - Distribute value\n - Pooling backward (optional)\n \nThis notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:\n\n<img src=\"images/model.png\" style=\"width:800px;height:300px;\">\n\n**Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation. ", "_____no_output_____" ], [ "## 3 - Convolutional Neural Networks\n\nAlthough programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below. 
\n\n<img src=\"images/conv_nn.png\" style=\"width:350px;height:200px;\">\n\nIn this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself. ", "_____no_output_____" ], [ "### 3.1 - Zero-Padding\n\nZero-padding adds zeros around the border of an image:\n\n<img src=\"images/PAD.png\" style=\"width:600px;height:400px;\">\n<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Zero-Padding**<br> Image (3 channels, RGB) with a padding of 2. </center></caption>\n\nThe main benefits of padding are the following:\n\n- It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the \"same\" convolution, in which the height/width is exactly preserved after one layer. \n\n- It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels as the edges of an image.\n\n**Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array \"a\" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:\n```python\na = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))\n```", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: zero_pad\n\ndef zero_pad(X, pad):\n \"\"\"\n Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image, \n as illustrated in Figure 1.\n \n Argument:\n X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images\n pad -- integer, amount of padding around each image on vertical and horizontal dimensions\n \n Returns:\n X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line)\n X_pad = np.pad(X, pad_width=((0, 0), (pad, pad), (pad, pad), (0, 0)), mode='constant')\n ### END CODE HERE ###\n \n return X_pad", "_____no_output_____" ], [ "np.random.seed(1)\nx = np.random.randn(4, 3, 3, 2)\n# print(x)\nx_pad = zero_pad(x, 2)\nprint (\"x.shape =\", x.shape)\nprint (\"x_pad.shape =\", x_pad.shape)\nprint (\"x[1,1] =\", x[1,1])\nprint (\"x_pad[1,1] =\", x_pad[1,1])\n\nfig, axarr = plt.subplots(1, 2)\naxarr[0].set_title('x')\naxarr[0].imshow(x[0,:,:,0])\naxarr[1].set_title('x_pad')\naxarr[1].imshow(x_pad[0,:,:,0]);", "x.shape = (4, 3, 3, 2)\nx_pad.shape = (4, 7, 7, 2)\nx[1,1] = [[ 0.90085595 -0.68372786]\n [-0.12289023 -0.93576943]\n [-0.26788808 0.53035547]]\nx_pad[1,1] = [[0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]]\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **x.shape**:\n </td>\n <td>\n (4, 3, 3, 2)\n </td>\n </tr>\n <tr>\n <td>\n **x_pad.shape**:\n </td>\n <td>\n (4, 7, 7, 2)\n </td>\n </tr>\n <tr>\n <td>\n **x[1,1]**:\n </td>\n <td>\n [[ 0.90085595 -0.68372786]\n [-0.12289023 -0.93576943]\n [-0.26788808 0.53035547]]\n </td>\n </tr>\n <tr>\n <td>\n **x_pad[1,1]**:\n </td>\n <td>\n [[ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 
0.]]\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 3.2 - Single step of convolution \n\nIn this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which: \n\n- Takes an input volume \n- Applies a filter at every position of the input\n- Outputs another volume (usually of different size)\n\n<img src=\"images/Convolution_schematic.gif\" style=\"width:500px;height:300px;\">\n<caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : **Convolution operation**<br> with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) </center></caption>\n\nIn a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output. \n\nLater in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation. \n\n**Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html).\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: conv_single_step\n\ndef conv_single_step(a_slice_prev, W, b):\n \"\"\"\n Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation \n of the previous layer.\n \n Arguments:\n a_slice_prev -- slice of input data of shape (f, f, n_C_prev)\n W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)\n b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)\n \n Returns:\n Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data\n \"\"\"\n\n ### START CODE HERE ### (≈ 2 lines of code)\n # Element-wise product between a_slice and W. Do not add the bias yet.\n s = a_slice_prev * W\n # Sum over all entries of the volume s.\n Z = np.sum(s)\n # Add bias b to Z. Cast b to a float() so that Z results in a scalar value.\n Z += np.float(b)\n ### END CODE HERE ###\n\n return Z", "_____no_output_____" ], [ "np.random.seed(1)\na_slice_prev = np.random.randn(4, 4, 3)\nW = np.random.randn(4, 4, 3)\nb = np.random.randn(1, 1, 1)\n\nZ = conv_single_step(a_slice_prev, W, b)\nprint(\"Z =\", Z)", "Z = -6.999089450680222\n" ] ], [ [ "**Expected Output**:\n<table>\n <tr>\n <td>\n **Z**\n </td>\n <td>\n -6.99908945068\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 3.3 - Convolutional Neural Networks - Forward pass\n\nIn the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume: \n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/conv_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n**Exercise**: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. 
Finally you also have access to the hyperparameters dictionary which contains the stride and the padding. \n\n**Hint**: \n1. To select a 2x2 slice at the upper left corner of a matrix \"a_prev\" (shape (5,5,3)), you would do:\n```python\na_slice_prev = a_prev[0:2,0:2,:]\n```\nThis will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.\n2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find how each of the corners can be defined using h, w, f and s in the code below.\n\n<img src=\"images/vert_horiz_kiank.png\" style=\"width:400px;height:300px;\">\n<caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** <br> This figure shows only a single channel. </center></caption>\n\n\n**Reminder**:\nThe formulas relating the output shape of the convolution to the input shape are:\n$$ n_H = \\lfloor \\frac{n_{H_{prev}} - f + 2 \\times pad}{stride} \\rfloor +1 $$\n$$ n_W = \\lfloor \\frac{n_{W_{prev}} - f + 2 \\times pad}{stride} \\rfloor +1 $$\n$$ n_C = \\text{number of filters used in the convolution}$$\n\nFor this exercise, we won't worry about vectorization, and will just implement everything with for-loops.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: conv_forward\n\ndef conv_forward(A_prev, W, b, hparameters):\n    \"\"\"\n    Implements the forward propagation for a convolution function\n    \n    Arguments:\n    A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n    W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)\n    b -- Biases, numpy array of shape (1, 1, 1, n_C)\n    hparameters -- python dictionary containing \"stride\" and \"pad\"\n    \n    Returns:\n    Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)\n    cache -- cache of values needed for the conv_backward() function\n    \"\"\"\n    \n    ### START CODE HERE ###\n    # Retrieve dimensions from A_prev's shape (≈1 line)  \n    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n    \n    # Retrieve dimensions from W's shape (≈1 line)\n    (f, f, n_C_prev, n_C) = W.shape\n    \n    # Retrieve information from \"hparameters\" (≈2 lines)\n    stride = hparameters['stride']\n    pad = hparameters['pad']\n    \n    # Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. (≈2 lines)\n    n_H = np.int((n_H_prev - f + 2 * pad) / stride) + 1\n    n_W = np.int((n_W_prev - f + 2 * pad) / stride) + 1\n    \n    # Initialize the output volume Z with zeros. (≈1 line)\n    Z = np.zeros((m, n_H, n_W, n_C))\n    \n    # Create A_prev_pad by padding A_prev\n    A_prev_pad = zero_pad(A_prev, pad)\n    \n    for i in range(m):                      # loop over the batch of training examples\n        a_prev_pad = A_prev_pad[i]\n        \n        # Select ith training example's padded activation\n        # (note: h must range over n_H and w over n_W -- the vertical index\n        # belongs with the output height, the horizontal index with the width)\n        for h in range(n_H):                # loop over vertical axis of the output volume\n            for w in range(n_W):            # loop over horizontal axis of the output volume\n                for c in range(n_C):        # loop over channels (= #filters) of the output volume\n                    \n                    # Find the corners of the current \"slice\" (≈4 lines)\n                    vert_start = h * stride\n                    vert_end = vert_start + f\n                    horiz_start = w * stride\n                    horiz_end = horiz_start + f\n                    \n                    # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). 
(≈1 line)\n a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end]\n \n # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈1 line) \n Z[i, h, w, c] = conv_single_step(a_slice_prev, W[:, :, :, c], b[:, :, :, c])\n \n ### END CODE HERE ###\n \n # Making sure your output shape is correct\n assert(Z.shape == (m, n_H, n_W, n_C))\n \n # Save information in \"cache\" for the backprop\n cache = (A_prev, W, b, hparameters)\n \n return Z, cache", "_____no_output_____" ], [ "np.random.seed(1)\nA_prev = np.random.randn(10,4,4,3)\nW = np.random.randn(2,2,3,8)\nb = np.random.randn(1,1,1,8)\nhparameters = {\"pad\" : 2,\n \"stride\": 2}\n\nZ, cache_conv = conv_forward(A_prev, W, b, hparameters)\nprint(\"Z's mean =\", np.mean(Z))\nprint(\"Z[3,2,1] =\", Z[3,2,1])\nprint(\"cache_conv[0][1][2][3] =\", cache_conv[0][1][2][3])", "Z's mean = 0.048995203528855794\nZ[3,2,1] = [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437\n 5.18531798 8.75898442]\ncache_conv[0][1][2][3] = [-0.20075807 0.18656139 0.41005165]\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **Z's mean**\n </td>\n <td>\n 0.0489952035289\n </td>\n </tr>\n <tr>\n <td>\n **Z[3,2,1]**\n </td>\n <td>\n [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437\n 5.18531798 8.75898442]\n </td>\n </tr>\n <tr>\n <td>\n **cache_conv[0][1][2][3]**\n </td>\n <td>\n [-0.20075807 0.18656139 0.41005165]\n </td>\n </tr>\n\n</table>\n", "_____no_output_____" ], [ "Finally, CONV layer should also contain an activation, in which case we would add the following line of code:\n\n```python\n# Convolve the window to get back one output neuron\nZ[i, h, w, c] = ...\n# Apply activation\nA[i, h, w, c] = activation(Z[i, h, w, c])\n```\n\nYou don't need to do it here. \n", "_____no_output_____" ], [ "## 4 - Pooling layer \n\nThe pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are: \n\n- Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.\n\n- Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.\n\n<table>\n<td>\n<img src=\"images/max_pool1.png\" style=\"width:500px;height:300px;\">\n<td>\n\n<td>\n<img src=\"images/a_pool.png\" style=\"width:500px;height:300px;\">\n<td>\n</table>\n\nThese pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the fxf window you would compute a max or average over. \n\n### 4.1 - Forward Pooling\nNow, you are going to implement MAX-POOL and AVG-POOL, in the same function. \n\n**Exercise**: Implement the forward pass of the pooling layer. 
Follow the hints in the comments below.\n\n**Reminder**:\nAs there's no padding, the formulas binding the output shape of the pooling to the input shape are:\n$$ n_H = \\lfloor \\frac{n_{H_{prev}} - f}{stride} \\rfloor +1 $$\n$$ n_W = \\lfloor \\frac{n_{W_{prev}} - f}{stride} \\rfloor +1 $$\n$$ n_C = n_{C_{prev}}$$", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: pool_forward\n\ndef pool_forward(A_prev, hparameters, mode = \"max\"):\n    \"\"\"\n    Implements the forward pass of the pooling layer\n    \n    Arguments:\n    A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n    hparameters -- python dictionary containing \"f\" and \"stride\"\n    mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n    \n    Returns:\n    A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)\n    cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters \n    \"\"\"\n    \n    # Retrieve dimensions from the input shape\n    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n    \n    # Retrieve hyperparameters from \"hparameters\"\n    f = hparameters[\"f\"]\n    stride = hparameters[\"stride\"]\n    \n    # Define the dimensions of the output\n    n_H = int(1 + (n_H_prev - f) / stride)\n    n_W = int(1 + (n_W_prev - f) / stride)\n    n_C = n_C_prev\n    \n    # Initialize output matrix A\n    A = np.zeros((m, n_H, n_W, n_C))\n    \n    ### START CODE HERE ###\n    for i in range(m):                       # loop over the training examples\n        for h in range(n_H):                 # loop on the vertical axis of the output volume\n            for w in range(n_W):             # loop on the horizontal axis of the output volume\n                for c in range (n_C):        # loop over the channels of the output volume\n                    \n                    # Find the corners of the current \"slice\" (≈4 lines)\n                    vert_start = h * stride\n                    vert_end = vert_start + f\n                    horiz_start = w * stride\n                    horiz_end = horiz_start + f\n                    \n                    # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)\n                    a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]\n                    \n                    # Compute the pooling operation on the slice. Use an if statement to differentiate the modes. Use np.max/np.mean.\n                    if mode == \"max\":\n                        A[i, h, w, c] = np.max(a_prev_slice)\n                    elif mode == \"average\":\n                        A[i, h, w, c] = np.average(a_prev_slice)\n    \n    ### END CODE HERE ###\n    \n    # Store the input and hparameters in \"cache\" for pool_backward()\n    cache = (A_prev, hparameters)\n    \n    # Making sure your output shape is correct\n    assert(A.shape == (m, n_H, n_W, n_C))\n    \n    return A, cache", "_____no_output_____" ], [ "np.random.seed(1)\nA_prev = np.random.randn(2, 4, 4, 3)\nhparameters = {\"stride\" : 2, \"f\": 3}\n\nA, cache = pool_forward(A_prev, hparameters)\nprint(\"mode = max\")\nprint(\"A =\", A)\nprint()\nA, cache = pool_forward(A_prev, hparameters, mode = \"average\")\nprint(\"mode = average\")\nprint(\"A =\", A)", "mode = max\nA = [[[[1.74481176 0.86540763 1.13376944]]]\n\n\n [[[1.13162939 1.51981682 2.18557541]]]]\n\nmode = average\nA = [[[[ 0.02105773 -0.20328806 -0.40389855]]]\n\n\n [[[-0.22154621 0.51716526 0.48155844]]]]\n" ] ], [ [ "**Expected Output:**\n<table>\n\n <tr>\n <td>\n A =\n </td>\n <td>\n [[[[ 1.74481176 0.86540763 1.13376944]]]\n\n\n [[[ 1.13162939 1.51981682 2.18557541]]]]\n\n </td>\n </tr>\n <tr>\n <td>\n A =\n </td>\n <td>\n [[[[ 0.02105773 -0.20328806 -0.40389855]]]\n\n\n [[[-0.22154621 0.51716526 0.48155844]]]]\n\n </td>\n </tr>\n\n</table>\n", "_____no_output_____" ], [ "Congratulations! 
You have now implemented the forward passes of all the layers of a convolutional network. \n\nThe remainder of this notebook is optional, and will not be graded.\n", "_____no_output_____" ], [ "## 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)\n\nIn modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish, however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like. \n\nWhen in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we briefly presented them below.\n\n### 5.1 - Convolutional layer backward pass \n\nLet's start by implementing the backward pass for a CONV layer. \n\n#### 5.1.1 - Computing dA:\nThis is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:\n\n$$ dA += \\sum _{h=0} ^{n_H} \\sum_{w=0} ^{n_W} W_c \\times dZ_{hw} \\tag{1}$$\n\nWhere $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, we multiply the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\nda_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]\n```\n\n#### 5.1.2 - Computing dW:\nThis is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:\n\n$$ dW_c += \\sum _{h=0} ^{n_H} \\sum_{w=0} ^ {n_W} a_{slice} \\times dZ_{hw} \\tag{2}$$\n\nWhere $a_{slice}$ corresponds to the slice which was used to generate the activation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\ndW[:,:,:,c] += a_slice * dZ[i, h, w, c]\n```\n\n#### 5.1.3 - Computing db:\n\nThis is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:\n\n$$ db = \\sum_h \\sum_w dZ_{hw} \\tag{3}$$\n\nAs you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\ndb[:,:,:,c] += dZ[i, h, w, c]\n```\n\n**Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. 
You should then compute the derivatives using formulas 1, 2 and 3 above. ", "_____no_output_____" ] ], [ [ "def conv_backward(dZ, cache):\n    \"\"\"\n    Implement the backward propagation for a convolution function\n    \n    Arguments:\n    dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)\n    cache -- cache of values needed for the conv_backward(), output of conv_forward()\n    \n    Returns:\n    dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),\n               numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n    dW -- gradient of the cost with respect to the weights of the conv layer (W)\n          numpy array of shape (f, f, n_C_prev, n_C)\n    db -- gradient of the cost with respect to the biases of the conv layer (b)\n          numpy array of shape (1, 1, 1, n_C)\n    \"\"\"\n    \n    ### START CODE HERE ###\n    # Retrieve information from \"cache\"\n    (A_prev, W, b, hparameters) = cache\n    \n    # Retrieve dimensions from A_prev's shape\n    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n    \n    # Retrieve dimensions from W's shape\n    (f, f, n_C_prev, n_C) = W.shape\n    \n    # Retrieve information from \"hparameters\"\n    stride = hparameters['stride']\n    pad = hparameters['pad']\n    \n    # Retrieve dimensions from dZ's shape\n    (m, n_H, n_W, n_C) = dZ.shape\n    \n    # Initialize dA_prev, dW, db with the correct shapes\n    dA_prev = np.zeros(A_prev.shape)\n    dW = np.zeros(W.shape)\n    db = np.zeros((1, 1, 1, n_C))\n\n    # Pad A_prev and dA_prev\n    A_prev_pad = zero_pad(A_prev, pad)\n    dA_prev_pad = zero_pad(dA_prev, pad)\n    \n    for i in range(m):                       # loop over the training examples\n        \n        # select ith training example from A_prev_pad and dA_prev_pad\n        a_prev_pad = A_prev_pad[i]\n        da_prev_pad = dA_prev_pad[i]\n        \n        for h in range(n_H):                 # loop over vertical axis of the output volume\n            for w in range(n_W):             # loop over horizontal axis of the output volume\n                for c in range(n_C):         # loop over the channels of the output volume\n                    \n                    # Find the corners of the current \"slice\"\n                    vert_start = h * stride\n                    vert_end = vert_start + f\n                    horiz_start = w * stride\n                    horiz_end = horiz_start + f\n                    \n                    # Use the corners to define the slice from a_prev_pad\n                    a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end]\n\n                    # Update gradients for the window and the filter's parameters using the code formulas given above\n                    da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:, :, :, c] * dZ[i, h, w, c]\n                    dW[:, :, :, c] += a_slice * dZ[i, h, w, c]\n                    db[:, :, :, c] += dZ[i, h, w, c]\n        \n        # Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])\n        dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad]\n    ### END CODE HERE ###\n    \n    # Making sure your output shape is correct\n    assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))\n    \n    return dA_prev, dW, db", "_____no_output_____" ], [ "np.random.seed(1)\ndA, dW, db = conv_backward(Z, cache_conv)\nprint(\"dA_mean =\", np.mean(dA))\nprint(\"dW_mean =\", np.mean(dW))\nprint(\"db_mean =\", np.mean(db))", "dA_mean = 1.4524377775388075\ndW_mean = 1.7269914583139094\ndb_mean = 7.839232564616829\n" ] ], [ [ "** Expected Output: **\n<table>\n <tr>\n <td>\n **dA_mean**\n </td>\n <td>\n 1.45243777754\n </td>\n </tr>\n <tr>\n <td>\n **dW_mean**\n </td>\n <td>\n 1.72699145831\n </td>\n </tr>\n <tr>\n <td>\n **db_mean**\n </td>\n <td>\n 7.83923256462\n </td>\n </tr>\n\n</table>\n", "_____no_output_____" ], [ "## 5.2 Pooling layer - backward pass\n\nNext, let's implement the backward pass for the pooling layer, 
starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagate the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer. \n\n### 5.2.1 Max pooling - backward pass \n\nBefore jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following: \n\n$$ X = \\begin{bmatrix}\n1 && 3 \\\\\n4 && 2\n\\end{bmatrix} \\quad \\rightarrow \\quad M =\\begin{bmatrix}\n0 && 0 \\\\\n1 && 0\n\\end{bmatrix}\\tag{4}$$\n\nAs you can see, this function creates a \"mask\" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask. \n\n**Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward. \nHints:\n- [np.max()]() may be helpful. It computes the maximum of an array.\n- If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:\n```\nA[i,j] = True if X[i,j] = x\nA[i,j] = False if X[i,j] != x\n```\n- Here, you don't need to consider cases where there are several maxima in a matrix.", "_____no_output_____" ] ], [ [ "def create_mask_from_window(x):\n    \"\"\"\n    Creates a mask from an input matrix x, to identify the max entry of x.\n    \n    Arguments:\n    x -- Array of shape (f, f)\n    \n    Returns:\n    mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.\n    \"\"\"\n    \n    ### START CODE HERE ### (≈1 line)\n    mask = x == np.max(x)\n    ### END CODE HERE ###\n    \n    return mask", "_____no_output_____" ], [ "np.random.seed(1)\nx = np.random.randn(2,3)\nmask = create_mask_from_window(x)\nprint('x = ', x)\nprint(\"mask = \", mask)", "x = [[ 1.62434536 -0.61175641 -0.52817175]\n [-1.07296862 0.86540763 -2.3015387 ]]\nmask = [[ True False False]\n [False False False]]\n" ] ], [ [ "**Expected Output:** \n\n<table> \n<tr> \n<td>\n\n**x =**\n</td>\n\n<td>\n\n[[ 1.62434536 -0.61175641 -0.52817175] <br>\n [-1.07296862 0.86540763 -2.3015387 ]]\n\n </td>\n</tr>\n\n<tr> \n<td>\n**mask =**\n</td>\n<td>\n[[ True False False] <br>\n [False False False]]\n</td>\n</tr>\n\n\n</table>", "_____no_output_____" ], [ "Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will \"propagate\" the gradient back to this particular input value that had influenced the cost. ", "_____no_output_____" ], [ "### 5.2.2 - Average pooling - backward pass \n\nIn max pooling, for each input window, all the \"influence\" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. 
So to implement backprop, you will now implement a helper function that reflects this.\n\nFor example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like: \n$$ dZ = 1 \\quad \\rightarrow \\quad dZ =\\begin{bmatrix}\n1/4 && 1/4 \\\\\n1/4 && 1/4\n\\end{bmatrix}\\tag{5}$$\n\nThis implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average. \n\n**Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)", "_____no_output_____" ] ], [ [ "def distribute_value(dz, shape):\n \"\"\"\n Distributes the input value in the matrix of dimension shape\n \n Arguments:\n dz -- input scalar\n shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz\n \n Returns:\n a -- Array of size (n_H, n_W) for which we distributed the value of dz\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from shape (≈1 line)\n (n_H, n_W) = shape\n \n # Compute the value to distribute on the matrix (≈1 line)\n average = dz / (n_H * n_W)\n \n # Create a matrix where every entry is the \"average\" value (≈1 line)\n a = np.ones(shape) * average\n ### END CODE HERE ###\n \n return a", "_____no_output_____" ], [ "a = distribute_value(2, (2,2))\nprint('distributed value =', a)", "distributed value = [[0.5 0.5]\n [0.5 0.5]]\n" ] ], [ [ "**Expected Output**: \n\n<table> \n<tr> \n<td>\ndistributed_value =\n</td>\n<td>\n[[ 0.5 0.5]\n<br\\> \n[ 0.5 0.5]]\n</td>\n</tr>\n</table>", "_____no_output_____" ], [ "### 5.2.3 Putting it together: Pooling backward \n\nYou now have everything you need to compute backward propagation on a pooling layer.\n\n**Exercise**: Implement the `pool_backward` function in both modes (`\"max\"` and `\"average\"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. 
Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dZ.", "_____no_output_____" ] ], [ [ "def pool_backward(dA, cache, mode = \"max\"):\n \"\"\"\n Implements the backward pass of the pooling layer\n \n Arguments:\n dA -- gradient of cost with respect to the output of the pooling layer, same shape as A\n cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters \n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev\n \"\"\"\n \n ### START CODE HERE ###\n \n # Retrieve information from cache (≈1 line)\n (A_prev, hparameters) = cache\n \n # Retrieve hyperparameters from \"hparameters\" (≈2 lines)\n stride = hparameters['stride']\n f = hparameters['f']\n \n # Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)\n m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape\n m, n_H, n_W, n_C = dA.shape\n \n # Initialize dA_prev with zeros (≈1 line)\n dA_prev = np.zeros(A_prev.shape)\n \n for i in range(m): # loop over the training examples\n \n # select training example from A_prev (≈1 line)\n a_prev = A_prev[i]\n \n for h in range(n_H): # loop on the vertical axis\n for w in range(n_W): # loop on the horizontal axis\n for c in range(n_C): # loop over the channels (depth)\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h * stride\n vert_end = vert_start + f\n horiz_start = w * stride\n horiz_end = horiz_start + f\n \n # Compute the backward propagation in both modes.\n if mode == \"max\":\n \n # Use the corners and \"c\" to define the current slice from a_prev (≈1 line)\n a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]\n # Create the mask from a_prev_slice (≈1 line)\n mask = create_mask_from_window(a_prev_slice)\n # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += mask * dA[i, h, w, c]\n \n elif mode == \"average\":\n \n # Get the value a from dA (≈1 line)\n da = dA[i, h, w, c]\n # Define the shape of the filter as fxf (≈1 line)\n shape = (f, f)\n # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += distribute_value(da, shape)\n \n ### END CODE ###\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == A_prev.shape)\n \n return dA_prev", "_____no_output_____" ], [ "np.random.seed(1)\nA_prev = np.random.randn(5, 5, 3, 2)\nhparameters = {\"stride\" : 1, \"f\": 2}\nA, cache = pool_forward(A_prev, hparameters)\ndA = np.random.randn(5, 4, 2, 2)\n\ndA_prev = pool_backward(dA, cache, mode = \"max\")\nprint(\"mode = max\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev[1,1] = ', dA_prev[1,1]) \nprint()\ndA_prev = pool_backward(dA, cache, mode = \"average\")\nprint(\"mode = average\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev[1,1] = ', dA_prev[1,1]) ", "mode = max\nmean of dA = 0.14571390272918053\ndA_prev[1,1] = [[ 0. 0. ]\n [ 5.05844394 -1.68282702]\n [ 0. 0. 
, [ [ "**Expected Output**: \n\nmode = max:\n<table> \n<tr> \n<td>\n\n**mean of dA =**\n</td>\n\n<td>\n\n0.145713902729\n\n  </td>\n</tr>\n\n<tr> \n<td>\n**dA_prev[1,1] =** \n</td>\n<td>\n[[ 0.          0.        ] <br>\n [ 5.05844394 -1.68282702] <br>\n [ 0.          0.        ]]\n</td>\n</tr>\n</table>\n\nmode = average\n<table> \n<tr> \n<td>\n\n**mean of dA =**\n</td>\n\n<td>\n\n0.145713902729\n\n  </td>\n</tr>\n\n<tr> \n<td>\n**dA_prev[1,1] =** \n</td>\n<td>\n[[ 0.08485462  0.2787552 ] <br>\n [ 1.26461098 -0.25749373] <br>\n [ 1.17975636 -0.53624893]]\n</td>\n</tr>\n</table>", "_____no_output_____" ], [ "### Congratulations!\n\nCongratulations on completing this assignment. You now understand how convolutional neural networks work. You have implemented all the building blocks of a neural network. In the next assignment, you will implement a ConvNet using TensorFlow.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
cb622e852e6d1d8f0244c62d8c281a5b4f5c5792
39,952
ipynb
Jupyter Notebook
[6w] [jsy07] [capstone_3w]optimization(D-9_D-0).ipynb
GaYounKoh/stock_data
11a9b11f1511de2dedebf3c1652e2ff73a414f96
[ "MIT" ]
null
null
null
[6w] [jsy07] [capstone_3w]optimization(D-9_D-0).ipynb
GaYounKoh/stock_data
11a9b11f1511de2dedebf3c1652e2ff73a414f96
[ "MIT" ]
null
null
null
[6w] [jsy07] [capstone_3w]optimization(D-9_D-0).ipynb
GaYounKoh/stock_data
11a9b11f1511de2dedebf3c1652e2ff73a414f96
[ "MIT" ]
null
null
null
110.977778
12,100
0.735658
[ [ [ "c1=open(\"stock_2018.csv\",\"r\")\nlines=c1.readlines()", "_____no_output_____" ] ], [ [ "### Test output", "_____no_output_____" ] ], [ [ "l1=[1,2,3,4,5,6,7,8,9,10]\nprint(l1[4:6])\nfor line in lines[1:3]:\n    print(line)", "[5, 6]\n243,050120,2018-01-02,10250,12050,10150,11800,26086769,307823874200,0.145631,-0.00423729,10108,9308.5,8621.17,8720.42,88.8317,-13027931.942863163,300377268,-0.0063048,18544682818.902584,3787360.0,6437619.607910971,423.884,10797.5,1073.04,11611.7,7005.26,10379.8,8274.83,9327.33,13150,8020,10610,11.6865,9820,9964.87,10402.2,585.305,43.1332,0.824498,1.08595,0.66153,31.6801,139.587,-1158.5,181.035,10597.5,12990.0,99.3894,67.2394,0.617452,22.3595,45.645,73.6842,-26.3158,1259.47,9897.76,44.7853,6.24021,42.056\n\n244,050120,2018-01-03,11950,12450,10900,11750,20460474,240410569500,-0.00423729,0.0723404,10538,9448.5,8675.83,8743.33,89.5348,-11047886.07189542,279916794,-0.000901337,15749296173.345072,4355959.690865422,3712347.1053974,422.088,10849.3,1107.11,11976.9,6920.06,10572.5,8362.5,9467.5,13150,8020,10610,11.5532,10120,10239.5,10676.9,684.352,44.7943,0.811475,1.12536,0.748867,31.8566,139.281,-1218.5,214.541,10715,12912.4,99.6947,66.769,0.606881,24.6104,44.5857,72.7096,-27.2904,1435.35,10067.0,44.1718,7.16213,36.4241\n\n" ] ], [ [ "## MIN_MAX SCALING", "_____no_output_____" ] ], [ [ "# Before min-max scaling, extract the min and max values over the entire period\nmin_list=[]\nmax_list=[]\nfor line in lines[1:]:\n    l_list=line.split(',')\n    min_list.append(int(l_list[5])) # Low column\n    max_list.append(int(l_list[4])) # High column\nprint(max(max_list))\nall_max=max(max_list)\nprint(min(min_list))\nall_min=min(min_list)", "1709000\n143\n" ], [ "# Min-max scaling\n# x'= ( x - min(x) ) / ( max(x) - min(x) )\nnew_list=[]\nnew_list.append(lines[0])\nfor line in lines[1:]:\n    l_list=line.split(',') # turn the row into a list here\n    mini_list=[]\n    for l_num in range(len(l_list)): # scale every column except columns 0, 1, 2 and 8, iterating over the list length\n        if (l_num!=0) & (l_num!=1) & (l_num!=2) & (l_num!=8):\n            mini_list.append(str((float(l_list[l_num])- all_min)/(all_max-all_min)))\n        else:\n            mini_list.append(l_list[l_num])\n    new_row=\",\".join(mini_list)\n    new_list.append(new_row)\n", "_____no_output_____" ] ]
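, [ [ "*(Added aside -- a hedged sketch, not part of the original notebook flow.)* The explicit Python loop above can be expressed in a few vectorized pandas lines. This sketch assumes the same `stock_2018.csv` layout and, like the loop, deliberately applies the single global High/Low range to every scaled column while leaving columns 0, 1, 2 and 8 untouched.", "_____no_output_____" ] ], [ [ "# Added sketch: vectorized equivalent of the scaling loop above.\n# Assumes stock_2018.csv has the same column layout used by the loop.\nimport pandas as pd\n\ndf = pd.read_csv(\"stock_2018.csv\", dtype=str)         # keep codes like '050120' intact\nall_min2 = df.iloc[:, 5].astype(float).min()          # Low column, as in the loop\nall_max2 = df.iloc[:, 4].astype(float).max()          # High column, as in the loop\n\nscaled = df.copy()\nskip = {0, 1, 2, 8}                                   # columns the loop leaves unscaled\ncols = [c for i, c in enumerate(df.columns) if i not in skip]\nscaled[cols] = (df[cols].astype(float) - all_min2) / (all_max2 - all_min2)", "_____no_output_____" ] ]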
, [ [ "## Filtering rows with a trading value of at least 10 billion KRW", "_____no_output_____" ], [ "### Building the column list for the new data", "_____no_output_____" ] ], [ [ "Code_list=new_list[0].split(',') # column (header) list of the data\nnew_code=[]\nnew_code.append(Code_list[1])\nnew_code.append(Code_list[2])\n\nfor i in range(9, -1, -1):\n    for num1 in Code_list[3:]:\n        if num1==\"Next_Change\":\n            continue\n        else:\n            new_code.append('D-'+str(i)+'_'+num1)\nnew_code.append(\"Next_Change\")\nnew_code_s=\",\".join(new_code)", "_____no_output_____" ] ], [ [ "### Checking the column names", "_____no_output_____" ] ], [ [ "print(new_code)\nprint(len(new_code_s.split(',')))", "['Code', 'Date', 'D-9_Open', 'D-9_High', 'D-9_Low', 'D-9_Close', 'D-9_Volume', 'D-9_Trading_Value', 'D-9_Change', 'D-9_MA5', 'D-9_MA20', 'D-9_MA60', 'D-9_MA120', 'D-9_MFI', 'D-9_ADI', 'D-9_OBV', 'D-9_CMF', 'D-9_FI', 'D-9_EOM_EMV', 'D-9_VPT', 'D-9_NVI', 'D-9_VMAP', 'D-9_ATR', 'D-9_BHB', 'D-9_BLB', 'D-9_KCH', 'D-9_KCL', 'D-9_KCM', 'D-9_DCH', 'D-9_DCL', 'D-9_DCM', 'D-9_UI', 'D-9_SMA', 'D-9_EMA', 'D-9_WMA', 'D-9_MACD', 'D-9_ADX', 'D-9_-VI', 'D-9_+VI', 'D-9_TRIX', 'D-9_MI', 'D-9_CCI', 'D-9_DPO', 'D-9_KST', 'D-9_Ichimoku', 'D-9_Parabolic_SAR', 'D-9_STC', 'D-9_RSI', 'D-9_SRSI', 'D-9_TSI', 'D-9_UO', 'D-9_SR', 'D-9_WR', 'D-9_AO', 'D-9_KAMA', 'D-9_ROC', 'D-9_PPO', 'D-9_PVO\\n', 'D-8_Open', 'D-8_High', 'D-8_Low', 'D-8_Close', 'D-8_Volume', 'D-8_Trading_Value', 'D-8_Change', 'D-8_MA5', 'D-8_MA20', 'D-8_MA60', 'D-8_MA120', 'D-8_MFI', 'D-8_ADI', 'D-8_OBV', 'D-8_CMF', 'D-8_FI', 'D-8_EOM_EMV', 'D-8_VPT', 'D-8_NVI', 'D-8_VMAP', 'D-8_ATR', 'D-8_BHB', 'D-8_BLB', 'D-8_KCH', 'D-8_KCL', 'D-8_KCM', 'D-8_DCH', 'D-8_DCL', 'D-8_DCM', 'D-8_UI', 'D-8_SMA', 'D-8_EMA', 'D-8_WMA', 'D-8_MACD', 'D-8_ADX', 'D-8_-VI', 'D-8_+VI', 'D-8_TRIX', 'D-8_MI', 'D-8_CCI', 'D-8_DPO', 'D-8_KST', 'D-8_Ichimoku', 'D-8_Parabolic_SAR', 'D-8_STC', 'D-8_RSI', 'D-8_SRSI', 'D-8_TSI', 'D-8_UO', 'D-8_SR', 'D-8_WR', 'D-8_AO', 'D-8_KAMA', 'D-8_ROC', 'D-8_PPO', 'D-8_PVO\\n', 'D-7_Open', 'D-7_High', 'D-7_Low', 'D-7_Close', 'D-7_Volume', 'D-7_Trading_Value', 'D-7_Change', 'D-7_MA5', 'D-7_MA20', 'D-7_MA60', 'D-7_MA120', 'D-7_MFI', 'D-7_ADI', 'D-7_OBV', 'D-7_CMF', 'D-7_FI', 'D-7_EOM_EMV', 'D-7_VPT', 'D-7_NVI', 'D-7_VMAP', 'D-7_ATR', 'D-7_BHB', 'D-7_BLB', 'D-7_KCH', 'D-7_KCL', 'D-7_KCM', 'D-7_DCH', 'D-7_DCL', 'D-7_DCM', 'D-7_UI', 'D-7_SMA', 'D-7_EMA', 'D-7_WMA', 'D-7_MACD', 'D-7_ADX', 'D-7_-VI', 'D-7_+VI', 'D-7_TRIX', 'D-7_MI', 'D-7_CCI', 'D-7_DPO', 'D-7_KST', 'D-7_Ichimoku', 'D-7_Parabolic_SAR', 'D-7_STC', 'D-7_RSI', 'D-7_SRSI', 'D-7_TSI', 'D-7_UO', 'D-7_SR', 'D-7_WR', 'D-7_AO', 'D-7_KAMA', 'D-7_ROC', 'D-7_PPO', 'D-7_PVO\\n', 'D-6_Open', 'D-6_High', 'D-6_Low', 'D-6_Close', 'D-6_Volume', 'D-6_Trading_Value', 'D-6_Change', 'D-6_MA5', 'D-6_MA20', 'D-6_MA60', 'D-6_MA120', 'D-6_MFI', 'D-6_ADI', 'D-6_OBV', 'D-6_CMF', 'D-6_FI', 'D-6_EOM_EMV', 'D-6_VPT', 'D-6_NVI', 'D-6_VMAP', 'D-6_ATR', 'D-6_BHB', 'D-6_BLB', 'D-6_KCH', 'D-6_KCL', 'D-6_KCM', 'D-6_DCH', 'D-6_DCL', 'D-6_DCM', 'D-6_UI', 'D-6_SMA', 'D-6_EMA', 'D-6_WMA', 'D-6_MACD', 'D-6_ADX', 'D-6_-VI', 'D-6_+VI', 'D-6_TRIX', 'D-6_MI', 'D-6_CCI', 'D-6_DPO', 'D-6_KST', 'D-6_Ichimoku', 'D-6_Parabolic_SAR', 'D-6_STC', 'D-6_RSI', 'D-6_SRSI', 'D-6_TSI', 'D-6_UO', 'D-6_SR', 'D-6_WR', 'D-6_AO', 'D-6_KAMA', 'D-6_ROC', 'D-6_PPO', 'D-6_PVO\\n', 'D-5_Open', 'D-5_High', 
'D-5_Low', 'D-5_Close', 'D-5_Volume', 'D-5_Trading_Value', 'D-5_Change', 'D-5_MA5', 'D-5_MA20', 'D-5_MA60', 'D-5_MA120', 'D-5_MFI', 'D-5_ADI', 'D-5_OBV', 'D-5_CMF', 'D-5_FI', 'D-5_EOM_EMV', 'D-5_VPT', 'D-5_NVI', 'D-5_VMAP', 'D-5_ATR', 'D-5_BHB', 'D-5_BLB', 'D-5_KCH', 'D-5_KCL', 'D-5_KCM', 'D-5_DCH', 'D-5_DCL', 'D-5_DCM', 'D-5_UI', 'D-5_SMA', 'D-5_EMA', 'D-5_WMA', 'D-5_MACD', 'D-5_ADX', 'D-5_-VI', 'D-5_+VI', 'D-5_TRIX', 'D-5_MI', 'D-5_CCI', 'D-5_DPO', 'D-5_KST', 'D-5_Ichimoku', 'D-5_Parabolic_SAR', 'D-5_STC', 'D-5_RSI', 'D-5_SRSI', 'D-5_TSI', 'D-5_UO', 'D-5_SR', 'D-5_WR', 'D-5_AO', 'D-5_KAMA', 'D-5_ROC', 'D-5_PPO', 'D-5_PVO\\n', 'D-4_Open', 'D-4_High', 'D-4_Low', 'D-4_Close', 'D-4_Volume', 'D-4_Trading_Value', 'D-4_Change', 'D-4_MA5', 'D-4_MA20', 'D-4_MA60', 'D-4_MA120', 'D-4_MFI', 'D-4_ADI', 'D-4_OBV', 'D-4_CMF', 'D-4_FI', 'D-4_EOM_EMV', 'D-4_VPT', 'D-4_NVI', 'D-4_VMAP', 'D-4_ATR', 'D-4_BHB', 'D-4_BLB', 'D-4_KCH', 'D-4_KCL', 'D-4_KCM', 'D-4_DCH', 'D-4_DCL', 'D-4_DCM', 'D-4_UI', 'D-4_SMA', 'D-4_EMA', 'D-4_WMA', 'D-4_MACD', 'D-4_ADX', 'D-4_-VI', 'D-4_+VI', 'D-4_TRIX', 'D-4_MI', 'D-4_CCI', 'D-4_DPO', 'D-4_KST', 'D-4_Ichimoku', 'D-4_Parabolic_SAR', 'D-4_STC', 'D-4_RSI', 'D-4_SRSI', 'D-4_TSI', 'D-4_UO', 'D-4_SR', 'D-4_WR', 'D-4_AO', 'D-4_KAMA', 'D-4_ROC', 'D-4_PPO', 'D-4_PVO\\n', 'D-3_Open', 'D-3_High', 'D-3_Low', 'D-3_Close', 'D-3_Volume', 'D-3_Trading_Value', 'D-3_Change', 'D-3_MA5', 'D-3_MA20', 'D-3_MA60', 'D-3_MA120', 'D-3_MFI', 'D-3_ADI', 'D-3_OBV', 'D-3_CMF', 'D-3_FI', 'D-3_EOM_EMV', 'D-3_VPT', 'D-3_NVI', 'D-3_VMAP', 'D-3_ATR', 'D-3_BHB', 'D-3_BLB', 'D-3_KCH', 'D-3_KCL', 'D-3_KCM', 'D-3_DCH', 'D-3_DCL', 'D-3_DCM', 'D-3_UI', 'D-3_SMA', 'D-3_EMA', 'D-3_WMA', 'D-3_MACD', 'D-3_ADX', 'D-3_-VI', 'D-3_+VI', 'D-3_TRIX', 'D-3_MI', 'D-3_CCI', 'D-3_DPO', 'D-3_KST', 'D-3_Ichimoku', 'D-3_Parabolic_SAR', 'D-3_STC', 'D-3_RSI', 'D-3_SRSI', 'D-3_TSI', 'D-3_UO', 'D-3_SR', 'D-3_WR', 'D-3_AO', 'D-3_KAMA', 'D-3_ROC', 'D-3_PPO', 'D-3_PVO\\n', 'D-2_Open', 'D-2_High', 'D-2_Low', 'D-2_Close', 'D-2_Volume', 'D-2_Trading_Value', 'D-2_Change', 'D-2_MA5', 'D-2_MA20', 'D-2_MA60', 'D-2_MA120', 'D-2_MFI', 'D-2_ADI', 'D-2_OBV', 'D-2_CMF', 'D-2_FI', 'D-2_EOM_EMV', 'D-2_VPT', 'D-2_NVI', 'D-2_VMAP', 'D-2_ATR', 'D-2_BHB', 'D-2_BLB', 'D-2_KCH', 'D-2_KCL', 'D-2_KCM', 'D-2_DCH', 'D-2_DCL', 'D-2_DCM', 'D-2_UI', 'D-2_SMA', 'D-2_EMA', 'D-2_WMA', 'D-2_MACD', 'D-2_ADX', 'D-2_-VI', 'D-2_+VI', 'D-2_TRIX', 'D-2_MI', 'D-2_CCI', 'D-2_DPO', 'D-2_KST', 'D-2_Ichimoku', 'D-2_Parabolic_SAR', 'D-2_STC', 'D-2_RSI', 'D-2_SRSI', 'D-2_TSI', 'D-2_UO', 'D-2_SR', 'D-2_WR', 'D-2_AO', 'D-2_KAMA', 'D-2_ROC', 'D-2_PPO', 'D-2_PVO\\n', 'D-1_Open', 'D-1_High', 'D-1_Low', 'D-1_Close', 'D-1_Volume', 'D-1_Trading_Value', 'D-1_Change', 'D-1_MA5', 'D-1_MA20', 'D-1_MA60', 'D-1_MA120', 'D-1_MFI', 'D-1_ADI', 'D-1_OBV', 'D-1_CMF', 'D-1_FI', 'D-1_EOM_EMV', 'D-1_VPT', 'D-1_NVI', 'D-1_VMAP', 'D-1_ATR', 'D-1_BHB', 'D-1_BLB', 'D-1_KCH', 'D-1_KCL', 'D-1_KCM', 'D-1_DCH', 'D-1_DCL', 'D-1_DCM', 'D-1_UI', 'D-1_SMA', 'D-1_EMA', 'D-1_WMA', 'D-1_MACD', 'D-1_ADX', 'D-1_-VI', 'D-1_+VI', 'D-1_TRIX', 'D-1_MI', 'D-1_CCI', 'D-1_DPO', 'D-1_KST', 'D-1_Ichimoku', 'D-1_Parabolic_SAR', 'D-1_STC', 'D-1_RSI', 'D-1_SRSI', 'D-1_TSI', 'D-1_UO', 'D-1_SR', 'D-1_WR', 'D-1_AO', 'D-1_KAMA', 'D-1_ROC', 'D-1_PPO', 'D-1_PVO\\n', 'D-0_Open', 'D-0_High', 'D-0_Low', 'D-0_Close', 'D-0_Volume', 'D-0_Trading_Value', 'D-0_Change', 'D-0_MA5', 'D-0_MA20', 'D-0_MA60', 'D-0_MA120', 'D-0_MFI', 'D-0_ADI', 'D-0_OBV', 'D-0_CMF', 'D-0_FI', 'D-0_EOM_EMV', 'D-0_VPT', 'D-0_NVI', 'D-0_VMAP', 'D-0_ATR', 'D-0_BHB', 
'D-0_BLB', 'D-0_KCH', 'D-0_KCL', 'D-0_KCM', 'D-0_DCH', 'D-0_DCL', 'D-0_DCM', 'D-0_UI', 'D-0_SMA', 'D-0_EMA', 'D-0_WMA', 'D-0_MACD', 'D-0_ADX', 'D-0_-VI', 'D-0_+VI', 'D-0_TRIX', 'D-0_MI', 'D-0_CCI', 'D-0_DPO', 'D-0_KST', 'D-0_Ichimoku', 'D-0_Parabolic_SAR', 'D-0_STC', 'D-0_RSI', 'D-0_SRSI', 'D-0_TSI', 'D-0_UO', 'D-0_SR', 'D-0_WR', 'D-0_AO', 'D-0_KAMA', 'D-0_ROC', 'D-0_PPO', 'D-0_PVO\\n', 'Next_Change']\n563\n" ], [ "#new_list사용\nnew_csv=[]\nnew_csv.append(new_code_s)#index목록을 새 데이터에 추가\nstandard=new_list[10].split(',') #최소 10번째 데이터 이후여야 하므로 기준이 되는 10번째 데이터를 변수에 저장\ni=1#현재 몇번째 데이터인지?\nfor line in new_list[1:]:\n \n new_data=[]\n l_list=line.split(',')\n date=l_list[2].replace(\"-\",\"\") \n #1번째 변수=코드\n if int(date)>=int(standard[2].replace(\"-\",\"\")): #10번째 데이터가 되기 위한 최소날짜\n now_code=l_list[1]\n pre=new_list[i-9].split(\",\")\n if pre[1]==now_code: #10개 전 데이터의 코드와 현재 코드 일치한지\n if float(l_list[8])>=1.00E+10:#8번째:traiding_value가 100억이상\n new_data.extend(l_list[1:3]) #2번째\n \n for m in range(i-9,i): #(lines.index(line))=지금데이터번호\n # 9일 전~당일 데이터를 추가\n d_list=(new_list[m]).split(',') #m일 전 데이터를 리스트화\n # 당일 데이터의 index~날짜\n # 11번째가 Next_change\n new_data.extend(d_list[3:11])\n new_data.extend(d_list[12:]) \n new_data.extend(l_list[3:11]) \n new_data.extend(l_list[12:]) \n new_data.append(l_list[11])\n new_data_str=\",\".join(new_data) #새로 만들어진 행을 str(콤마(,)구문 행)로\n new_csv.append(new_data_str) #새 파일에 저장될 행 추가\n i+=1", "_____no_output_____" ] ], [ [ "### 시험 출력", "_____no_output_____" ] ], [ [ "#print(len(new_csv[1].split(',')))\nprint(new_csv[1])", "050120,2018-01-15,0.005914479678521959,0.006967815329193724,0.005855961031262417,0.006821518711044868,15.2655406508561,307823874200,-8.359644428995521e-05,-8.368414518593422e-05,0.0053635266145733666,0.004961310396364354,0.00501939015376945,-3.169855640349075e-05,-7.6238532205229355,175.77663022710502,-8.368535506481819e-05,10852.097440512918,2.2162281571834272,3.7671242285989823,0.00016436951716849334,0.006234869272267954,0.000544246826972649,0.00671132809825515,0.004015701723432681,0.005990436882664845,0.004758636913445654,0.005374545675852339,0.0076115204490486916,0.004609513844634162,0.006125146808656313,-7.684288386915933e-05,0.005662849495305927,0.0057476254595908265,0.006003545059650984,0.00025883090276131935,-5.844070042139278e-05,-8.319918050486377e-05,-8.304618233123077e-05,-8.329454717392971e-05,-6.514289961067544e-05,-1.9972414309681915e-06,-0.0007616201940829455,2.2257567485166982e-05,0.0061178319777488695,0.007517890613433424,-2.552033318177004e-05,-4.4334078275712944e-05,-8.332034102326878e-05,-7.059718864714836e-05,-5.6970829039527585e-05,-4.056266849712995e-05,-9.908131575667245e-05,0.000653343141058614,0.005708353595414947,-5.747391385001787e-05,-8.002997910299107e-05,-5.907106328967257e-05,0.0069092966819341815,0.0072018899182318945,0.006294850885708986,0.006792259387415097,11.973108926024823,240410569500,-8.368414518593422e-05,-8.363933295764362e-05,0.005445452720736726,0.004993296688956419,0.005032796775856611,-3.128711179460891e-05,-6.465157161714187,163.8034376194146,-8.36821930313654e-05,9216.27498985876,2.5489650045998125,2.1723316259917596,0.00016331852226371196,0.006265181931548397,0.000564184130093975,0.006925038198046999,0.0039658438359675505,0.006103202315933984,0.004809940211498095,0.00545657126371604,0.0076115204490486916,0.004609513844634162,0.006125146808656313,-7.69208892259563e-05,0.005838405437084554,0.0059083352205597075,0.006164295783672946,0.0003167918673124784,-5.746864717176452e-05,-8.320680138829638e-05,-8.30231
2013234577e-05,-8.324343874297265e-05,-6.503961419826234e-05,-2.1763084915823815e-06,-0.000796731382438671,4.186482543594929e-05,0.006186591388278832,0.007472480143160019,-2.5341675751686656e-05,-4.460934999242183e-05,-8.332652702947058e-05,-6.927999241598332e-05,-5.7590717069947924e-05,-4.1132991233321456e-05,-9.965163849286395e-05,0.0007562657378586973,0.005807390554036997,-5.7832925750955174e-05,-7.94904839901759e-05,-6.236677498468274e-05,0.006850778034674639,0.008196706921644117,0.006704481416525783,0.007289667889121208,35.49958305463828,764364560400,-8.363933295764362e-05,-8.370024291090477e-05,0.005563660388201002,0.0050321179595484,0.0050492814787896235,-3.0259933979262165e-05,-14.121948026407177,199.30310435571846,-8.369979641362618e-05,12210.338083047025,2.9517080164715006,2.517243449298065,0.00016331852226371196,0.006428156364166223,0.0006244992998243856,0.007224829227957635,0.0039024856965796436,0.006286775312387169,0.004866510187803895,0.005576633972298442,0.008196706921644117,0.004609513844634162,0.006555258866013949,-7.706701028816339e-05,0.006051530350403809,0.006120874947406366,0.0064036955696117346,0.0003973187926198623,-5.625520450219065e-05,-8.32428395120247e-05,-8.300830906272439e-05,-8.317922506096181e-05,-6.49510169663114e-05,1.6045227892093962e-05,-0.0010085688855182148,5.7966231229412415e-05,0.0065625736969213925,0.007472480143160019,-2.5252317777321335e-05,-4.240138291267203e-05,-8.327691023883216e-05,-6.736140004693196e-05,-5.6757118939735745e-05,-3.9959750874414886e-05,-9.847839813395739e-05,0.001080049413145746,0.00589774334540573,-5.260920018468485e-05,-7.876381113223635e-05,-5.886613098696965e-05,0.007406705183640293,0.007640779772678463,0.006938556005563953,0.007055593300083038,8.154640792061594,170010147600,-8.370024291090477e-05,-8.365048760662829e-05,0.00566753098708669,0.005069177818857868,0.00506351906566787,-3.215043739762894e-05,-19.55843100889196,191.1483798819913,-8.371183814678465e-05,10000.019803587089,-1.3858602202591987,2.3090971742567192,0.00015547702353093325,0.006453670494371384,0.0006240721136993909,0.0074261918931777205,0.0039088466735367556,0.0064275126590463685,0.004951069633093934,0.005689282368273062,0.008196706921644117,0.004609513844634162,0.006722037010703646,-7.719738983425763e-05,0.0062528930156238935,0.006264655263723062,0.006564446293633698,0.0004362553449469441,-5.51283694305609e-05,-8.321678642507828e-05,-8.299935570969368e-05,-8.311619638155797e-05,-6.492708283958225e-05,-6.970156075084107e-06,0.00010474837859458105,7.232319614806855e-05,0.0065625736969213925,0.007472480143160019,-2.5207668049462304e-05,-4.464744563178779e-05,-8.332738315727999e-05,-6.674484757940542e-05,-5.6256491912430357e-05,-4.393135294527278e-05,-0.00010245000020481527,0.001363607370306585,0.005948771605816051,-5.373346043583518e-05,-7.846136920760486e-05,-6.376144990481943e-05,0.0072018899182318945,0.007757817067197548,0.007143371270972352,0.007435964507270064,9.638936435289787,211661434950,-8.365048760662829e-05,-8.37340363763615e-05,0.005791297926040623,0.00511248161782993,0.005081806142936477,-3.186176491069761e-05,-20.017431966842217,200.78739999894665,-8.371158973512705e-05,9466.497401980267,1.0257516732367915,0.2545905268048576,0.00015547702353093325,0.006490478723497636,0.000623674186898026,0.007677353927215677,0.0039052360730008418,0.0065770278027945,0.005048503180781072,0.005812774269584875,0.008196706921644117,0.004609513844634162,0.006722037010703646,-7.732057158673897e-05,0.006384559971957864,0.0064448341786351926,0.006797350509726677,0.000491
1791917053328,-5.4049987798862045e-05,-8.32205743371154e-05,-8.300066067552756e-05,-8.305028448840366e-05,-6.495242141384563e-05,-6.643036836903266e-06,-1.901856035935131e-05,8.258268538561156e-05,0.0065625736969213925,0.007472480143160019,-2.5185372444856416e-05,-4.295350634956582e-05,-8.328931677723766e-05,-6.563515847142272e-05,-5.514375983479016e-05,-3.767524140404961e-05,-9.619388866359209e-05,0.0015509665232374622,0.005978733153212936,-6.811512022363486e-05,-7.803136833567701e-05,-6.750921814990956e-05,0.007406705183640293,0.007494483154529607,0.006528925474747155,0.006763000063785326,4.591443286360415,91801277100,-8.37340363763615e-05,-8.35066099738012e-05,0.005884342575183295,0.005141348866523062,0.005095511210124662,-3.278080026590874e-05,-22.382764034673468,196.19587303092067,-8.371609333022014e-05,7359.818330710005,-9.22955075200006,0.102555684881766,0.0001340738282957556,0.0064989639273502695,0.0006421192645142338,0.007769345240707678,0.003999322354064735,0.006720574044522157,0.0051060445666313795,0.005913309305576768,0.008196706921644117,0.004609513844634162,0.006722037010703646,-7.735258128678996e-05,0.006460166064217193,0.006493804177722,0.006867046218612792,0.0004739752945975,-5.408597676692666e-05,-8.320371921114523e-05,-8.30285155516231e-05,-8.300224067900356e-05,-6.521645755028068e-05,-3.640544527716479e-05,0.0009120131175399697,9.249223311254246e-05,0.0065625736969213925,0.007472480143160019,-2.517419538322984e-05,-4.874129315677087e-05,-8.352659584739975e-05,-6.687979157998592e-05,-5.710489526039921e-05,-4.874375093995577e-05,-0.00010726239819949825,0.0016042360478378237,0.005996639859274357,-7.474534147678828e-05,-7.82606912105577e-05,-7.233911322012315e-05,0.006733740740155554,0.008811152717869312,0.00664596276926624,0.008811152717869312,23.200553352328487,602627879200,-8.35066099738012e-05,-8.376828839393817e-05,0.006079502263793869,0.005205918341909241,0.00512652609317222,-3.2338340773979335e-05,0.8178724141341259,219.39651006491474,-8.366699975480686e-05,17908.73421707966,6.689921582695776,6.529362959387287,0.0001340738282957556,0.006683063591628791,0.000744930675884524,0.008298412330581201,0.003860598048871263,0.006982620546950388,0.005160952613355008,0.0060717778023556095,0.008811152717869312,0.004609513844634162,0.007029259908816244,-7.735258128678996e-05,0.006621092344180935,0.006850309885496563,0.0073190442500454984,0.0006175297289357741,-5.360085718114506e-05,-8.320033975926599e-05,-8.302766117937311e-05,-8.292600258535383e-05,-6.533525040421755e-05,4.924344166890496e-06,-1.4629661814885623e-05,0.00012879252038058188,0.006869796595033991,0.007472480143160019,-2.516857759309293e-05,-4.124891667354261e-05,-8.330232254659108e-05,-6.465210371610965e-05,-5.065777885452089e-05,-2.516301832160327e-05,-8.368166558114575e-05,0.0017133557693826927,0.006471460163138284,-6.745380099095478e-05,-7.709814220850544e-05,-7.021237002276961e-05,0.008050410303495261,0.011005601990102157,0.006733740740155554,0.007494483154529607,65.05996873933863,1439759321700,-8.376828839393817e-05,-8.364777444806675e-05,0.006214095152490817,0.005251170811835046,0.005146908138012719,-3.163371774232718e-05,-41.0701059632748,154.3364576439105,-8.373799668433346e-05,-5561.81610406617,7.492500293196455,-2.690322040721201,0.0001340738282957556,0.007207215115132512,0.0009908786984516552,0.008433414849808966,0.003994763751443216,0.007368258432390773,0.005133448849143024,0.0062508448629698095,0.011005601990102157,0.004609513844634162,0.008126484544932665,-7.69424240881478e-05,0.006733272590977478,0.0069
49440473954228,0.0074288252323044,0.0006169738017868083,-5.266479289958142e-05,-8.32186672144012e-05,-8.304083372687124e-05,-8.28658395641063e-05,-6.516981818841483e-05,5.6745532247578366e-06,-0.0007636683467370295,0.00014598412857249026,0.008018225047502513,0.007472480143160019,-2.516582721667173e-05,-4.890046387731682e-05,-8.353136160603258e-05,-6.652733376754169e-05,-5.860069040300036e-05,-6.0845114599992864e-05,-0.00011936376185953534,0.0018760024975758652,0.0065428529127949265,-7.104386148168044e-05,-7.720686985511367e-05,-5.971582174517821e-05,0.008869671365128855,0.008928190012388398,0.007318927212750979,0.007933373008976174,33.70424968268264,789063638200,-8.364777444806675e-05,-8.360905096213434e-05,0.006376484398636048,0.005305885747022718,0.005170807153553516,-4.028166195298963e-05,-49.03658475848438,188.04079100825874,-8.374491546103623e-05,-1156.0923835453605,-3.5625083825161044,-7.678695127832216,0.0001466851819666596,0.007413844458605957,0.0010290738195179584,0.008599958919909624,0.0041530040254977455,0.007607716736976821,0.005227178166458633,0.006417447451717727,0.011005601990102157,0.004609513844634162,0.008228892177636865,-7.67679800006671e-05,0.006933172290016075,0.007100769695767405,0.007584192240778485,0.0006435646751015444,-5.179561543183543e-05,-8.314195980120045e-05,-8.305194641798583e-05,-8.281227159440492e-05,-6.503048528928987e-05,-1.5961546226512807e-05,-0.0012010952350021097,0.00016547786034758907,0.008119169714025223,0.007472480143160019,-2.5164422769137496e-05,-4.7457335517249254e-05,-8.338316254666131e-05,-6.721615676443377e-05,-5.945436042922257e-05,-5.638488182451779e-05,-0.00011490352908406029,0.0019432170158181754,0.006644265728495714,-5.868934615359857e-05,-7.709100293353979e-05,-6.019707910024069e-05,0.007933373008976174,0.009337820543205195,0.007757817067197548,0.008928190012388398,34.09099005943739,897154258000,-8.360905096213434e-05,-8.372156464818297e-05,0.006588614494951889,0.0053783493879242085,0.005203723892637008,-3.674046453272567e-05,-32.62236406832369,222.1318647493617,-8.371731689661569e-05,7288.324424988746,1.9662157202846886,6.182183960467609,0.0001466851819666596,0.007607950811565859,0.0010624470040500756,0.008932578910932864,0.0042446266715120105,0.0078789506670248,0.005355041410720733,0.006616996038872767,0.011005601990102157,0.004638773168263933,0.008430781510682286,-7.69660071029934e-05,0.007238932221947184,0.007381951795849507,0.00789434107125406,0.0007354682106226559,-5.0920234987479935e-05,-8.31715269329148e-05,-8.303996179902707e-05,-8.274777234139545e-05,-6.488500793220264e-05,-1.345928886969251e-08,-0.0010738171772126047,0.00018607291306411243,0.008228892177636865,0.007472480143160019,-2.5163720545370382e-05,-4.457634547536745e-05,-8.325012742435441e-05,-6.65225352384664e-05,-5.712666419717975e-05,-4.6274907730722935e-05,-0.00010479355499026543,0.002049837991125062,0.006855927675633478,-4.349416013159673e-05,-7.646947638099619e-05,-6.094600074786831e-05,0.007986039791509764\n" ] ], [ [ "### \\n(줄바꿈 표시 삭제)", "_____no_output_____" ] ], [ [ "error=\"\\n\"\nfor i in range(len(new_csv)):\n new_csv[i]=new_csv[i].replace(error,\"\")", "_____no_output_____" ] ], [ [ "### 새로 만들어진 str행들을 다시 리스트 형태로 저장", "_____no_output_____" ] ], [ [ "new_array=[]\nfor new in new_csv:\n new_array.append([new])\nprint(new_array[1])", 
"['050120,2018-01-15,0.005914479678521959,0.006967815329193724,0.005855961031262417,0.006821518711044868,15.2655406508561,307823874200,-8.359644428995521e-05,-8.368414518593422e-05,0.0053635266145733666,0.004961310396364354,0.00501939015376945,-3.169855640349075e-05,-7.6238532205229355,175.77663022710502,-8.368535506481819e-05,10852.097440512918,2.2162281571834272,3.7671242285989823,0.00016436951716849334,0.006234869272267954,0.000544246826972649,0.00671132809825515,0.004015701723432681,0.005990436882664845,0.004758636913445654,0.005374545675852339,0.0076115204490486916,0.004609513844634162,0.006125146808656313,-7.684288386915933e-05,0.005662849495305927,0.0057476254595908265,0.006003545059650984,0.00025883090276131935,-5.844070042139278e-05,-8.319918050486377e-05,-8.304618233123077e-05,-8.329454717392971e-05,-6.514289961067544e-05,-1.9972414309681915e-06,-0.0007616201940829455,2.2257567485166982e-05,0.0061178319777488695,0.007517890613433424,-2.552033318177004e-05,-4.4334078275712944e-05,-8.332034102326878e-05,-7.059718864714836e-05,-5.6970829039527585e-05,-4.056266849712995e-05,-9.908131575667245e-05,0.000653343141058614,0.005708353595414947,-5.747391385001787e-05,-8.002997910299107e-05,-5.907106328967257e-05,0.0069092966819341815,0.0072018899182318945,0.006294850885708986,0.006792259387415097,11.973108926024823,240410569500,-8.368414518593422e-05,-8.363933295764362e-05,0.005445452720736726,0.004993296688956419,0.005032796775856611,-3.128711179460891e-05,-6.465157161714187,163.8034376194146,-8.36821930313654e-05,9216.27498985876,2.5489650045998125,2.1723316259917596,0.00016331852226371196,0.006265181931548397,0.000564184130093975,0.006925038198046999,0.0039658438359675505,0.006103202315933984,0.004809940211498095,0.00545657126371604,0.0076115204490486916,0.004609513844634162,0.006125146808656313,-7.69208892259563e-05,0.005838405437084554,0.0059083352205597075,0.006164295783672946,0.0003167918673124784,-5.746864717176452e-05,-8.320680138829638e-05,-8.302312013234577e-05,-8.324343874297265e-05,-6.503961419826234e-05,-2.1763084915823815e-06,-0.000796731382438671,4.186482543594929e-05,0.006186591388278832,0.007472480143160019,-2.5341675751686656e-05,-4.460934999242183e-05,-8.332652702947058e-05,-6.927999241598332e-05,-5.7590717069947924e-05,-4.1132991233321456e-05,-9.965163849286395e-05,0.0007562657378586973,0.005807390554036997,-5.7832925750955174e-05,-7.94904839901759e-05,-6.236677498468274e-05,0.006850778034674639,0.008196706921644117,0.006704481416525783,0.007289667889121208,35.49958305463828,764364560400,-8.363933295764362e-05,-8.370024291090477e-05,0.005563660388201002,0.0050321179595484,0.0050492814787896235,-3.0259933979262165e-05,-14.121948026407177,199.30310435571846,-8.369979641362618e-05,12210.338083047025,2.9517080164715006,2.517243449298065,0.00016331852226371196,0.006428156364166223,0.0006244992998243856,0.007224829227957635,0.0039024856965796436,0.006286775312387169,0.004866510187803895,0.005576633972298442,0.008196706921644117,0.004609513844634162,0.006555258866013949,-7.706701028816339e-05,0.006051530350403809,0.006120874947406366,0.0064036955696117346,0.0003973187926198623,-5.625520450219065e-05,-8.32428395120247e-05,-8.300830906272439e-05,-8.317922506096181e-05,-6.49510169663114e-05,1.6045227892093962e-05,-0.0010085688855182148,5.7966231229412415e-05,0.0065625736969213925,0.007472480143160019,-2.5252317777321335e-05,-4.240138291267203e-05,-8.327691023883216e-05,-6.736140004693196e-05,-5.6757118939735745e-05,-3.9959750874414886e-05,-9.847839813395739e-05,0.00108004941314574
6,0.00589774334540573,-5.260920018468485e-05,-7.876381113223635e-05,-5.886613098696965e-05,0.007406705183640293,0.007640779772678463,0.006938556005563953,0.007055593300083038,8.154640792061594,170010147600,-8.370024291090477e-05,-8.365048760662829e-05,0.00566753098708669,0.005069177818857868,0.00506351906566787,-3.215043739762894e-05,-19.55843100889196,191.1483798819913,-8.371183814678465e-05,10000.019803587089,-1.3858602202591987,2.3090971742567192,0.00015547702353093325,0.006453670494371384,0.0006240721136993909,0.0074261918931777205,0.0039088466735367556,0.0064275126590463685,0.004951069633093934,0.005689282368273062,0.008196706921644117,0.004609513844634162,0.006722037010703646,-7.719738983425763e-05,0.0062528930156238935,0.006264655263723062,0.006564446293633698,0.0004362553449469441,-5.51283694305609e-05,-8.321678642507828e-05,-8.299935570969368e-05,-8.311619638155797e-05,-6.492708283958225e-05,-6.970156075084107e-06,0.00010474837859458105,7.232319614806855e-05,0.0065625736969213925,0.007472480143160019,-2.5207668049462304e-05,-4.464744563178779e-05,-8.332738315727999e-05,-6.674484757940542e-05,-5.6256491912430357e-05,-4.393135294527278e-05,-0.00010245000020481527,0.001363607370306585,0.005948771605816051,-5.373346043583518e-05,-7.846136920760486e-05,-6.376144990481943e-05,0.0072018899182318945,0.007757817067197548,0.007143371270972352,0.007435964507270064,9.638936435289787,211661434950,-8.365048760662829e-05,-8.37340363763615e-05,0.005791297926040623,0.00511248161782993,0.005081806142936477,-3.186176491069761e-05,-20.017431966842217,200.78739999894665,-8.371158973512705e-05,9466.497401980267,1.0257516732367915,0.2545905268048576,0.00015547702353093325,0.006490478723497636,0.000623674186898026,0.007677353927215677,0.0039052360730008418,0.0065770278027945,0.005048503180781072,0.005812774269584875,0.008196706921644117,0.004609513844634162,0.006722037010703646,-7.732057158673897e-05,0.006384559971957864,0.0064448341786351926,0.006797350509726677,0.0004911791917053328,-5.4049987798862045e-05,-8.32205743371154e-05,-8.300066067552756e-05,-8.305028448840366e-05,-6.495242141384563e-05,-6.643036836903266e-06,-1.901856035935131e-05,8.258268538561156e-05,0.0065625736969213925,0.007472480143160019,-2.5185372444856416e-05,-4.295350634956582e-05,-8.328931677723766e-05,-6.563515847142272e-05,-5.514375983479016e-05,-3.767524140404961e-05,-9.619388866359209e-05,0.0015509665232374622,0.005978733153212936,-6.811512022363486e-05,-7.803136833567701e-05,-6.750921814990956e-05,0.007406705183640293,0.007494483154529607,0.006528925474747155,0.006763000063785326,4.591443286360415,91801277100,-8.37340363763615e-05,-8.35066099738012e-05,0.005884342575183295,0.005141348866523062,0.005095511210124662,-3.278080026590874e-05,-22.382764034673468,196.19587303092067,-8.371609333022014e-05,7359.818330710005,-9.22955075200006,0.102555684881766,0.0001340738282957556,0.0064989639273502695,0.0006421192645142338,0.007769345240707678,0.003999322354064735,0.006720574044522157,0.0051060445666313795,0.005913309305576768,0.008196706921644117,0.004609513844634162,0.006722037010703646,-7.735258128678996e-05,0.006460166064217193,0.006493804177722,0.006867046218612792,0.0004739752945975,-5.408597676692666e-05,-8.320371921114523e-05,-8.30285155516231e-05,-8.300224067900356e-05,-6.521645755028068e-05,-3.640544527716479e-05,0.0009120131175399697,9.249223311254246e-05,0.0065625736969213925,0.007472480143160019,-2.517419538322984e-05,-4.874129315677087e-05,-8.352659584739975e-05,-6.687979157998592e-05,-5.710489526039921e-05,-4.87437509399
5577e-05,-0.00010726239819949825,0.0016042360478378237,0.005996639859274357,-7.474534147678828e-05,-7.82606912105577e-05,-7.233911322012315e-05,0.006733740740155554,0.008811152717869312,0.00664596276926624,0.008811152717869312,23.200553352328487,602627879200,-8.35066099738012e-05,-8.376828839393817e-05,0.006079502263793869,0.005205918341909241,0.00512652609317222,-3.2338340773979335e-05,0.8178724141341259,219.39651006491474,-8.366699975480686e-05,17908.73421707966,6.689921582695776,6.529362959387287,0.0001340738282957556,0.006683063591628791,0.000744930675884524,0.008298412330581201,0.003860598048871263,0.006982620546950388,0.005160952613355008,0.0060717778023556095,0.008811152717869312,0.004609513844634162,0.007029259908816244,-7.735258128678996e-05,0.006621092344180935,0.006850309885496563,0.0073190442500454984,0.0006175297289357741,-5.360085718114506e-05,-8.320033975926599e-05,-8.302766117937311e-05,-8.292600258535383e-05,-6.533525040421755e-05,4.924344166890496e-06,-1.4629661814885623e-05,0.00012879252038058188,0.006869796595033991,0.007472480143160019,-2.516857759309293e-05,-4.124891667354261e-05,-8.330232254659108e-05,-6.465210371610965e-05,-5.065777885452089e-05,-2.516301832160327e-05,-8.368166558114575e-05,0.0017133557693826927,0.006471460163138284,-6.745380099095478e-05,-7.709814220850544e-05,-7.021237002276961e-05,0.008050410303495261,0.011005601990102157,0.006733740740155554,0.007494483154529607,65.05996873933863,1439759321700,-8.376828839393817e-05,-8.364777444806675e-05,0.006214095152490817,0.005251170811835046,0.005146908138012719,-3.163371774232718e-05,-41.0701059632748,154.3364576439105,-8.373799668433346e-05,-5561.81610406617,7.492500293196455,-2.690322040721201,0.0001340738282957556,0.007207215115132512,0.0009908786984516552,0.008433414849808966,0.003994763751443216,0.007368258432390773,0.005133448849143024,0.0062508448629698095,0.011005601990102157,0.004609513844634162,0.008126484544932665,-7.69424240881478e-05,0.006733272590977478,0.006949440473954228,0.0074288252323044,0.0006169738017868083,-5.266479289958142e-05,-8.32186672144012e-05,-8.304083372687124e-05,-8.28658395641063e-05,-6.516981818841483e-05,5.6745532247578366e-06,-0.0007636683467370295,0.00014598412857249026,0.008018225047502513,0.007472480143160019,-2.516582721667173e-05,-4.890046387731682e-05,-8.353136160603258e-05,-6.652733376754169e-05,-5.860069040300036e-05,-6.0845114599992864e-05,-0.00011936376185953534,0.0018760024975758652,0.0065428529127949265,-7.104386148168044e-05,-7.720686985511367e-05,-5.971582174517821e-05,0.008869671365128855,0.008928190012388398,0.007318927212750979,0.007933373008976174,33.70424968268264,789063638200,-8.364777444806675e-05,-8.360905096213434e-05,0.006376484398636048,0.005305885747022718,0.005170807153553516,-4.028166195298963e-05,-49.03658475848438,188.04079100825874,-8.374491546103623e-05,-1156.0923835453605,-3.5625083825161044,-7.678695127832216,0.0001466851819666596,0.007413844458605957,0.0010290738195179584,0.008599958919909624,0.0041530040254977455,0.007607716736976821,0.005227178166458633,0.006417447451717727,0.011005601990102157,0.004609513844634162,0.008228892177636865,-7.67679800006671e-05,0.006933172290016075,0.007100769695767405,0.007584192240778485,0.0006435646751015444,-5.179561543183543e-05,-8.314195980120045e-05,-8.305194641798583e-05,-8.281227159440492e-05,-6.503048528928987e-05,-1.5961546226512807e-05,-0.0012010952350021097,0.00016547786034758907,0.008119169714025223,0.007472480143160019,-2.5164422769137496e-05,-4.7457335517249254e-05,-8.338316254666131e-05,-6
.721615676443377e-05,-5.945436042922257e-05,-5.638488182451779e-05,-0.00011490352908406029,0.0019432170158181754,0.006644265728495714,-5.868934615359857e-05,-7.709100293353979e-05,-6.019707910024069e-05,0.007933373008976174,0.009337820543205195,0.007757817067197548,0.008928190012388398,34.09099005943739,897154258000,-8.360905096213434e-05,-8.372156464818297e-05,0.006588614494951889,0.0053783493879242085,0.005203723892637008,-3.674046453272567e-05,-32.62236406832369,222.1318647493617,-8.371731689661569e-05,7288.324424988746,1.9662157202846886,6.182183960467609,0.0001466851819666596,0.007607950811565859,0.0010624470040500756,0.008932578910932864,0.0042446266715120105,0.0078789506670248,0.005355041410720733,0.006616996038872767,0.011005601990102157,0.004638773168263933,0.008430781510682286,-7.69660071029934e-05,0.007238932221947184,0.007381951795849507,0.00789434107125406,0.0007354682106226559,-5.0920234987479935e-05,-8.31715269329148e-05,-8.303996179902707e-05,-8.274777234139545e-05,-6.488500793220264e-05,-1.345928886969251e-08,-0.0010738171772126047,0.00018607291306411243,0.008228892177636865,0.007472480143160019,-2.5163720545370382e-05,-4.457634547536745e-05,-8.325012742435441e-05,-6.65225352384664e-05,-5.712666419717975e-05,-4.6274907730722935e-05,-0.00010479355499026543,0.002049837991125062,0.006855927675633478,-4.349416013159673e-05,-7.646947638099619e-05,-6.094600074786831e-05,0.007986039791509764']\n" ] ], [ [ "## 파일 저장", "_____no_output_____" ] ], [ [ "import numpy as np\n\nnp.savetxt(\"3w_scaling.csv\",new_array,fmt='%s', delimiter=',')", "_____no_output_____" ], [ "# ####메모#######\n# key를 code로,value를 해당되는 날짜들의 리스트로 해서 위치검색?(시간 오래걸림)\n# 4-딕셔너리-key를 Code로, Value를 날짜로?(value값에 나머지)\n# 4-맨 처음 행의 high, low를 max, min으로 설정하고 다음 행에서 더 크거나 작을 때 업데이트하는 식으로 max,min 값 추출", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb622f6ebdf92e66b0ee3fef8b4a43831874f8ed
57,816
ipynb
Jupyter Notebook
gaussian_process_midterm.ipynb
muatik/machine-learning-examples
41f24cb168a7be1a77cf369cc508849552c8072b
[ "MIT" ]
17
2017-09-18T11:24:39.000Z
2021-02-21T11:09:13.000Z
gaussian_process_midterm.ipynb
muatik/dm
41f24cb168a7be1a77cf369cc508849552c8072b
[ "MIT" ]
null
null
null
gaussian_process_midterm.ipynb
muatik/dm
41f24cb168a7be1a77cf369cc508849552c8072b
[ "MIT" ]
11
2017-08-01T14:04:24.000Z
2022-02-13T10:50:52.000Z
129.053571
20,032
0.865124
[ [ [ "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport seaborn as sns\nsns.set(color_codes=True)\n\n%matplotlib inline", "_____no_output_____" ], [ "# the covariance of the first term always comes out as 1 (-2 - -2 gives distance 0)", "_____no_output_____" ], [ "x = np.array([-2, -1, 0, 3.5, 4,]);\ny = np.array([4.1, 0.9, 2, 12.3, 15.8])\n\nN = len(x)\nm = np.zeros((N))\nprint(x)\nprint(y)\nprint(m)", "[-2.  -1.   0.   3.5  4. ]\n[ 4.1  0.9  2.  12.3 15.8]\n[ 0.  0.  0.  0.  0.]\n" ], [ "sigma = np.cov(x, y)\nprint(sigma)", "[[  7.3    16.39 ]\n [ 16.39   44.037]]\n" ], [ "def cov(x1, x2):\n    return np.exp(-1.0/2 * np.power(x1 - x2, 2))", "_____no_output_____" ], [ "K = np.zeros((N, N))\nprint(K)\nfor i in range(N):\n    for j in range(i, N):\n        K[i][j] = cov(x[i], x[j])\n        K[j][i] = K[i][j]\nprint(K)", "[[ 0.  0.  0.  0.  0.]\n [ 0.  0.  0.  0.  0.]\n [ 0.  0.  0.  0.  0.]\n [ 0.  0.  0.  0.  0.]\n [ 0.  0.  0.  0.  0.]]\n[[ 1.00000000e+00 6.06530660e-01 1.35335283e-01 2.69957850e-07\n 1.52299797e-08]\n [ 6.06530660e-01 1.00000000e+00 6.06530660e-01 4.00652974e-05\n 3.72665317e-06]\n [ 1.35335283e-01 6.06530660e-01 1.00000000e+00 2.18749112e-03\n 3.35462628e-04]\n [ 2.69957850e-07 4.00652974e-05 2.18749112e-03 1.00000000e+00\n 8.82496903e-01]\n [ 1.52299797e-08 3.72665317e-06 3.35462628e-04 8.82496903e-01\n 1.00000000e+00]]\n" ], [ "plt.scatter(x, y)", "_____no_output_____" ], [ "cov(4, 4) # expected to be 1", "_____no_output_____" ], [ "def pred(xp):\n    K2 = np.zeros((N+1, N+1))\n    N2 = N + 1\n    \n    sigma22 = cov(xp, xp)\n    K2[N2-1][N2-1] = sigma22\n    \n    # note: this K2 block built from cov(x[i], y[j]) mixes inputs and targets\n    # and is never read below -- sigma11 reuses the global K instead\n    for i in range(N):\n        for j in range(i, N):\n            K2[i][j] = cov(x[i], y[j])\n            K2[j][i] = K2[i][j]\n    \n    for i in range(N):\n        K2[N2-1][i]= cov(xp, x[i])\n        K2[i][N2-1]= cov(xp, x[i])\n\n    sigma12 = np.array(K2[:N2-1,N2-1])\n    sigma11 = np.mat(K)\n    sigma21 = K[N-1:]\n    print(sigma12)\n    mp = (sigma12.T * sigma11.I) * np.mat(y).T\n#     sigmap = sigma22 - np.mat(sigma12).T * sigma11.I * np.mat(sigma12)\n#     return mp, sigmap\n    return mp, sigma22\n\npred(4)", "[ 1.52299797e-08 3.72665317e-06 3.35462628e-04 8.82496903e-01\n 1.00000000e+00]\n" ], [ "plt.scatter(x, y)\ndef p():\n    x = np.linspace(-5, 20, 200)\n    y = np.zeros(200)\n    yu = np.zeros(200)\n    yb = np.zeros(200)\n    for i in range(len(x)):\n        yp, sigmap = pred(x[i])\n        \n        yp = np.asarray(yp)[0][0]\n        yu[i] = yp - np.sqrt(sigmap)\n        y[i] = yp\n#     plt.plot(x, yu)\n    plt.plot(x, y)\np()", "_____no_output_____" ], [ "np.asarray(np.mat([[ 9.11765304e+27]]))[0][0]", "_____no_output_____" ], [ "def cov(x1, x2):\n    return np.exp(-1.0/2 * np.abs(x1 - x2))\n\ndef pred(xp):\n    K2 = np.zeros((N+1, N+1))\n    N2 = N + 1\n    \n    sigma22 = cov(xp, xp)\n    K2[N2-1][N2-1] = sigma22\n    \n    for i in range(N):\n        for j in range(i, N):\n            K2[i][j] = cov(x[i], y[j])\n            K2[j][i] = K2[i][j]\n    \n    for i in range(N):\n        K2[N2-1][i]= cov(xp, x[i])\n        K2[i][N2-1]= cov(xp, x[i])\n    \n    sigma12 = np.array(K2[:N2-1,N2-1])\n    sigma11 = np.mat(K)\n    sigma21 = K[N-1:]\n#     print(sigma12)\n#     print(sigma11)\n    mp = (sigma12.T * sigma11.I) * np.mat(y).T\n#     sigmap = sigma11 - np.mat(sigma12) * sigma21.T\n    return mp, sigma22\n\nplt.scatter(x, y)\ndef p():\n    x = np.linspace(-10, 10, 200)\n    y = np.zeros(200)\n    yu = np.zeros(200)\n    yb = np.zeros(200)\n    for i in range(len(x)):\n        yp, sigmap = pred(x[i])\n        \n        yp = np.asarray(yp)[0][0]\n        yu[i] = yp - np.sqrt(sigmap) * 3\n        y[i] = yp\n#     plt.plot(x, yu)\n    plt.plot(x, y)\np()", "_____no_output_____" ], [ "K", "_____no_output_____" ], [ "K[N-1:]", "_____no_output_____" ] ]
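, [ [ "*(Added sketch for comparison -- not from the original notebook.)* The exploratory `pred()` above returns the prior variance `sigma22` rather than a posterior variance, and its `cov(x[i], y[j])` block is never read. For reference, a minimal sketch of the standard zero-mean GP posterior under the squared-exponential kernel defined earlier: mean $K_{*}K^{-1}y$ and covariance $K_{**} - K_{*}K^{-1}K_{*}^T$.", "_____no_output_____" ] ], [ [ "# Added sketch: standard GP posterior (zero-mean prior, squared-exponential kernel).\n# A small jitter term keeps the matrix inversion numerically stable.\ndef gp_predict(xs, xt, yt, jitter=1e-8):\n    Kt = np.exp(-0.5 * (xt[:, None] - xt[None, :]) ** 2)   # train/train covariance\n    Ks = np.exp(-0.5 * (xs[:, None] - xt[None, :]) ** 2)   # test/train covariance\n    Kss = np.exp(-0.5 * (xs[:, None] - xs[None, :]) ** 2)  # test/test covariance\n    Ki = np.linalg.inv(Kt + jitter * np.eye(len(xt)))\n    mu = Ks.dot(Ki).dot(yt)                                # posterior mean\n    var = np.diag(Kss - Ks.dot(Ki).dot(Ks.T))              # posterior variance\n    return mu, np.sqrt(np.clip(var, 0, None))\n\nxs = np.linspace(-5, 10, 200)\nmu, sd = gp_predict(xs, x, y)\nplt.scatter(x, y)\nplt.plot(xs, mu)\nplt.fill_between(xs, mu - 2 * sd, mu + 2 * sd, alpha=0.2)", "_____no_output_____" ] ] ]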
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb6231dfe692b16da13b1d6f33ec0a64743ac3da
146,565
ipynb
Jupyter Notebook
lessons/Recommendations/1_Intro_to_Recommendations/.ipynb_checkpoints/5_Content Based Recommendations - Solution-checkpoint.ipynb
s-arora-1987/DSND_Term2
3268d52d2271de59695f17b5bbfd618781295748
[ "MIT" ]
null
null
null
lessons/Recommendations/1_Intro_to_Recommendations/.ipynb_checkpoints/5_Content Based Recommendations - Solution-checkpoint.ipynb
s-arora-1987/DSND_Term2
3268d52d2271de59695f17b5bbfd618781295748
[ "MIT" ]
null
null
null
lessons/Recommendations/1_Intro_to_Recommendations/.ipynb_checkpoints/5_Content Based Recommendations - Solution-checkpoint.ipynb
s-arora-1987/DSND_Term2
3268d52d2271de59695f17b5bbfd618781295748
[ "MIT" ]
null
null
null
31.889687
1,685
0.576447
[ [ [ "### Content Based Recommendations\n\nIn the previous notebook, you were introduced to a way to make recommendations using collaborative filtering. However, using this technique there are a large number of users who were left without any recommendations at all. Other users were left with fewer than the ten recommendations our function was set up to retrieve.\n\nIn order to help these users out, let's try another technique: **content based** recommendations. Let's start off where we were in the previous notebook.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom IPython.display import HTML\nimport progressbar\nimport tests as t\nimport pickle\n\n\n%matplotlib inline\n\n# Read in the datasets\nmovies = pd.read_csv('movies_clean.csv')\nreviews = pd.read_csv('reviews_clean.csv')\n\ndel movies['Unnamed: 0']\ndel reviews['Unnamed: 0']\n\n\nall_recs = pickle.load(open(\"all_recs.p\", \"rb\"))", "_____no_output_____" ] ], [ [ "### Datasets\n\nFrom the above, you now have access to three important items that you will be using throughout the rest of this notebook. \n\n`a.` **movies** - a dataframe of all of the movies in the dataset along with other content related information about the movies (genre and date)\n\n\n`b.` **reviews** - this was the main dataframe used before for collaborative filtering, as it contains all of the interactions between users and movies.\n\n\n`c.` **all_recs** - a dictionary where each key is a user, and the value is a list of movie recommendations based on collaborative filtering\n\nWe don't need to worry about the individuals in **all_recs** who did receive 10 recommendations using collaborative filtering. However, there were a number of individuals in our dataset who did not receive any recommendations.\n\n-----\n\n`1.` To begin, find all of the users in our dataset who didn't get all 10 recommendations we would have liked them to have using collaborative filtering. ", "_____no_output_____" ] ], [ [ "users_with_all_recs = []\nfor user, movie_recs in all_recs.items():\n    if len(movie_recs) > 9:\n        users_with_all_recs.append(user)\n\nprint(\"There are {} users with all recommendations from collaborative filtering.\".format(len(users_with_all_recs)))\n\nusers = np.unique(reviews['user_id'])\nusers_who_need_recs = np.setdiff1d(users, users_with_all_recs)\n\nprint(\"There are {} users who still need recommendations.\".format(len(users_who_need_recs)))\nprint(\"This means that only {}% of users received all 10 of their recommendations using collaborative filtering\".format(round(len(users_with_all_recs)/len(np.unique(reviews['user_id'])), 4)*100)) ", "There are 22187 users with all recommendations from collaborative filtering.\nThere are 46123 users who still need recommendations.\nThis means that only 32.48% of users received all 10 of their recommendations using collaborative filtering\n" ], [ "# Some test here might be nice\nassert len(users_with_all_recs) == 22187\nprint(\"That's right! There were still another 46123 users who needed recommendations when we only used collaborative filtering!\")", "That's right! There were still another 46123 users who needed recommendations when we only used collaborative filtering!\n" ] ], [ [ "### Content Based Recommendations\n\nYou will be doing a bit of a mix of content and collaborative filtering to make recommendations for the users this time. 
This will allow you to obtain recommendations in many cases where we didn't make recommendations earlier. \n\n`2.` Before finding recommendations, rank the user's ratings from highest to lowest. You will move through the movies in this order looking for other similar movies.", "_____no_output_____" ] ], [ [ "# create a dataframe similar to reviews, but ranked by rating for each user\nranked_reviews = reviews.sort_values(by=['user_id', 'rating'], ascending=False)", "_____no_output_____" ] ], [ [ "### Similarities\n\nIn the collaborative filtering sections, you became quite familiar with different methods of determining the similarity (or distance) of two users. We can compute similarities based on content in much the same way. \n\nIn many cases, it turns out that one of the fastest ways we can find out how similar items are to one another (when our matrix isn't totally sparse like it was in the earlier section) is by simply using matrix multiplication. If you are not familiar with this, an explanation is available [here by 3blue1brown](https://www.youtube.com/watch?v=LyGKycYT2v0) and another quick explanation is provided [on the post here](https://math.stackexchange.com/questions/689022/how-does-the-dot-product-determine-similarity).\n\nFor us to pull out a matrix that describes the movies in our dataframe in terms of content, we might just use the indicator variables related to **year** and **genre** for our movies. \n\nThen we can obtain a matrix of how similar movies are to one another by taking the dot product of this matrix with itself. Notice below that the dot product where our 1 values overlap gives a value of 2, indicating higher similarity. In the second dot product, the 1 values don't match up. This leads to a dot product of 0, indicating lower similarity.\n\n<img src=\"images/dotprod1.png\" alt=\"Dot Product\" height=\"500\" width=\"500\">\n\nWe can perform the dot product on a matrix of movies with content characteristics to provide a movie by movie matrix where each cell is an indication of how similar two movies are to one another. In the below image, you can see that movies 1 and 8 are most similar, movies 2 and 8 are most similar, and movies 3 and 9 are most similar for this subset of the data. The diagonal elements of the matrix will contain the similarity of a movie with itself, which will be the largest possible similarity (and which will also be the number of 1's in the movie's row within the original movie content matrix).\n\n<img src=\"images/moviemat.png\" alt=\"Dot Product\" height=\"500\" width=\"500\">\n\n\n`3.` Create a numpy array that is a matrix of indicator variables related to year (by century) and movie genres by movie. Perform the dot product of this matrix with itself (transposed) to obtain a similarity matrix of each movie with every other movie. The final matrix should be 31245 x 31245.", "_____no_output_____" ] ], [ [ "# Subset so movie_content is only using the dummy variables for each genre and the 3 century based year dummy columns\nmovie_content = np.array(movies.iloc[:,4:])\n\n# Take the dot product to obtain a movie x movie matrix of similarities\ndot_prod_movies = movie_content.dot(np.transpose(movie_content))", "_____no_output_____" ], [ "# create checks for the dot product matrix\nassert dot_prod_movies.shape[0] == 31245\nassert dot_prod_movies.shape[1] == 31245\nassert dot_prod_movies[0, 0] == np.max(dot_prod_movies[0])\nprint(\"Looks like you passed all of the tests. Though they weren't very robust - if you want to write some of your own, I won't complain!\")", "_____no_output_____" ] ]
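, [ [ "*(Added aside -- a hedged sketch, not part of the original solution.)* One caveat with the raw dot product: movies with many active indicators (many genres) dominate. Row-normalizing the same content matrix turns the dot product into cosine similarity, which is sometimes a fairer notion of \"most similar\". Nothing below is required for the rest of the notebook.", "_____no_output_____" ] ], [ [ "# Added sketch: cosine similarity on the same content matrix.\n# The epsilon guard protects rows that contain no 1's at all.\nnorms = np.linalg.norm(movie_content, axis=1, keepdims=True)\nnormed_content = movie_content / np.maximum(norms, 1e-12)\ncosine_sim_movies = normed_content.dot(normed_content.T)   # entries in [0, 1]\nprint(cosine_sim_movies.shape)", "_____no_output_____" ] ]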
, [ [ "### For Each User...\n\n\nYou now have a dataframe where each user's ratings are ordered. You also have a second matrix where movies lie on each axis, and the matrix entries are larger where the two movies are more similar and smaller where the two movies are dissimilar. This matrix is a measure of content similarity. Therefore, it is time to get to the fun part.\n\nFor each user, we will perform the following:\n\n    i. For each movie, find the most similar movies that the user hasn't seen.\n\n    ii. Continue through the available, rated movies until you have 10 recommendations or until there are no additional movies.\n\nAs a final note, you may need to adjust the criteria for 'most similar' to obtain 10 recommendations. As a first pass, I used only movies with the highest possible similarity to one another as similar enough to add as a recommendation.\n\n`4.` In the below cell, complete each of the functions needed for making content based recommendations.", "_____no_output_____" ] ], [ [ "def find_similar_movies(movie_id):\n    '''\n    INPUT\n    movie_id - a movie_id \n    OUTPUT\n    similar_movies - an array of the most similar movies by title\n    '''\n    # return early if the movie_id is missing from the movies dataframe\n    if len(np.where(movies['movie_id'] == movie_id)[0]) == 0:\n        return np.array([])\n    \n    # find the row of the movie id\n    movie_idx = np.where(movies['movie_id'] == movie_id)[0][0]\n    \n    # find the most similar movie indices - to start I said they need to be the same for all content\n    similar_idxs = np.where(dot_prod_movies[movie_idx] == np.max(dot_prod_movies[movie_idx]))[0]\n    \n    # pull the movie titles based on the indices\n    similar_movies = np.array(movies.iloc[similar_idxs, ]['movie'])\n    \n    return similar_movies\n    \n    \ndef get_movie_names(movie_ids):\n    '''\n    INPUT\n    movie_ids - a list of movie_ids\n    OUTPUT\n    movies - a list of movie names associated with the movie_ids\n    \n    '''\n    movie_lst = list(movies[movies['movie_id'].isin(movie_ids)]['movie'])\n    \n    return movie_lst\n\ndef make_recs():\n    '''\n    INPUT\n    None\n    OUTPUT\n    recs - a dictionary with keys of the user and values of the recommendations\n    '''\n    # Create dictionary to return with users and ratings\n    recs = defaultdict(set)\n    # How many users for progress bar\n    n_users = len(users)\n\n    \n    # Create the progressbar\n    cnter = 0\n    bar = progressbar.ProgressBar(maxval=n_users+1, widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])\n    bar.start()\n    \n    # For each user\n    for user in users:\n        \n        # Update the progress bar\n        cnter+=1 \n        bar.update(cnter)\n\n        # Pull only the reviews the user has seen\n        reviews_temp = ranked_reviews[ranked_reviews['user_id'] == user]\n        movies_temp = np.array(reviews_temp['movie_id'])\n        movie_names = np.array(get_movie_names(movies_temp))\n\n        # Look at each of the movies (highest ranked first), \n        # pull the movies the user hasn't seen that are most similar\n        # These will be the recommendations - continue until 10 recs \n        # or you have depleted the movie list for the user\n        for movie in movies_temp:\n            rec_movies = find_similar_movies(movie)\n            temp_recs = np.setdiff1d(rec_movies, movie_names)\n            recs[user].update(temp_recs)\n\n            # Stop once the user has at least 10 recommendations\n            if len(recs[user]) > 9:\n                break\n\n    bar.finish()\n    \n    return recs", "_____no_output_____" ], [ "recs = make_recs()", "\r                                                                               \r\r[                                                                       ]   N/A%" ] ]
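, [ [ "*(Added performance aside -- a hedged sketch, not part of the original notebook.)* `make_recs` calls `np.where` over the full movies dataframe once per rated movie. A precomputed id-to-row dictionary avoids those repeated scans; this is only an optimization sketch and isn't used by the cells that follow.", "_____no_output_____" ] ], [ [ "# Added sketch: constant-time movie_id -> row-index lookups.\nmovie_idx_by_id = dict(zip(movies['movie_id'], range(len(movies))))\n\ndef find_similar_movies_fast(movie_id):\n    '''Same output as find_similar_movies, using the lookup dict above.'''\n    idx = movie_idx_by_id.get(movie_id)\n    if idx is None:\n        return np.array([])\n    row = dot_prod_movies[idx]\n    similar_idxs = np.where(row == np.max(row))[0]\n    return np.array(movies.iloc[similar_idxs]['movie'])", "_____no_output_____" ] ]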
, [ [ "### How Did We Do?\n\nNow that you have made the recommendations, how did we do in providing everyone with a set of recommendations?\n\n`5.` Use the cells below to see how many individuals you were able to make recommendations for, as well as explore characteristics about individuals for whom you were not able to make recommendations. ", "_____no_output_____" ] ], [ [ "# Explore recommendations\nusers_without_all_recs = []\nusers_with_all_recs = []\nno_recs = []\nfor user, movie_recs in recs.items():\n    if len(movie_recs) < 10:\n        users_without_all_recs.append(user)\n    if len(movie_recs) > 9:\n        users_with_all_recs.append(user)\n    if len(movie_recs) == 0:\n        no_recs.append(user)", "_____no_output_____" ], [ "# Some characteristics of my content based recommendations\nprint(\"There were {} users without all 10 recommendations we would have liked to have.\".format(len(users_without_all_recs)))\nprint(\"There were {} users with all 10 recommendations we would like them to have.\".format(len(users_with_all_recs)))\nprint(\"There were {} users with no recommendations at all!\".format(len(no_recs)))", "There were 2179 users without all 10 recommendations we would have liked to have.\nThere were 51789 users with all 10 recommendations we would like them to have.\nThere were 174 users with no recommendations at all!\n" ], [ "# Closer look at individual user characteristics\nuser_items = reviews[['user_id', 'movie_id', 'rating']]\nuser_by_movie = user_items.groupby(['user_id', 'movie_id'])['rating'].max().unstack()\n\ndef movies_watched(user_id):\n    '''\n    INPUT:\n    user_id - the user_id of an individual as int\n    OUTPUT:\n    movies - an array of movies the user has watched\n    '''\n    movies = user_by_movie.loc[user_id][user_by_movie.loc[user_id].isnull() == False].index.values\n\n    return movies\n\n\nmovies_watched(189)", "_____no_output_____" ], [ "cnter = 0\nprint(\"Some of the movie lists for users without any recommendations include:\")\nfor user_id in no_recs:\n    print(user_id)\n    print(get_movie_names(movies_watched(user_id)))\n    cnter+=1\n    if cnter > 10:\n        break", "Some of the movie lists for users without any recommendations include:\n189\n['El laberinto del fauno (2006)']\n797\n['The 414s (2015)']\n1603\n['Beauty and the Beast (2017)']\n2056\n['Brimstone (2016)']\n2438\n['Baby Driver (2017)']\n3322\n['Rosenberg (2013)']\n3925\n['El laberinto del fauno (2006)']\n4325\n['Beauty and the Beast (2017)']\n4773\n['The Frozen Ground (2013)']\n4869\n['Beauty and the Beast (2017)']\n4878\n['American Made (2017)']\n" ] ]
Be creative and share your insights with your classmates!", "_____no_output_____" ] ], [ [ "# Cells for exploring", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb623e906912e51e1410d2ba6fe912cb52728cd8
224,702
ipynb
Jupyter Notebook
notebooks/evaluation_descriptors_hpatches.ipynb
vladjiss/hfnet
95cde13738efa485e81ff9884a9607a869ba8685
[ "MIT" ]
555
2019-04-09T17:35:05.000Z
2022-03-30T13:46:24.000Z
notebooks/evaluation_descriptors_hpatches.ipynb
vladjiss/hfnet
95cde13738efa485e81ff9884a9607a869ba8685
[ "MIT" ]
60
2019-04-15T12:47:50.000Z
2021-10-21T17:55:54.000Z
notebooks/evaluation_descriptors_hpatches.ipynb
vladjiss/hfnet
95cde13738efa485e81ff9884a9607a869ba8685
[ "MIT" ]
180
2019-04-10T07:39:58.000Z
2022-03-24T06:40:11.000Z
482.193133
105,908
0.934558
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom hfnet.datasets.hpatches import Hpatches\nfrom hfnet.evaluation.loaders import sift_loader, export_loader, fast_loader, harris_loader\nfrom hfnet.evaluation.local_descriptors import evaluate\nfrom hfnet.utils import tools\n\n%load_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "data_config = {'make_pairs': True, 'shuffle': True}\ndataset = Hpatches(**data_config)", "_____no_output_____" ], [ "all_configs = {\n 'sift': {\n 'predictor': sift_loader,\n 'root': True,\n },\n 'superpoint': {\n 'experiment': 'super_point_pytorch/hpatches',\n 'predictor': export_loader,\n 'do_nms': True,\n 'nms_thresh': 4,\n 'remove_borders': 4,\n },\n 'superpoint_harris-kpts': {\n 'experiment': 'super_point_pytorch/hpatches',\n 'predictor': export_loader,\n 'keypoint_predictor': harris_loader,\n 'keypoint_config': {\n 'do_nms': True,\n 'nms_thresh': 4,\n },\n },\n 'netvlad_conv3-3': {\n 'experiment': 'netvlad/hpatches',\n 'predictor': export_loader,\n 'keypoint_predictor': export_loader,\n 'keypoint_config': {\n 'experiment': 'super_point_pytorch/hpatches',\n 'do_nms': True,\n 'nms_thresh': 4,\n 'remove_borders': 4,\n },\n 'binarize': False,\n },\n 'lfnet': {\n 'experiment': 'lfnet/hpatches_kpts-500',\n 'predictor': export_loader,\n },\n}\neval_config = {\n 'num_features': 300,\n 'do_ratio_test': True,\n 'correct_match_thresh': 3,\n 'correct_H_thresh': 3,\n}", "_____no_output_____" ], [ "methods = ['sift', 'lfnet', 'superpoint', 'superpoint_harris-kpts', 'netvlad_conv3-3']\nconfigs = {m: all_configs[m] for m in methods}\npose_recalls, nn_pr = {}, {}\nfor method, config in configs.items():\n config = tools.dict_update(config, eval_config)\n data_iter = dataset.get_test_set()\n metrics, nn_precision, nn_recall, distances, pose_recall = evaluate(data_iter, config, is_2d=True)\n \n print('> {}'.format(method))\n for k, v in metrics.items():\n print('{:<25} {:.3f}'.format(k, v))\n print(config)\n \n pose_recalls[method] = pose_recall\n nn_pr[method] = (nn_precision, nn_recall, distances)", "_____no_output_____" ], [ "# NMS=4, N=300", "580it [03:36, 3.31it/s]\n0it [00:00, ?it/s]" ], [ "# NMS=8, N=500", "580it [03:48, 3.03it/s]\n0it [00:00, ?it/s]" ], [ "error_names = list(list(pose_recalls.values())[0].keys())\nexpers = list(pose_recalls.keys())\nlim = {'homography': 7}\nthresh = {'homography': [1, 3, 5]}\n\nf, axes = plt.subplots(1, len(error_names), figsize=(8, 4), dpi=150)\nif len(error_names) == 1:\n axes = [axes]\nfor error_name, ax in zip(error_names, axes):\n for exper in expers:\n steps, recall = pose_recalls[exper][error_name]\n ax.set_xlim([0, lim[error_name]])\n ax.plot(steps, recall*100, label=exper);\n \n s = f'{error_name:^15} {exper:^25}'\n s += ''.join([f' {t:^5}: {recall[np.where(steps>t)[0].min()]:.2f} ' for t in thresh[error_name]])\n print(s)\n \n ax.grid(color=[0.85]*3);\n ax.set_xlabel(error_name+' error');\n ax.set_ylabel('Correctly localized queries (%)');\n ax.legend(loc=4); \nplt.tight_layout()\nplt.gcf().subplots_adjust(left=0);", "_____no_output_____" ], [ "# NMS=4, N=300", " homography sift 1 : 0.40 3 : 0.68 5 : 0.75 \n homography lfnet 1 : 0.34 3 : 0.63 5 : 0.72 \n homography superpoint 1 : 0.48 3 : 0.81 5 : 0.87 \n homography superpoint_harris-kpts 1 : 0.33 3 : 0.67 5 : 0.77 \n homography netvlad_conv3-3 1 : 0.51 3 : 0.79 5 : 0.85 \n" ], [ "# NMS=8, N=500", " homography sift 1 : 0.45 3 : 0.72 5 : 0.77 \n homography lfnet 1 : 0.36 3 : 0.65 5 : 0.72 \n homography superpoint 1 : 0.49 3 : 0.81 5 
: 0.88 \n homography superpoint_harris-kpts 1 : 0.35 3 : 0.73 5 : 0.83 \n homography netvlad_conv3-3 1 : 0.51 3 : 0.81 5 : 0.88 \n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb6253162463419f527724c424c857d67a763e5c
456,943
ipynb
Jupyter Notebook
notebooks/1-Using-ImageJ/Ops/transform/unshearView.ipynb
LorenzoScanu/MaterialeTirocinio
c78764438d774295d00fc8a4273e4c4f25c8ad46
[ "CC0-1.0" ]
null
null
null
notebooks/1-Using-ImageJ/Ops/transform/unshearView.ipynb
LorenzoScanu/MaterialeTirocinio
c78764438d774295d00fc8a4273e4c4f25c8ad46
[ "CC0-1.0" ]
null
null
null
notebooks/1-Using-ImageJ/Ops/transform/unshearView.ipynb
LorenzoScanu/MaterialeTirocinio
c78764438d774295d00fc8a4273e4c4f25c8ad46
[ "CC0-1.0" ]
null
null
null
2,175.919048
229,337
0.966624
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb6256cdad750962217e671534827c1b97c75aad
179,379
ipynb
Jupyter Notebook
docs/examples/general/data_loading/coco_reader.ipynb
npanpaliya/DALI
dc13d3b9d1a2fe67931b05a2861e81cd50da4d2b
[ "ECL-2.0", "Apache-2.0" ]
11
2021-03-16T05:09:16.000Z
2022-03-29T12:48:44.000Z
docs/examples/general/data_loading/coco_reader.ipynb
npanpaliya/DALI
dc13d3b9d1a2fe67931b05a2861e81cd50da4d2b
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
docs/examples/general/data_loading/coco_reader.ipynb
npanpaliya/DALI
dc13d3b9d1a2fe67931b05a2861e81cd50da4d2b
[ "ECL-2.0", "Apache-2.0" ]
3
2021-05-08T16:51:55.000Z
2021-07-22T09:02:44.000Z
915.19898
173,496
0.951527
[ [ [ "# COCO Reader\n\nReader operator that reads a COCO dataset (or subset of COCO), which consists of an annotation file and the images directory.\n\n`DALI_EXTRA_PATH` environment variable should point to the place where data from [DALI extra repository](https://github.com/NVIDIA/DALI_extra) is downloaded. Please make sure that the proper release tag is checked out.", "_____no_output_____" ] ], [ [ "from __future__ import print_function\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.ops as ops\nimport nvidia.dali.types as types\nimport numpy as np\nfrom time import time\nimport os.path\n\ntest_data_root = os.environ['DALI_EXTRA_PATH']\nfile_root = os.path.join(test_data_root, 'db', 'coco', 'images')\nannotations_file = os.path.join(test_data_root, 'db', 'coco', 'instances.json')\n\nnum_gpus = 1\nbatch_size = 16", "_____no_output_____" ], [ "class COCOPipeline(Pipeline): \n def __init__(self, batch_size, num_threads, device_id): \n super(COCOPipeline, self).__init__(batch_size, num_threads, device_id, seed = 15) \n self.input = ops.COCOReader(file_root = file_root, annotations_file = annotations_file,\n shard_id = device_id, num_shards = num_gpus, ratio=True)\n self.decode = ops.ImageDecoder(device = \"mixed\", output_type = types.RGB) \n \n def define_graph(self): \n inputs, bboxes, labels = self.input() \n images = self.decode(inputs) \n return (images, bboxes, labels) ", "_____no_output_____" ], [ "start = time()\npipes = [COCOPipeline(batch_size=batch_size, num_threads=2, device_id = device_id) for device_id in range(num_gpus)]\nfor pipe in pipes:\n pipe.build()\ntotal_time = time() - start\nprint(\"Computation graph built and dataset loaded in %f seconds.\" % total_time)", "Computation graph built and dataset loaded in 0.307431 seconds.\n" ], [ "pipe_out = [pipe.run() for pipe in pipes] \n\nimages_cpu = pipe_out[0][0].as_cpu()\nbboxes_cpu = pipe_out[0][1]\nlabels_cpu = pipe_out[0][2]", "_____no_output_____" ] ], [ [ "Bounding boxes returned by the operator are lists of floats containing composed of **\\[x, y, width, height]** (`ltrb` is set to `False` by default).", "_____no_output_____" ] ], [ [ "bboxes = bboxes_cpu.at(4)\nbboxes", "_____no_output_____" ] ], [ [ "Let's see the ground truth bounding boxes drawn on the image.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport random\n\nimg_index = 4\n\nimg = images_cpu.at(img_index)\n\nH = img.shape[0]\nW = img.shape[1]\n\nfig,ax = plt.subplots(1)\n\nax.imshow(img)\nbboxes = bboxes_cpu.at(img_index)\nlabels = labels_cpu.at(img_index)\ncategories_set = set()\nfor label in labels:\n categories_set.add(label[0])\n\ncategory_id_to_color = dict([ (cat_id , [random.uniform(0, 1) ,random.uniform(0, 1), random.uniform(0, 1)]) for cat_id in categories_set])\n\nfor bbox, label in zip(bboxes, labels):\n rect = patches.Rectangle((bbox[0]*W,bbox[1]*H),bbox[2]*W,bbox[3]*H,linewidth=1,edgecolor=category_id_to_color[label[0]],facecolor='none')\n ax.add_patch(rect)\n\nplt.show()\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb625d51d79a7fe2129647765d445f676540d2ea
205,298
ipynb
Jupyter Notebook
lab3_Classification/Classification.ipynb
johagg17/ArtificialFlyingObjects
86ac81f1adaae03ea67a75bd4da1fb47068bcbd5
[ "MIT" ]
null
null
null
lab3_Classification/Classification.ipynb
johagg17/ArtificialFlyingObjects
86ac81f1adaae03ea67a75bd4da1fb47068bcbd5
[ "MIT" ]
null
null
null
lab3_Classification/Classification.ipynb
johagg17/ArtificialFlyingObjects
86ac81f1adaae03ea67a75bd4da1fb47068bcbd5
[ "MIT" ]
null
null
null
56.633931
22,412
0.729924
[ [ [ "<center><h1 style=\"font-size:40px;\">Exercise III:<br> Image Classification using CNNs</h1></center>\n\n---", "_____no_output_____" ], [ "Welcome to the *fourth* lab for Deep Learning!\n\nIn this lab an CNN network to classify RGB images. Image classification refers to classify classes from images. This labs the *dataset* consist of multiple images where each image have a target label for classification.\n\nAll **tasks** include **TODO's** thare are expected to be done before the deadline. The highlighted **Question's** should be answered in the report. Keep the answers separated so it is easy to read for the grading. Some sections include asserts or an expected result to give a and expected results are given. Some sections does not contain any **TODO's** but is good to understand them. \n\nFor the **report** we have prepared an *Report.ipynb* notebook. The report should act as a summary of your findings and motivate your choice of approach. A better motivation show your understanding of the lab. Dont forget to include all **parts** in the report!\n\n\nGood luck!\n\n---", "_____no_output_____" ], [ "# Import packages", "_____no_output_____" ] ], [ [ "#!pip install pycocotools-windows", "_____no_output_____" ], [ "%load_ext autoreload\n%autoreload 2\n# Hacky solution to acces the global utils package\nimport sys,os\nsys.path.append(os.path.dirname(os.path.realpath('')))", "_____no_output_____" ], [ "# Torch packages\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# local modules\nfrom torch.utils.data import DataLoader\nfrom utils.progressbar import LitProgressBar\nfrom utils.dataset import ClassificationDataset\nfrom utils.model import Model\nfrom config import LabConfig\nfrom collections import OrderedDict\nfrom utils import plot\nimport pprint\nimport torchmetrics\nimport pytorch_lightning as pl\nimport torchvision\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport numpy as np", "/opt/anaconda3/lib/python3.8/site-packages/setuptools/distutils_patch.py:25: UserWarning: Distutils was imported before Setuptools. This usage is discouraged and may exhibit undesirable behaviors or errors. Please use Setuptools' objects directly or at least import Setuptools first.\n warnings.warn(\n" ] ], [ [ "# Load config", "_____no_output_____" ] ], [ [ "cfg = LabConfig()\ncfg.todict()", "_____no_output_____" ] ], [ [ "# Example Task\nFirst we present an example task to get an idea of the implementation and how to structure the code.", "_____no_output_____" ], [ "## Example data\nFirst load the dataloaders for three datasets; train, validation and test. Feel free to test different augmentations, more can be found at the [pytorch doc](https://pytorch.org/vision/stable/transforms.html)\n\nNote that ToTensor and Rezise are required to reshape and transform the images correct. 
We do not want to apply augmentation to the test_transform that are applied on the validation and test dataloader.", "_____no_output_____" ], [ "### Augmentation", "_____no_output_____" ] ], [ [ "train_transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Resize((cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)), \n])\ntest_transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Resize((cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)), #, \n])", "_____no_output_____" ], [ "train_transform", "_____no_output_____" ] ], [ [ "### Create dataloaders", "_____no_output_____" ] ], [ [ "train_dataloader = DataLoader(ClassificationDataset(cfg.training_img_dir, cfg.CLASSES, img_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),transform=train_transform),\n batch_size=cfg.BATCH_SIZE,\n shuffle=True,\n num_workers=cfg.NUM_WORKERS)\nvalid_dataloader = DataLoader(ClassificationDataset(cfg.validation_img_dir, cfg.CLASSES, img_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),transform=test_transform),\n batch_size=cfg.BATCH_SIZE,\n shuffle=False,\n num_workers=cfg.NUM_WORKERS)\n\ntest_dataloader = DataLoader(ClassificationDataset(cfg.testing_img_dir, cfg.CLASSES, img_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),transform=test_transform),\n batch_size=cfg.BATCH_SIZE,\n shuffle=False,\n num_workers=cfg.NUM_WORKERS)\n\nprint(\"Data batch generators are created!\")", "Data batch generators are created!\n" ] ], [ [ "## Visualise data\nTo get an idea of the dataset we visualise the data. ", "_____no_output_____" ] ], [ [ "t_x, t_y = next(iter(train_dataloader))\nprint(f\"x {tuple(t_x.shape)} {t_x.dtype} {t_x.min()} {t_x.max()}\")\nprint(f\"y {tuple(t_y.shape)} {t_y.dtype} {t_y.min()} {t_y.max()}\")\nplot.Classification.data(t_x, t_y, nimages=10,nrows=2)", "x (32, 3, 32, 32) torch.float32 0.0 1.0\ny (32,) torch.int64 0 2\n" ], [ "if False: # Set to true to visualise statistics of the data\n plot.show_statistics(cfg.training_img_dir, fineGrained=cfg.fineGrained, title=\" Training Data Statistics \")\n plot.show_statistics(cfg.validation_img_dir, fineGrained=cfg.fineGrained, title=\" Validation Data Statistics \")\n plot.show_statistics(cfg.testing_img_dir, fineGrained=cfg.fineGrained, title=\" Testing Data Statistics \")", "_____no_output_____" ] ], [ [ "## Create model\nHere is an simple architecture to train our network.", "_____no_output_____" ] ], [ [ "class SimpleModel(nn.Module):\n def __init__(self,num_channels:int=4, num_classes:int=3, input_shape=(10,10),**kwargs):\n super().__init__()\n self.conv_layer1 = self._conv_layer_set(num_channels, 32)\n self.conv_layer2 = self._conv_layer_set(32, 64)\n \n self.fc1 = nn.Linear(64*input_shape[1]//4*input_shape[1]//4, 64) # Calculated with the size. 
why //4\n self.fc2 = nn.Linear(64, num_classes)\n self.drop = nn.Dropout(0.5)\n \n def _conv_layer_set(self, in_c, out_c):\n conv_layer = nn.Sequential(OrderedDict([\n ('conv',nn.Conv2d(in_c, out_c, kernel_size=3, padding=1)),\n ('leakyrelu',nn.LeakyReLU()),\n ('maxpool',nn.MaxPool2d(2)),\n ]))\n return conv_layer\n \n\n def forward(self, x):\n # Set 1\n \n print(x.shape)\n out = self.conv_layer1(x)\n out = self.conv_layer2(out)\n \n out = out.view(out.size(0), -1) # Flatten (batchsize, image size)\n\n out = self.fc1(out)\n out = self.drop(out)\n out = self.fc2(out)\n \n return out", "_____no_output_____" ] ], [ [ "## Config", "_____no_output_____" ] ], [ [ "# Train model\nconfig = {\n 'optimizer':{\n \"type\":torch.optim.Adam,\n \"args\":{\n \"lr\":0.005,\n }\n },\n 'criterion':torch.nn.CrossEntropyLoss(), # error function\n 'max_epochs':5,\n \"train_metrics\":torchmetrics.MetricCollection([\n torchmetrics.Accuracy(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n ],postfix=\"_Train\"),\n \"validation_metrics\":torchmetrics.MetricCollection([\n torchmetrics.Accuracy(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n ],postfix=\"_Validation\")\n}", "_____no_output_____" ] ], [ [ "## Train", "_____no_output_____" ] ], [ [ "# Load model\nmodelObj = Model(SimpleModel(num_classes=cfg.NUM_CLASSES, num_channels=cfg.IMAGE_CHANNEL, input_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)),**config)\n\n# Setup trainer\ntrainer = pl.Trainer(\n max_epochs=config['max_epochs'], \n gpus=cfg.GPU,\n logger=pl.loggers.TensorBoardLogger(save_dir=cfg.TENSORBORD_DIR),\n callbacks=[LitProgressBar()],\n progress_bar_refresh_rate=1,\n weights_summary=None, # Can be None, top or full\n num_sanity_val_steps=10, \n )\n# Train with the training and validation data- \ntrainer.fit(\n modelObj, \n train_dataloader=train_dataloader,\n val_dataloaders=valid_dataloader\n);", "/home/stud/j/johagg17/.local/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/callback_connector.py:90: LightningDeprecationWarning: Setting `Trainer(progress_bar_refresh_rate=1)` is deprecated in v1.5 and will be removed in v1.7. Please pass `pytorch_lightning.callbacks.progress.TQDMProgressBar` with `refresh_rate` directly to the Trainer's `callbacks` argument instead. Or, to disable the progress bar pass `enable_progress_bar = False` to the Trainer.\n rank_zero_deprecation(\n/home/stud/j/johagg17/.local/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/callback_connector.py:167: LightningDeprecationWarning: Setting `Trainer(weights_summary=None)` is deprecated in v1.5 and will be removed in v1.7. Please set `Trainer(enable_model_summary=False)` instead.\n rank_zero_deprecation(\nGPU available: True, used: False\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\n/home/stud/j/johagg17/.local/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:1566: UserWarning: GPU available but not used. Set the gpus flag in your trainer `Trainer(gpus=1)` or script `--gpus=1`.\n rank_zero_warn(\n/home/stud/j/johagg17/.local/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:730: LightningDeprecationWarning: `trainer.fit(train_dataloader)` is deprecated in v1.4 and will be removed in v1.6. Use `trainer.fit(train_dataloaders)` instead. HINT: added 's'\n rank_zero_deprecation(\n" ] ], [ [ "## Test the network on the test dataset\nTo test the performance for a qualitative estimation we can plot the input, target and the models prediction. 
This is a good approach to see the performance and understand if the model is close to a correct decision. However, for big data, we probably want to focus on a quantitative estimation. Therefore we can analyse **Tensorboard** logs to get a better understanding of the model.", "_____no_output_____" ] ], [ [ "# Create iterable from the test dataset\niter_dataloader = iter(test_dataloader)", "_____no_output_____" ], [ "# Take one batch from the test dataset and predict!\nX, Y = next(iter_dataloader)\npreds = torch.argmax(modelObj.predict_step(X,0,0),dim=1)", "_____no_output_____" ], [ "n_test = 10\ndf_result = pd.DataFrame({\n 'Ground Truth': Y[:n_test],\n 'Predicted label': preds[:n_test]})\ndisplay(df_result.T)", "_____no_output_____" ], [ "plot.Classification.results(X, preds)", "_____no_output_____" ] ], [ [ "# Exercises", "_____no_output_____" ], [ "## Metrics\n**TODO:** Does a high accuracy imply a good model? Motivate your answer.\n\n**TODO:** Find an alternative metric which can show similar or better precision than accuracy.", "_____no_output_____" ], [ "### Accuracy", "_____no_output_____" ], [ "#### Training", "_____no_output_____" ] ], [ [ "iter_train_dataloader = iter(train_dataloader)\nX_t, Y_t = next(iter_train_dataloader)\n\ntrain_preds = torch.argmax(modelObj.predict_step(X_t,0,0),dim=1)\nprint(\"Train accuracy {}%\".format(np.mean(Y_t.numpy() == train_preds.numpy())*100))", "Train accuracy 100.0%\n" ] ], [ [ "#### Test", "_____no_output_____" ] ], [ [ "iter_dataloader = iter(test_dataloader)\nX, Y = next(iter_dataloader)\npreds = torch.argmax(modelObj.predict_step(X,0,0),dim=1)\nprint(\"Test accuracy {}%\".format(np.mean(Y.numpy() == preds.numpy())*100))", "Test accuracy 59.375%\n" ] ], [ [ "### Precision", "_____no_output_____" ] ], [ [ "from torchmetrics import Precision\nprecision = Precision(average='macro', num_classes=cfg.todict()['NUM_CLASSES'])", "_____no_output_____" ] ], [ [ "#### Training", "_____no_output_____" ] ], [ [ "precision(train_preds, Y_t)", "_____no_output_____" ] ], [ [ "#### Test", "_____no_output_____" ] ], [ [ "precision(preds, Y)", "_____no_output_____" ] ], [ [ "### Comparison between metrics, using tensorboard", "_____no_output_____" ], [ "#### Config", "_____no_output_____" ] ], [ [ "# Train model\nconfig = {\n 'optimizer':{\n \"type\":torch.optim.Adam,\n \"args\":{\n \"lr\":0.005,\n }\n },\n 'criterion':torch.nn.CrossEntropyLoss(), # error function\n 'max_epochs':5,\n \"train_metrics\":torchmetrics.MetricCollection([\n torchmetrics.Accuracy(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n torchmetrics.Precision(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n torchmetrics.F1(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n torchmetrics.AUROC(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n ],postfix=\"_Train\"),\n \"validation_metrics\":torchmetrics.MetricCollection([\n torchmetrics.Accuracy(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n torchmetrics.Precision(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n torchmetrics.F1(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n torchmetrics.AUROC(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n ],postfix=\"_Validation\")\n}", "/home/stud/j/johagg17/.local/lib/python3.8/site-packages/torchmetrics/utilities/prints.py:36: UserWarning: Metric `AUROC` will save all targets and predictions in buffer. 
For large datasets this may lead to large memory footprint.\n warnings.warn(*args, **kwargs)\n" ] ], [ [ "#### Train", "_____no_output_____" ] ], [ [ "# Load model\nmodelObj = Model(SimpleModel(num_classes=cfg.NUM_CLASSES, num_channels=cfg.IMAGE_CHANNEL, input_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)),**config)\n\n# Setup trainer\ntrainer = pl.Trainer(\n max_epochs=config['max_epochs'], \n gpus=cfg.GPU,\n logger=pl.loggers.TensorBoardLogger(save_dir=cfg.TENSORBORD_DIR),\n callbacks=[LitProgressBar()],\n progress_bar_refresh_rate=1,\n weights_summary=None, # Can be None, top or full\n num_sanity_val_steps=10, \n )\n\ntrainer.fit(\n modelObj, \n train_dataloader=train_dataloader,\n val_dataloaders=valid_dataloader\n);", "GPU available: True, used: False\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\n/home/stud/j/johagg17/.local/lib/python3.8/site-packages/torchmetrics/utilities/prints.py:36: UserWarning: No positive samples in targets, true positive value should be meaningless. Returning zero tensor in true positive score\n warnings.warn(*args, **kwargs)\n" ] ], [ [ "## Architecture\nModify the architecture of the SimpleModel to further increase the performance. Remember that very deep networks allow the model to learn many features, but if the dataset is too small the model will overfit. A simple dataset should not require a very deep network to learn good features.\n\n**TODO:** Modify the SimpleModel architecture. Force the network to overfit. How bad can the network's performance get?\n\n**TODO:** Modify the SimpleModel and increase the complexity a little. Does the performance improve? If not, did you modify it too much or too little?\n\n**TODO:** Modify the SimpleModel architecture. Now combine the hyperparameter tuning and modification of the architecture to reach a performance that is close to the truth images. Explain in detail why the change was applied and if it improved the model a lot.", "_____no_output_____" ], [ "### The model", "_____no_output_____" ] ], [ [ "class SimpleModel(nn.Module):\n def __init__(self,num_channels:int=4, num_classes:int=3, input_shape=(10,10),**kwargs):\n super().__init__()\n conv_layers = 3\n self.conv_layer1 = self._conv_layer_set(num_channels, 24) # 2\n self.conv_layer2 = self._conv_layer_set(24, 24) # 4\n self.conv_layer3 = self._conv_layer_set(24, 40)\n \n \n ## Look up how width and height should change between convolutional layers. \n \n self.fc1 = nn.Linear(40*input_shape[1]//np.power(2, conv_layers)*input_shape[1]//np.power(2, conv_layers), 64) # Calculated with the size. 
why //4\n self.fc2 = nn.Linear(64, num_classes)\n self.drop = nn.Dropout(0.5)\n \n def _conv_layer_set(self, in_c, out_c):\n conv_layer = nn.Sequential(OrderedDict([\n ('conv',nn.Conv2d(in_c, out_c, kernel_size=3, padding=1)),\n ('leakyrelu',nn.LeakyReLU()),\n ('maxpool',nn.MaxPool2d(2)),\n ]))\n return conv_layer\n \n\n def forward(self, x):\n # Set 1\n \n \n out = self.conv_layer1(x) \n out = self.conv_layer2(out)\n out = self.conv_layer3(out) \n \n out = out.view(out.size(0), -1) # Flatten (batchsize, image size)\n \n out = self.fc1(out)\n out = self.drop(out)\n out = self.fc2(out)\n \n \n return out", "_____no_output_____" ] ], [ [ "### Config", "_____no_output_____" ] ], [ [ "# Train model\nconfig = {\n 'drop':0.5,\n 'optimizer':{\n \"type\":torch.optim.Adam,\n \"args\":{\n \"lr\":0.007,\n 'weight_decay':0\n }\n },\n 'criterion':torch.nn.CrossEntropyLoss(), # error function\n 'max_epochs':5,\n \"train_metrics\":torchmetrics.MetricCollection([\n torchmetrics.Accuracy(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n #torchmetrics.Precision(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n torchmetrics.F1(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n #torchmetrics.AUROC(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n ],postfix=\"_Train\"),\n \"validation_metrics\":torchmetrics.MetricCollection([\n torchmetrics.Accuracy(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n #torchmetrics.Precision(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n torchmetrics.F1(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n #torchmetrics.AUROC(num_classes=cfg.NUM_CLASSES,compute_on_step=False),\n ],postfix=\"_Validation\")\n}", "_____no_output_____" ], [ "def train():\n # Load model\n modelObj = Model(SimpleModel(num_classes=cfg.NUM_CLASSES, num_channels=cfg.IMAGE_CHANNEL, input_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)),**config)\n \n # Setup trainer\n trainer = pl.Trainer(\n max_epochs=config['max_epochs'], \n gpus=1,\n logger=pl.loggers.TensorBoardLogger(save_dir=cfg.TENSORBORD_DIR),\n callbacks=[pl.callbacks.progress.TQDMProgressBar()],\n progress_bar_refresh_rate=1,\n weights_summary=None, # Can be None, top or full\n num_sanity_val_steps=10, \n )\n\n trainer.fit(\n modelObj, \n train_dataloader=train_dataloader,\n val_dataloaders=valid_dataloader\n );", "_____no_output_____" ] ], [ [ "### Todo1\n**Version 17 in tensorboard logs**, removed dropout since its purpose is to introduce regularization. Ran for 40 epochs. ", "_____no_output_____" ] ], [ [ "config['max_epochs'] = 40\ntrain()", "GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n" ] ], [ [ "### Todo2", "_____no_output_____" ] ], [ [ "config['max_epochs'] = 5\ntrain()", "GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n" ] ], [ [ "### Todo3", "_____no_output_____" ], [ "## Hyperparameter tuning\n### Task 1\nFrom the example approach we can see that the network performed very poorly. For the network to be considered \"good\" the truth images should match the predicted images. If the architecture can learn but is unstable (check loss/epoch in tensorboard), it is possible to tune the parameters of the network. This mostly involves changing the learning rate, optimizers, loss function etc. to better learn features. 
A network with too high a learning rate creates an increase in the variance of the network weights, which can make the network unstable.\n\n\n**TODO:** Perform hyperparameter tuning. Explain in detail why the parameters were changed and why the result is considered \"better\".", "_____no_output_____" ], [ "### Todo1", "_____no_output_____" ] ], [ [ "class SimpleModel(nn.Module):\n def __init__(self,num_channels:int=4, num_classes:int=3, input_shape=(10,10),**kwargs):\n super().__init__()\n nr_conv_layers = 3\n \n conv_layers = []\n self.conv_layer1 = self._conv_layer_set(num_channels, 32) # 2 \n self.conv_layer2 = self._conv_layer_set(32, 128) # 4 128\n self.conv_layer3 = self._conv_layer_set(128, 150) # 128 (in)\n conv_layers.extend([self.conv_layer1, self.conv_layer2, self.conv_layer3]) #, self.conv_layer4])\n \n self.convolve = nn.Sequential(*conv_layers)\n \n print(self.convolve)\n \n \n feedforward = []\n \n self.fc1 = nn.Linear(150*input_shape[1]//np.power(2, nr_conv_layers)*input_shape[1]//np.power(2, nr_conv_layers), 150) # Calculated with the size. why //4\n self.fc2 = nn.Linear(150, num_classes)\n self.drop = nn.Dropout(0.8)\n \n \n feedforward.append(self.fc1)\n feedforward.append(self.drop)\n feedforward.append(self.fc2)\n \n \n self.ffd = nn.Sequential(*feedforward)\n print(self.ffd)\n \n def _conv_layer_set(self, in_c, out_c):\n conv_layer = nn.Sequential(OrderedDict([\n ('conv',nn.Conv2d(in_c, out_c, kernel_size=3, padding=1)),\n ('leakyrelu',nn.LeakyReLU()),\n ('maxpool',nn.MaxPool2d(2)),\n ]))\n return conv_layer\n \n\n def forward(self, x):\n # Set 1\n \n out = self.convolve(x)\n out = out.view(out.size(0), -1) # Flatten (batchsize, image size)\n \n out = self.ffd(out)\n \n return out", "_____no_output_____" ], [ "modelObj = Model(SimpleModel(num_classes=cfg.NUM_CLASSES, num_channels=cfg.IMAGE_CHANNEL, input_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)),**config) \n# Setup trainer\ntrainer = pl.Trainer(\n max_epochs=config['max_epochs'], \n gpus=1,\n logger=pl.loggers.TensorBoardLogger(save_dir=cfg.TENSORBORD_DIR),\n callbacks=[pl.callbacks.progress.TQDMProgressBar()],\n progress_bar_refresh_rate=1,\n weights_summary=None, # Can be None, top or full\n num_sanity_val_steps=10, \n )\n\ntrainer.fit(\n modelObj, \n train_dataloader=train_dataloader,\n val_dataloaders=valid_dataloader\n);", "/home/stud/j/johagg17/.local/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/callback_connector.py:90: LightningDeprecationWarning: Setting `Trainer(progress_bar_refresh_rate=1)` is deprecated in v1.5 and will be removed in v1.7. Please pass `pytorch_lightning.callbacks.progress.TQDMProgressBar` with `refresh_rate` directly to the Trainer's `callbacks` argument instead. Or, to disable the progress bar pass `enable_progress_bar = False` to the Trainer.\n rank_zero_deprecation(\n/home/stud/j/johagg17/.local/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/callback_connector.py:167: LightningDeprecationWarning: Setting `Trainer(weights_summary=None)` is deprecated in v1.5 and will be removed in v1.7. Please set `Trainer(enable_model_summary=False)` instead.\n rank_zero_deprecation(\nGPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\n/home/stud/j/johagg17/.local/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:730: LightningDeprecationWarning: `trainer.fit(train_dataloader)` is deprecated in v1.4 and will be removed in v1.6. Use `trainer.fit(train_dataloaders)` instead. 
HINT: added 's'\n rank_zero_deprecation(\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n" ], [ "iter_dataloader = iter(test_dataloader)", "_____no_output_____" ], [ "X, Y = next(iter_dataloader)\npreds = torch.argmax(modelObj.predict_step(X,0,0),dim=1)\nprint(\"Test accuracy {}%\".format(np.mean(Y.numpy() == preds.numpy())*100))", "Test accuracy 93.75%\n" ], [ "confuTst = torchmetrics.functional.confusion_matrix(preds.detach().cpu(),Y.int().detach().cpu(), cfg.NUM_CLASSES)\n\nplot.confusion_matrix(cm = confuTst.numpy(), \n normalize = False,\n target_names = cfg.CLASSES,\n title = \"Confusion Matrix: Test data\")", "_____no_output_____" ] ], [ [ "## Augmentation\n**TODO:** Test if data augmentation help. Note that if we want to apply augmentation we need to make sure that the input and target perform the same augmentation. Otherwise, the data will not be correct!", "_____no_output_____" ] ], [ [ "train_transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.RandomHorizontalFlip(0.3),\n #torchvision.transforms.RandomVerticalFlip(),\n #torchvision.transforms.RandomRotation(15),\n torchvision.transforms.Resize((cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)), \n])\ntest_transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n # torchvision.transforms.RandomHorizontalFlip(0.3),\n #torchvision.transforms.RandomVerticalFlip(),\n #torchvision.transforms.RandomRotation(15),\n torchvision.transforms.Resize((cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)), #, \n])\n\ntrain_transform", "_____no_output_____" ], [ "train_dataloader = DataLoader(ClassificationDataset(cfg.training_img_dir, cfg.CLASSES, img_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),transform=train_transform),\n batch_size=cfg.BATCH_SIZE,\n shuffle=True,\n num_workers=cfg.NUM_WORKERS)\nvalid_dataloader = DataLoader(ClassificationDataset(cfg.validation_img_dir, cfg.CLASSES, img_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),transform=test_transform),\n batch_size=cfg.BATCH_SIZE,\n shuffle=False,\n num_workers=cfg.NUM_WORKERS)\n\ntest_dataloader = DataLoader(ClassificationDataset(cfg.testing_img_dir, cfg.CLASSES, img_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),transform=test_transform),\n batch_size=cfg.BATCH_SIZE,\n shuffle=False,\n num_workers=cfg.NUM_WORKERS)", "_____no_output_____" ], [ "#config['conv_out1'], config['conv_out2'], config['conv_out3'] = (32, 64, 128)\n#config['max_epochs'] = 20\nmodelObj = Model(SimpleModel(num_classes=cfg.NUM_CLASSES, num_channels=cfg.IMAGE_CHANNEL, input_shape=(cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH)),**config) \n# Setup trainer\ntrainer = pl.Trainer(\n max_epochs=config['max_epochs'], \n gpus=1,\n logger=pl.loggers.TensorBoardLogger(save_dir=cfg.TENSORBORD_DIR),\n callbacks=[pl.callbacks.progress.TQDMProgressBar()],\n progress_bar_refresh_rate=1,\n weights_summary=None, # Can be None, top or full\n num_sanity_val_steps=10, \n )\n\ntrainer.fit(\n modelObj, \n train_dataloader=train_dataloader,\n val_dataloaders=valid_dataloader\n);", "GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n" ], [ "iter_dataloader = iter(test_dataloader)\n\nX, Y = next(iter_dataloader)\npreds = torch.argmax(modelObj.predict_step(X,0,0),dim=1)\nprint(\"Test accuracy {}%\".format(np.mean(Y.numpy() == preds.numpy())*100))\n\nconfuTst = torchmetrics.functional.confusion_matrix(preds.detach().cpu(),Y.int().detach().cpu(), cfg.NUM_CLASSES)\n\nplot.confusion_matrix(cm = confuTst.numpy(), 
\n normalize = False,\n target_names = cfg.CLASSES,\n title = \"Confusion Matrix: Test data\")", "Test accuracy 96.875%\n" ] ], [ [ "**Question:** Did data augmentation improve the model? \\\n**Question:** What do you think has the greatest impact on the performance, and why? \\", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
cb62623f86b14664c4d3686a6a1e2d034a916229
18,297
ipynb
Jupyter Notebook
CreditMax_ML_Personalized_Merchants/shop_suggestion/recommendation.ipynb
MrKLawrence/FinHack2019
c79817f48b9e890e0c76a3244e8d67862554203f
[ "MIT" ]
null
null
null
CreditMax_ML_Personalized_Merchants/shop_suggestion/recommendation.ipynb
MrKLawrence/FinHack2019
c79817f48b9e890e0c76a3244e8d67862554203f
[ "MIT" ]
null
null
null
CreditMax_ML_Personalized_Merchants/shop_suggestion/recommendation.ipynb
MrKLawrence/FinHack2019
c79817f48b9e890e0c76a3244e8d67862554203f
[ "MIT" ]
null
null
null
30.193069
154
0.332514
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "FAV1 = 'Aveda'\n#FAV2 = 'Optical 88'", "_____no_output_____" ], [ "df = pd.read_csv('ratings.csv') #importing the shop ratings data", "_____no_output_____" ], [ "shop_titles = pd.read_csv('shop_list.csv') #importing the shopID-title list", "_____no_output_____" ], [ "df = pd.merge(df, shop_titles, on='shopID') #merging the two dataframe, showing the shop title according to its shopID\ndf.head(10)", "_____no_output_____" ], [ "shop_matrix = df.pivot_table(index='user_id', columns='title', values='rating')\nshop_matrix.head()\n# converting the dataset into a matrix: shop titles as the columns; user_id as index; \n# ratings as values.\n# NaN = didnt rate\n\n# Matrix is built to compute the correlation between the ratings of a shop and the rest of the shops in the mall\n# pandas pivot_table is used to create the shop matrix", "_____no_output_____" ], [ "Fav1_user_rating = shop_matrix[FAV1]\n#create dataframe with the ratings of these shops from our shop_matrix", "_____no_output_____" ], [ "Fav1_user_rating.head()", "_____no_output_____" ], [ "similar_to_fav1=shop_matrix.corrwith(Fav1_user_rating)\nsimilar_to_fav1.head()\n# pandas corrwith function is used to compute the correlation between two dataframe\n# pairwise correlation of rows or columns of two dataframe objects", "/Users/chunyee/anaconda3/lib/python3.7/site-packages/numpy/lib/function_base.py:2522: RuntimeWarning: Degrees of freedom <= 0 for slice\n c = cov(x, y, rowvar)\n/Users/chunyee/anaconda3/lib/python3.7/site-packages/numpy/lib/function_base.py:2451: RuntimeWarning: divide by zero encountered in true_divide\n c *= np.true_divide(1, fact)\n" ], [ "corr_fav1 = pd.DataFrame(similar_to_fav1, columns=['Correlation'])\ncorr_fav1.dropna(inplace=True) #drop all null values and show all the related shops\ncorr_fav1.head()", "_____no_output_____" ] ], [ [ "## Results\nSuggested: Optical 88, Ray-ban", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb626de0098d53ff8a9ed760b8cff916e5dab7d7
79,116
ipynb
Jupyter Notebook
_notebooks/17_foundations.ipynb
maxlein/fastbook
bc466952eb99334b81ad936703bee6fa1f25f9a9
[ "Apache-2.0" ]
null
null
null
_notebooks/17_foundations.ipynb
maxlein/fastbook
bc466952eb99334b81ad936703bee6fa1f25f9a9
[ "Apache-2.0" ]
3
2021-03-30T04:51:31.000Z
2022-02-26T06:49:05.000Z
_notebooks/17_foundations.ipynb
maxlein/fastbook
bc466952eb99334b81ad936703bee6fa1f25f9a9
[ "Apache-2.0" ]
null
null
null
32.082725
1,139
0.575775
[ [ [ "#hide\nfrom fastai.gen_doc.nbdoc import *", "_____no_output_____" ] ], [ [ "[[chapter_foundations]]", "_____no_output_____" ] ], [ [ "# A Neural Net from the Foundations", "_____no_output_____" ], [ "This chapter begins a journey where we will dig deep into the internals of the models we used in the previous chapters. We will be covering many of the same things we've seen before, but this time around we'll be looking much more closely at the implementation details, and much less closely at the practical issues of how and why things are as they are.\n\nWe will build everything from scratch, only using basic indexing into a tensor. We]ll write a neural net from the ground up, then implement backpropagation manually, so we know exactly what's happening in PyTorch when we call `loss.backward`. We'll also see how to extend PyTorch with custom *autograd* functions that allow us to specify our own forward and backward computations.", "_____no_output_____" ], [ "## Building a Neural Net Layer from Scratch", "_____no_output_____" ], [ "Let's start by refreshing our understanding of how matrix multiplication is used in a basic neural network. Since we're building everything up from scratch, we'll use nothing but plain Python initially (except for indexing into PyTorch tensors), and then replace the plain Python with PyTorch functionality once we've seen how to create it.", "_____no_output_____" ], [ "### Modeling a Neuron", "_____no_output_____" ], [ "A neuron receives a given number of inputs and has an internal weight for each of them. It sums those weighted inputs to produce an output and adds an inner bias. In math, this can be written as:\n\n$$ out = \\sum_{i=1}^{n} x_{i} w_{i} + b$$\n\nif we name our inputs $(x_{1},\\dots,x_{n})$, our weights $(w_{1},\\dots,w_{n})$, and our bias $b$. In code this translates into:\n\n```python\noutput = sum([x*w for x,w in zip(inputs,weights)]) + bias\n```\n\nThis output is then fed into a nonlinear function called an *activation function* before being sent to another neuron. In deep learning the most common of these is the *rectified Linear unit*, or *ReLU*, which, as we've seen, is a fancy way of saying:\n```python\ndef relu(x): return x if x >= 0 else 0\n```", "_____no_output_____" ], [ "A deep learning model is then built by stacking a lot of those neurons in successive layers. We create a first layer with a certain number of neurons (known as *hidden size*) and link all the inputs to each of those neurons. Such a layer is often called a *fully connected layer* or a *dense layer* (for densely connected), or a *linear layer*. \n\nIt requires to compute, for each `input` in our batch and each neuron with a give `weight`, the dot product:\n\n```python\nsum([x*w for x,w in zip(input,weight)])\n```\n\nIf you have done a little bit of linear algebra, you may remember that having a lot of those dot products happens when you do a *matrix multiplication*. More precisely, if our inputs are in a matrix `x` with a size of `batch_size` by `n_inputs`, and if we have grouped the weights of our neurons in a matrix `w` of size `n_neurons` by `n_inputs` (each neuron must have the same number of weights as it has inputs) and all the biases in a vector `b` of size `n_neurons`, then the output of this fully connected layer is:\n\n```python\ny = x @ w.t() + b\n```\n\nwhere `@` represents the matrix product and `w.t()` is the transpose matrix of `w`. 
The output `y` is then of size `batch_size` by `n_neurons`, and in position `(i,j)` we have (for the mathy folks out there):\n\n$$y_{i,j} = \\sum_{k=1}^{n} x_{i,k} w_{k,j} + b_{j}$$\n\nOr in code:\n\n```python\ny[i,j] = sum([a * b for a,b in zip(x[i,:],w[j,:])]) + b[j]\n```\n\nThe transpose is necessary because in the mathematical definition of the matrix product `m @ n`, the coefficient `(i,j)` is:\n\n```python\nsum([a * b for a,b in zip(m[i,:],n[:,j])])\n```\n\nSo the very basic operation we need is a matrix multiplication, as it's what is hidden in the core of a neural net.", "_____no_output_____" ], [ "### Matrix Multiplication from Scratch", "_____no_output_____" ], [ "Let's write a function that computes the matrix product of two tensors, before we allow ourselves to use the PyTorch version of it. We will only use the indexing in PyTorch tensors:", "_____no_output_____" ] ], [ [ "import torch\nfrom torch import tensor", "_____no_output_____" ] ], [ [ "We'll need three nested `for` loops: one for the row indices, one for the column indices, and one for the inner sum. `ac` and `ar` stand for number of columns of `a` and number of rows of `a`, respectively (the same convention is followed for `b`), and we make sure calculating the matrix product is possible by checking that `a` has as many columns as `b` has rows:", "_____no_output_____" ] ], [ [ "def matmul(a,b):\n ar,ac = a.shape # n_rows * n_cols\n br,bc = b.shape\n assert ac==br\n c = torch.zeros(ar, bc)\n for i in range(ar):\n for j in range(bc):\n for k in range(ac): c[i,j] += a[i,k] * b[k,j]\n return c", "_____no_output_____" ] ], [ [ "To test this out, we'll pretend (using random matrices) that we're working with a small batch of 5 MNIST images, flattened into 28×28 vectors, with linear model to turn them into 10 activations:", "_____no_output_____" ] ], [ [ "m1 = torch.randn(5,28*28)\nm2 = torch.randn(784,10)", "_____no_output_____" ] ], [ [ "Let's time our function, using the Jupyter \"magic\" command `%time`:", "_____no_output_____" ] ], [ [ "%time t1=matmul(m1, m2)", "CPU times: user 1.15 s, sys: 4.09 ms, total: 1.15 s\nWall time: 1.15 s\n" ] ], [ [ "And see how that compares to PyTorch's built-in `@`:", "_____no_output_____" ] ], [ [ "%timeit -n 20 t2=m1@m2", "14 µs ± 8.95 µs per loop (mean ± std. dev. of 7 runs, 20 loops each)\n" ] ], [ [ "As we can see, in Python three nested loops is a very bad idea! Python is a slow language, and this isn't going to be very efficient. We see here that PyTorch is around 100,000 times faster than Python—and that's before we even start using the GPU!\n\nWhere does this difference come from? PyTorch didn't write its matrix multiplication in Python, but rather in C++ to make it fast. In general, whenever we do computations on tensors we will need to *vectorize* them so that we can take advantage of the speed of PyTorch, usually by using two techniques: elementwise arithmetic and broadcasting.", "_____no_output_____" ], [ "### Elementwise Arithmetic", "_____no_output_____" ], [ "All the basic operators (`+`, `-`, `*`, `/`, `>`, `<`, `==`) can be applied elementwise. 
That means if we write `a+b` for two tensors `a` and `b` that have the same shape, we will get a tensor composed of the sums of the elements of `a` and `b`:", "_____no_output_____" ] ], [ [ "a = tensor([10., 6, -4])\nb = tensor([2., 8, 7])\na + b", "_____no_output_____" ] ], [ [ "The Boolean operators will return an array of Booleans:", "_____no_output_____" ] ], [ [ "a < b", "_____no_output_____" ] ], [ [ "If we want to know if every element of `a` is less than the corresponding element in `b`, or if two tensors are equal, we need to combine those elementwise operations with `torch.all`:", "_____no_output_____" ] ], [ [ "(a < b).all(), (a==b).all()", "_____no_output_____" ] ], [ [ "Reduction operations like `all()`, `sum()` and `mean()` return tensors with only one element, called rank-0 tensors. If you want to convert this to a plain Python Boolean or number, you need to call `.item()`:", "_____no_output_____" ] ], [ [ "(a + b).mean().item()", "_____no_output_____" ] ], [ [ "The elementwise operations work on tensors of any rank, as long as they have the same shape:", "_____no_output_____" ] ], [ [ "m = tensor([[1., 2, 3], [4,5,6], [7,8,9]])\nm*m", "_____no_output_____" ] ], [ [ "However, you can't perform elementwise operations on tensors that don't have the same shape (unless they are broadcastable, as discussed in the next section):", "_____no_output_____" ] ], [ [ "n = tensor([[1., 2, 3], [4,5,6]])\nm*n", "_____no_output_____" ] ], [ [ "With elementwise arithmetic, we can remove one of our three nested loops: we can multiply the tensors that correspond to the `i`-th row of `a` and the `j`-th column of `b` before summing all the elements, which will speed things up because the inner loop will now be executed by PyTorch at C speed. \n\nTo access one column or row, we can simply write `a[i,:]` or `b[:,j]`. The `:` means take everything in that dimension. We could restrict this and take only a slice of that particular dimension by passing a range, like `1:5`, instead of just `:`. In that case, we would take the elements in columns or rows 1 to 4 (the second number is noninclusive). \n\nOne simplification is that we can always omit a trailing colon, so `a[i,:]` can be abbreviated to `a[i]`. With all of that in mind, we can write a new version of our matrix multiplication:", "_____no_output_____" ] ], [ [ "def matmul(a,b):\n ar,ac = a.shape\n br,bc = b.shape\n assert ac==br\n c = torch.zeros(ar, bc)\n for i in range(ar):\n for j in range(bc): c[i,j] = (a[i] * b[:,j]).sum()\n return c", "_____no_output_____" ], [ "%timeit -n 20 t3 = matmul(m1,m2)", "1.7 ms ± 88.1 µs per loop (mean ± std. dev. of 7 runs, 20 loops each)\n" ] ], [ [ "We're already ~700 times faster, just by removing that inner `for` loop! And that's just the beginning—with broadcasting we can remove another loop and get an even more important speed up.", "_____no_output_____" ], [ "### Broadcasting", "_____no_output_____" ], [ "As we discussed in <<chapter_mnist_basics>>, broadcasting is a term introduced by the [NumPy library](https://docs.scipy.org/doc/) that describes how tensors of different ranks are treated during arithmetic operations. For instance, it's obvious there is no way to add a 3×3 matrix with a 4×5 matrix, but what if we want to add one scalar (which can be represented as a 1×1 tensor) with a matrix? Or a vector of size 3 with a 3×4 matrix? 
In both cases, we can find a way to make sense of this operation.\n\nBroadcasting gives specific rules to codify when shapes are compatible when trying to do an elementwise operation, and how the tensor of the smaller shape is expanded to match the tensor of the bigger shape. It's essential to master those rules if you want to be able to write code that executes quickly. In this section, we'll expand our previous treatment of broadcasting to understand these rules.", "_____no_output_____" ], [ "#### Broadcasting with a scalar", "_____no_output_____" ], [ "Broadcasting with a scalar is the easiest type of broadcasting. When we have a tensor `a` and a scalar, we just imagine a tensor of the same shape as `a` filled with that scalar and perform the operation:", "_____no_output_____" ] ], [ [ "a = tensor([10., 6, -4])\na > 0", "_____no_output_____" ] ], [ [ "How are we able to do this comparison? `0` is being *broadcast* to have the same dimensions as `a`. Note that this is done without creating a tensor full of zeros in memory (that would be very inefficient). \n\nThis is very useful if you want to normalize your dataset by subtracting the mean (a scalar) from the entire data set (a matrix) and dividing by the standard deviation (another scalar):", "_____no_output_____" ] ], [ [ "m = tensor([[1., 2, 3], [4,5,6], [7,8,9]])\n(m - 5) / 2.73", "_____no_output_____" ] ], [ [ "What if we have different means for each row of the matrix? In that case you will need to broadcast a vector to a matrix.", "_____no_output_____" ], [ "#### Broadcasting a vector to a matrix", "_____no_output_____" ], [ "We can broadcast a vector to a matrix as follows:", "_____no_output_____" ] ], [ [ "c = tensor([10.,20,30])\nm = tensor([[1., 2, 3], [4,5,6], [7,8,9]])\nm.shape,c.shape", "_____no_output_____" ], [ "m + c", "_____no_output_____" ] ], [ [ "Here the elements of `c` are expanded to make three rows that match, making the operation possible. Again, PyTorch doesn't actually create three copies of `c` in memory. This is done by the `expand_as` method behind the scenes:", "_____no_output_____" ] ], [ [ "c.expand_as(m)", "_____no_output_____" ] ], [ [ "If we look at the corresponding tensor, we can ask for its `storage` property (which shows the actual contents of the memory used for the tensor) to check there is no useless data stored:", "_____no_output_____" ] ], [ [ "t = c.expand_as(m)\nt.storage()", "_____no_output_____" ] ], [ [ "Even though the tensor officially has nine elements, only three scalars are stored in memory. This is possible thanks to the clever trick of giving that dimension a *stride* of 0 (which means that when PyTorch looks for the next row by adding the stride, it doesn't move):", "_____no_output_____" ] ], [ [ "t.stride(), t.shape", "_____no_output_____" ] ], [ [ "Since `m` is of size 3×3, there are two ways to do broadcasting. The fact it was done on the last dimension is a convention that comes from the rules of broadcasting and has nothing to do with the way we ordered our tensors. 
If instead we do this, we get the same result:", "_____no_output_____" ] ], [ [ "c + m", "_____no_output_____" ] ], [ [ "In fact, it's only possible to broadcast a vector of size `n` with a matrix of size `m` by `n`:", "_____no_output_____" ] ], [ [ "c = tensor([10.,20,30])\nm = tensor([[1., 2, 3], [4,5,6]])\nc+m", "_____no_output_____" ] ], [ [ "This won't work:", "_____no_output_____" ] ], [ [ "c = tensor([10.,20])\nm = tensor([[1., 2, 3], [4,5,6]])\nc+m", "_____no_output_____" ] ], [ [ "If we want to broadcast in the other dimension, we have to change the shape of our vector to make it a 3×1 matrix. This is done with the `unsqueeze` method in PyTorch:", "_____no_output_____" ] ], [ [ "c = tensor([10.,20,30])\nm = tensor([[1., 2, 3], [4,5,6], [7,8,9]])\nc = c.unsqueeze(1)\nm.shape,c.shape", "_____no_output_____" ] ], [ [ "This time, `c` is expanded on the column side:", "_____no_output_____" ] ], [ [ "c+m", "_____no_output_____" ] ], [ [ "Like before, only three scalars are stored in memory:", "_____no_output_____" ] ], [ [ "t = c.expand_as(m)\nt.storage()", "_____no_output_____" ] ], [ [ "And the expanded tensor has the right shape because the column dimension has a stride of 0:", "_____no_output_____" ] ], [ [ "t.stride(), t.shape", "_____no_output_____" ] ], [ [ "With broadcasting, by default if we need to add dimensions, they are added at the beginning. When we were broadcasting before, Pytorch was doing `c.unsqueeze(0)` behind the scenes:", "_____no_output_____" ] ], [ [ "c = tensor([10.,20,30])\nc.shape, c.unsqueeze(0).shape,c.unsqueeze(1).shape", "_____no_output_____" ] ], [ [ "The `unsqueeze` command can be replaced by `None` indexing:", "_____no_output_____" ] ], [ [ "c.shape, c[None,:].shape,c[:,None].shape", "_____no_output_____" ] ], [ [ "You can always omit trailing colons, and `...` means all preceding dimensions:", "_____no_output_____" ] ], [ [ "c[None].shape,c[...,None].shape", "_____no_output_____" ] ], [ [ "With this, we can remove another `for` loop in our matrix multiplication function. Now, instead of multiplying `a[i]` with `b[:,j]`, we can multiply `a[i]` with the whole matrix `b` using broadcasting, then sum the results:", "_____no_output_____" ] ], [ [ "def matmul(a,b):\n ar,ac = a.shape\n br,bc = b.shape\n assert ac==br\n c = torch.zeros(ar, bc)\n for i in range(ar):\n# c[i,j] = (a[i,:] * b[:,j]).sum() # previous\n c[i] = (a[i ].unsqueeze(-1) * b).sum(dim=0)\n return c", "_____no_output_____" ], [ "%timeit -n 20 t4 = matmul(m1,m2)", "357 µs ± 7.2 µs per loop (mean ± std. dev. of 7 runs, 20 loops each)\n" ] ], [ [ "We're now 3,700 times faster than our first implementation! Before we move on, let's discuss the rules of broadcasting in a little more detail.", "_____no_output_____" ], [ "#### Broadcasting rules", "_____no_output_____" ], [ "When operating on two tensors, PyTorch compares their shapes elementwise. It starts with the *trailing dimensions* and works its way backward, adding 1 when it meets empty dimensions. Two dimensions are *compatible* when one of the following is true:\n\n- They are equal.\n- One of them is 1, in which case that dimension is broadcast to make it the same as the other.\n\nArrays do not need to have the same number of dimensions. For example, if you have a 256×256×3 array of RGB values, and you want to scale each color in the image by a different value, you can multiply the image by a one-dimensional array with three values. 
Lining up the sizes of the trailing axes of these arrays according to the broadcast rules shows that they are compatible:\n\n```\nImage (3d tensor): 256 x 256 x 3\nScale (1d tensor): (1) (1) 3\nResult (3d tensor): 256 x 256 x 3\n```\n \nHowever, a 2D tensor of size 256×256 isn't compatible with our image:\n\n```\nImage (3d tensor): 256 x 256 x 3\nScale (1d tensor): (1) 256 x 256\nError\n```\n\nIn our earlier example with a 3×3 matrix and a vector of size 3, broadcasting was done on the rows:\n\n```\nMatrix (2d tensor): 3 x 3\nVector (1d tensor): (1) 3\nResult (2d tensor): 3 x 3\n```\n\nAs an exercise, try to determine what dimensions to add (and where) when you need to normalize a batch of images of size `64 x 3 x 256 x 256` with vectors of three elements (one for the mean and one for the standard deviation).", "_____no_output_____" ], [ "Another useful way of simplifying tensor manipulations is the use of the Einstein summation convention.", "_____no_output_____" ], [ "### Einstein Summation", "_____no_output_____" ], [ "Before using the PyTorch operation `@` or `torch.matmul`, there is one last way we can implement matrix multiplication: Einstein summation (`einsum`). This is a compact representation for combining products and sums in a general way. We write an equation like this:\n\n```\nik,kj -> ij\n```\n\nThe lefthand side represents the operands' dimensions, separated by commas. Here we have two tensors that each have two dimensions (`i,k` and `k,j`). The righthand side represents the result dimensions, so here we have a tensor with two dimensions `i,j`. \n\nThe rules of Einstein summation notation are as follows:\n\n1. Repeated indices are implicitly summed over.\n1. Each index can appear at most twice in any term.\n1. Each term must contain identical nonrepeated indices.\n\nSo in our example, since `k` is repeated, we sum over that index. In the end the formula represents the matrix obtained when we put in `(i,j)` the sum of all the coefficients `(i,k)` in the first tensor multiplied by the coefficients `(k,j)` in the second tensor... which is the matrix product! Here is how we can code this in PyTorch:", "_____no_output_____" ] ], [ [ "def matmul(a,b): return torch.einsum('ik,kj->ij', a, b)", "_____no_output_____" ] ], [ [ "Einstein summation is a very practical way of expressing operations involving indexing and sum of products. Note that you can have just one member on the lefthand side. For instance, this:\n\n```python\ntorch.einsum('ij->ji', a)\n```\n\nreturns the transpose of the matrix `a`. You can also have three or more members. This:\n\n```python\ntorch.einsum('bi,ij,bj->b', a, b, c)\n```\n\nwill return a vector of size `b` where the `k`-th coordinate is the sum of `a[k,i] b[i,j] c[k,j]`. This notation is particularly convenient when you have more dimensions because of batches. For example, if you have two batches of matrices and want to compute the matrix product per batch, you could do it like this: \n\n```python\ntorch.einsum('bik,bkj->bij', a, b)\n```\n\nLet's go back to our new `matmul` implementation using `einsum` and look at its speed:", "_____no_output_____" ] ], [ [ "%timeit -n 20 t5 = matmul(m1,m2)", "68.7 µs ± 4.06 µs per loop (mean ± std. dev. of 7 runs, 20 loops each)\n" ] ], [ [ "As you can see, not only is it practical, but it's *very* fast. `einsum` is often the fastest way to do custom operations in PyTorch, without diving into C++ and CUDA. (But it's generally not as fast as carefully optimized CUDA code, as you see from the results in \"Matrix Multiplication from Scratch\".)", "_____no_output_____" ] ], 
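[ [ "As a quick sanity check (a sketch with made-up shapes), the batched product written with `einsum` above gives the same result as PyTorch's `@` operator:", "_____no_output_____" ] ], [ [ "ba = torch.randn(4, 2, 3)\nbb = torch.randn(4, 3, 5)\n# the difference should be zero up to floating-point error\n(torch.einsum('bik,bkj->bij', ba, bb) - ba @ bb).abs().max()", "_____no_output_____" ] ], 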
[ [ "Now that we know how to implement a matrix multiplication from scratch, we are ready to build our neural net—specifically its forward and backward passes—using just matrix multiplications.", "_____no_output_____" ], [ "## The Forward and Backward Passes", "_____no_output_____" ], [ "As we saw in <<chapter_mnist_basics>>, to train a model, we will need to compute all the gradients of a given loss with respect to its parameters, which is known as the *backward pass*. The *forward pass* is where we compute the output of the model on a given input, based on the matrix products. As we define our first neural net, we will also delve into the problem of properly initializing the weights, which is crucial for making training start properly.", "_____no_output_____" ], [ "### Defining and Initializing a Layer", "_____no_output_____" ], [ "We will take the example of a two-layer neural net first. As we've seen, one layer can be expressed as `y = x @ w + b`, with `x` our inputs, `y` our outputs, `w` the weights of the layer (which is of size number of inputs by number of neurons if we don't transpose like before), and `b` is the bias vector:", "_____no_output_____" ] ], [ [ "def lin(x, w, b): return x @ w + b", "_____no_output_____" ] ], [ [ "We can stack the second layer on top of the first, but since mathematically the composition of two linear operations is another linear operation, this only makes sense if we put something nonlinear in the middle, called an activation function. As mentioned at the beginning of the chapter, in deep learning applications the activation function most commonly used is a ReLU, which returns the maximum of `x` and `0`. \n\nWe won't actually train our model in this chapter, so we'll use random tensors for our inputs and targets. Let's say our inputs are 200 vectors of size 100, which we group into one batch, and our targets are 200 random floats:", "_____no_output_____" ] ], [ [ "x = torch.randn(200, 100)\ny = torch.randn(200)", "_____no_output_____" ] ], [ [ "For our two-layer model we will need two weight matrices and two bias vectors. Let's say we have a hidden size of 50 and the output size is 1 (for one of our inputs, the corresponding output is one float in this toy example). We initialize the weights randomly and the bias at zero:", "_____no_output_____" ] ], [ [ "w1 = torch.randn(100,50)\nb1 = torch.zeros(50)\nw2 = torch.randn(50,1)\nb2 = torch.zeros(1)", "_____no_output_____" ] ], [ [ "Then the result of our first layer is simply:", "_____no_output_____" ] ], [ [ "l1 = lin(x, w1, b1)\nl1.shape", "_____no_output_____" ] ], [ [ "Note that this formula works with our batch of inputs, and returns a batch of hidden state: `l1` is a matrix of size 200 (our batch size) by 50 (our hidden size).\n\nThere is a problem with the way our model was initialized, however. To understand it, we need to look at the mean and standard deviation (std) of `l1`:", "_____no_output_____" ] ], [ [ "l1.mean(), l1.std()", "_____no_output_____" ] ], [ [ "The mean is close to zero, which is understandable since both our input and weight matrices have means close to zero. But the standard deviation, which represents how far away our activations go from the mean, went from 1 to 10. This is a really big problem because that's with just one layer. 
Modern neural nets can have hundreds of layers, so if each of them multiplies the scale of our activations by 10, by the end of the last layer we won't have numbers representable by a computer.\n\nIndeed, if we make just 50 multiplications between `x` and random matrices of size 100×100, we'll have:", "_____no_output_____" ] ], [ [ "x = torch.randn(200, 100)\nfor i in range(50): x = x @ torch.randn(100,100)\nx[0:5,0:5]", "_____no_output_____" ] ], [ [ "The result is `nan`s everywhere. So maybe the scale of our matrix was too big, and we need to have smaller weights? But if we use too small weights, we will have the opposite problem—the scale of our activations will go from 1 to 0.1, and after 100 layers we'll be left with zeros everywhere:", "_____no_output_____" ] ], [ [ "x = torch.randn(200, 100)\nfor i in range(50): x = x @ (torch.randn(100,100) * 0.01)\nx[0:5,0:5]", "_____no_output_____" ] ], [ [ "So we have to scale our weight matrices exactly right so that the standard deviation of our activations stays at 1. We can compute the exact value to use mathematically, as illustrated by Xavier Glorot and Yoshua Bengio in [\"Understanding the Difficulty of Training Deep Feedforward Neural Networks\"](http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf). The right scale for a given layer is $1/\\sqrt{n_{in}}$, where $n_{in}$ represents the number of inputs.\n\nIn our case, if we have 100 inputs, we should scale our weight matrices by 0.1:", "_____no_output_____" ] ], [ [ "x = torch.randn(200, 100)\nfor i in range(50): x = x @ (torch.randn(100,100) * 0.1)\nx[0:5,0:5]", "_____no_output_____" ] ], [ [ "Finally some numbers that are neither zeros nor `nan`s! Notice how stable the scale of our activations is, even after those 50 fake layers:", "_____no_output_____" ] ], [ [ "x.std()", "_____no_output_____" ] ], [ [ "If you play a little bit with the value for scale you'll notice that even a slight variation from 0.1 will get you either to very small or very large numbers, so initializing the weights properly is extremely important. \n\nLet's go back to our neural net. Since we messed a bit with our inputs, we need to redefine them:", "_____no_output_____" ] ], [ [ "x = torch.randn(200, 100)\ny = torch.randn(200)", "_____no_output_____" ] ], [ [ "And for our weights, we'll use the right scale, which is known as *Xavier initialization* (or *Glorot initialization*):", "_____no_output_____" ] ], [ [ "from math import sqrt\nw1 = torch.randn(100,50) / sqrt(100)\nb1 = torch.zeros(50)\nw2 = torch.randn(50,1) / sqrt(50)\nb2 = torch.zeros(1)", "_____no_output_____" ] ], [ [ "Now if we compute the result of the first layer, we can check that the mean and standard deviation are under control:", "_____no_output_____" ] ], [ [ "l1 = lin(x, w1, b1)\nl1.mean(),l1.std()", "_____no_output_____" ] ], [ [ "Very good. Now we need to go through a ReLU, so let's define one. A ReLU removes the negatives and replaces them with zeros, which is another way of saying it clamps our tensor at zero:", "_____no_output_____" ] ], [ [ "def relu(x): return x.clamp_min(0.)", "_____no_output_____" ] ], [ [ "We pass our activations through this:", "_____no_output_____" ] ], [ [ "l2 = relu(l1)\nl2.mean(),l2.std()", "_____no_output_____" ] ], [ [ "And we're back to square one: the mean of our activations has gone to 0.4 (which is understandable since we removed the negatives) and the std went down to 0.58. 
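Those two numbers are roughly what we'd expect for a standard normal input going through a ReLU—here's a quick sketch to check (the sample size is arbitrary):", "_____no_output_____" ] ], [ [ "z = torch.randn(1_000_000)\nrelu(z).mean(), relu(z).std()  # roughly 0.40 and 0.58", "_____no_output_____" ] ], [ [ "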
So like before, after a few layers we will probably wind up with zeros:", "_____no_output_____" ] ], [ [ "x = torch.randn(200, 100)\nfor i in range(50): x = relu(x @ (torch.randn(100,100) * 0.1))\nx[0:5,0:5]", "_____no_output_____" ] ], [ [ "This means our initialization wasn't right. Why? At the time Glorot and Bengio wrote their article, the popular activation in a neural net was the hyperbolic tangent (tanh, which is the one they used), and that initialization doesn't account for our ReLU. Fortunately, someone else has done the math for us and computed the right scale for us to use. In [\"Delving Deep into Rectifiers: Surpassing Human-Level Performance\"](https://arxiv.org/abs/1502.01852) (which we've seen before—it's the article that introduced the ResNet), Kaiming He et al. show that we should use the following scale instead: $\\sqrt{2 / n_{in}}$, where $n_{in}$ is the number of inputs of our model. Let's see what this gives us:", "_____no_output_____" ] ], [ [ "x = torch.randn(200, 100)\nfor i in range(50): x = relu(x @ (torch.randn(100,100) * sqrt(2/100)))\nx[0:5,0:5]", "_____no_output_____" ] ], [ [ "That's better: our numbers aren't all zeroed this time. So let's go back to the definition of our neural net and use this initialization (which is named *Kaiming initialization* or *He initialization*):", "_____no_output_____" ] ], [ [ "x = torch.randn(200, 100)\ny = torch.randn(200)", "_____no_output_____" ], [ "w1 = torch.randn(100,50) * sqrt(2 / 100)\nb1 = torch.zeros(50)\nw2 = torch.randn(50,1) * sqrt(2 / 50)\nb2 = torch.zeros(1)", "_____no_output_____" ] ], [ [ "Let's look at the scale of our activations after going through the first linear layer and ReLU:", "_____no_output_____" ] ], [ [ "l1 = lin(x, w1, b1)\nl2 = relu(l1)\nl2.mean(), l2.std()", "_____no_output_____" ] ], [ [ "Much better! Now that our weights are properly initialized, we can define our whole model:", "_____no_output_____" ] ], [ [ "def model(x):\n l1 = lin(x, w1, b1)\n l2 = relu(l1)\n l3 = lin(l2, w2, b2)\n return l3", "_____no_output_____" ] ], [ [ "This is the forward pass. Now all that's left to do is to compare our output to the labels we have (random numbers, in this example) with a loss function. In this case, we will use the mean squared error. (It's a toy problem, and this is the easiest loss function to use for what is next, computing the gradients.)\n\nThe only subtlety is that our outputs and targets don't have exactly the same shape—after going through the model, we get an output like this:", "_____no_output_____" ] ], [ [ "out = model(x)\nout.shape", "_____no_output_____" ] ], [ [ "To get rid of this trailing 1 dimension, we use the `squeeze` function:", "_____no_output_____" ] ], [ [ "def mse(output, targ): return (output.squeeze(-1) - targ).pow(2).mean()", "_____no_output_____" ] ], [ [ "And now we are ready to compute our loss:", "_____no_output_____" ] ], [ [ "loss = mse(out, y)", "_____no_output_____" ] ], [ [ "That's all for the forward pass—let's now look at the gradients.", "_____no_output_____" ], [ "### Gradients and the Backward Pass", "_____no_output_____" ], [ "We've seen that PyTorch computes all the gradients we need with a magic call to `loss.backward`, but let's explore what's happening behind the scenes.\n\nNow comes the part where we need to compute the gradients of the loss with respect to all the weights of our model, so all the floats in `w1`, `b1`, `w2`, and `b2`. For this, we will need a bit of math—specifically the *chain rule*. This is the rule of calculus that guides how we can compute the derivative of a composed function:\n\n$$(g \\circ f)'(x) = g'(f(x)) f'(x)$$", "_____no_output_____" ], [ "> j: I find this notation very hard to wrap my head around, so instead I like to think of it as: if `y = g(u)` and `u=f(x)`; then `dy/dx = dy/du * du/dx`. The two notations mean the same thing, so use whatever works for you.", "_____no_output_____" ] ], 
[ [ "Our loss is a big composition of different functions: mean squared error (which is in turn the composition of a mean and a power of two), the second linear layer, a ReLU and the first linear layer. For instance, if we want the gradients of the loss with respect to `b2` and our loss is defined by:\n\n```\nloss = mse(out,y) = mse(lin(l2, w2, b2), y)\n```\n\nThe chain rule tells us that we have:\n$$\\frac{\\text{d} loss}{\\text{d} b_{2}} = \\frac{\\text{d} loss}{\\text{d} out} \\times \\frac{\\text{d} out}{\\text{d} b_{2}} = \\frac{\\text{d}}{\\text{d} out} mse(out, y) \\times \\frac{\\text{d}}{\\text{d} b_{2}} lin(l_{2}, w_{2}, b_{2})$$\n\nTo compute the gradients of the loss with respect to $b_{2}$, we first need the gradients of the loss with respect to our output $out$. It's the same if we want the gradients of the loss with respect to $w_{2}$. Then, to get the gradients of the loss with respect to $b_{1}$ or $w_{1}$, we will need the gradients of the loss with respect to $l_{1}$, which in turn requires the gradients of the loss with respect to $l_{2}$, which will need the gradients of the loss with respect to $out$.\n\nSo to compute all the gradients we need for the update, we need to begin from the output of the model and work our way *backward*, one layer after the other—which is why this step is known as *backpropagation*. We can automate it by having each function we implemented (`relu`, `mse`, `lin`) provide its backward step: that is, how to derive the gradients of the loss with respect to the input(s) from the gradients of the loss with respect to the output.\n\nHere we populate those gradients in an attribute of each tensor, a bit like PyTorch does with `.grad`. \n\nThe first gradients we compute are those of the loss with respect to the output of our model (which is the input of the loss function). We undo the `squeeze` we did in `mse`, then we use the formula that gives us the derivative of $x^{2}$: $2x$. The derivative of the mean is just $1/n$ where $n$ is the number of elements in our input:", "_____no_output_____" ] ], [ [ "def mse_grad(inp, targ): \n # grad of loss with respect to output of previous layer\n inp.g = 2. * (inp.squeeze() - targ).unsqueeze(-1) / inp.shape[0]", "_____no_output_____" ] ], [ [ "For the gradients of the ReLU and our linear layer, we use the gradients of the loss with respect to the output (in `out.g`) and apply the chain rule to compute the gradients of the loss with respect to the input (in `inp.g`). The chain rule tells us that `inp.g = relu'(inp) * out.g`. The derivative of `relu` is either 0 (when inputs are negative) or 1 (when inputs are positive), so this gives us:", "_____no_output_____" ] ], [ [ "def relu_grad(inp, out):\n # grad of relu with respect to input activations\n inp.g = (inp>0).float() * out.g", "_____no_output_____" ] ], [ [ "The scheme is the same to compute the gradients of the loss with respect to the inputs, weights, and bias in the linear layer:", "_____no_output_____" ] ], [ [ "def lin_grad(inp, out, w, b):\n # grad of matmul with respect to input\n inp.g = out.g @ w.t()\n # grads with respect to the weights and the bias\n w.g = inp.t() @ out.g\n b.g = out.g.sum(0)", "_____no_output_____" ] ], 
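[ [ "Before moving on, we can sanity-check these formulas against PyTorch's autograd—a quick sketch with made-up shapes:", "_____no_output_____" ] ], [ [ "ti = torch.randn(4, 3, requires_grad=True)\ntw = torch.randn(3, 2, requires_grad=True)\ntb = torch.zeros(2, requires_grad=True)\ntout = ti @ tw + tb\ng = torch.randn(4, 2)  # stand-in for the gradient flowing back from the next layer\ntout.backward(g)\n(ti.grad.allclose(g @ tw.t()),\n tw.grad.allclose(ti.detach().t() @ g),\n tb.grad.allclose(g.sum(0)))", "_____no_output_____" ] ], 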
[ [ "We won't linger on the mathematical formulas that define them since they're not important for our purposes, but do check out Khan Academy's excellent calculus lessons if you're interested in this topic.", "_____no_output_____" ], [ "### Sidebar: SymPy", "_____no_output_____" ], [ "SymPy is a library for symbolic computation that is extremely useful when working with calculus. Per the [documentation](https://docs.sympy.org/latest/tutorial/intro.html):", "_____no_output_____" ], [ "> : Symbolic computation deals with the computation of mathematical objects symbolically. This means that the mathematical objects are represented exactly, not approximately, and mathematical expressions with unevaluated variables are left in symbolic form.", "_____no_output_____" ], [ "To do symbolic computation, we first define a *symbol*, and then do a computation, like so:", "_____no_output_____" ] ], [ [ "from sympy import symbols,diff\nsx,sy = symbols('sx sy')\ndiff(sx**2, sx)", "_____no_output_____" ] ], [ [ "Here, SymPy has taken the derivative of `x**2` for us! It can take the derivative of complicated compound expressions, simplify and factor equations, and much more. There's really not much reason for anyone to do calculus manually nowadays—for calculating gradients, PyTorch does it for us, and for showing the equations, SymPy does it for us!", "_____no_output_____" ], [ "### End sidebar", "_____no_output_____" ], [ "Once we have defined those functions, we can use them to write the backward pass. Since each gradient is automatically populated in the right tensor, we don't need to store the results of those `_grad` functions anywhere—we just need to execute them in the reverse order of the forward pass, to make sure that in each function `out.g` exists:", "_____no_output_____" ] ], [ [ "def forward_and_backward(inp, targ):\n # forward pass:\n l1 = inp @ w1 + b1\n l2 = relu(l1)\n out = l2 @ w2 + b2\n # we don't actually need the loss in backward!\n loss = mse(out, targ)\n \n # backward pass:\n mse_grad(out, targ)\n lin_grad(l2, out, w2, b2)\n relu_grad(l1, l2)\n lin_grad(inp, l1, w1, b1)", "_____no_output_____" ] ], [ [ "And now we can access the gradients of our model parameters in `w1.g`, `b1.g`, `w2.g`, and `b2.g`.", "_____no_output_____" ] ], 
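[ [ "As a sanity check (a quick sketch), the gradients computed by our from-scratch backward pass agree with what PyTorch's autograd computes for the same loss:", "_____no_output_____" ] ], [ [ "forward_and_backward(x, y)\nw1a, b1a, w2a, b2a = [t.clone().requires_grad_(True) for t in (w1, b1, w2, b2)]\nloss = mse(relu(x @ w1a + b1a) @ w2a + b2a, y)\nloss.backward()\n(w1a.grad.allclose(w1.g), b1a.grad.allclose(b1.g),\n w2a.grad.allclose(w2.g), b2a.grad.allclose(b2.g))", "_____no_output_____" ] ], 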
[ [ "We have successfully defined our model—now let's make it a bit more like a PyTorch module.", "_____no_output_____" ], [ "### Refactoring the Model", "_____no_output_____" ], [ "The three functions we used have two associated functions: a forward pass and a backward pass. Instead of writing them separately, we can create a class to wrap them together. That class can also store the inputs and outputs for the backward pass. This way, we will just have to call `backward`:", "_____no_output_____" ] ], [ [ "class Relu():\n def __call__(self, inp):\n self.inp = inp\n self.out = inp.clamp_min(0.)\n return self.out\n \n def backward(self): self.inp.g = (self.inp>0).float() * self.out.g", "_____no_output_____" ] ], [ [ "`__call__` is a magic name in Python that will make our class callable. This is what will be executed when we type `y = Relu()(x)`. We can do the same for our linear layer and the MSE loss:", "_____no_output_____" ] ], [ [ "class Lin():\n def __init__(self, w, b): self.w,self.b = w,b\n \n def __call__(self, inp):\n self.inp = inp\n self.out = [email protected] + self.b\n return self.out\n \n def backward(self):\n self.inp.g = self.out.g @ self.w.t()\n self.w.g = self.inp.t() @ self.out.g\n self.b.g = self.out.g.sum(0)", "_____no_output_____" ], [ "class Mse():\n def __call__(self, inp, targ):\n self.inp = inp\n self.targ = targ\n self.out = (inp.squeeze() - targ).pow(2).mean()\n return self.out\n \n def backward(self):\n x = (self.inp.squeeze()-self.targ).unsqueeze(-1)\n self.inp.g = 2.*x/self.targ.shape[0]", "_____no_output_____" ] ], [ [ "Then we can put everything in a model that we initialize with our tensors `w1`, `b1`, `w2`, `b2`:", "_____no_output_____" ] ], [ [ "class Model():\n def __init__(self, w1, b1, w2, b2):\n self.layers = [Lin(w1,b1), Relu(), Lin(w2,b2)]\n self.loss = Mse()\n \n def __call__(self, x, targ):\n for l in self.layers: x = l(x)\n return self.loss(x, targ)\n \n def backward(self):\n self.loss.backward()\n for l in reversed(self.layers): l.backward()", "_____no_output_____" ] ], [ [ "What is really nice about this refactoring and registering things as layers of our model is that the forward and backward passes are now really easy to write. 
If we want to instantiate our model, we just need to write:", "_____no_output_____" ] ], [ [ "model = Model(w1, b1, w2, b2)", "_____no_output_____" ] ], [ [ "The forward pass can then be executed with:", "_____no_output_____" ] ], [ [ "loss = model(x, y)", "_____no_output_____" ] ], [ [ "And the backward pass with:", "_____no_output_____" ] ], [ [ "model.backward()", "_____no_output_____" ] ], [ [ "### Going to PyTorch", "_____no_output_____" ], [ "The `Lin`, `Mse` and `Relu` classes we wrote have a lot in common, so we could make them all inherit from the same base class:", "_____no_output_____" ] ], [ [ "class LayerFunction():\n def __call__(self, *args):\n self.args = args\n self.out = self.forward(*args)\n return self.out\n \n def forward(self): raise Exception('not implemented')\n def bwd(self): raise Exception('not implemented')\n def backward(self): self.bwd(self.out, *self.args)", "_____no_output_____" ] ], [ [ "Then we just need to implement `forward` and `bwd` in each of our subclasses:", "_____no_output_____" ] ], [ [ "class Relu(LayerFunction):\n def forward(self, inp): return inp.clamp_min(0.)\n def bwd(self, out, inp): inp.g = (inp>0).float() * out.g", "_____no_output_____" ], [ "class Lin(LayerFunction):\n def __init__(self, w, b): self.w,self.b = w,b\n \n def forward(self, inp): return [email protected] + self.b\n \n def bwd(self, out, inp):\n inp.g = out.g @ self.w.t()\n self.w.g = inp.t() @ out.g\n self.b.g = out.g.sum(0)", "_____no_output_____" ], [ "class Mse(LayerFunction):\n def forward (self, inp, targ): return (inp.squeeze() - targ).pow(2).mean()\n def bwd(self, out, inp, targ): \n inp.g = 2*(inp.squeeze()-targ).unsqueeze(-1) / targ.shape[0]", "_____no_output_____" ] ], [ [ "The rest of our model can be the same as before. This is getting closer and closer to what PyTorch does. Each basic function we need to differentiate is written as a `torch.autograd.Function` object that has a `forward` and a `backward` method. PyTorch will then keep track of any computation we do to be able to properly run the backward pass, unless we set the `requires_grad` attribute of our tensors to `False`.\n\nWriting one of these is (almost) as easy as writing our original classes. The difference is that we choose what to save and what to put in a context variable (so that we make sure we don't save anything we don't need), and we return the gradients in the `backward` pass. It's very rare to have to write your own `Function` but if you ever need something exotic or want to mess with the gradients of a regular function, here is how to write one:", "_____no_output_____" ] ], [ [ "from torch.autograd import Function\n\nclass MyRelu(Function):\n @staticmethod\n def forward(ctx, i):\n result = i.clamp_min(0.)\n ctx.save_for_backward(i)\n return result\n \n @staticmethod\n def backward(ctx, grad_output):\n i, = ctx.saved_tensors\n return grad_output * (i>0).float()", "_____no_output_____" ] ], 
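[ [ "A custom `Function` is used through its `apply` method. Here's a quick sketch (with made-up inputs) checking that `MyRelu` behaves like our earlier `relu` and routes gradients correctly:", "_____no_output_____" ] ], [ [ "xt = torch.randn(5, requires_grad=True)\nMyRelu.apply(xt).sum().backward()\nxt.grad  # 1. where xt > 0, 0. elsewhere", "_____no_output_____" ] ], 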
[ [ "The structure used to build a more complex model that takes advantage of those `Function`s is a `torch.nn.Module`. This is the base structure for all models, and all the neural nets you have seen up until now were from that class. It mostly helps to register all the trainable parameters, which as we've seen can be used in the training loop.\n\nTo implement an `nn.Module` you just need to:\n\n- Make sure the superclass `__init__` is called first when you initialize it.\n- Define any parameters of the model as attributes with `nn.Parameter`.\n- Define a `forward` function that returns the output of your model.\n\nAs an example, here is the linear layer from scratch:", "_____no_output_____" ] ], [ [ "import torch.nn as nn\n\nclass LinearLayer(nn.Module):\n def __init__(self, n_in, n_out):\n super().__init__()\n self.weight = nn.Parameter(torch.randn(n_out, n_in) * sqrt(2/n_in))\n self.bias = nn.Parameter(torch.zeros(n_out))\n \n def forward(self, x): return x @ self.weight.t() + self.bias", "_____no_output_____" ] ], [ [ "As you see, this class automatically keeps track of what parameters have been defined:", "_____no_output_____" ] ], [ [ "lin = LinearLayer(10,2)\np1,p2 = lin.parameters()\np1.shape,p2.shape", "_____no_output_____" ] ], [ [ "It is thanks to this feature of `nn.Module` that we can just say `opt.step()` and have an optimizer loop through the parameters and update each one.\n\nNote that in PyTorch, the weights are stored as an `n_out x n_in` matrix, which is why we have the transpose in the forward pass.\n\nBy using the linear layer from PyTorch (which uses the Kaiming initialization as well), the model we have been building up during this chapter can be written like this:", "_____no_output_____" ] ], [ [ "class Model(nn.Module):\n def __init__(self, n_in, nh, n_out):\n super().__init__()\n self.layers = nn.Sequential(\n nn.Linear(n_in,nh), nn.ReLU(), nn.Linear(nh,n_out))\n self.loss = mse\n \n def forward(self, x, targ): return self.loss(self.layers(x).squeeze(), targ)", "_____no_output_____" ] ], [ [ "fastai provides its own variant of `Module` that is identical to `nn.Module`, but doesn't require you to call `super().__init__()` (it does that for you automatically):", "_____no_output_____" ] ], [ [ "class Model(Module):\n def __init__(self, n_in, nh, n_out):\n self.layers = nn.Sequential(\n nn.Linear(n_in,nh), nn.ReLU(), nn.Linear(nh,n_out))\n self.loss = mse\n \n def forward(self, x, targ): return self.loss(self.layers(x).squeeze(), targ)", "_____no_output_____" ] ], [ [ "In the last chapter, we will start from such a model and see how to build a training loop from scratch and refactor it to what we've been using in previous chapters.", "_____no_output_____" ], [ "## Conclusion", "_____no_output_____" ], [ "In this chapter we explored the foundations of deep learning, beginning with matrix multiplication and moving on to implementing the forward and backward passes of a neural net from scratch. We then refactored our code to show how PyTorch works beneath the hood.\n\nHere are a few things to remember:\n\n- A neural net is basically a bunch of matrix multiplications with nonlinearities in between.\n- Python is slow, so to write fast code we have to vectorize it and take advantage of techniques such as elementwise arithmetic and broadcasting.\n- Two tensors are broadcastable if the dimensions starting from the end and going backward match (if they are the same, or one of them is 1). To make tensors broadcastable, we may need to add dimensions of size 1 with `unsqueeze` or a `None` index.\n- Properly initializing a neural net is crucial to get training started. 
Kaiming initialization should be used when we have ReLU nonlinearities.\n- The backward pass is the chain rule applied multiple times, computing the gradients from the output of our model and going back, one layer at a time.\n- When subclassing `nn.Module` (if not using fastai's `Module`) we have to call the superclass `__init__` method in our `__init__` method and we have to define a `forward` function that takes an input and returns the desired result.", "_____no_output_____" ], [ "## Questionnaire", "_____no_output_____" ], [ "1. Write the Python code to implement a single neuron.\n1. Write the Python code to implement ReLU.\n1. Write the Python code for a dense layer in terms of matrix multiplication.\n1. Write the Python code for a dense layer in plain Python (that is, with list comprehensions and functionality built into Python).\n1. What is the \"hidden size\" of a layer?\n1. What does the `t` method do in PyTorch?\n1. Why is matrix multiplication written in plain Python very slow?\n1. In `matmul`, why is `ac==br`?\n1. In Jupyter Notebook, how do you measure the time taken for a single cell to execute?\n1. What is \"elementwise arithmetic\"?\n1. Write the PyTorch code to test whether every element of `a` is greater than the corresponding element of `b`.\n1. What is a rank-0 tensor? How do you convert it to a plain Python data type?\n1. What does this return, and why? `tensor([1,2]) + tensor([1])`\n1. What does this return, and why? `tensor([1,2]) + tensor([1,2,3])`\n1. How does elementwise arithmetic help us speed up `matmul`?\n1. What are the broadcasting rules?\n1. What is `expand_as`? Show an example of how it can be used to match the results of broadcasting.\n1. How does `unsqueeze` help us to solve certain broadcasting problems?\n1. How can we use indexing to do the same operation as `unsqueeze`?\n1. How do we show the actual contents of the memory used for a tensor?\n1. When adding a vector of size 3 to a matrix of size 3×3, are the elements of the vector added to each row or each column of the matrix? (Be sure to check your answer by running this code in a notebook.)\n1. Do broadcasting and `expand_as` result in increased memory use? Why or why not?\n1. Implement `matmul` using Einstein summation.\n1. What does a repeated index letter represent on the left-hand side of einsum?\n1. What are the three rules of Einstein summation notation? Why?\n1. What are the forward pass and backward pass of a neural network?\n1. Why do we need to store some of the activations calculated for intermediate layers in the forward pass?\n1. What is the downside of having activations with a standard deviation too far away from 1?\n1. How can weight initialization help avoid this problem?\n1. What is the formula to initialize weights such that we get a standard deviation of 1 for a plain linear layer, and for a linear layer followed by ReLU?\n1. Why do we sometimes have to use the `squeeze` method in loss functions?\n1. What does the argument to the `squeeze` method do? Why might it be important to include this argument, even though PyTorch does not require it?\n1. What is the \"chain rule\"? Show the equation in either of the two forms presented in this chapter.\n1. Show how to calculate the gradients of `mse(lin(l2, w2, b2), y)` using the chain rule.\n1. What is the gradient of ReLU? Show it in math or code. (You shouldn't need to commit this to memory—try to figure it using your knowledge of the shape of the function.)\n1. 
In what order do we need to call the `*_grad` functions in the backward pass? Why?\n1. What is `__call__`?\n1. What methods must we implement when writing a `torch.autograd.Function`?\n1. Write `nn.Linear` from scratch, and test it works.\n1. What is the difference between `nn.Module` and fastai's `Module`?", "_____no_output_____" ], [ "### Further Research", "_____no_output_____" ], [ "1. Implement ReLU as a `torch.autograd.Function` and train a model with it.\n1. If you are mathematically inclined, find out what the gradients of a linear layer are in mathematical notation. Map that to the implementation we saw in this chapter.\n1. Learn about the `unfold` method in PyTorch, and use it along with matrix multiplication to implement your own 2D convolution function. Then train a CNN that uses it.\n1. Implement everything in this chapter using NumPy instead of PyTorch. ", "_____no_output_____" ] ] ]
[ "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "raw" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb626def83f6613d7ee9c89c76fd4a9880b6ec9a
32,286
ipynb
Jupyter Notebook
VacationPy/VacationPy.ipynb
angelaphuynh/python-api-challenge
f67393a5c967876e65daf1e004b7510aea585e70
[ "ADSL" ]
null
null
null
VacationPy/VacationPy.ipynb
angelaphuynh/python-api-challenge
f67393a5c967876e65daf1e004b7510aea585e70
[ "ADSL" ]
null
null
null
VacationPy/VacationPy.ipynb
angelaphuynh/python-api-challenge
f67393a5c967876e65daf1e004b7510aea585e70
[ "ADSL" ]
null
null
null
31.652941
183
0.357678
[ [ [ "# VacationPy\n----\n\n#### Note\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.", "_____no_output_____" ] ], [ [ "# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport gmaps\nimport os\n\n# Import API key\nfrom api_keys import g_key", "_____no_output_____" ] ], [ [ "### Store Part I results into DataFrame\n* Load the csv exported in Part I to a DataFrame", "_____no_output_____" ] ], [ [ "output_data_file = \"../WeatherPy/output_data/city_weather.csv\"\ndf = pd.read_csv(output_data_file)\nweather_data = df.drop(columns=[\"Unnamed: 0\"])\nweather_data", "_____no_output_____" ] ], [ [ "### Humidity Heatmap\n* Configure gmaps.\n* Use the Lat and Lng as locations and Humidity as the weight.\n* Add Heatmap layer to map.", "_____no_output_____" ] ], [ [ "locations = weather_data[[\"Lat\", \"Lng\"]]\nhumidity = weather_data['Humidity'].astype(float)", "_____no_output_____" ], [ "gmaps.configure(api_key=g_key)\nfig = gmaps.figure()\nheat_layer = gmaps.heatmap_layer(locations, weights=humidity, \n dissipating=False, max_intensity=100,\n point_radius = 1)\n\nfig.add_layer(heat_layer)\nfig", "_____no_output_____" ] ], [ [ "### Create new DataFrame fitting weather criteria\n* Narrow down the cities to fit weather conditions.\n* Drop any rows will null values.", "_____no_output_____" ] ], [ [ "sunny_cities = weather_data.loc[(weather_data[\"Max Temp\"] < 90) & (weather_data[\"Max Temp\"] > 75) & (weather_data['Windspeed'] <= 10) & (weather_data['Cloudiness'] == 0)]\nsunny_cities = sunny_cities.dropna()\n#narrowed_city_df = narrowed_city_df.reset_index()\nsunny_cities", "_____no_output_____" ] ], [ [ "### Hotel Map\n* Store into variable named `hotel_df`.\n* Add a \"Hotel Name\" column to the DataFrame.\n* Set parameters to search for hotels with 5000 meters.\n* Hit the Google Places API for each city's coordinates.\n* Store the first Hotel result into the DataFrame.\n* Plot markers on top of the heatmap.", "_____no_output_____" ] ], [ [ "sunny_cities['Nearest Hotel Name'] = ''\nhotel_df = sunny_cities\nhotel_df.head()", "_____no_output_____" ], [ "params = {\n \"radius\": 5000,\n \"types\": \"lodging\",\n \"key\": g_key}\n\nfor index, row in hotel_df.iterrows(): \n lat = row[\"Lat\"]\n lng = row[\"Lng\"]\n params['location'] = f\"{lat},{lng}\"\n base_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n try:\n response = requests.get(base_url, params=params).json()\n results = response['results']\n hotel_df.loc[index, 'Nearest Hotel Name'] = results[0]['name']\n except (KeyError, IndexError):\n print(\"City not found. Skipping...\")\n \nhotel_df['Nearest Hotel Name'] = hotel_df['Nearest Hotel Name'].replace('', np.nan)\nhotel_df = hotel_df.dropna()\n \nhotel_df", "City not found. 
Skipping...\n" ], [ "# NOTE: Do not change any of the code in this cell\n\n# Using the template add the hotel marks to the heatmap\ninfo_box_template = \"\"\"\n<dl>\n<dt>Name</dt><dd>{Nearest Hotel Name}</dd>\n<dt>City</dt><dd>{City}</dd>\n<dt>Country</dt><dd>{Country}</dd>\n</dl>\n\"\"\"\n# Store the DataFrame Row\n# NOTE: be sure to update with your DataFrame name\nhotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]\nlocations = hotel_df[[\"Lat\", \"Lng\"]]", "_____no_output_____" ], [ "# Add marker layer ontop of heat map\nmarkers = gmaps.marker_layer(locations, info_box_content=hotel_info)\nfig.add_layer(markers)\n\n# Display Map\nfig\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb628619794683f8c23053395ecacba6ff0e921c
45,508
ipynb
Jupyter Notebook
webinar/ez_cobalt_demo.ipynb
keceli/ezHPC
e9fda8d56476a9a0057fcc2c80b199f75f7a50aa
[ "MIT" ]
5
2020-10-28T16:29:32.000Z
2020-12-09T20:35:21.000Z
webinar/ez_cobalt_demo.ipynb
keceli/ezHPC
e9fda8d56476a9a0057fcc2c80b199f75f7a50aa
[ "MIT" ]
null
null
null
webinar/ez_cobalt_demo.ipynb
keceli/ezHPC
e9fda8d56476a9a0057fcc2c80b199f75f7a50aa
[ "MIT" ]
3
2020-10-28T16:38:17.000Z
2020-10-29T16:15:20.000Z
33.169096
745
0.485189
[ [ [ "# The magic commands below allow reflecting the changes in an imported module without restarting the kernel.\n%load_ext autoreload\n%autoreload 2\nimport sys\nprint(f'Python version: {sys.version.splitlines()[0]}')\nprint(f'Environment: {sys.exec_prefix}')", "Python version: 3.7.4 (default, Aug 13 2019, 20:35:49) \nEnvironment: /opt/anaconda3x\n" ] ], [ [ "Shell commands are prefixed with `!`", "_____no_output_____" ] ], [ [ "!pwd", "/gpfs/mira-home/keceli\r\n" ], [ "!echo hello jupyter", "hello jupyter\r\n" ], [ "hello = !echo hello jupyter", "_____no_output_____" ], [ "type(hello)", "_____no_output_____" ] ], [ [ "More info at IPython [docs](https://ipython.readthedocs.io/en/stable/api/generated/IPython.utils.text.html#IPython.utils.text.SList)\n\nAlternatively, try inline help or `?`", "_____no_output_____" ] ], [ [ "from IPython.utils.text import SList", "_____no_output_____" ], [ "help(SList)", "Help on class SList in module IPython.utils.text:\n\nclass SList(builtins.list)\n | SList(iterable=(), /)\n | \n | List derivative with a special access attributes.\n | \n | These are normal lists, but with the special attributes:\n | \n | * .l (or .list) : value as list (the list itself).\n | * .n (or .nlstr): value as a string, joined on newlines.\n | * .s (or .spstr): value as a string, joined on spaces.\n | * .p (or .paths): list of path objects (requires path.py package)\n | \n | Any values which require transformations are computed only once and\n | cached.\n | \n | Method resolution order:\n | SList\n | builtins.list\n | builtins.object\n | \n | Methods defined here:\n | \n | fields(self, *fields)\n | Collect whitespace-separated fields from string list\n | \n | Allows quick awk-like usage of string lists.\n | \n | Example data (in var a, created by 'a = !ls -l')::\n | \n | -rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog\n | drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython\n | \n | * ``a.fields(0)`` is ``['-rwxrwxrwx', 'drwxrwxrwx+']``\n | * ``a.fields(1,0)`` is ``['1 -rwxrwxrwx', '6 drwxrwxrwx+']``\n | (note the joining by space).\n | * ``a.fields(-1)`` is ``['ChangeLog', 'IPython']``\n | \n | IndexErrors are ignored.\n | \n | Without args, fields() just split()'s the strings.\n | \n | get_list(self)\n | \n | get_nlstr(self)\n | \n | get_paths(self)\n | \n | get_spstr(self)\n | \n | grep(self, pattern, prune=False, field=None)\n | Return all strings matching 'pattern' (a regex or callable)\n | \n | This is case-insensitive. 
If prune is true, return all items\n | NOT matching the pattern.\n | \n | If field is specified, the match must occur in the specified\n | whitespace-separated field.\n | \n | Examples::\n | \n | a.grep( lambda x: x.startswith('C') )\n | a.grep('Cha.*log', prune=1)\n | a.grep('chm', field=-1)\n | \n | sort(self, field=None, nums=False)\n | sort by specified fields (see fields())\n | \n | Example::\n | \n | a.sort(1, nums = True)\n | \n | Sorts a by second field, in numerical order (so that 21 > 3)\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n | \n | l\n | \n | list\n | \n | n\n | \n | nlstr\n | \n | p\n | \n | paths\n | \n | s\n | \n | spstr\n | \n | ----------------------------------------------------------------------\n | Methods inherited from builtins.list:\n | \n | __add__(self, value, /)\n | Return self+value.\n | \n | __contains__(self, key, /)\n | Return key in self.\n | \n | __delitem__(self, key, /)\n | Delete self[key].\n | \n | __eq__(self, value, /)\n | Return self==value.\n | \n | __ge__(self, value, /)\n | Return self>=value.\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | __getitem__(...)\n | x.__getitem__(y) <==> x[y]\n | \n | __gt__(self, value, /)\n | Return self>value.\n | \n | __iadd__(self, value, /)\n | Implement self+=value.\n | \n | __imul__(self, value, /)\n | Implement self*=value.\n | \n | __init__(self, /, *args, **kwargs)\n | Initialize self. See help(type(self)) for accurate signature.\n | \n | __iter__(self, /)\n | Implement iter(self).\n | \n | __le__(self, value, /)\n | Return self<=value.\n | \n | __len__(self, /)\n | Return len(self).\n | \n | __lt__(self, value, /)\n | Return self<value.\n | \n | __mul__(self, value, /)\n | Return self*value.\n | \n | __ne__(self, value, /)\n | Return self!=value.\n | \n | __repr__(self, /)\n | Return repr(self).\n | \n | __reversed__(self, /)\n | Return a reverse iterator over the list.\n | \n | __rmul__(self, value, /)\n | Return value*self.\n | \n | __setitem__(self, key, value, /)\n | Set self[key] to value.\n | \n | __sizeof__(self, /)\n | Return the size of the list in memory, in bytes.\n | \n | append(self, object, /)\n | Append object to the end of the list.\n | \n | clear(self, /)\n | Remove all items from list.\n | \n | copy(self, /)\n | Return a shallow copy of the list.\n | \n | count(self, value, /)\n | Return number of occurrences of value.\n | \n | extend(self, iterable, /)\n | Extend list by appending elements from the iterable.\n | \n | index(self, value, start=0, stop=9223372036854775807, /)\n | Return first index of value.\n | \n | Raises ValueError if the value is not present.\n | \n | insert(self, index, object, /)\n | Insert object before index.\n | \n | pop(self, index=-1, /)\n | Remove and return item at index (default last).\n | \n | Raises IndexError if list is empty or index is out of range.\n | \n | remove(self, value, /)\n | Remove first occurrence of value.\n | \n | Raises ValueError if the value is not present.\n | \n | reverse(self, /)\n | Reverse *IN PLACE*.\n | \n | ----------------------------------------------------------------------\n | Static methods inherited from builtins.list:\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. 
See help(type) for accurate signature.\n | \n | ----------------------------------------------------------------------\n | Data and other attributes inherited from builtins.list:\n | \n | __hash__ = None\n\n" ], [ "SList??", "_____no_output_____" ], [ "!catt", "/bin/sh: catt: command not found\r\n" ], [ "readme = !cat /home/keceli/README", "_____no_output_____" ], [ "print(readme)", "['ALCF Mira uses SoftEnv for managing your environment. ', 'A default .soft has been created for you. ', '', 'For more information on SoftEnv, please see:', '', ' man softenv-intro', '', 'Though the default ALCF environment is enabled for you already in your .soft,', 'via @default, you will need to choose a set of MPI wrappers as no default is', 'available. For more information on the available choices, please see:', '', ' https://www.alcf.anl.gov/user-guides/overview-how-compile-and-link', '', 'Please edit your .soft file to choose an appropriate set of MPI wrappers for', 'your purposes.', '', 'Theta is using modules', '', 'myquota shows disk usage and limits for home', 'myprojectquotas show allocations for projects']\n" ], [ "files = !ls ./ezHPC/", "_____no_output_____" ], [ "files", "_____no_output_____" ], [ "files = !ls -l ./ezHPC/\nfiles", "_____no_output_____" ], [ "files.fields(0)", "_____no_output_____" ], [ "files.fields(-1)", "_____no_output_____" ], [ "pyvar = 'demo'\n!echo $pyvar\n!echo $HOSTNAME\n!echo $(whoami)\n!echo ~", "demo\njupyter02.mcp.alcf.anl.gov\nkeceli\n/home/keceli\n" ] ], [ [ "* So far we have used basic linux commands. \n* Now, we will demonstrate job submission on JupyterHub.\n* Job submission on Theta is handled with [Cobalt](https://trac.mcs.anl.gov/projects/cobalt/).\n* We need to write an executable script to submit a job.\n* Check ALCF Theta [documentation](https://alcf.anl.gov/support-center/theta/submit-job-theta) for more details.\n* \"Best Practices for Queueing and Running Jobs on Theta\" [webinar](https://alcf.anl.gov/events/best-practices-queueing-and-running-jobs-theta)", "_____no_output_____" ] ], [ [ "jobscript=\"\"\"#!/bin/bash -x \naprun -n 1 -N 1 echo hello jupyter\"\"\"\n!echo -e \"$jobscript\" > testjob.sh\n!chmod u+x testjob.sh\n!cat testjob.sh", "#!/bin/bash -x \r\naprun -n 1 -N 1 echo hello jupyter\r\n" ], [ "!echo -e \"#!/bin/bash -x \\n aprun -n 1 -N 1 echo hello jupyter\" > testjob.sh\n!chmod u+x testjob.sh\n!cat testjob.sh", "#!/bin/bash -x \r\n aprun -n 1 -N 1 echo hello jupyter\r\n" ], [ "#!man qsub\n!qsub -n 1 -t 10 -A datascience -q debug-flat-quad testjob.sh ", "Job routed to queue \"debug-flat-quad\".\nMemory mode set to flat quad for queue debug-flat-quad\nNo handlers could be found for logger \"Proxy\"\n<Fault 1001: \"The limit of 1 jobs per user in the 'debug-flat-quad' queue has been reached\\n\">\n" ], [ "# Earlier job id: 475482\n!qstat -u `whoami`", "JobID User WallTime Nodes State Location \r\n====================================================\r\n475782 keceli 00:10:00 1 running 23 \r\n" ], [ "jobid = 475482\n!ls \"$jobid\".*", "475482.cobaltlog 475482.error\t475482.output\r\n" ], [ "!head \"$jobid\".*", "==> 475482.cobaltlog <==\r\nJobid: 475482\r\nqsub -n 1 -t 10 -A datascience -q debug-flat-quad testjob.sh\r\nTue Oct 27 04:19:48 2020 +0000 (UTC) submitted with cwd set to: /gpfs/mira-home/keceli\r\njobid 475482 submitted from terminal /dev/pts/4\r\n2020-10-27 04:20:35,966 INFO: Starting Resource_Prologue\r\n2020-10-27 04:20:35,968 DEBUG: no enabling attribute found\r\n2020-10-27 04:20:35,968 INFO: Resource_Prologue finished with exit 
code 0\r\nTue Oct 27 04:20:58 2020 +0000 (UTC) \r\nTue Oct 27 04:20:58 2020 +0000 (UTC) Command: '/gpfs/mira-home/keceli/testjob.sh'\r\nTue Oct 27 04:20:58 2020 +0000 (UTC) \r\n\r\n==> 475482.error <==\r\n+ aprun -n 1 -N 1 echo hello jupyter\r\n\r\n==> 475482.output <==\r\nhello jupyter\r\nApplication 21737865 resources: utime ~0s, stime ~1s, Rss ~7164, inblocks ~268, outblocks ~0\r\n" ], [ "# %load https://raw.githubusercontent.com/keceli/ezHPC/main/ez_cobalt.py\n#%%writefile ezHPC/ez_cobalt.py #Uncomment to write the file\n#%load https://raw.githubusercontent.com/keceli/ezHPC/main/ez_theta.py #Uncomment to load the file\ndef qstat(user='', jobid='', \n header='JobID:User:Score:WallTime:RunTime:Nodes:Queue:Est_Start_Time',\n extra='',\n verbose=False):\n \"\"\"\n Query about jobs submitted to queue manager with `qstat`.\n Parameters:\n ------------\n user: str, username\n jobid: str, cobalt job id, if more than one, separate with a space\n header: str, customize info using headers \n other header options: QueuedTime:TimeRemaining:State:Location:Mode:Procs:Preemptable:Index\n \"\"\"\n import os\n import getpass\n cmd = ''\n if jobid:\n cmd = f'--header={header} {jobid}'\n else:\n if user == '':\n user = getpass.getuser() #user = '$(whoami)'\n cmd = f'-u {user} --header={header}'\n elif user.lower() == 'all':\n cmd = f'--header={header}'\n else:\n cmd = f'-u {user} --header={header}'\n if verbose:\n cmd = f'qstat -f -l {cmd}'\n else:\n cmd = f'qstat {cmd}'\n if extra:\n cmd += ' ' + extra\n print(f'Running...\\n {cmd}\\n')\n stream = os.popen(cmd).read()\n if stream:\n print(stream)\n else:\n print('No active jobs')\n return\n\ndef i_qstat():\n \"\"\"\n Query about jobs submitted to queue manager with `qstat`.\n \"\"\"\n from ipywidgets import interact_manual, widgets\n import getpass\n im = interact_manual(qstat, user=getpass.getuser())\n app_button = im.widget.children[5]\n app_button.description = 'qstat'\n return\n\ndef qdel(jobid=''):\n \"\"\"\n Delete job(s) with the given id(s).\n \"\"\"\n from subprocess import Popen, PIPE\n cmd = f'qdel {jobid}'\n process = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)\n out, err = process.communicate() \n print(f'stdout: {out}')\n print(f'stderr: {err}')\n return \n\ndef i_qdel():\n \"\"\"\n Delete job(s) with the given id(s).\n \"\"\"\n from ipywidgets import interact_manual, widgets\n im = interact_manual(qdel)\n app_button = im.widget.children[1]\n app_button.description = 'qdel'\n return\n\ndef i_show_logs(job_prefix):\n \"\"\"\n Show the output, error, and Cobalt log files for the given job prefix in tabs.\n \"\"\"\n from ipywidgets import widgets, Layout\n from IPython.display import display, clear_output\n from os.path import isfile\n outfile = f'{job_prefix}.output'\n errfile = f'{job_prefix}.error' \n logfile = f'{job_prefix}.cobaltlog'\n if (isfile(outfile)):\n with open(outfile, 'r') as f:\n out = f.read()\n with open(errfile, 'r') as f:\n err = f.read()\n with open(logfile, 'r') as f:\n log = f.read()\n children = [widgets.Textarea(value=val, layout=Layout(flex= '1 1 auto', width='100%',height='400px')) \n for name,val in [(outfile,out), (errfile,err), (logfile,log)]]\n tab = widgets.Tab(children=children,layout=Layout(flex= '1 1 auto', width='100%',height='auto'))\n #ow = widgets.Textarea(value=out,description=outfile)\n #ew = widgets.Textarea(value=err,description=errfile)\n #lw = widgets.Textarea(value=log,description=logfile)\n tab.set_title(0, outfile)\n tab.set_title(1, errfile)\n tab.set_title(2, logfile)\n display(tab)\n return\n\ndef parse_cobaltlog(prefix='', verbose=True):\n \"\"\"\n Return a dictionary with the content parsed from 
<prefix>.cobaltlog file\n \"\"\"\n from os.path import isfile\n from dateutil.parser import parse\n from pprint import pprint\n logfile = f'{prefix}.cobaltlog'\n d = {}\n if isfile(logfile):\n with open(logfile, 'r') as f: \n lines = f.readlines()\n for line in lines:\n if line.startswith('Jobid'):\n jobid = line.split()[-1].strip()\n d['jobid'] = jobid\n elif line.startswith('qsub'):\n cmd = line.strip()\n d['qsub_cmd'] = cmd\n elif 'submitted with cwd set to' in line:\n d['work_dir'] = line.split()[-1].strip()\n d['submit_time'] = parse(line.split('submitted')[0].strip())\n elif 'INFO: Starting Resource_Prologue' in line:\n d['init_time'] = parse(line.split('INFO:')[0].strip())\n d['queue_time'] = d['init_time'] - d['submit_time'].replace(tzinfo=None)\n d['queue_seconds'] = d['queue_time'].seconds\n elif 'Command:' in line:\n d['script'] = line.split(':')[-1].strip()\n d['start_time'] = parse(line.split('Command:')[0].strip())\n d['boot_time'] = d['start_time'].replace(tzinfo=None) - d['init_time']\n d['boot_seconds'] = d['boot_time'].seconds\n elif 'COBALT_PARTCORES' in line:\n d['partcores'] = line.split('=')[-1].strip()\n elif 'SHELL=' in line:\n d['shell'] = line.split('=')[-1].strip()\n elif 'COBALT_PROJECT' in line:\n d['project'] = line.split('=')[-1].strip()\n elif 'COBALT_PARTNAME' in line:\n d['partname'] = line.split('=')[-1].strip()\n elif 'LOGNAME=' in line:\n d['logname'] = line.split('=')[-1].strip()\n elif 'USER=' in line:\n d['user'] = line.split('=')[-1].strip()\n elif 'COBALT_STARTTIME' in line:\n d['cobalt_starttime'] = line.split('=')[-1].strip()\n elif 'COBALT_ENDTIME' in line:\n d['cobalt_endtime'] = line.split('=')[-1].strip()\n elif 'COBALT_PARTSIZE' in line:\n d['partsize'] = line.split('=')[-1].strip()\n elif 'HOME=' in line:\n d['home'] = line.split('=')[-1].strip()\n elif 'COBALT_JOBSIZE' in line:\n d['jobsize'] = line.split('=')[-1].strip()\n elif 'COBALT_QUEUE' in line:\n d['queue'] = line.split('=')[-1].strip()\n elif 'Info: stdin received from' in line:\n d['stdin'] = line.split()[-1].strip()\n elif 'Info: stdout sent to' in line:\n d['stdout'] = line.split()[-1].strip()\n elif 'Info: stderr sent to' in line:\n d['stderr'] = line.split()[-1].strip()\n elif 'with an exit code' in line:\n d['exit_code'] = line.split(';')[-1].split()[-1]\n d['end_time'] = parse(line.split('Info:')[0].strip())\n d['job_time'] = d['end_time'] - d['start_time']\n d['wall_seconds'] = d['job_time'].seconds \n else:\n print(f'{logfile} is not found.')\n if verbose:\n pprint(d)\n return d\n\ndef print_cobalt_times(prefix=''):\n \"\"\"\n Print timings from Cobalt logfile\n \"\"\"\n d = parse_cobaltlog(prefix=prefix, verbose=False)\n for key, val in d.items():\n if '_time' in key or 'seconds' in key:\n print(f'{key}: {val}')\n\ndef get_job_script(nodes=1, ranks_per_node=1, affinity='-d 1 -j 1 --cc depth', command='',verbose=True):\n \"\"\"\n Returns Cobalt job script with the given parameters\n TODO: add rules for affinity\n \"\"\"\n script = '#!/bin/bash -x \\n'\n ranks = ranks_per_node * nodes\n script += f'aprun -n {ranks} -N {ranks_per_node} {affinity} {command}'\n if verbose: print(script)\n return script\n\ndef i_get_job_script_manual():\n from ipywidgets import widgets, Layout, interact_manual\n from IPython.display import display, clear_output\n from os.path import isfile\n inodes = widgets.BoundedIntText(value=1, min=1, max=4394, step=1, description='nodes', disabled=False)\n iranks_per_node = widgets.BoundedIntText(value=1, min=1, max=64, step=1, 
description='ranks/node', disabled=False)\n im = interact_manual(get_job_script, nodes=inodes, ranks_per_node=iranks_per_node)\n get_job_script_button = im.widget.children[4]\n get_job_script_button.description = 'get_job_script'\n return\n\ndef i_get_job_script():\n from ipywidgets import widgets, Layout, interact_manual\n from IPython.display import display, clear_output\n from os.path import isfile\n inodes = widgets.BoundedIntText(value=1, min=1, max=4394, step=1, description='nodes', disabled=False)\n iranks_per_node = widgets.BoundedIntText(value=1, min=1, max=64, step=1, description='ranks/node', disabled=False)\n iaffinity = widgets.Text(value='-d 1 -j 1 --cc depth',description='affinity')\n icommand = widgets.Text(value='',description='executable and args')\n out = widgets.interactive_output(get_job_script, {'nodes': inodes, \n 'ranks_per_node': iranks_per_node,\n 'affinity': iaffinity,\n 'command': icommand})\n box = widgets.VBox([widgets.VBox([inodes, iranks_per_node, iaffinity, icommand]), out])\n display(box)\n return\n\ndef validate_theta_job(queue='', nodes=1, wall_minutes=10):\n \"\"\"\n Return True if given <queue> <nodes> <wall_minutes> are valid for a job on Theta,\n Return False and print the reason otherwise.\n See https://alcf.anl.gov/support-center/theta/job-scheduling-policy-theta\n Parameters\n ----------\n queue: str, queue name, can be: 'default', 'debug-cache-quad', 'debug-flat-quad', 'backfill'\n nodes: int, Number of nodes, can be an integer from 1 to 4096 depending on the queue.\n wall_minutes: int, max wall time in minutes, depends on the queue and the number of nodes, max 1440 minutes\n \"\"\"\n isvalid = True\n if queue.startswith('debug'):\n if wall_minutes > 60:\n print(f'Max wall time for {queue} queue is 60 minutes')\n isvalid = False\n if nodes > 8:\n print(f'Max number of nodes for {queue} queue is 8')\n isvalid = False\n else:\n if nodes < 128:\n print(f'Min number of nodes for {queue} queue is 128')\n isvalid = False\n else:\n if wall_minutes < 30:\n print(f'Min wall time for {queue} queue is 30 minutes') \n isvalid = False\n if nodes < 256 and wall_minutes > 180:\n print(f'Max wall time for {queue} queue is 180 minutes') \n isvalid = False\n elif nodes < 384 and wall_minutes > 360:\n print(f'Max wall time for {queue} queue is 360 minutes') \n isvalid = False\n elif nodes < 640 and wall_minutes > 540:\n print(f'Max wall time for {queue} queue is 540 minutes') \n isvalid = False\n elif nodes < 902 and wall_minutes > 720:\n print(f'Max wall time for {queue} queue is 720 minutes') \n isvalid = False\n elif wall_minutes > 1440:\n print('Max wall time on Theta is 1440 minutes') \n isvalid = False\n return isvalid\n \ndef qsub(project='',\n script='',\n script_file='',\n queue='debug-cache-quad',\n nodes=1,\n wall_minutes=30,\n attrs='ssds=required:ssd_size=128',\n workdir='',\n jobname='',\n stdin='',\n stdout=''):\n \"\"\"\n Submits a job to the queue with the given parameters.\n Returns Cobalt Job Id if submitted successfully.\n Returns 0 otherwise.\n Parameters\n ----------\n project: str, name of the project to be charged\n queue: str, queue name, can be: 'default', 'debug-cache-quad', 'debug-flat-quad', 'backfill'\n nodes: int, Number of nodes, can be an integer from 1 to 4096 depending on the queue.\n wall_minutes: int, max wall time in minutes, depends on the queue and the number of nodes, max 1440 minutes\n \"\"\"\n import os\n import stat\n import time\n from subprocess import Popen, PIPE\n from os.path import 
isfile\n\n valid = validate_theta_job(queue=queue, nodes=nodes, wall_minutes=wall_minutes)\n if not valid:\n print('Job is not valid, change queue, nodes, or wall_minutes.')\n return 0\n with open(script_file, 'w') as f:\n f.write(script)\n time.sleep(1)\n exists = isfile(script_file)\n if exists:\n print(f'Created {script_file} on {os.path.abspath(script_file)}.')\n st = os.stat(script_file)\n os.chmod(script_file, st.st_mode | stat.S_IEXEC)\n time.sleep(1)\n cmd = f'qsub -A {project} -q {queue} -n {nodes} -t {wall_minutes} --attrs {attrs} '\n if workdir:\n cmd += f' --cwd {workdir}'\n if jobname:\n cmd += f' --jobname {jobname}'\n if stdin:\n cmd += f' -i {stdin}'\n if stdout:\n cmd += f' -o {stdout}'\n cmd += f' {script_file}'\n print(f'Submitting: \\n {cmd} ...\\n')\n process = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)\n out, err = process.communicate()\n print(f'job id: {out.decode(\"utf-8\")}')\n print(f'stderr: {err.decode(\"utf-8\")}')\n return out.decode(\"utf-8\")\n\ndef i_qsub():\n \"\"\"\n Submits a job to the queue with the given parameters.\n \"\"\"\n from ipywidgets import widgets, Layout, interact_manual\n from IPython.display import display, clear_output\n from os.path import isfile\n inodes = widgets.BoundedIntText(value=1, min=1, max=4394, step=1, description='nodes', disabled=False)\n iranks_per_node = widgets.BoundedIntText(value=1, min=1, max=64, step=1, description='rank/nodes', disabled=False)\n iqueue = widgets.Dropdown(options=['debug-flat-quad','debug-cache-quad','default', 'backfill'],\n description='queue',\n value='debug-cache-quad')\n iwall_minutes = widgets.BoundedIntText(value=10, min=10, max=1440, step=10, description='wall minutes', disabled=False)\n\n iscript = widgets.Textarea(value='#!/bin/bash -x \\n',\n description='job script',\n layout=Layout(flex= '0 0 auto', width='auto',height='200px'))\n iscript_file= widgets.Text(value='',description='job script file name')\n iproject= widgets.Text(value='',description='project')\n isave = widgets.Checkbox(value=False,description='save', indent=True)\n isubmit = widgets.Button(\n value=False,\n description='submit',\n disabled=False,\n button_style='success',\n tooltip='submit job',\n icon='')\n output = widgets.Output()\n display(iproject, inodes, iqueue, iwall_minutes, iscript_file, iscript, isubmit, output)\n jobid = ''\n def submit_clicked(b):\n with output:\n clear_output()\n jobid = qsub(project=iproject.value,\n script=iscript.value,\n script_file=iscript_file.value,\n queue=iqueue.value,\n nodes=inodes.value,\n wall_minutes=iwall_minutes.value)\n isubmit.on_click(submit_clicked)\n return\n", "_____no_output_____" ], [ "qstat??", "_____no_output_____" ], [ "i_qstat()", "_____no_output_____" ], [ "i_get_job_script()", "_____no_output_____" ], [ "i_qsub()", "_____no_output_____" ], [ "jobid=475487\ni_show_logs(job_prefix=jobid)", "_____no_output_____" ], [ "jobinfo = parse_cobaltlog(prefix=jobid)", "{'boot_seconds': 22,\n 'boot_time': datetime.timedelta(seconds=22, microseconds=22000),\n 'cobalt_endtime': '1603774625',\n 'cobalt_starttime': '1603774025',\n 'end_time': datetime.datetime(2020, 10, 27, 4, 48, 18, tzinfo=tzlocal()),\n 'exit_code': 'removal',\n 'home': '/home/keceli',\n 'init_time': datetime.datetime(2020, 10, 27, 4, 47, 17, 978000),\n 'job_time': datetime.timedelta(seconds=38),\n 'jobid': '475487',\n 'jobsize': '1',\n 'logname': 'keceli',\n 'partcores': '64',\n 'partname': '3828',\n 'partsize': '1',\n 'project': 'datascience',\n 'qsub_cmd': 'qsub -A datascience -q debug-cache-quad -n 1 -t 
10 --attrs '\n 'ssds=required:ssd_size=128 test_i_qsub.sh',\n 'queue': 'debug-cache-quad',\n 'queue_seconds': 36,\n 'queue_time': datetime.timedelta(seconds=36, microseconds=978000),\n 'script': "'/gpfs/mira-home/keceli/test_i_qsub.sh'",\n 'shell': '/bin/bash',\n 'start_time': datetime.datetime(2020, 10, 27, 4, 47, 40, tzinfo=tzlocal()),\n 'stderr': '/gpfs/mira-home/keceli/475487.error',\n 'stdin': '/dev/null',\n 'stdout': '/gpfs/mira-home/keceli/475487.output',\n 'submit_time': datetime.datetime(2020, 10, 27, 4, 46, 41, tzinfo=tzlocal()),\n 'user': 'keceli',\n 'wall_seconds': 38,\n 'work_dir': '/gpfs/mira-home/keceli'}\n" ], [ "print_cobalt_times(prefix=jobid)", "submit_time: 2020-10-27 04:46:41+00:00\ninit_time: 2020-10-27 04:47:17.978000\nqueue_time: 0:00:36.978000\nqueue_seconds: 36\nstart_time: 2020-10-27 04:47:40+00:00\nboot_time: 0:00:22.022000\nboot_seconds: 22\nend_time: 2020-10-27 04:48:18+00:00\njob_time: 0:00:38\nwall_seconds: 38\n" ] ], [ [ "* There is about 1 minute of overhead for a single-node job, and more for jobs with larger node counts. ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb6287340e686937cdcaaff3399d004d30ea2c0f
3,226
ipynb
Jupyter Notebook
gs_quant/documentation/02_pricing_and_risk/00_instruments_and_measures/examples/00_instrument_basics/0000_view-trade-properties.ipynb
Harkirat155/gs-quant
aa9a1b3632332a955749d6d0cf8825c5f3c6013e
[ "Apache-2.0" ]
4
2021-05-11T14:35:53.000Z
2022-03-14T03:52:34.000Z
gs_quant/documentation/02_pricing_and_risk/00_instruments_and_measures/examples/00_instrument_basics/0000_view-trade-properties.ipynb
Harkirat155/gs-quant
aa9a1b3632332a955749d6d0cf8825c5f3c6013e
[ "Apache-2.0" ]
null
null
null
gs_quant/documentation/02_pricing_and_risk/00_instruments_and_measures/examples/00_instrument_basics/0000_view-trade-properties.ipynb
Harkirat155/gs-quant
aa9a1b3632332a955749d6d0cf8825c5f3c6013e
[ "Apache-2.0" ]
null
null
null
35.065217
1,013
0.614383
[ [ [ "from gs_quant.common import PayReceive, Currency\nfrom gs_quant.instrument import IRSwap\nfrom gs_quant.session import Environment, GsSession", "_____no_output_____" ], [ "# external users should substitute their client id and secret; please skip this step if using internal jupyterhub\nGsSession.use(Environment.PROD, client_id=None, client_secret=None, scopes=('run_analytics',))", "_____no_output_____" ], [ "#View IRSwap instrument properties\nIRSwap.properties()", "_____no_output_____" ], [ "#When you create an instance of IRSwap, properties can be set\nmy_swap = IRSwap(PayReceive.Pay, '5y', Currency.USD)", "_____no_output_____" ], [ "#View these property values by calling 'as_dict' on the swap instance\nprint(my_swap.as_dict())", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb62898eb8e3cddabe141738f4936ae2378999ce
112,622
ipynb
Jupyter Notebook
Frequentist Inference Case Study - Part A (3).ipynb
JasonCaldwellMBA/Frequentist_Case_Study
c8e89370f481985bfd366ccc76c6d00d640a88fd
[ "MIT" ]
null
null
null
Frequentist Inference Case Study - Part A (3).ipynb
JasonCaldwellMBA/Frequentist_Case_Study
c8e89370f481985bfd366ccc76c6d00d640a88fd
[ "MIT" ]
null
null
null
Frequentist Inference Case Study - Part A (3).ipynb
JasonCaldwellMBA/Frequentist_Case_Study
c8e89370f481985bfd366ccc76c6d00d640a88fd
[ "MIT" ]
null
null
null
68.839853
14,387
0.77737
[ [ [ "# Frequentist Inference Case Study - Part A ", "_____no_output_____" ], [ "## 1. Learning objectives", "_____no_output_____" ], [ "Welcome to part A of the Frequentist inference case study! The purpose of this case study is to help you apply the concepts associated with Frequentist inference in Python. Frequentist inference is the process of deriving conclusions about an underlying distribution via the observation of data. In particular, you'll practice writing Python code to apply the following statistical concepts: \n* the _z_-statistic\n* the _t_-statistic\n* the difference and relationship between the two\n* the Central Limit Theorem, including its assumptions and consequences\n* how to estimate the population mean and standard deviation from a sample\n* the concept of a sampling distribution of a test statistic, particularly for the mean\n* how to combine these concepts to calculate a confidence interval", "_____no_output_____" ], [ "## Prerequisites", "_____no_output_____" ], [ "To be able to complete this notebook, you are expected to have a basic understanding of:\n* what a random variable is (p.400 of Professor Spiegelhalter's *The Art of Statistics, hereinafter AoS*)\n* what a population, and a population distribution, are (p. 397 of *AoS*)\n* a high-level sense of what the normal distribution is (p. 394 of *AoS*)\n* what the t-statistic is (p. 275 of *AoS*)\n\nHappily, these should all be concepts with which you are reasonably familiar after having read ten chapters of Professor Spiegelhalter's book, *The Art of Statistics*.\n\nWe'll try to relate the concepts in this case study back to page numbers in *The Art of Statistics* so that you can focus on the Python aspects of this case study. The second part (part B) of this case study will involve another, more real-world application of these tools. ", "_____no_output_____" ], [ "For this notebook, we will use data sampled from a known normal distribution. This allows us to compare our results with theoretical expectations.", "_____no_output_____" ], [ "## 2. An introduction to sampling from the normal distribution", "_____no_output_____" ], [ "First, let's explore the ways we can generate the normal distribution. While there's a fair amount of interest in [sklearn](https://scikit-learn.org/stable/) within the machine learning community, you're likely to have heard of [scipy](https://docs.scipy.org/doc/scipy-0.15.1/reference/index.html) if you're coming from the sciences. For this assignment, you'll use [scipy.stats](https://docs.scipy.org/doc/scipy-0.15.1/reference/tutorial/stats.html) to complete your work. \n\nThis assignment will require some digging around and getting your hands dirty (your learning is maximized that way)! You should have the research skills and the tenacity to do these tasks independently, but if you struggle, reach out to your immediate community and your mentor for help. ", "_____no_output_____" ] ], [ [ "from scipy.stats import norm\nfrom scipy.stats import t\nimport numpy as np\nfrom numpy.random import seed\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "__Q1:__ Call up the documentation for the `norm` function imported above. (Hint: that documentation is [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html)). What is the second listed method?\n", "_____no_output_____" ], [ "__A:__ Probability density function. 
\npdf(x, loc=0, scale=1) ", "_____no_output_____" ], [ "__Q2:__ Use the method that generates random variates to draw five samples from the standard normal distribution. ", "_____no_output_____" ], [ "__A:__ Random variates. \nrvs(loc=0, scale=1, size=1, random_state=None)", "_____no_output_____" ] ], [ [ "seed(47)\n# draw five samples here\nsamples = norm.rvs(size=5)\nprint(samples)", "[-0.84800948 1.30590636 0.92420797 0.6404118 -1.05473698]\n" ] ], [ [ "__Q3:__ What is the mean of this sample? Is it exactly equal to the value you expected? Hint: the sample was drawn from the standard normal distribution. If you want a reminder of the properties of this distribution, check out p. 85 of *AoS*. ", "_____no_output_____" ], [ "__A:__ About 0.19. \nThis is not exactly the population mean of 0, but it is within expectation: the standard normal has standard deviation 1, so the mean of 5 samples varies with standard deviation $1/\\sqrt{5} \\approx 0.45$, and 0.19 falls well within that spread.", "_____no_output_____" ] ], [ [ "# Calculate and print the mean here, hint: use np.mean()\nnp.mean(samples)", "_____no_output_____" ] ], [ [ "__Q4:__ What is the standard deviation of these numbers? Calculate this manually here as $\\sqrt{\\frac{\\sum_i(x_i - \\bar{x})^2}{n}}$ (This is just the definition of **standard deviation** given by Professor Spiegelhalter on p.403 of *AoS*). Hint: np.sqrt() and np.sum() will be useful here and remember that NumPy supports [broadcasting](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).", "_____no_output_____" ], [ "__A:__ Approximately 0.96.", "_____no_output_____" ] ], [ [ "np.sqrt(np.sum((samples - samples.mean()) ** 2) / len(samples))", "_____no_output_____" ] ], [ [ "Here we have calculated the actual standard deviation of a small data set (of size 5). But in this case, this small data set is actually a sample from our larger (infinite) population. In this case, the population is infinite because we could keep drawing our normal random variates until our computers die! \n\nIn general, the sample mean we calculate will not be equal to the population mean (as we saw above). A consequence of this is that the sum of squares of the deviations from the _population_ mean will be bigger than the sum of squares of the deviations from the _sample_ mean. In other words, the sum of squares of the deviations from the _sample_ mean is too small to give an unbiased estimate of the _population_ variance. An example of this effect is given [here](https://en.wikipedia.org/wiki/Bessel%27s_correction#Source_of_bias). Scaling our estimate of the variance by the factor $n/(n-1)$ gives an unbiased estimator of the population variance. This factor is known as [Bessel's correction](https://en.wikipedia.org/wiki/Bessel%27s_correction). The consequence of this is that the $n$ in the denominator is replaced by $n-1$.\n\nYou can see Bessel's correction reflected in Professor Spiegelhalter's definition of **variance** on p. 405 of *AoS*.\n\n__Q5:__ If all we had to go on was our five samples, what would be our best estimate of the population standard deviation? Use Bessel's correction ($n-1$ in the denominator), thus $\\sqrt{\\frac{\\sum_i(x_i - \\bar{x})^2}{n-1}}$.", "_____no_output_____" ], [ "__A:__ Approximately 1.07.", "_____no_output_____" ] ], [ [ "np.sqrt(np.sum((samples - samples.mean()) ** 2) / (len(samples) - 1))", "_____no_output_____" ] ], [ [ "__Q6:__ Now use numpy's std function to calculate the standard deviation of our random samples. 
Which of the above standard deviations did it return?", "_____no_output_____" ], [ "__A:__ The first (regular) standard deviation calculation.", "_____no_output_____" ] ], [ [ "np.std(samples)", "_____no_output_____" ] ], [ [ "__Q7:__ Consult the documentation for np.std() to see how to apply the correction for estimating the population parameter and verify this produces the expected result.", "_____no_output_____" ], [ "__A:__ The ddof parameter of 1 returns the same result of about 1.07 as above.", "_____no_output_____" ] ], [ [ "# np.std? only returns the help in the Jupyter console.\nhelp(np.std)", "Help on function std in module numpy:\n\nstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=<no value>)\n Compute the standard deviation along the specified axis.\n \n Returns the standard deviation, a measure of the spread of a distribution,\n of the array elements. The standard deviation is computed for the\n flattened array by default, otherwise over the specified axis.\n \n Parameters\n ----------\n a : array_like\n Calculate the standard deviation of these values.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the standard deviation is computed. The\n default is to compute the standard deviation of the flattened array.\n \n .. versionadded:: 1.7.0\n \n If this is a tuple of ints, a standard deviation is performed over\n multiple axes, instead of a single axis or all the axes as before.\n dtype : dtype, optional\n Type to use in computing the standard deviation. For arrays of\n integer type the default is float64, for arrays of float types it is\n the same as the array type.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape as the expected output but the type (of the calculated\n values) will be cast if necessary.\n ddof : int, optional\n Means Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n By default `ddof` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n \n If the default value is passed, then `keepdims` will not be\n passed through to the `std` method of sub-classes of\n `ndarray`, however any non-default value will be. If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n \n Returns\n -------\n standard_deviation : ndarray, see dtype parameter above.\n If `out` is None, return a new array containing the standard deviation,\n otherwise return a reference to the output array.\n \n See Also\n --------\n var, mean, nanmean, nanstd, nanvar\n ufuncs-output-type\n \n Notes\n -----\n The standard deviation is the square root of the average of the squared\n deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.\n \n The average squared deviation is normally calculated as\n ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,\n the divisor ``N - ddof`` is used instead. In standard statistical\n practice, ``ddof=1`` provides an unbiased estimator of the variance\n of the infinite population. ``ddof=0`` provides a maximum likelihood\n estimate of the variance for normally distributed variables. 
The\n standard deviation computed in this function is the square root of\n the estimated variance, so even with ``ddof=1``, it will not be an\n unbiased estimate of the standard deviation per se.\n \n Note that, for complex numbers, `std` takes the absolute\n value before squaring, so that the result is always real and nonnegative.\n \n For floating-point input, the *std* is computed using the same\n precision the input has. Depending on the input data, this can cause\n the results to be inaccurate, especially for float32 (see example below).\n Specifying a higher-accuracy accumulator using the `dtype` keyword can\n alleviate this issue.\n \n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.std(a)\n 1.1180339887498949 # may vary\n >>> np.std(a, axis=0)\n array([1., 1.])\n >>> np.std(a, axis=1)\n array([0.5, 0.5])\n \n In single precision, std() can be inaccurate:\n \n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.std(a)\n 0.45000005\n \n Computing the standard deviation in float64 is more accurate:\n \n >>> np.std(a, dtype=np.float64)\n 0.44999999925494177 # may vary\n\n" ], [ "np.std(samples, ddof=1)", "_____no_output_____" ] ], [ [ "### Summary of section", "_____no_output_____" ], [ "In this section, you've been introduced to the scipy.stats package and used it to draw a small sample from the standard normal distribution. You've calculated the average (the mean) of this sample and seen that this is not exactly equal to the expected population parameter (which we know because we're generating the random variates from a specific, known distribution). You've been introduced to two ways of calculating the standard deviation; one uses $n$ in the denominator and the other uses $n-1$ (Bessel's correction). You've also seen which of these calculations np.std() performs by default and how to get it to generate the other.", "_____no_output_____" ], [ "You use $n$ as the denominator if you want to calculate the standard deviation of a sequence of numbers. You use $n-1$ if you are using this sequence of numbers to estimate the population parameter. This brings us to some terminology that can be a little confusing.\n\nThe population parameter is traditionally written as $\\sigma$ and the sample statistic as $s$. Rather unhelpfully, $s$ is also called the sample standard deviation (using $n-1$) whereas the standard deviation of the sample uses $n$. That's right, we have the sample standard deviation and the standard deviation of the sample and they're not the same thing!\n\nThe sample standard deviation\n\\begin{equation}\ns = \\sqrt{\\frac{\\sum_i(x_i - \\bar{x})^2}{n-1}} \\approx \\sigma,\n\\end{equation}\nis our best (unbiased) estimate of the population parameter ($\\sigma$).\n\nIf your dataset _is_ your entire population, you simply want to calculate the population parameter, $\\sigma$, via\n\\begin{equation}\n\\sigma = \\sqrt{\\frac{\\sum_i(x_i - \\bar{x})^2}{n}}\n\\end{equation}\nas you have complete, full knowledge of your population. In other words, your sample _is_ your population. It's worth noting that we're dealing with what Professor Spiegelhalter describes on p. 92 of *AoS* as a **metaphorical population**: we have all the data, and we act as if the data-point is taken from a population at random. We can think of this population as an imaginary space of possibilities. \n\nIf, however, you have sampled _from_ your population, you only have partial knowledge of the state of your population. 
In this case, the standard deviation of your sample is not an unbiased estimate of the standard deviation of the population, in which case you seek to estimate that population parameter via the sample standard deviation, which uses the $n-1$ denominator.", "_____no_output_____" ], [ "Great work so far! Now let's dive deeper.", "_____no_output_____" ], [ "## 3. Sampling distributions", "_____no_output_____" ], [ "So far we've been dealing with the concept of taking a sample from a population to infer the population parameters. One statistic we calculated for a sample was the mean. As our samples will be expected to vary from one draw to another, so will our sample statistics. If we were to perform repeat draws of size $n$ and calculate the mean of each, we would expect to obtain a distribution of values. This is the sampling distribution of the mean. **The Central Limit Theorem (CLT)** tells us that such a distribution will approach a normal distribution as $n$ increases (the intuitions behind the CLT are covered in full on p. 236 of *AoS*). For the sampling distribution of the mean, the standard deviation of this distribution is given by\n\n\\begin{equation}\n\\sigma_{mean} = \\frac{\\sigma}{\\sqrt n}\n\\end{equation}\n\nwhere $\\sigma_{mean}$ is the standard deviation of the sampling distribution of the mean and $\\sigma$ is the standard deviation of the population (the population parameter).", "_____no_output_____" ], [ "This is important because typically we are dealing with samples from populations and all we know about the population is what we see in the sample. From this sample, we want to make inferences about the population. We may do this, for example, by looking at the histogram of the values and by calculating the mean and standard deviation (as estimates of the population parameters), and so we are intrinsically interested in how these quantities vary across samples. \n\nIn other words, now that we've taken one sample of size $n$ and made some claims about the general population, what if we were to take another sample of size $n$? Would we get the same result? Would we make the same claims about the general population? This brings us to a fundamental question: _when we make some inference about a population based on our sample, how confident can we be that we've got it 'right'?_\n\nWe need to think about **estimates and confidence intervals**: those concepts covered in Chapter 7, p. 189, of *AoS*.", "_____no_output_____" ], [ "Now, the standard normal distribution (with its variance equal to its standard deviation of one) would not be a great illustration of a key point. Instead, let's imagine we live in a town of 50,000 people and we know the height of everyone in this town. We will have 50,000 numbers that tell us everything about our population. 
We'll simulate these numbers now and put ourselves in one particular town, called 'town 47', where the population mean height is 172 cm and population standard deviation is 5 cm.", "_____no_output_____" ] ], [ [ "seed(47)\npop_heights = norm.rvs(172, 5, size=50000)", "_____no_output_____" ], [ "_ = plt.hist(pop_heights, bins=30)\n_ = plt.xlabel('height (cm)')\n_ = plt.ylabel('number of people')\n_ = plt.title('Distribution of heights in entire town population')\n_ = plt.axvline(172, color='r')\n_ = plt.axvline(172+5, color='r', linestyle='--')\n_ = plt.axvline(172-5, color='r', linestyle='--')\n_ = plt.axvline(172+10, color='r', linestyle='-.')\n_ = plt.axvline(172-10, color='r', linestyle='-.')", "_____no_output_____" ] ], [ [ "Now, 50,000 people is rather a lot to chase after with a tape measure. If all you want to know is the average height of the townsfolk, then can you just go out and measure a sample to get a pretty good estimate of the average height?", "_____no_output_____" ] ], [ [ "def townsfolk_sampler(n):\n return np.random.choice(pop_heights, n)", "_____no_output_____" ] ], [ [ "Let's say you go out one day and randomly sample 10 people to measure.", "_____no_output_____" ] ], [ [ "seed(47)\ndaily_sample1 = townsfolk_sampler(10)", "_____no_output_____" ], [ "_ = plt.hist(daily_sample1, bins=10)\n_ = plt.xlabel('height (cm)')\n_ = plt.ylabel('number of people')\n_ = plt.title('Distribution of heights in sample size 10')", "_____no_output_____" ] ], [ [ "The sample distribution doesn't resemble what we take the population distribution to be. What do we get for the mean?", "_____no_output_____" ] ], [ [ "np.mean(daily_sample1)", "_____no_output_____" ] ], [ [ "And if we went out and repeated this experiment?", "_____no_output_____" ] ], [ [ "daily_sample2 = townsfolk_sampler(10)", "_____no_output_____" ], [ "np.mean(daily_sample2)", "_____no_output_____" ] ], [ [ "__Q8:__ Simulate performing this random trial every day for a year, calculating the mean of each daily sample of 10, and plot the resultant sampling distribution of the mean.", "_____no_output_____" ], [ "__A:__ ", "_____no_output_____" ] ], [ [ "seed(47)\n# take your samples here\nfor day in range(365):\n print(f\"Sample for day {day + 1} was {np.mean(townsfolk_sampler(10))}\")", "Sample for day 1 was 173.00937310417513\nSample for day 2 was 170.2661643961573\nSample for day 3 was 174.34598844844118\nSample for day 4 was 170.785406800034\nSample for day 5 was 173.31770470569631\nSample for day 6 was 173.10858686641774\nSample for day 7 was 171.40439332248283\nSample for day 8 was 170.9704617042305\nSample for day 9 was 172.61510636545793\nSample for day 10 was 172.2913740885055\nSample for day 11 was 170.50358941424687\nSample for day 12 was 172.22018481582614\nSample for day 13 was 172.85834358816803\nSample for day 14 was 171.56620891479838\nSample for day 15 was 171.58204113512346\nSample for day 16 was 171.07473473402555\nSample for day 17 was 175.2047218243162\nSample for day 18 was 172.20101905509054\nSample for day 19 was 175.8140325675064\nSample for day 20 was 171.42567364667013\nSample for day 21 was 171.54879166928384\nSample for day 22 was 173.37962048578632\nSample for day 23 was 170.84926519404007\nSample for day 24 was 174.59322186598968\nSample for day 25 was 171.54718475118278\nSample for day 26 was 171.6096336712505\nSample for day 27 was 171.53767794655576\nSample for day 28 was 172.9149498945323\nSample for day 29 was 172.2981516718446\nSample for day 30 was 165.39551194077626\nSample for day 31 was 
169.9597376836921\nSample for day 32 was 173.9465941840398\nSample for day 33 was 172.1342306069537\nSample for day 34 was 171.3984656489666\nSample for day 35 was 171.11161431266052\nSample for day 36 was 173.6267218608726\nSample for day 37 was 169.11050233231748\nSample for day 38 was 169.69609920441803\nSample for day 39 was 172.4816825903941\nSample for day 40 was 172.35465226352488\nSample for day 41 was 170.4018730294428\nSample for day 42 was 172.6410928824817\nSample for day 43 was 171.34876456725738\nSample for day 44 was 172.84629108546204\nSample for day 45 was 175.26564169319403\nSample for day 46 was 168.68677877662915\nSample for day 47 was 173.01832873955627\nSample for day 48 was 169.56393772762289\nSample for day 49 was 172.99035886037572\nSample for day 50 was 175.3707428706809\nSample for day 51 was 171.68166141416253\nSample for day 52 was 172.21351476973385\nSample for day 53 was 173.57719464559077\nSample for day 54 was 172.23443258433025\nSample for day 55 was 171.49321124063263\nSample for day 56 was 175.0569955524844\nSample for day 57 was 169.7489045337734\nSample for day 58 was 170.2576081367393\nSample for day 59 was 173.00527760461273\nSample for day 60 was 169.41958867850704\nSample for day 61 was 171.09210131077157\nSample for day 62 was 174.09652244869528\nSample for day 63 was 173.97372431777853\nSample for day 64 was 170.76960029551344\nSample for day 65 was 173.91299863576833\nSample for day 66 was 172.77281552568883\nSample for day 67 was 171.17248840522046\nSample for day 68 was 172.6754158361887\nSample for day 69 was 174.95950548649049\nSample for day 70 was 174.5280861190028\nSample for day 71 was 169.3587222486768\nSample for day 72 was 172.19628668598872\nSample for day 73 was 173.47675542556266\nSample for day 74 was 171.8867463490586\nSample for day 75 was 171.64766944047537\nSample for day 76 was 172.03472701707668\nSample for day 77 was 171.8514968514924\nSample for day 78 was 173.3504076956295\nSample for day 79 was 175.835465920465\nSample for day 80 was 173.01619729265536\nSample for day 81 was 172.87431639983677\nSample for day 82 was 171.27137361530023\nSample for day 83 was 169.08324493645043\nSample for day 84 was 173.68116250421124\nSample for day 85 was 170.11358709792825\nSample for day 86 was 171.1893750210999\nSample for day 87 was 169.25468169001886\nSample for day 88 was 169.2150994830036\nSample for day 89 was 171.596721889334\nSample for day 90 was 173.77774156427014\nSample for day 91 was 173.03004628460803\nSample for day 92 was 172.34242485010785\nSample for day 93 was 172.9855405060567\nSample for day 94 was 169.43469092853624\nSample for day 95 was 171.77975348011097\nSample for day 96 was 172.64844848584667\nSample for day 97 was 171.56408093054327\nSample for day 98 was 169.95379792250952\nSample for day 99 was 171.12137486338096\nSample for day 100 was 171.73249633181402\nSample for day 101 was 172.0630011932523\nSample for day 102 was 172.834180845258\nSample for day 103 was 172.38388837514353\nSample for day 104 was 170.54584084764085\nSample for day 105 was 171.99296673596194\nSample for day 106 was 173.42344336887487\nSample for day 107 was 170.69610500534776\nSample for day 108 was 173.36937790530678\nSample for day 109 was 174.7423134134954\nSample for day 110 was 171.57490485555303\nSample for day 111 was 171.25352997756042\nSample for day 112 was 173.72474189207932\nSample for day 113 was 172.441773023841\nSample for day 114 was 173.80765705457642\nSample for day 115 was 170.96908036245844\nSample for day 116 
was 170.5646444001596\nSample for day 117 was 171.16932302392036\nSample for day 118 was 171.7865759467937\nSample for day 119 was 174.05858516618719\nSample for day 120 was 171.6143448222104\nSample for day 121 was 174.26791917555542\nSample for day 122 was 172.75750544038792\nSample for day 123 was 169.4482347475428\nSample for day 124 was 172.88259602544014\nSample for day 125 was 173.3483004197289\nSample for day 126 was 169.76084247981902\nSample for day 127 was 169.14347393153977\nSample for day 128 was 171.4217504513107\nSample for day 129 was 173.89834379492194\nSample for day 130 was 170.3495147245446\nSample for day 131 was 172.17203741079754\nSample for day 132 was 172.79214318105068\nSample for day 133 was 175.10499281941355\nSample for day 134 was 173.18876387302893\nSample for day 135 was 174.81414282425817\nSample for day 136 was 173.51971821349957\nSample for day 137 was 169.1832903415072\nSample for day 138 was 172.44643036845486\nSample for day 139 was 170.37438239142895\nSample for day 140 was 170.5128178545061\nSample for day 141 was 172.7603336967199\nSample for day 142 was 173.43295705827208\nSample for day 143 was 172.44986889632654\nSample for day 144 was 168.54115045199467\nSample for day 145 was 171.47237444495545\nSample for day 146 was 172.28422187204686\nSample for day 147 was 169.31812658254867\nSample for day 148 was 171.9843089839522\nSample for day 149 was 172.5937581563948\nSample for day 150 was 173.23557646642925\nSample for day 151 was 172.45242838151756\nSample for day 152 was 172.95373798288568\nSample for day 153 was 169.31196279581437\nSample for day 154 was 169.68424533261566\nSample for day 155 was 173.09559250773697\nSample for day 156 was 170.5045876184657\nSample for day 157 was 170.77385661410713\nSample for day 158 was 173.1993206004434\nSample for day 159 was 169.41827455268532\nSample for day 160 was 172.69828423736095\nSample for day 161 was 171.80810764017772\nSample for day 162 was 171.83850781893216\nSample for day 163 was 173.5753616187581\nSample for day 164 was 170.78294074321053\nSample for day 165 was 167.7625596819958\nSample for day 166 was 173.63599353895043\nSample for day 167 was 172.35793394439912\nSample for day 168 was 172.42914238209988\nSample for day 169 was 170.71751228569852\nSample for day 170 was 171.54000560683969\nSample for day 171 was 173.188789929698\nSample for day 172 was 172.62322681495678\nSample for day 173 was 172.6508432608691\nSample for day 174 was 171.42006139790251\nSample for day 175 was 172.4712247126185\nSample for day 176 was 170.3786488645244\nSample for day 177 was 172.73731268204696\nSample for day 178 was 172.3000446236936\nSample for day 179 was 170.7649928023232\nSample for day 180 was 169.9141005121299\nSample for day 181 was 172.42333039503097\nSample for day 182 was 171.61277999714807\nSample for day 183 was 170.60637508298126\nSample for day 184 was 171.76476298366762\nSample for day 185 was 170.41303623484504\nSample for day 186 was 172.47393077457045\nSample for day 187 was 171.3194342008746\nSample for day 188 was 169.5841940850787\nSample for day 189 was 170.52305891287497\nSample for day 190 was 174.13981403506384\nSample for day 191 was 171.57249535993967\nSample for day 192 was 172.92969865919665\nSample for day 193 was 170.71069014088408\nSample for day 194 was 172.70087709251987\nSample for day 195 was 171.02564174035243\nSample for day 196 was 174.5655176759607\nSample for day 197 was 173.2373307135623\nSample for day 198 was 169.72435883757208\nSample for day 199 was 
171.4080637212518\nSample for day 200 was 172.82472795827337\nSample for day 201 was 172.68087344401215\nSample for day 202 was 170.73816995930957\nSample for day 203 was 173.27623446278108\nSample for day 204 was 174.00762188244605\nSample for day 205 was 173.13361473414275\nSample for day 206 was 170.84245444649585\nSample for day 207 was 173.38610121883\nSample for day 208 was 171.0638349843619\nSample for day 209 was 171.126280719832\nSample for day 210 was 172.73680722414176\nSample for day 211 was 170.48813262391832\nSample for day 212 was 173.8065513385304\nSample for day 213 was 174.987975821513\nSample for day 214 was 170.03229177775182\nSample for day 215 was 175.02529474715647\nSample for day 216 was 173.40098890648693\nSample for day 217 was 171.44694390778417\nSample for day 218 was 174.3025151813375\nSample for day 219 was 173.4280196820072\nSample for day 220 was 171.33423913799567\nSample for day 221 was 171.62893394353907\nSample for day 222 was 174.71937083523463\nSample for day 223 was 173.6777821451332\nSample for day 224 was 173.29205813062757\nSample for day 225 was 171.48099822052652\nSample for day 226 was 174.7643867716951\nSample for day 227 was 174.21143537234744\nSample for day 228 was 171.77420202846264\nSample for day 229 was 171.37841143093172\nSample for day 230 was 172.18616002136272\nSample for day 231 was 172.3111613339467\nSample for day 232 was 171.77236918473153\nSample for day 233 was 169.4252121074236\nSample for day 234 was 171.16984338312017\nSample for day 235 was 171.98592378485796\nSample for day 236 was 170.66765933964413\nSample for day 237 was 173.07633301699337\nSample for day 238 was 172.55483298565144\nSample for day 239 was 170.02605126977423\nSample for day 240 was 171.4680428484353\nSample for day 241 was 171.98907654608053\nSample for day 242 was 175.02655281778826\nSample for day 243 was 171.07855120204874\nSample for day 244 was 170.51520740788092\nSample for day 245 was 172.48598843478018\nSample for day 246 was 172.1474353242007\nSample for day 247 was 169.2709521164695\nSample for day 248 was 172.5087810017655\nSample for day 249 was 172.95952188635115\nSample for day 250 was 170.5105096194364\nSample for day 251 was 173.80365699123186\nSample for day 252 was 173.20783401436017\nSample for day 253 was 172.30853501437937\nSample for day 254 was 171.3292027460107\nSample for day 255 was 170.1284541620547\nSample for day 256 was 170.53153661961474\nSample for day 257 was 169.99233807038905\nSample for day 258 was 172.2060568309715\nSample for day 259 was 172.59375266931607\nSample for day 260 was 173.13187918050644\nSample for day 261 was 173.84225403798737\nSample for day 262 was 172.16900966778172\nSample for day 263 was 171.2740795246999\nSample for day 264 was 172.06848748155048\nSample for day 265 was 172.70806798793316\nSample for day 266 was 169.52191788351348\nSample for day 267 was 173.13995943698018\nSample for day 268 was 171.31446586385138\nSample for day 269 was 174.45944054257342\nSample for day 270 was 172.33779383789957\nSample for day 271 was 170.04050400074735\nSample for day 272 was 170.5897937787512\nSample for day 273 was 172.381119795683\nSample for day 274 was 171.2191777049789\nSample for day 275 was 174.13679937916376\nSample for day 276 was 171.58968685112407\nSample for day 277 was 172.14155987323056\nSample for day 278 was 170.14580076222987\nSample for day 279 was 173.8575126095746\nSample for day 280 was 171.22280004171273\nSample for day 281 was 174.50071744849237\nSample for day 282 was 
172.88891068451716\nSample for day 283 was 169.31889881116254\nSample for day 284 was 170.69600548765348\nSample for day 285 was 171.42981400026548\nSample for day 286 was 172.50472870805683\nSample for day 287 was 171.51334191192277\nSample for day 288 was 170.08549988158256\nSample for day 289 was 172.5517746579218\nSample for day 290 was 170.35377108926656\nSample for day 291 was 173.3479274356198\nSample for day 292 was 168.98144965130814\nSample for day 293 was 174.43697752031915\nSample for day 294 was 174.24488590135522\nSample for day 295 was 171.75499841402396\nSample for day 296 was 172.2505806984\nSample for day 297 was 172.13537084694025\nSample for day 298 was 168.91730244778347\nSample for day 299 was 171.85383633190443\nSample for day 300 was 171.44332622752884\nSample for day 301 was 171.98065353587435\nSample for day 302 was 174.67545641644853\nSample for day 303 was 169.27456293913542\nSample for day 304 was 171.98544346762102\nSample for day 305 was 171.71523803475168\nSample for day 306 was 171.66213269382746\nSample for day 307 was 171.112298762341\nSample for day 308 was 170.77343371955163\nSample for day 309 was 172.20311106521876\nSample for day 310 was 169.99680356458154\nSample for day 311 was 172.95196752111943\nSample for day 312 was 176.75728819085288\nSample for day 313 was 171.8196727050369\nSample for day 314 was 170.71102865921227\nSample for day 315 was 168.0443984080638\nSample for day 316 was 172.71396733459656\nSample for day 317 was 168.70848675599822\nSample for day 318 was 171.76101124195003\nSample for day 319 was 173.73259618312758\nSample for day 320 was 172.39938678401919\nSample for day 321 was 172.4348027054093\nSample for day 322 was 172.4558659621563\nSample for day 323 was 170.7107801353672\nSample for day 324 was 172.51742285335624\nSample for day 325 was 172.24819759923054\nSample for day 326 was 174.39776477155866\nSample for day 327 was 172.01380734487162\nSample for day 328 was 172.8420396499487\nSample for day 329 was 172.34460031959003\nSample for day 330 was 170.78349067379074\nSample for day 331 was 173.0535374392026\nSample for day 332 was 172.81474736800695\nSample for day 333 was 170.59751924018238\nSample for day 334 was 171.81195373983633\nSample for day 335 was 173.14301505727445\nSample for day 336 was 173.0867067005878\nSample for day 337 was 172.49120180031275\nSample for day 338 was 172.79245354383073\nSample for day 339 was 168.77864347190467\nSample for day 340 was 172.85250539601859\nSample for day 341 was 171.51847211854056\nSample for day 342 was 168.79667318837573\nSample for day 343 was 171.98321088302234\nSample for day 344 was 171.93952803545892\nSample for day 345 was 173.98082900081164\nSample for day 346 was 170.42434934870033\nSample for day 347 was 172.80981621822798\nSample for day 348 was 171.43800984364023\nSample for day 349 was 169.13060750544295\nSample for day 350 was 170.68510962199605\nSample for day 351 was 171.6445361884467\nSample for day 352 was 173.21043754817526\nSample for day 353 was 169.11260696642978\nSample for day 354 was 170.3325375027076\nSample for day 355 was 171.78168029566487\nSample for day 356 was 172.3123996044116\nSample for day 357 was 170.1283454398062\nSample for day 358 was 174.0205474832662\nSample for day 359 was 170.3304690943122\nSample for day 360 was 171.93335633113756\nSample for day 361 was 170.89875086405297\nSample for day 362 was 175.5202730928333\nSample for day 363 was 171.85429378017477\nSample for day 364 was 171.2142851564963\nSample for day 365 was 
172.26925019233641\n" ], [ "seed(47)\n# Or the Pythonic way\ndaily_sample_means = np.array([np.mean(townsfolk_sampler(10)) for i in range(365)])", "_____no_output_____" ], [ "_ = plt.hist(daily_sample_means, bins=10)\n_ = plt.xlabel('height (cm)')\n_ = plt.ylabel('number of people')\n_ = plt.title('Distribution of heights in sample size 10')", "_____no_output_____" ] ], [ [ "The above is the distribution of the means of samples of size 10 taken from our population. The Central Limit Theorem tells us the expected mean of this distribution will be equal to the population mean, and standard deviation will be $\\sigma / \\sqrt n$, which, in this case, should be approximately 1.58.", "_____no_output_____" ], [ "__Q9:__ Verify the above results from the CLT.", "_____no_output_____" ], [ "__A:__ This is approximately 1.58.", "_____no_output_____" ] ], [ [ "np.std(daily_sample_means, ddof=1)", "_____no_output_____" ] ], [ [ "Remember, in this instance, we knew our population parameters, that the average height really is 172 cm and the standard deviation is 5 cm, and we see some of our daily estimates of the population mean were as low as around 168 and some as high as 176.", "_____no_output_____" ], [ "__Q10:__ Repeat the above year's worth of samples but for a sample size of 50 (perhaps you had a bigger budget for conducting surveys that year)! Would you expect your distribution of sample means to be wider (more variable) or narrower (more consistent)? Compare your resultant summary statistics to those predicted by the CLT.", "_____no_output_____" ], [ "__A:__ The larger sample size of 50 is more normally distributed with a narrower range. This is expected as the sample size becomes more representative of the population.", "_____no_output_____" ] ], [ [ "seed(47)\n# calculate daily means from the larger sample size here\ndaily_sample_means_50 = np.array([np.mean(townsfolk_sampler(50)) for i in range(365)])", "_____no_output_____" ], [ "_ = plt.hist(daily_sample_means_50, bins=10)\n_ = plt.xlabel('height (cm)')\n_ = plt.ylabel('number of people')\n_ = plt.title('Distribution of heights in sample size 50')\n", "_____no_output_____" ], [ "np.std(daily_sample_means_50, ddof=1)", "_____no_output_____" ] ], [ [ "What we've seen so far, then, is that we can estimate population parameters from a sample from the population, and that samples have their own distributions. Furthermore, the larger the sample size, the narrower are those sampling distributions.", "_____no_output_____" ], [ "### Normally testing time!", "_____no_output_____" ], [ "All of the above is well and good. We've been sampling from a population we know is normally distributed, we've come to understand when to use $n$ and when to use $n-1$ in the denominator to calculate the spread of a distribution, and we've seen the Central Limit Theorem in action for a sampling distribution. All seems very well behaved in Frequentist land. But, well, why should we really care?", "_____no_output_____" ], [ "Remember, we rarely (if ever) actually know our population parameters but we still have to estimate them somehow. If we want to make inferences to conclusions like \"this observation is unusual\" or \"my population mean has changed\" then we need to have some idea of what the underlying distribution is so we can calculate relevant probabilities. In frequentist inference, we use the formulae above to deduce these population parameters. 
Take a moment in the next part of this assignment to refresh your understanding of how these probabilities work.", "_____no_output_____" ], [ "Recall some basic properties of the standard normal distribution, such as that about 68% of observations are within plus or minus 1 standard deviation of the mean. Check out the precise definition of a normal distribution on p. 394 of *AoS*. \n\n__Q11:__ Using this fact, calculate the probability of observing the value 1 or less in a single observation from the standard normal distribution. Hint: you may find it helpful to sketch the standard normal distribution (the familiar bell shape) and mark the number of standard deviations from the mean on the x-axis and shade the regions of the curve that contain certain percentages of the population.", "_____no_output_____" ], [ "__A:__", "_____no_output_____" ] ], [ [ "1 - ((1 - 0.68) / 2)", "_____no_output_____" ] ], [ [ "Calculating this probability involved calculating the area under the curve from the value of 1 and below. To put it in mathematical terms, we need to *integrate* the probability density function. We could just add together the known areas of chunks (from -Inf to 0 and then 0 to $+\\sigma$ in the example above). One way to do this is to look up tables (literally). Fortunately, scipy has this functionality built in with the cdf() function.", "_____no_output_____" ], [ "__Q12:__ Use the cdf() function to answer the question above again and verify you get the same answer.", "_____no_output_____" ], [ "__A:__ The two answers are the same.", "_____no_output_____" ] ], [ [ "norm.cdf(1)", "_____no_output_____" ] ], [ [ "__Q13:__ Using our knowledge of the population parameters for our townsfolks' heights, what is the probability of selecting one person at random and their height being 177 cm or less? Calculate this using both of the approaches given above.", "_____no_output_____" ], [ "NOTE: Assuming the following questions are using the actual population mean (172) and standard deviation (5) given in the description above.", "_____no_output_____" ], [ "__A:__ There is about an 84% chance of selecting someone who is 177 cm or less from this population.\n", "_____no_output_____" ] ], [ [ "norm(172, 5).cdf(177)", "_____no_output_____" ] ], [ [ "__Q14:__ Turning this question around — suppose we randomly pick one person and measure their height and find they are 2.00 m tall. How surprised should we be at this result, given what we know about the population distribution? In other words, how likely would it be to obtain a value at least as extreme as this? Express this as a probability. ", "_____no_output_____" ], [ "__A:__ This is VERY surprising. There is almost no probability of it happening. It could be a measurement error or someone from out of town.", "_____no_output_____" ] ], [ [ "1 - norm(172, 5).cdf(200)", "_____no_output_____" ] ], [ [ "What we've just done is calculate the ***p-value*** of the observation of someone 2.00m tall (review *p*-values if you need to on p. 399 of *AoS*). We could calculate this probability by virtue of knowing the population parameters. We were then able to use the known properties of the relevant normal distribution to calculate the probability of observing a value at least as extreme as our test value.", "_____no_output_____" ], [ "We're about to come to a pinch, though. 
We've said a couple of times that we rarely, if ever, know the true population parameters; we have to estimate them from our sample and we cannot even begin to estimate the standard deviation from a single observation. \n\nThis is very true and usually we have sample sizes larger than one. This means we can calculate the mean of the sample as our best estimate of the population mean and the standard deviation as our best estimate of the population standard deviation. \n\nIn other words, we are now coming to deal with the sampling distributions we mentioned above as we are generally concerned with the properties of the sample means we obtain. \n\nAbove, we highlighted one result from the CLT, whereby the sampling distribution (of the mean) becomes narrower and narrower with the square root of the sample size. We remind ourselves that another result from the CLT is that _even if the underlying population distribution is not normal, the sampling distribution will tend to become normal with sufficiently large sample size_. (**Check out p. 199 of AoS if you need to revise this**). This is the key driver for us 'requiring' a certain sample size, for example you may frequently see a minimum sample size of 30 stated in many places. In reality this is simply a rule of thumb; if the underlying distribution is approximately normal then your sampling distribution will already be pretty normal, but if the underlying distribution is heavily skewed then you'd want to increase your sample size.", "_____no_output_____" ], [ "__Q15:__ Let's now start from the position of knowing nothing about the heights of people in our town.\n* Use the random seed of 47 to randomly sample the heights of 50 townsfolk\n* Estimate the population mean using np.mean\n* Estimate the population standard deviation using np.std (remember which denominator to use!)\n* Calculate the (95%) [margin of error](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/hypothesis-testing/margin-of-error/#WhatMofE) (use the exact critical z value to 2 decimal places - [look this up](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/find-critical-values/) or use norm.ppf()) Recall that the ***margin of error*** is mentioned on p. 189 of the *AoS* and discussed in depth in that chapter). \n* Calculate the 95% Confidence Interval of the mean (***confidence intervals*** are defined on p. 385 of *AoS*) \n* Does this interval include the true population mean?", "_____no_output_____" ], [ "__A:__", "_____no_output_____" ] ], [ [ "seed(47)\nsample_size = 50\n# take your sample now\nsample = townsfolk_sampler(sample_size)", "_____no_output_____" ], [ "mean_sample = np.mean(sample)\nprint(f\"Mean is: {mean_sample}.\")", "Mean is: 172.7815108576788.\n" ], [ "std_sample = np.std(sample)\nprint(f\"Standard deviation is: {std_sample}.\")", "Standard deviation is: 4.153258225264712.\n" ], [ "# 95% margin of error has 2 tails of rejection:\n# 1) 100% - 95% = 5%. 2) 5% / 2 = 0.025. 
3) 1 - 0.025 = 0.975.\ncritical_value = norm.ppf(0.975)\nstd_error = std_sample / np.sqrt(sample_size)\nmargin_of_error = critical_value * std_error\nprint(f\"Margin of error is: {margin_of_error}.\")", "Margin of error is: 1.151203291581224.\n" ], [ "lower = mean_sample - margin_of_error\nupper = mean_sample + margin_of_error\nci = np.array([lower, upper])\nprint(f\"The 95% confidence interval is: {ci}.\")", "The 95% confidence interval is: [171.63030757 173.93271415].\n" ] ], [ [ "__Q16:__ Above, we calculated the confidence interval using the critical z value. What is the problem with this? What requirement, or requirements, are we (strictly) failing?", "_____no_output_____" ], [ "__A:__ We estimated the population standard deviation from a single sample of 50, yet used the critical z value, which strictly assumes the population standard deviation is known. The t distribution is the better choice for this confidence interval.", "_____no_output_____" ], [ "__Q17:__ Calculate the 95% confidence interval for the mean using the _t_ distribution. Is this wider or narrower than that based on the normal distribution above? If you're unsure, you may find this [resource](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/confidence-interval/) useful. For calculating the critical value, remember how you could calculate this for the normal distribution using norm.ppf().", "_____no_output_____" ], [ "__A:__ The confidence interval using the t distribution is a little wider.\n\n### Steps to calculate a Confidence Interval For a Sample\n1) Subtract 1 from your sample size. \n2) Subtract the confidence level from 1 and then divide by two. \n3) Look up the answers from steps 1 and 2 in a t-distribution table, or calculate them. \n4) Divide the sample standard deviation by the square root of the sample size. \n5) Multiply steps 3 and 4. \n6) Subtract step 5 from the sample mean for the lower end of the range. \n7) Add step 5 to the sample mean for the upper end of the range. ", "_____no_output_____" ] ], [ [ "# 50 - 1 = 49.\nfirst_steps = t(49).ppf([0.025, 0.975])\nstep_4 = std_sample / np.sqrt(sample_size)\nstep_5 = first_steps * step_4\nlast_steps = step_5 + mean_sample\nlast_steps", "_____no_output_____" ] ], [ [ "This is slightly wider than the previous confidence interval. This reflects the greater uncertainty given that we are estimating population parameters from a sample.", "_____no_output_____" ], [ "## 4. Learning outcomes", "_____no_output_____" ], [ "Having completed this project notebook, you now have hands-on experience:\n* sampling and calculating probabilities from a normal distribution\n* identifying the correct way to estimate the standard deviation of a population (the population parameter) from a sample\n* with sampling distributions and now know how the Central Limit Theorem applies\n* with how to calculate critical values and confidence intervals\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
cb629e3bed9256549c139eca836fb93719610859
4,981
ipynb
Jupyter Notebook
examples/WPE_Numpy_offline.ipynb
xinkez/nara_wpe
addd3a7e089a46bba34af874c77fd5fb07c0bdff
[ "MIT" ]
1
2018-11-11T19:34:46.000Z
2018-11-11T19:34:46.000Z
examples/WPE_Numpy_offline.ipynb
xinkez/nara_wpe
addd3a7e089a46bba34af874c77fd5fb07c0bdff
[ "MIT" ]
null
null
null
examples/WPE_Numpy_offline.ipynb
xinkez/nara_wpe
addd3a7e089a46bba34af874c77fd5fb07c0bdff
[ "MIT" ]
null
null
null
23.384977
220
0.538045
[ [ [ "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline\n\nimport IPython\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport soundfile as sf\nimport time\nfrom tqdm import tqdm\n\nfrom nara_wpe.wpe import wpe\nfrom nara_wpe.utils import stft, istft, get_stft_center_frequencies\nfrom nara_wpe import project_root", "_____no_output_____" ], [ "stft_options = dict(\n    size=512,\n    shift=128,\n    window_length=None,\n    fading=True,\n    pad=True,\n    symmetric_window=False\n)", "_____no_output_____" ] ], [ [ "# Minimal example with random data", "_____no_output_____" ] ], [ [ "def acquire_audio_data():\n    D, T = 4, 10000\n    y = np.random.normal(size=(D, T))\n    return y", "_____no_output_____" ], [ "y = acquire_audio_data()\nY = stft(y, **stft_options)\nY = Y.transpose(2, 0, 1)\n\nstart = time.perf_counter()\nZ = wpe(Y)\nend = time.perf_counter()\n\nz_np = istft(Z.transpose(1, 2, 0), size=stft_options['size'], shift=stft_options['shift'])\nprint(f\"Time: {end-start}\")", "_____no_output_____" ] ], [ [ "# Example with real audio recordings", "_____no_output_____" ], [ "WPE estimates a filter to predict the current reverberation tail frame from K time frames which lie 3 (delay) time frames in the past. This frame (reverberation tail) is then subtracted from the observed signal.\n\n### Setup", "_____no_output_____" ] ], [ [ "channels = 8\nsampling_rate = 16000\ndelay = 3\niterations = 5\ntaps = 10", "_____no_output_____" ] ], [ [ "### Audio data\nShape: (frames, channels)", "_____no_output_____" ] ], [ [ "file_template = 'AMI_WSJ20-Array1-{}_T10c0201.wav'\nsignal_list = [\n    sf.read(str(project_root / 'data' / file_template.format(d + 1)))[0]\n    for d in range(channels)\n]\ny = np.stack(signal_list, axis=0)\nIPython.display.Audio(y[0], rate=sampling_rate)", "_____no_output_____" ] ], [ [ "### STFT\nAn STFT is performed to obtain a NumPy array with shape (frequency bins, channels, frames).", "_____no_output_____" ] ], [ [ "Y = stft(y, **stft_options).transpose(2, 0, 1)", "_____no_output_____" ] ], [ [ "### iterative WPE\nThe wpe function is fed with Y. Finally, an inverse STFT is performed to obtain a dereverberated result in the time domain. ", "_____no_output_____" ] ], [ [ "Z = wpe(Y, iterations=iterations, statistics_mode='full').transpose(1, 2, 0)\nz = istft(Z, size=stft_options['size'], shift=stft_options['shift'])\nIPython.display.Audio(z[0], rate=sampling_rate)", "_____no_output_____" ] ], [ [ "## Power spectrum \nBefore and after applying WPE", "_____no_output_____" ] ], [ [ "fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(20, 10))\nim1 = ax1.imshow(20 * np.log10(np.abs(Y[:, 0, 200:400])), origin='lower')\nax1.set_xlabel('frames')\n_ = ax1.set_title('reverberated')\nim2 = ax2.imshow(20 * np.log10(np.abs(Z[0, 200:400, :])).T, origin='lower')\nax2.set_xlabel('frames')\n_ = ax2.set_title('dereverberated')\ncb = fig.colorbar(im1)", "_____no_output_____" ] ] ]
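A small editorial note on the cells above: the setup defines `taps` and `delay`, but the `wpe` call relies on nara_wpe's defaults (which happen to coincide with those values). A sketch that passes them explicitly, so that changing the setup cell actually takes effect; it assumes `Y`, `taps`, `delay`, `iterations` and `stft_options` are defined as in the notebook:

```python
# Hypothetical variant of the dereverberation cell above.
Z = wpe(Y, taps=taps, delay=delay, iterations=iterations,
        statistics_mode='full').transpose(1, 2, 0)
z = istft(Z, size=stft_options['size'], shift=stft_options['shift'])
```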
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb62ada71f00c9b3912601123e8f861cc2254246
25,587
ipynb
Jupyter Notebook
week4.ipynb
jitendra80830/Python-Programming
2aed091559503d61d78c19854d23c5f94f2464ef
[ "Apache-2.0" ]
1
2020-10-31T12:43:48.000Z
2020-10-31T12:43:48.000Z
week4.ipynb
jitendra80830/Python-Programming
2aed091559503d61d78c19854d23c5f94f2464ef
[ "Apache-2.0" ]
null
null
null
week4.ipynb
jitendra80830/Python-Programming
2aed091559503d61d78c19854d23c5f94f2464ef
[ "Apache-2.0" ]
null
null
null
24.138679
780
0.477899
[ [ [ "#week-4,l-10\n#DICTIONARY:-\n# A Simple dictionary\n\nalien_0={'color': 'green','points': 5}\nprint(alien_0['color'])\nprint(alien_0['points'])", "green\n5\n" ], [ "#accessing values in a dictionary:\nalien_0={'color':'green','points': 5}\nnew_points=alien_0['points']\nprint(f\"you just earned {new_points} points\")", "you just earned 5 points\n" ], [ "#adding new key-value pairs:-\nalien_0={'color':'green','points': 5}\nprint(alien_0)\n\nalien_0['x_position']=0\nalien_0['t_position']=25\nprint(alien_0)", "{'color': 'green', 'points': 5}\n{'color': 'green', 'points': 5, 'x_position': 0, 't_position': 25}\n" ], [ "# empty dictionary:-\nalien_0={}\nalien_0['color']='green'\nalien_0['points']=5\nprint(alien_0)", "{'color': 'green', 'points': 5}\n" ], [ "# Modify a value in a dictionary:-\nalien_0={'color': 'green','points': 5}\nprint(f\"the alien is {alien_0['color']}\")\nalien_0['color']='yellow'\nprint(f\"the alien is now {alien_0['color']}\")", "the alien is green\nthe alien is now yellow\n" ], [ "# Example:-\nalien_0={'x_position': 0,'y_position': 25,'speed': 'medium'}\nprint(f\"original position {alien_0['x_position']}\")\nif alien_0['speed']=='slow':\n    x_increment=1\nelif alien_0['speed']=='medium':\n    x_increment=1\nelse:\n    x_increment=3\nalien_0['x_position']=alien_0['x_position']+x_increment\nprint(f\"new position {alien_0['x_position']}\")", "original position 0\nnew position 1\n" ], [ "alien_0={'color': 'green','points': 5}\nprint(alien_0)\ndel alien_0['points']\nprint(alien_0)", "{'color': 'green', 'points': 5}\n{'color': 'green'}\n" ], [ "# Example:-\nfavorite_language={\n    'jen': 'python',\n    'sarah': 'c',\n    'edward': 'ruby',\n    'phil': 'python'\n    }\nlanguage=favorite_language['jen']\nprint(f\"jen's favorite language is {language}\")", "jen's favorite language is python\n" ], [ "# error when a key is not in the dictionary:-\nalien_0={'color': 'green','speed': 'slow'}\nprint(alien_0['points'])", "_____no_output_____" ], [ "# Example:-\nalien_0={'color': 'green','speed': 'slow'}\npoints_value=alien_0.get('points','no points value assigned.')\nprint(points_value)", "no points value assigned.\n" ], [ "# Loop through a dictionary:-\n# Example:- 1\n\nuser_0={\n    'username': 'efermi',\n    'first': 'enrico',\n    'last': 'fermi'\n    }\nfor key,value in user_0.items():\n    print(f\"\\nkey: {key}\")\n    print(f\"value: {value}\")", "\nkey: username\nvalue: efermi\n\nkey: first\nvalue: enrico\n\nkey: last\nvalue: fermi\n" ], [ "#Example:-2\nfavorite_language={\n    'jen': 'python',\n    'sarah': 'c',\n    'edward': 'ruby',\n    'phil': 'python'\n    }\nfor name,language in favorite_language.items():\n    print(f\"\\n{name.title()}'s favorite language is {language.title()}\")", "\nJen's favorite language is Python\n\nSarah's favorite language is C\n\nEdward's favorite language is Ruby\n\nPhil's favorite language is Python\n" ], [ "# if you print only keys:-\n\nfavorite_language={\n    'jen': 'python',\n    'sarah': 'c',\n    'edward': 'ruby',\n    'phil': 'python'\n    }\nfor name in favorite_language.keys():\n    print(name.title())", "Jen\nSarah\nEdward\nPhil\n" ], [ "# if you print a message for particular names:-\nfavorite_languages={\n    'jen': 'python',\n    'sarah': 'c',\n    'edward': 'ruby',\n    'phil': 'python'\n    }\nfriends=['phil','sarah']\nfor name in favorite_languages.keys():\n    print(name.title())\n    if name in friends:\n        language=(favorite_languages[name].title())\n        \n        print(f\"{name.title()}, I see you love {language}\")", "Jen\nSarah\nSarah, I see you love C\nEdward\nPhil\nPhil, I see you love Python\n" ], [ "# print a message if a key is not in the 
dict:-\nfavorite_languages={\n    'jen': 'python',\n    'sarah': 'c',\n    'edward': 'ruby',\n    'phil': 'python'\n    }\nif 'earimed' not in favorite_languages.keys():\n    print(\"earimed, please take our poll.\")", "earimed, please take our poll.\n" ], [ "# if you print in order:-\nfavorite_languages={\n    'jen': 'python',\n    'sarah': 'c',\n    'edward': 'ruby',\n    'phil': 'python'\n    }\nfor name in sorted(favorite_languages.keys()):\n    print(f\"{name.title()}, thank you for taking the poll.\")", "Edward, thank you for taking the poll.\nJen, thank you for taking the poll.\nPhil, thank you for taking the poll.\nSarah, thank you for taking the poll.\n" ], [ "# if you print only values:-\nfavorite_languages={\n    'jen': 'python',\n    'sarah': 'c',\n    'edward': 'ruby',\n    'phil': 'python'\n    }\nprint(\"The following languages have been mentioned\")\nfor language in favorite_languages.values():\n    print(language.title())", "The following languages have been mentioned\nPython\nC\nRuby\nPython\n" ], [ "# use the set method and print unique languages:-\nfavorite_languages={\n    'jen': 'python',\n    'sarah': 'c',\n    'edward': 'ruby',\n    'phil': 'python'\n    }\nprint(\"The following languages have been mentioned\")\nfor language in set(favorite_languages.values()):\n    print(language.title())", "The following languages have been mentioned\nRuby\nC\nPython\n" ], [ "# NESTING (MULTIPLE DICTS):-\n# A list of Dictionaries-\n\nalien_0={'color': 'green','points': 5}\nalien_1={'color': 'yellow','points': 10}\nalien_2={'color':'red','points': 15}\naliens=[alien_0,alien_1,alien_2]\nfor alien in aliens:\n    print(alien)", "{'color': 'green', 'points': 5}\n{'color': 'yellow', 'points': 10}\n{'color': 'red', 'points': 15}\n" ], [ "# For an empty list:-\naliens=[]\nfor alien_number in range(30):\n    new_alien={'color':'green','points': 5,'speed': 'slow'}\n    aliens.append(new_alien)\nfor alien in aliens[:5]:\n    print(alien)\nprint(f\"\\nTotal no of aliens {len(aliens)}\")", "{'color': 'green', 'points': 5, 'speed': 'slow'}\n{'color': 'green', 'points': 5, 'speed': 'slow'}\n{'color': 'green', 'points': 5, 'speed': 'slow'}\n{'color': 'green', 'points': 5, 'speed': 'slow'}\n{'color': 'green', 'points': 5, 'speed': 'slow'}\n\nTotal no of aliens 30\n" ], [ "# if you want to print the 1st 3 aliens as yellow:-\nliens=[]\nfor alien_number in range(30):\n    new_alien={'color':'green','points': 5,'speed': 'slow'}\n    aliens.append(new_alien)\nfor alien in aliens[:3]:\n    if alien['color']=='green':\n        alien['color']='yellow'\n        alien['speed']='medium'\n        alien['points']='10'\nfor alien in aliens[:5]:\n    print(alien)", "{'color': 'yellow', 'points': '10', 'speed': 'medium'}\n{'color': 'yellow', 'points': '10', 'speed': 'medium'}\n{'color': 'yellow', 'points': '10', 'speed': 'medium'}\n{'color': 'green', 'points': 5, 'speed': 'slow'}\n{'color': 'green', 'points': 5, 'speed': 'slow'}\n" ], [ "\n# if you want to print the 1st 3 aliens as yellow:-\nliens=[]\nfor alien_number in range(30):\n    new_alien={'color':'green','points': 5,'speed': 'slow'}\n    aliens.append(new_alien)\nfor alien in aliens[:3]:\n    if alien['color']=='green':\n        alien['color']='yellow'\n        alien['speed']='medium'\n        alien['points']=10\n        \nfor alien in aliens[:5]:\n    print(alien)", "{'color': 'red', 'points': 15, 'speed': 'fast'}\n{'color': 'red', 'points': 15, 'speed': 'fast'}\n{'color': 'red', 'points': 15, 'speed': 'fast'}\n{'color': 'green', 'points': 5, 'speed': 'slow'}\n{'color': 'green', 'points': 5, 'speed': 'slow'}\n" ], [ "# store information about a pizza being ordered:-\npizza={\n    'crust': 'thick',\n    'topping': 
['mushroom','extra cheese']\n    }\nprint(f\"you ordered a {pizza['crust']}-crust pizza with the following toppings\" )\nfor topping in pizza['topping']:\n    print(\"\\t\" + topping)", "you ordered a thick-crust pizza with the following toppings\n\tmushroom\n\textra cheese\n" ], [ "# for multiple favorite languages:-\nfavorite_languages={\n    'jen': ['python','ruby'],\n    'sarah': ['c'],\n    'edward': ['ruby','go'],\n    'phil': ['python','haskell']\n    }\nfor name,languages in favorite_languages.items():\n    print(f\"{name}'s favorite languages are\")\n    for language in languages:\n        print(language)", "jen's favorite languages are\npython\nruby\nsarah's favorite languages are\nc\nedward's favorite languages are\nruby\ngo\nphil's favorite languages are\npython\nhaskell\n" ], [ "# A DICTIONARY IN A DICTIONARY:-\n\nuser={\n    'aeinstein':{\n        'first': 'albert',\n        'last': 'einstein',\n        'location':'princeton',\n    },\n    'mcurie':{\n        'first': 'marie',\n        'last': 'curie',\n        'location': 'paris',\n    }\n    }\nfor username,user_info in user.items():\n    print(f\"\\nusername: {username}\")\n    full_name=(f\"{user_info['first']} {user_info['last']}\")\n    print(f\"full_name: {full_name.title()}\")\n    print(f\"location: {user_info['location'].title()}\")", "\nusername: aeinstein\nfull_name: Albert Einstein\nlocation: Princeton\n\nusername: mcurie\nfull_name: Marie Curie\nlocation: Paris\n" ], [ "#OPERATIONS ON A DICTIONARY:-\ncapital={'India': 'New delhi','Usa': 'Washington dc','France': 'Paris','Sri lanka': 'Colombo'}\nprint(capital['India'])\nprint(capital.get('Uk', 'unknown'))\ncapital['Uk']='London'\nprint(capital['Uk'])\nprint(capital.keys())\nprint(capital.values())\n\nprint(len(capital))\n\nprint('Usa' in capital)\n\nprint('russia' in capital)\n\ndel capital['Usa']\nprint(capital)\n\ncapital['Sri lanka']='Sri Jayawardenepura Kotte'\nprint(capital)\n\ncountries=[]\nfor k in capital:\n    countries.append(k)\n    countries.sort()\nprint(countries)", "New delhi\nunknown\nLondon\ndict_keys(['India', 'Usa', 'France', 'Sri lanka', 'Uk'])\ndict_values(['New delhi', 'Washington dc', 'Paris', 'Colombo', 'London'])\n5\nTrue\nFalse\n{'India': 'New delhi', 'France': 'Paris', 'Sri lanka': 'Colombo', 'Uk': 'London'}\n{'India': 'New delhi', 'France': 'Paris', 'Sri lanka': 'Sri Jayawardenepura Kotte', 'Uk': 'London'}\n['France', 'India', 'Sri lanka', 'Uk']\n" ], [ "# L-12.\n# USER INPUT AND WHILE LOOPS:-\n# How the input() function works:-\n\nmessage=input(\"Tell me something, and I will repeat it back to you: \")", "Tell me something, and I will repeat it back to you: Hello everyone\n" ], [ "name=input(\"Please enter your name: \")\nprint(f\"\\nHello, {name}!\")", "Please enter your name: jitendr\n\nHello, jitendr!\n" ], [ "prompt=\"if you tell us who you are, we can personalize the message you see.\"\nprompt+=\"\\nWhat is your name? \"\nname=input(prompt)\nprint(f\"\\nHello, {name}!\")", "if you tell us who you are, we can personalize the message you see.\nWhat is your name? jitendra kumar\n\nHello, jitendra kumar!\n" ], [ "#accept numerical input\n\nage=input(\"how old are you? \")\nage=int(age)\nprint(age>=18)", "how old are you? 21\nTrue\n" ], [ "# example:-\nheight=input(\"How tall are you, in inches? \")\nheight=int(height)\nif height>=48:\n    print(\"\\nyou are tall enough to ride\")\nelse:\n    print(\"\\nyou'll be able to ride when you're a little older\")", "How tall are you, in inches? 75\n\nyou are tall enough to ride\n" ], [ "# print whether a number is even or odd:-\nnumber=input(\"Enter a number, and I'll tell you if it's even or odd: \")\nnumber=int(number)\nif number%2==0:\n    print(f\"\\nThe number {number} 
is even.\")\nelse:\n    print(f\"\\nThe number {number} is odd.\")", "Enter a number, and I'll tell you if it's even or odd: 4\n\nThe number 4 is even.\n" ], [ "#INTRODUCING WHILE LOOPS-\n# The while loop in action:\ncurrent_number=1\nwhile current_number<=5:\n    print(current_number)\n    current_number +=1", "1\n2\n3\n4\n5\n" ], [ "# example:-\nprompt=\"Tell me something, and I will repeat it back to you:\"\nprompt+=\"\\nEnter 'quit' to end the program.\"\nmessage=\"\"\nwhile message!='quit':\n    message=input(prompt)\n    if message!='quit':\n        print(message)\n", "Tell me something, and I will repeat it back to you:\nEnter 'quit' to end the program.quit\n" ], [ "#Using break to exit a loop\nprompt=\"\\nPlease enter the name of a city you have visited.\"\nprompt+=\"\\nEnter 'quit' when you are finished: \"\nwhile True:\n    city=input(prompt)\n    if city=='quit':\n        break\n    else:\n        print(f\"I'd love to go to {city.title()}\")\n    ", "\nPlease enter the name of a city you have visited.\nEnter 'quit' when you are finished: kolkata\nI'd love to go to Kolkata\n\nPlease enter the name of a city you have visited.\nEnter 'quit' when you are finished: quit\n" ], [ "#Example:-\ncurrent_number=0\nwhile current_number<10:\n    current_number+=1\n    if current_number%2==0:\n        continue\n    print(current_number)    \n    ", "1\n3\n5\n7\n9\n" ], [ "x=1\nwhile x<=5:\n    print(x)\n    x+=1", "1\n2\n3\n4\n5\n" ] ] ]
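Editor's practice example (not part of the original lesson): `get()` with a default value is handy for counting, because it avoids the `KeyError` demonstrated earlier in the lesson.

```python
# count how often each color appears, without a KeyError for unseen keys
colors = ['green', 'yellow', 'green', 'red', 'green']
counts = {}
for color in colors:
    counts[color] = counts.get(color, 0) + 1
print(counts)  # {'green': 3, 'yellow': 1, 'red': 1}
```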
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb62c0883eda4b808398326d3c34f6dc2369164d
30,757
ipynb
Jupyter Notebook
housing_Regression.ipynb
jsprecher/Ames-Housing
7639b6a61c3172372e4d1ea095f132c6aed3e065
[ "MIT" ]
null
null
null
housing_Regression.ipynb
jsprecher/Ames-Housing
7639b6a61c3172372e4d1ea095f132c6aed3e065
[ "MIT" ]
null
null
null
housing_Regression.ipynb
jsprecher/Ames-Housing
7639b6a61c3172372e4d1ea095f132c6aed3e065
[ "MIT" ]
null
null
null
109.067376
22,720
0.863511
[ [ [ "# House Prices: Advanced Regression Techniques", "_____no_output_____" ], [ "### **Goal**\nIt is your job to predict the sales price for each house. For each Id in the test set, you must predict the value of the SalePrice variable. \n\n### **Metric**\nSubmissions are evaluated on Root-Mean-Squared-Error (RMSE) between the logarithm of the predicted value and the logarithm of the observed sales price. (Taking logs means that errors in predicting expensive houses and cheap houses will affect the result equally.)\n\n### **Submission File Format**\nThe file should contain a header and have the following format:\n\n Id,SalePrice\n 1461,169000.1\n 1462,187724.1233\n 1463,175221\netc.\n\n<https://www.kaggle.com/c/house-prices-advanced-regression-techniques/>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport sklearn as sk\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom pylab import *\nimport scipy.stats as stats", "_____no_output_____" ], [ "train = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\nsample_submission = pd.read_csv('sample_submission.csv')", "_____no_output_____" ], [ "def multi_levene(df, col_names, target):\n group_df = df.pivot(columns = col_names, values = target)\n pvals = []\n for i in range(len(group_df.columns)-1):\n pval = stats.levene(group_df.iloc[:,i].dropna(), group_df.iloc[:,i+1].dropna()).pvalue\n pvals.append(pval)\n\n return max(pvals)", "_____no_output_____" ], [ "text_columns = train.columns[train.dtypes == object]\ntrain['total_SF'] = train[['TotalBsmtSF','1stFlrSF','2ndFlrSF']].sum(axis=1)\ntrain['price_per_SF'] = train['SalePrice']/train['total_SF']", "_____no_output_____" ], [ "levene_df = pd.DataFrame()\nfor col in text_columns:\n levene_df = levene_df.append([[col,multi_levene(train, col, 'price_per_SF')]])\n \nlevene_df.columns = ['feature','score']", "C:\\Users\\jakes\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\stats\\morestats.py:2369: RuntimeWarning: invalid value encountered in double_scalars\n W = numer / denom\nC:\\Users\\jakes\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\stats\\_distn_infrastructure.py:903: RuntimeWarning: invalid value encountered in greater\n return (a < x) & (x < b)\nC:\\Users\\jakes\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\stats\\_distn_infrastructure.py:903: RuntimeWarning: invalid value encountered in less\n return (a < x) & (x < b)\nC:\\Users\\jakes\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\stats\\_distn_infrastructure.py:1912: RuntimeWarning: invalid value encountered in less_equal\n cond2 = cond0 & (x <= _a)\nC:\\Users\\jakes\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\stats\\morestats.py:2369: RuntimeWarning: divide by zero encountered in double_scalars\n W = numer / denom\n" ], [ "irrelevant_cols = levene_df.loc[levene_df['score']<0.05,'feature']", "_____no_output_____" ], [ "train_dummies = pd.get_dummies(train[text_columns])", "_____no_output_____" ], [ "train_corr = train.drop('price_per_SF',axis=1).corr()\ntarget_variables = train_corr.nlargest(11, 'SalePrice').index.tolist()\n\ntarget_vars = [word for word in target_variables if word != 'SalePrice']", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\n\nX = train[target_vars].join(train_dummies)\ny = train['SalePrice']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state=42)", "_____no_output_____" ], [ "from sklearn import 
linear_model\n\nreg = linear_model.LinearRegression()\nreg.fit(X_train, y_train)", "_____no_output_____" ], [ "import sklearn.metrics as metrics\n\ndef regression_results(y_true, y_pred):\n\n # Regression metrics\n explained_variance=metrics.explained_variance_score(y_true, y_pred)\n mean_absolute_error=metrics.mean_absolute_error(y_true, y_pred) \n mse=metrics.mean_squared_error(y_true, y_pred) \n mean_squared_log_error=metrics.mean_squared_log_error(y_true, y_pred)\n median_absolute_error=metrics.median_absolute_error(y_true, y_pred)\n r2=metrics.r2_score(y_true, y_pred)\n\n print('explained_variance: ', round(explained_variance,4)) \n print('mean_squared_log_error: ', round(mean_squared_log_error,4))\n print('r2: ', round(r2,4))\n print('MAE: ', round(mean_absolute_error,4))\n print('MSE: ', round(mse,4))\n print('RMSE: ', round(np.sqrt(mse),4))", "_____no_output_____" ], [ "y_pred = reg.predict(X_test)\n\nregression_results(y_test, y_pred)", "explained_variance: 0.8662\nmean_squared_log_error: 0.0303\nr2: 0.8661\nMAE: 20078.3835\nMSE: 982845346.2991\nRMSE: 31350.3644\n" ], [ "plt.figure(figsize=(10,5))\nsns.distplot(y_test-y_pred)\nplt.title('Distance Between Real Price and Predicted Price')\nplt.show()", "_____no_output_____" ] ] ]
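Editor's note: the competition described at the top of this notebook scores RMSE on *log* prices, while the notebook reports RMSE on raw prices. A hedged sketch of the matching RMSLE, assuming `y_test`, `y_pred` and the imported `metrics` module from the cells above:

```python
# RMSLE: RMSE between log(1 + y) values, mirroring the Kaggle metric
rmsle = np.sqrt(metrics.mean_squared_error(np.log1p(y_test), np.log1p(y_pred)))
print('RMSLE:', round(rmsle, 4))
```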
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb62dd2b4b72839bdd4d1862899819681285d19a
13,411
ipynb
Jupyter Notebook
11 - Dictionaries.ipynb
maralski/python-tutorial-101
7de8f2adf0e26cb83bbc482c1276d2dd950e8a8c
[ "MIT" ]
null
null
null
11 - Dictionaries.ipynb
maralski/python-tutorial-101
7de8f2adf0e26cb83bbc482c1276d2dd950e8a8c
[ "MIT" ]
null
null
null
11 - Dictionaries.ipynb
maralski/python-tutorial-101
7de8f2adf0e26cb83bbc482c1276d2dd950e8a8c
[ "MIT" ]
null
null
null
24.789279
934
0.522556
[ [ [ "## Dictionaries\n\nEach key is separated from its value by a colon (:), the items are separated by commas, and the whole thing is enclosed in curly braces. An empty dictionary without any items is written with just two curly braces, like this: {}.\n\nKeys are unique within a dictionary while values may not be. The values of a dictionary can be of any type, but the keys must be of an immutable data type such as strings, numbers, or tuples.", "_____no_output_____" ], [ "## Accessing Values in a Dictionary\n\nTo access dictionary elements, you can use the familiar square brackets along with the key to obtain its value.", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}\n\nprint(\"dict1['Name']: \", dict1['Name'])\nprint(\"dict1['Age']: \", dict1['Age'])", "dict1['Name']:  Zara\ndict1['Age']:  7\n" ] ], [ [ "If we attempt to access a data item with a key that is not part of the dictionary, we get a `KeyError`.", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}\n\nprint(\"dict1['Alice']: \", dict1['Alice'])", "_____no_output_____" ] ], [ [ "## Updating a Dictionary\n\nYou can update a dictionary by adding a new entry or a key-value pair, modifying an existing entry, or deleting an existing entry as shown in a simple example given below.", "_____no_output_____" ] ], [ [ "\ndict1 = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}\n\ndict1['Age'] = 8 # update existing entry\ndict1['School'] = \"DPS School\" # Add new entry\n\nprint(\"dict1['Age']: \", dict1['Age'])\nprint(\"dict1['School']: \", dict1['School'])", "dict1['Age']:  8\ndict1['School']:  DPS School\n" ], [ "print(dict1)", "{'Name': 'Zara', 'Age': 8, 'Class': 'First', 'School': 'DPS School'}\n" ] ], [ [ "## Delete Dictionary Elements\n\nYou can either remove individual dictionary elements or clear the entire contents of a dictionary. You can also delete the entire dictionary in a single operation.\n\nTo explicitly remove an entire dictionary, just use the `del` statement.", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}\n\ndict1", "_____no_output_____" ] ], [ [ "Remove the entry with key 'Name'", "_____no_output_____" ] ], [ [ "del dict1['Name']\n\ndict1", "_____no_output_____" ] ], [ [ "Remove all entries in the dict", "_____no_output_____" ] ], [ [ "dict1.clear()\n\ndict1", "_____no_output_____" ] ], [ [ "Delete the entire dictionary", "_____no_output_____" ] ], [ [ "del dict1", "_____no_output_____" ] ], [ [ "## Properties of Dictionary Keys\n\nDictionary values have no restrictions. They can be any arbitrary Python object, either standard objects or user-defined objects. However, the same is not true for the keys.\n\nThere are two important points to remember about dictionary keys:", "_____no_output_____" ], [ "More than one entry per key is not allowed. This means no duplicate key is allowed. When duplicate keys are encountered during assignment, the last assignment wins.", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7, 'Name': 'Manni'}\n\nprint(\"dict1['Name']: \", dict1['Name'])", "dict1['Name']:  Manni\n" ] ], [ [ "Keys must be immutable. 
This means you can use strings, numbers or tuples as dictionary keys but something like ['key'] is not allowed.", "_____no_output_____" ] ], [ [ "dict1 = {['Name']: 'Zara', 'Age': 7}\n\nprint(\"dict1['Name']: \", dict1['Name'])", "_____no_output_____" ] ], [ [ "## Built-in Dictionary Functions\n\nPython includes the following dictionary functions.", "_____no_output_____" ], [ "`len(dict)` gives the total length of the dictionary. This would be equal to the number of items in the dictionary.", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7, 'Name': 'Manni'}\n\nlen(dict1)", "_____no_output_____" ] ], [ [ "`str(dict)` produces a printable string representation of a dictionary", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7, 'Name': 'Manni'}\n\nstr(dict1)", "_____no_output_____" ] ], [ [ "`type(variable)` returns the type of the passed variable. If passed variable is dictionary, then it would return a dictionary type.", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7}\n\ntype(dict1)", "_____no_output_____" ] ], [ [ "Python includes many dictionary methods. Here are some of them.", "_____no_output_____" ], [ "`dict.get(key, default=None)` for key key, returns value or default if key not in dictionary", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7}\n\ndict1.get('Sex', 'Female')", "_____no_output_____" ] ], [ [ "`dict.keys()` returns list of dictionary dict's keys", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7}\n\ndict1.keys()", "_____no_output_____" ] ], [ [ "`dict.values()` returns list of dictionary dict's values", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7}\n\ndict1.values()", "_____no_output_____" ] ], [ [ "`dict.items()` returns a list of dict's (key, value) tuple pairs", "_____no_output_____" ] ], [ [ "dict1 = {'Name': 'Zara', 'Age': 7}\n\ndict1.items()", "_____no_output_____" ] ] ]
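One more method worth knowing (an editorial addition, not from the original tutorial): `dict.update()` merges another dictionary in, with later assignments winning, consistent with the duplicate-key rule described above.

```python
dict1 = {'Name': 'Zara', 'Age': 7}
dict1.update({'Age': 8, 'Class': 'First'})  # 'Age' is overwritten, 'Class' is added
print(dict1)  # {'Name': 'Zara', 'Age': 8, 'Class': 'First'}
```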
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb62e59b2b2eea55942471765caa1ec3d580424e
116,167
ipynb
Jupyter Notebook
200724-boston-house-price.ipynb
MartinTschendel/boston-house-prices
23d6484f4f25d9ea90ea32c9f6dd6569001979b5
[ "MIT" ]
2
2020-07-26T06:29:36.000Z
2020-08-08T03:22:29.000Z
200724-boston-house-price.ipynb
MartinTschendel/boston-house-prices
23d6484f4f25d9ea90ea32c9f6dd6569001979b5
[ "MIT" ]
null
null
null
200724-boston-house-price.ipynb
MartinTschendel/boston-house-prices
23d6484f4f25d9ea90ea32c9f6dd6569001979b5
[ "MIT" ]
null
null
null
71.443419
23,020
0.749834
[ [ [ "import pandas as pd\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n%matplotlib inline", "_____no_output_____" ], [ "df = pd.read_csv('boston_house_prices.csv')", "_____no_output_____" ] ], [ [ "<b>Explanation of Features</b>\n* CRIM: per capita crime rate per town (assumption: if CRIM high, target small)\n* ZN: proportion of residential land zoned for lots over 25,000 sq. ft (assumption: if ZN high, target big)\n* INDUS: proportion of non-retail business acres per town (assumption: if INDUS high, target small)\n* CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) (categorical! assumption: if 1, target high)\n* NOX: nitrogen oxides concentration (parts per 10 million) (assumption: if NOX high, target small)\n* RM: average number of rooms per dwelling.(assumption: if RM high, target big)\n* AGE: proportion of owner-occupied units built prior to 1940. (assumption: if AGE high, target big)\n* DIS: weighted mean of distances to five Boston employment centres. (assumption: if DIS high, target small)\n* RAD: index of accessibility to radial highways. (assumption: if RAD high, target big)\n* TAX: full-value property-tax rate per \\$10,000. (assumption: if TAX high, target big)\n* PTRATIO: pupil-teacher ratio by town. (assumption: if PTRATIO high, target big)\n* B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town. (assumption: if B high, target small)\n* LSTAT: lower status of the population (percent). (assumption: if LSTAT high, target small)\n* MEDV: median value of owner-occupied homes in \\$1000s. (target)\n\n", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ], [ "#get number of rows and columns\ndf.shape", "_____no_output_____" ], [ "#get overview of dataset values\ndf.describe()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 506 entries, 0 to 505\nData columns (total 14 columns):\nCRIM 506 non-null float64\nZN 506 non-null float64\nINDUS 506 non-null float64\nCHAS 506 non-null int64\nNOX 506 non-null float64\nRM 506 non-null float64\nAGE 506 non-null float64\nDIS 506 non-null float64\nRAD 506 non-null int64\nTAX 506 non-null int64\nPTRATIO 506 non-null float64\nB 506 non-null float64\nLSTAT 506 non-null float64\nMEDV 506 non-null float64\ndtypes: float64(11), int64(3)\nmemory usage: 55.5 KB\n" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "#check distribution of target variable\n#looks like normal distribution, no need to do logarithm\nsns.distplot(df.MEDV, kde=False)", "_____no_output_____" ], [ "#get number of rows in df\nn = len(df)\n\n#calculate proportions for training, validation and testing datasets\nn_val = int(0.2 * n)\nn_test = int(0.2 * n)\nn_train = n - (n_val + n_test)\n\n#fix the random seed, so that results are reproducible\nnp.random.seed(2)\n#create a numpy array with indices from 0 to (n-1) and shuffle it\nidx = np.arange(n)\nnp.random.shuffle(idx)\n\n#use the array with indices 'idx' to get a shuffled dataframe\n#idx now becomes the index of the df,\n#and order of rows in df is according to order of rows in idx\ndf_shuffled = df.iloc[idx]\n\n#split shuffled df into train, validation and test\n#e.g. 
for train: program starts from index 0\n#until the index, that is defined by variable (n_train -1)\ndf_train = df_shuffled.iloc[:n_train].copy()\ndf_val = df_shuffled.iloc[n_train:n_train+n_val].copy()\ndf_test = df_shuffled.iloc[n_train+n_val:].copy()", "_____no_output_____" ], [ "#keep df's with target value\ndf_train_incl_target = df_shuffled.iloc[:n_train].copy()\ndf_val_incl_target = df_shuffled.iloc[n_train:n_train+n_val].copy()\ndf_test_incl_target = df_shuffled.iloc[n_train+n_val:].copy()", "_____no_output_____" ], [ "#create target variable arrays\ny_train = df_train.MEDV.values\ny_val = df_val.MEDV.values\ny_test = df_test.MEDV.values\n\n#remove target variable from df's\ndel df_train['MEDV']\ndel df_val['MEDV']\ndel df_test['MEDV']", "_____no_output_____" ], [ "#define first numerical features\n#new training set only contains the selected base columns\n#training set is transformed to matrix array with 'values' method\nbase = ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD']\ndf_num = df_train[base]\nX_train = df_num.values", "_____no_output_____" ], [ "#return the weights\ndef linear_regression(X, y):\n    ones = np.ones(X.shape[0])\n    X = np.column_stack([ones, X])\n    \n    XTX = X.T.dot(X)\n    XTX_inv = np.linalg.inv(XTX)\n    w = XTX_inv.dot(X.T).dot(y)\n    return w[0], w[1:]\nw_0, w = linear_regression(X_train, y_train)\n\n#prediction of target variable, based on training set\ny_pred = w_0 + X_train.dot(w)", "_____no_output_____" ], [ "#the plot shows difference between distribution of\n#real target variable and predicted target variable\nsns.distplot(y_pred, label='pred')\nsns.distplot(y_train, label='target')\nplt.legend()", "_____no_output_____" ], [ "#calculation of root mean squared error\n#based on difference between distribution of\n#real target variable and predicted target variable\ndef rmse(y, y_pred):\n    error = y_pred - y\n    mse = (error ** 2).mean()\n    return np.sqrt(mse)\nrmse(y_train, y_pred)", "_____no_output_____" ] ], [ [ "Validating the Model", "_____no_output_____" ] ], [ [ "#create X_val matrix array\ndf_num = df_val[base]\nX_val = df_num.values", "_____no_output_____" ], [ "#take the bias and the weights (w_0 and w), which we got from the linear regression\n#and get the prediction of the target variable for the validation dataset\ny_pred = w_0 + X_val.dot(w)", "_____no_output_____" ], [ "#compare y_pred with real target values 'y_val'\n#that number should be used for comparing models\nrmse(y_val, y_pred)", "_____no_output_____" ] ], [ [ "<b>prepare_X</b> function converts dataframe to matrix array", "_____no_output_____" ] ], [ [ "#this function takes in feature variables (base),\n#and returns a matrix array with 'values' method\ndef prepare_X(df):\n    df_num = df[base]\n    X = df_num.values\n    return X", "_____no_output_____" ], [ "#train the model by calculating the weights\nX_train = prepare_X(df_train)\nw_0, w = linear_regression(X_train, y_train)\n\n#apply model to validation dataset\nX_val = prepare_X(df_val)\ny_pred = w_0 + X_val.dot(w)\n\n#compute RMSE on validation dataset\nprint('validation', rmse(y_val, y_pred))", "validation 6.315378828176368\n" ] ], [ [ "Feature engineering: Add more features to the model<br>\nWe use the validation framework to see whether more features improve the model", "_____no_output_____" ] ], [ [ "#use prepare_X function to add more features\ndef prepare_X(df):\n    df = df.copy()\n    base_02 = ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',\n               'TAX', 'PTRATIO', 'B', 'LSTAT']\n    df_num = df[base_02]\n    X = df_num.values\n    return 
X", "_____no_output_____" ], [ "#check if adding 4 more numerical features can improve the model\n#X_train should now be a matrix array with a total of 12 numerical features\n#train the model\nX_train = prepare_X(df_train)\nw_0, w = linear_regression(X_train, y_train)\n\n#apply model to validation dataset\nX_val = prepare_X(df_val)\ny_pred = w_0 + X_val.dot(w)\n\n#compute RMSE on validation dataset\nprint('validation:', rmse(y_val, y_pred))", "validation: 5.114265782613434\n" ], [ "#above we can see that the RMSE decreased a bit\n#plot distribution of real target values (target)\n#and the predicted target values (pred)\n#after we considered 12 feature variables\nsns.distplot(y_pred, label='pred')\nsns.distplot(y_val, label='target')\nplt.legend()", "_____no_output_____" ] ], [ [ "Feature engineering: Add the CHAS feature to the model <br>\nActually it is a categorical variable, but it has only 2 values (0 and 1) <br>\nSo there is no need to do one-hot encoding <br>\nWe use the validation framework to see whether this additional feature improves the model", "_____no_output_____" ] ], [ [ "base_02 = ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',\n           'TAX', 'PTRATIO', 'B', 'LSTAT']\n\n#use prepare_X function to add CHAS as a feature\ndef prepare_X(df):\n    df = df.copy()\n    features = base_02.copy()\n    features.append('CHAS')\n    \n    df_num = df[features]\n    X = df_num.values\n    return X", "_____no_output_____" ], [ "#check if adding 'CHAS' as a feature can improve the model\n#X_train should now be a matrix array with a total of 12 numerical features and 1 categorical feature\n#train the model\nX_train = prepare_X(df_train)\nw_0, w = linear_regression(X_train, y_train)\n\n#apply model to validation dataset\nX_val = prepare_X(df_val)\ny_pred = w_0 + X_val.dot(w)\n\n#compute RMSE on validation dataset\nprint('validation:', rmse(y_val, y_pred))", "validation: 5.028887217906398\n" ], [ "#above we can see that the RMSE decreased a bit\n#compared to the plot above, the amount of predicted values for '30'\n#gets closer to the amount of real values for '30'\n#plot distribution of real target values (target)\n#and the predicted target values (pred)\n#after we considered 12 feature variables\nsns.distplot(y_pred, label='pred')\nsns.distplot(y_val, label='target')\nplt.legend()", "_____no_output_____" ], [ "#we could try regularization in case the data is 'noisy'\n#regularize with the parameter r\ndef linear_regression_reg(X, y, r=0.01):\n    ones = np.ones(X.shape[0])\n    X = np.column_stack([ones, X])\n    \n    XTX = X.T.dot(X)\n    #add r to main diagonal of XTX\n    reg = r * np.eye(XTX.shape[0])\n    XTX = XTX + reg\n    \n    XTX_inv = np.linalg.inv(XTX)\n    w = XTX_inv.dot(X.T).dot(y)\n    \n    return w[0], w[1:]\n\n    ", "_____no_output_____" ], [ "#the bigger r (alpha), the smaller the weights (the denominator becomes bigger)\n#in the left 'column', you can see r, which grows with each step\n#the other columns show the weights\nfor r in [0, 0.001, 0.01, 0.1, 1, 10]:\n    w_0, w = linear_regression_reg(X_train, y_train, r=r)\n    print('%5s, %.2f, %.2f, %.2f' % (r, w_0, w[3], w[5]))", "    0, 33.95, -19.12, -0.01\n0.001, 33.87, -19.07, -0.01\n 0.01, 33.14, -18.65, -0.01\n  0.1, 27.33, -15.22, -0.01\n    1, 10.16, -5.17, -0.01\n   10, 1.76, -0.43, -0.01\n" ], [ "#calculate the RMSE after we used ridge regression\nX_train = prepare_X(df_train)\nw_0, w = linear_regression_reg(X_train, y_train, r=0.001)\n\nX_val = prepare_X(df_val)\ny_pred = w_0 + X_val.dot(w)\n\nprint('validation:', rmse(y_val, y_pred))", "validation: 
5.029289239043889\n" ], [ "#run a grid search to identify the best value of r\nX_train = prepare_X(df_train)\nX_val = prepare_X(df_val)\n\nfor r in [0.000001, 0.0001, 0.001, 0.01, 0.1, 1, 5, 10]:\n w_0, w = linear_regression_reg(X_train, y_train, r=r)\n y_pred = w_0 + X_val.dot(w)\n print('%6s' %r, rmse(y_val, y_pred))", " 1e-06 5.028887619364775\n0.0001 5.028927369382458\n 0.001 5.029289239043889\n 0.01 5.032953471039654\n 0.1 5.071026300965349\n 1 5.268606931564101\n 5 5.3787614780926365\n 10 5.389695101912931\n" ] ], [ [ "as we can see from the new rmse, the ridge regression has no positive effect", "_____no_output_____" ], [ "Now we can help the user to predict the price of a real estate in Boston", "_____no_output_____" ] ], [ [ "df_test_incl_target.head(10)\n", "_____no_output_____" ], [ "#create a dictionary from rows \n#delete target value\npred_price_list = []\nz = 0\nwhile z < 10:\n ad = df_test_incl_target.iloc[z].to_dict()\n del ad['MEDV']\n #dt_test is a dataframe with one row (contains above dict info)\n df_test = pd.DataFrame([ad])\n X_test = prepare_X(df_test)\n #train model without ridge regression\n w_0, w = linear_regression(X_train, y_train)\n #prediction of the price\n y_pred = w_0 + X_test.dot(w)\n pred_price_list.append(y_pred)\n z = z + 1\n \npred_price_list", "_____no_output_____" ], [ "real_price = df_test_incl_target.MEDV.tolist()", "_____no_output_____" ], [ "#get average of difference between real price and predicted price\ny = 0\ndiff_list = []\nwhile y < 10:\n diff = real_price[y] - pred_price_list[y]\n diff_list.append(diff)\n y += 1\n\nsum(diff_list) / len(diff_list)\n", "_____no_output_____" ] ], [ [ "later on, we can also try other models and see, if the rmse can be further reduced<br>\nLastly, I want to check how increased or decreaesed feature variables will influence the target variable", "_____no_output_____" ] ], [ [ "ad = df_test_incl_target.iloc[0].to_dict()\nad", "_____no_output_____" ], [ "ad_test = {'CRIM': 0.223,\n 'ZN': 0,\n 'INDUS': 9.69,\n 'CHAS': 0,\n 'NOX': 0.585,\n 'RM': 6.025,\n 'AGE': 79.9,\n 'DIS': 2.4982,\n 'RAD': 6.0,\n 'TAX': 391.0,\n 'PTRATIO': 19.2,\n 'B': 396.9,\n 'LSTAT': 14.33}", "_____no_output_____" ], [ "#dt_test is a dataframe with one row (contains above dict info)\ndf_test = pd.DataFrame([ad_test])\nX_test = prepare_X(df_test)\n#train model without ridge regression\nw_0, w = linear_regression(X_train, y_train)\n#prediction of the price\ny_pred = w_0 + X_test.dot(w)\ny_pred", "_____no_output_____" ] ], [ [ "<b>Explanation of Features</b>\n* CRIM: per capita crime rate per town (assumption: if CRIM high, target small --> correct)\n* ZN: proportion of residential land zoned for lots over 25,000 sq. ft (assumption: if ZN high, target big --> correct)\n* INDUS: proportion of non-retail business acres per town (assumption: if INDUS high, target small --> correct)\n* CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) (categorical! assumption: if 1, target high --> correct)\n* NOX: nitrogen oxides concentration (parts per 10 million) (assumption: if NOX high, target small --> correct)\n* RM: average number of rooms per dwelling.(assumption: if RM high, target big --> correct)\n* AGE: proportion of owner-occupied units built prior to 1940. (assumption: if AGE high, target big --> not clear)\n* DIS: weighted mean of distances to five Boston employment centres. (assumption: if DIS high, target small --> correct)\n* RAD: index of accessibility to radial highways. 
(assumption: if RAD high, target big --> correct)\n* TAX: full-value property-tax rate per \\$10,000. (assumption: if TAX high, target big --> not correct)\n* PTRATIO: pupil-teacher ratio by town. (assumption: if PTRATIO high, target small --> correct)\n* B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town. (assumption: if B high, target small --> not correct)\n* LSTAT: lower status of the population (percent). (assumption: if LSTAT high, target small --> correct)\n* MEDV: median value of owner-occupied homes in \\$1000s. (target)\n\n", "_____no_output_____" ] ], [ [ "#check against the test dataset to see if the model works\nX_train = prepare_X(df_train)\nw_0, w = linear_regression(X_train, y_train)\n\nX_val = prepare_X(df_val)\ny_pred = w_0 + X_val.dot(w)\nprint('validation:', rmse(y_val, y_pred))\n\nX_test = prepare_X(df_test)\ny_pred = w_0 + X_test.dot(w)\nprint('test:', rmse(y_test, y_pred))", "validation: 5.028887217906398\ntest: 7.746497710561239\n" ] ] ]
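Editor's sanity check (not in the notebook above): the closed-form normal-equation weights should agree with scikit-learn's `LinearRegression` fitted on the same matrices. A sketch assuming `X_train`, `y_train`, `X_val`, `y_val` and `rmse` from the cells above:

```python
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(X_train, y_train)  # fits an intercept plus weights, analogous to w_0 and w
print('sklearn validation RMSE:', rmse(y_val, lr.predict(X_val)))
```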
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb62e79613c639fdf40fc4dccd00a5d49a25bdac
790
ipynb
Jupyter Notebook
Lesson2_settingUpPc/.ipynb_checkpoints/convert-notebook_from-terminal-checkpoint.ipynb
raafatzahran/Udacity-DataScience
a27eb164d840fb72fb9ab5f021e43856e60cf243
[ "MIT" ]
null
null
null
Lesson2_settingUpPc/.ipynb_checkpoints/convert-notebook_from-terminal-checkpoint.ipynb
raafatzahran/Udacity-DataScience
a27eb164d840fb72fb9ab5f021e43856e60cf243
[ "MIT" ]
null
null
null
Lesson2_settingUpPc/.ipynb_checkpoints/convert-notebook_from-terminal-checkpoint.ipynb
raafatzahran/Udacity-DataScience
a27eb164d840fb72fb9ab5f021e43856e60cf243
[ "MIT" ]
null
null
null
19.268293
72
0.549367
[ [ [ "### to convert a notebook to an HTML file, in your terminal use\n`jupyter nbconvert --to html notebook.ipynb`", "_____no_output_____" ] ] ]
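A scripted equivalent of the terminal command above (editorial sketch; the exporter name is the only thing that changes for other output formats such as `script`):

```python
# run the same nbconvert conversion from Python instead of the terminal
import subprocess
subprocess.run(["jupyter", "nbconvert", "--to", "html", "notebook.ipynb"], check=True)
```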
[ "markdown" ]
[ [ "markdown" ] ]
cb62e8d67c4e1f50bf0c73f5e5cd4302aada292c
570,071
ipynb
Jupyter Notebook
known_systems.ipynb
jradavenport/IU-Aur
a8562543cfee78641b143c1c755599f076021f33
[ "MIT" ]
1
2020-08-05T03:15:31.000Z
2020-08-05T03:15:31.000Z
known_systems.ipynb
jradavenport/IU-Aur
a8562543cfee78641b143c1c755599f076021f33
[ "MIT" ]
2
2020-08-05T18:22:52.000Z
2020-11-03T00:47:26.000Z
known_systems.ipynb
jradavenport/IU-Aur
a8562543cfee78641b143c1c755599f076021f33
[ "MIT" ]
null
null
null
1,057.64564
42,904
0.955444
[ [ [ " Let's go through the known systems in [Table 1](https://www.aanda.org/articles/aa/full_html/2018/01/aa30655-17/T1.html) of Jurysek+(2018)", "_____no_output_____" ] ], [ [ "# 11 systems listed in their Table 1\nsystems = ['RW Per', 'IU Aur', 'AH Cep', 'AY Mus', \n 'SV Gem', 'V669 Cyg', 'V685 Cen', \n 'V907 Sco', 'SS Lac', 'QX Cas', 'HS Hya']\n\nP_EB = [13.1989, 1.81147, 1.7747, 3.2055, 4.0061, 1.5515, \n 1.19096, 3.77628, 14.4161, 6.004709, 1.568024]\n", "_____no_output_____" ] ], [ [ "I already know about some... \n\n- [HS Hya](https://github.com/jradavenport/HS-Hya) (yes, the final eclipses!)\n- [IU Aur](IU_Aur.ipynb) (yes, still eclipsing)\n- [QX Cas](https://github.com/jradavenport/QX-Cas) (yes, but not eclipsing, though new eclipses present...)\n- V907 Sco (yes, not sure if eclipsing still)\n\n\n1. Go through each system. Check [MAST](https://mast.stsci.edu/portal/Mashup/Clients/Mast/Portal.html) (has 2-min data), data could be pulled with [lightkurve](https://docs.lightkurve.org/tutorials/), \n2. if not check for general coverage with the [Web Viewing Tool](https://heasarc.gsfc.nasa.gov/cgi-bin/tess/webtess/wtv.py) \n3. and try to generate a 30-min lightcurve from pixel-level data with [Eleanor](https://adina.feinste.in/eleanor/getting_started/tutorial.html)\n4. For every system w/ TESS data, make some basic light curves. Is eclipse still there? Is there rotation?\n5. For each, find best paper(s) that characterize the system. Start w/ references in Table 1", "_____no_output_____" ] ], [ [ "from IPython.display import Image\n\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "import eleanor\nimport numpy as np\nfrom astropy import units as u\nimport matplotlib.pyplot as plt\nfrom astropy.coordinates import SkyCoord", "_____no_output_____" ], [ "import matplotlib\nmatplotlib.rcParams.update({'font.size':18})\nmatplotlib.rcParams.update({'font.family':'serif'})", "_____no_output_____" ], [ "for k in range(len(systems)):\n try:\n star = eleanor.Source(name=systems[k])\n print(star.name, star.tic, star.gaia, star.tess_mag)\n\n data = eleanor.TargetData(star)\n q = (data.quality == 0)\n\n plt.figure()\n plt.plot(data.time[q], data.raw_flux[q]/np.nanmedian(data.raw_flux[q]), 'k')\n# plt.plot(data.time[q], data.corr_flux[q]/np.nanmedian(data.corr_flux[q]) + 0.03, 'r')\n plt.ylabel('Normalized Flux')\n plt.xlabel('Time [BJD - 2457000]')\n plt.title(star.name)\n plt.show()\n \n plt.figure()\n plt.scatter((data.time[q] % P_EB[k])/P_EB[k], data.raw_flux[q]/np.nanmedian(data.raw_flux[q]))\n# plt.plot(data.time[q], data.corr_flux[q]/np.nanmedian(data.corr_flux[q]) + 0.03, 'r')\n plt.ylabel('Normalized Flux')\n plt.xlabel('Phase (P='+str(P_EB[k])+')')\n plt.title(star.name)\n plt.show()\n except:\n print('Sorry '+systems[k])", "No eleanor postcard has been made for your target (yet). Using TessCut instead.\nRW Per 410193513 229136921858228096 9.03575\n" ] ] ]
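A possible follow-up for the "Is there rotation?" question above (editorial sketch, not from the notebook; it assumes `data` and `q` are still bound from the last iteration of the loop):

```python
# Lomb-Scargle periodogram of the good-quality cadences
from astropy.timeseries import LombScargle

frequency, power = LombScargle(data.time[q], data.raw_flux[q]).autopower()
best_period = 1 / frequency[np.argmax(power)]
print('strongest periodogram peak at', best_period, 'days')
```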
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb6302a20fcf888b53b9e1e68505fcffbc9f54e1
7,859
ipynb
Jupyter Notebook
beginner_source/blitz/autograd_tutorial.ipynb
taehwakkwon/tutorials
a1c864d9a5da16d91903d83c392983446a257fe1
[ "BSD-3-Clause" ]
null
null
null
beginner_source/blitz/autograd_tutorial.ipynb
taehwakkwon/tutorials
a1c864d9a5da16d91903d83c392983446a257fe1
[ "BSD-3-Clause" ]
null
null
null
beginner_source/blitz/autograd_tutorial.ipynb
taehwakkwon/tutorials
a1c864d9a5da16d91903d83c392983446a257fe1
[ "BSD-3-Clause" ]
null
null
null
7,859
7,859
0.667642
[ [ [ "%matplotlib inline", "_____no_output_____" ], [ "import torch\r\nx = torch.randn(1, requires_grad=True) + torch.randn(1)\r\nprint(x)\r\ny = torch.randn(2, requires_grad=True).sum()\r\nprint(y)\r\nz = torch.randn(2, requires_grad=True).mean()\r\nprint(z)\r\n\r\na = torch.randn(2, requires_grad=True)\r\nprint(a)\r\nprint(a.mul(2))", "tensor([-0.5571], grad_fn=<AddBackward0>)\ntensor(0.0818, grad_fn=<SumBackward0>)\ntensor(-0.1333, grad_fn=<MeanBackward0>)\ntensor([ 0.6497, -0.7352], requires_grad=True)\ntensor([ 1.2995, -1.4703], grad_fn=<MulBackward0>)\n" ], [ "x = torch.ones(2, 2, requires_grad=True)\r\nprint(x)", "tensor([[1., 1.],\n [1., 1.]], requires_grad=True)\n" ], [ "y = x + 2\r\nprint(y)", "tensor([[3., 3.],\n [3., 3.]], grad_fn=<AddBackward0>)\n" ], [ "print(y.grad_fn)", "<AddBackward0 object at 0x7faf2469df60>\n" ], [ "z = y * y * 3\r\nout = z.mean()\r\n\r\nprint(z, out)", "tensor([[27., 27.],\n [27., 27.]], grad_fn=<MulBackward0>) tensor(27., grad_fn=<MeanBackward0>)\n" ], [ "a = torch.randn(2, 2)\r\na = ((a * 3) / (a - 1))\r\nprint(a.requires_grad)\r\na.requires_grad_(True)\r\nprint(a.requires_grad)\r\nb = (a * a).sum()\r\nprint(b.grad_fn)", "False\nTrue\n<SumBackward0 object at 0x7faf2469db70>\n" ], [ "x", "_____no_output_____" ], [ "out.backward()", "_____no_output_____" ], [ "print(x.grad)", "tensor([[4.5000, 4.5000],\n [4.5000, 4.5000]])\n" ], [ "x = torch.randn(3, requires_grad=True)\r\n\r\ny = x * 2\r\nwhile y.data.norm() < 1000:\r\n y = y * 2\r\n\r\nprint(y)", "tensor([ 432.4510, -522.7213, -1447.4696], grad_fn=<MulBackward0>)\n" ], [ "gradients = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)\r\ny.backward(gradients)\r\n\r\nprint(x.grad)", "tensor([2.0480e+02, 2.0480e+03, 2.0480e-01])\n" ], [ "gradients", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
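A related autograd idiom the cells above stop short of (editorial addition): turning history tracking off, either with `torch.no_grad()` or by detaching a tensor from the graph.

```python
x = torch.ones(2, 2, requires_grad=True)
with torch.no_grad():
    y = x * 2            # built without gradient history inside this block
z = x.detach()           # same data, removed from the graph
print(y.requires_grad, z.requires_grad)  # False False
```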
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb63144ccc6356736b21b79248b8d0565ac98c3c
10,859
ipynb
Jupyter Notebook
notebooks/forecast_followers/03_update_backtest.ipynb
piushvaish/instagram-growth-strategy
c0740094de376e4c2651c95a1182d408f24a941d
[ "Apache-2.0" ]
null
null
null
notebooks/forecast_followers/03_update_backtest.ipynb
piushvaish/instagram-growth-strategy
c0740094de376e4c2651c95a1182d408f24a941d
[ "Apache-2.0" ]
null
null
null
notebooks/forecast_followers/03_update_backtest.ipynb
piushvaish/instagram-growth-strategy
c0740094de376e4c2651c95a1182d408f24a941d
[ "Apache-2.0" ]
null
null
null
31.293948
398
0.596372
[ [ [ "A common use case requires the forecaster to regularly update with new data and make forecasts on a rolling basis. This is especially useful if the same kind of forecast has to be made at regular time points, e.g., daily or weekly. sktime forecasters support this type of deployment workflow via the update and update_predict methods.\n\nThe update method can be called when a forecaster is already fitted, to ingest new data and make updated forecasts - this is referred to as an “update step”.\n\nAfter the update, the forecaster’s internal “now” state (the cutoff) is set to the latest time stamp seen in the update batch (assumed to be later than previously seen data).\n\nThe general pattern is as follows:\n\n1. specify a forecasting strategy\n\n2. specify a relative forecasting horizon\n\n3. fit the forecaster to an initial batch of data using fit\n\n4. make forecasts for the relative forecasting horizon, using predict\n\n5. obtain new data; use update to ingest new data\n\n6. make forecasts using predict for the updated data\n\n7. repeat 5 and 6 as often as required\n\nExample: suppose that, in the airline example, we want to make forecasts a year ahead, but every month, starting December 1957. The first few months, forecasts would be made as follows:", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom sktime.forecasting.ets import AutoETS\nfrom sktime.utils.plotting import plot_series\nimport numpy as np", "_____no_output_____" ], [ "# we prepare the full data set for convenience\n# note that in the scenario we will \"know\" only part of this at certain time points\ndf = pd.read_csv(\"../../data/later/profile_growth.csv\")\n\n#df.columns\n\nfollowers = df[['Date', 'Followers']]\n\nfollowers['Date'] = pd.PeriodIndex(pd.DatetimeIndex(followers['Date']), freq='D') \n\ny = followers.set_index('Date').sort_index()", "_____no_output_____" ], [ "from sktime.forecasting.naive import NaiveForecaster\nforecaster = NaiveForecaster(strategy=\"last\")\n# December 1957\n\n# this is the data known in December 1975\ny_1957Dec = y[:-36]\n\n# step 1: specifying the forecasting strategy\n#forecaster = AutoETS(auto=True, sp=7, n_jobs=-1)\n\n# step 2: specifying the forecasting horizon: one year ahead, all months\nfh = np.arange(1, 13)\n\n# step 3: this is the first time we use the model, so we fit it\nforecaster.fit(y_1957Dec)\n\n# step 4: obtaining the first batch of forecasts for Jan 1958 - Dec 1958\ny_pred_1957Dec = forecaster.predict(fh)", "_____no_output_____" ], [ "# plotting predictions and past data\nplot_series(y_1957Dec, y_pred_1957Dec, labels=[\"y_1957Dec\", \"y_pred_1957Dec\"])", "_____no_output_____" ], [ "# January 1958\n\n# new data is observed:\ny_1958Jan = y[:-36]\n\n# step 5: we update the forecaster with the new data\nforecaster.update(y_1958Jan)\n\n# step 6: making forecasts with the updated data\ny_pred_1958Jan = forecaster.predict(fh)", "_____no_output_____" ], [ "# note that the fh is relative, so forecasts are automatically for 1 month later\n# i.e., from Feb 1958 to Jan 1959\ny_pred_1958Jan", "_____no_output_____" ], [ "# plotting predictions and past data\nplot_series(\n y[:-35],\n y_pred_1957Dec,\n y_pred_1958Jan,\n labels=[\"y_1957Dec\", \"y_pred_1957Dec\", \"y_pred_1958Jan\"],\n)", "_____no_output_____" ], [ "# February 1958\n\n# new data is observed:\ny_1958Feb = y[:-35]\n\n# step 5: we update the forecaster with the new data\nforecaster.update(y_1958Feb)\n\n# step 6: making forecasts with the updated data\ny_pred_1958Feb = forecaster.predict(fh)", 
"_____no_output_____" ], [ "# plotting predictions and past data\nplot_series(\n y[:-35],\n y_pred_1957Dec,\n y_pred_1958Jan,\n y_pred_1958Feb,\n labels=[\"y_1957Dec\", \"y_pred_1957Dec\", \"y_pred_1958Jan\", \"y_pred_1958Feb\"],\n)", "_____no_output_____" ] ], [ [ "… and so on.\n\nA shorthand for running first update and then predict is update_predict_single - for some algorithms, this may be more efficient than the separate calls to update and predict:", "_____no_output_____" ] ], [ [ "# March 1958\n\n# new data is observed:\ny_1958Mar = y[:-34]\n\n# step 5&6: update/predict in one step\nforecaster.update_predict_single(y_1958Mar, fh=fh)", "_____no_output_____" ] ], [ [ "In the rolling deployment mode, may be useful to move the estimator’s “now” state (the cutoff) to later, for example if no new data was observed, but time has progressed; or, if computations take too long, and forecasts have to be queried.\n\nThe update interface provides an option for this, via the update_params argument of update and other update funtions.\n\nIf update_params is set to False, no model update computations are performed; only data is stored, and the internal “now” state (the cutoff) is set to the most recent date.", "_____no_output_____" ] ], [ [ "# April 1958\n\n# new data is observed:\ny_1958Apr = y[:-33]\n\n# step 5: perform an update without re-computing the model parameters\nforecaster.update(y_1958Apr, update_params=False)", "_____no_output_____" ] ], [ [ "sktime can also simulate the update/predict deployment mode with a full batch of data.\n\nThis is not useful in deployment, as it requires all data to be available in advance; however, it is useful in playback, such as for simulations or model evaluation.\n\nThe update/predict playback mode can be called using update_predict and a re-sampling constructor which encodes the precise walk-forward scheme.", "_____no_output_____" ], [ "To evaluate forecasters with respect to their performance in rolling forecasting, the forecaster needs to be tested in a set-up mimicking rolling forecasting, usually on past data. Note that the batch back-testing as in Section 1.3 would not be an appropriate evaluation set-up for rolling deployment, as that tests only a single forecast batch.\n\nThe advanced evaluation workflow can be carried out using the evaluate benchmarking function. 
evaluate takes as arguments: - a forecaster to be evaluated - a scikit-learn re-sampling strategy for temporal splitting (cv below), e.g., ExpandingWindowSplitter or SlidingWindowSplitter - a strategy (string): whether the forecaster should always be refitted or just fitted once and then updated", "_____no_output_____" ] ], [ [ "#from sktime.forecasting.arima import AutoARIMA\nfrom sktime.forecasting.ets import AutoETS\nfrom sktime.forecasting.model_evaluation import evaluate\nfrom sktime.forecasting.model_selection import ExpandingWindowSplitter", "_____no_output_____" ], [ "#forecaster = AutoARIMA(sp=12, suppress_warnings=True)\nforecaster = AutoETS(auto=True, sp=12, n_jobs=-1)\n#forecaster = NaiveForecaster(strategy=\"last\")\n\ncv = ExpandingWindowSplitter(\n    step_length=12, fh=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], initial_window=72\n)\n\ndf = evaluate(forecaster=forecaster, y=y, cv=cv, strategy=\"refit\", return_data=True)\n\ndf.iloc[:, :5]", "_____no_output_____" ], [ "# visualization of a forecaster evaluation\nfig, ax = plot_series(\n    y,\n    df[\"y_pred\"].iloc[0],\n    df[\"y_pred\"].iloc[1],\n    df[\"y_pred\"].iloc[2],\n    df[\"y_pred\"].iloc[3],\n    df[\"y_pred\"].iloc[4],\n    df[\"y_pred\"].iloc[5],\n    markers=[\"o\", \"\", \"\", \"\", \"\", \"\", \"\"],\n    labels=[\"y_true\"] + [\"y_pred (Backtest \" + str(x) + \")\" for x in range(6)],\n)\nax.legend();", "_____no_output_____" ], [ "df[\"y_pred\"].iloc[0]", "_____no_output_____" ], [ "df[\"y_pred\"].iloc[1]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb631ff838e228a4ecfbaaa7a0357132341f7e6c
11,257
ipynb
Jupyter Notebook
docs/examples/Parameters/Parameters.ipynb
tomasstankevic/Qcodes
abe2a9a9bfb46b0801cd8dfe3ab6bc9f994e77d4
[ "MIT" ]
1
2019-01-16T13:52:56.000Z
2019-01-16T13:52:56.000Z
docs/examples/Parameters/Parameters.ipynb
tomasstankevic/Qcodes
abe2a9a9bfb46b0801cd8dfe3ab6bc9f994e77d4
[ "MIT" ]
12
2020-10-13T16:53:37.000Z
2020-10-14T17:16:22.000Z
docs/examples/Parameters/Parameters.ipynb
tomasstankevic/Qcodes
abe2a9a9bfb46b0801cd8dfe3ab6bc9f994e77d4
[ "MIT" ]
1
2020-05-03T22:47:40.000Z
2020-05-03T22:47:40.000Z
35.399371
362
0.548015
[ [ [ "# Parameters in QCoDeS\n\nA `Parameter` is the basis of measurements and control within QCoDeS. Anything that you want to either measure or control within QCoDeS should satisfy the `Parameter` interface. You may read more about the `Parameter` [here](http://qcodes.github.io/Qcodes/user/intro.html#parameter).\n", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom qcodes.instrument.parameter import Parameter, ArrayParameter, MultiParameter, ManualParameter\nfrom qcodes.utils import validators", "_____no_output_____" ] ], [ [ "QCoDeS provides the following classes of built-in parameters:\n- `Parameter` represents a single value at a time\n - Example: voltage\n- `ParameterWithSetpoints` is intended for array-values parameters.\n This Parameter class is intended for anything where a call to the instrument\n returns an array of values. [This notebook](Simple-Example-of-ParameterWithSetpoints.ipynb)\n gives more detailed examples of how this parameter can be used.\n- `ArrayParameter` represents an array of values of all the same type that are returned all at once. \n - Example: voltage vs time waveform\n - **NOTE:** This is an older base class for array-valued parameters. For any new driver we strongly recommend using `ParameterWithSetpoints` class which is both more flexible and significantly easier to use. Refer to notebook on [writing drivers with ParameterWithSetpoints](Simple-Example-of-ParameterWithSetpoints.ipynb) \n- `MultiParameter` represents a collection of values with different meaning and possibly different dimension\n - Example: I and Q, or I vs time and Q vs time\n\nParameters are described in detail in the [Creating Instrument Drivers](../writing_drivers/Creating-Instrument-Drivers.ipynb) tutorial.", "_____no_output_____" ], [ "## Parameter\nMost of the time you can use `Parameter` directly; even if you have custom `get`/`set` functions, but sometimes it's useful to subclass `Parameter`. Note that since the superclass `Parameter` actually wraps these functions (to include some extra nice-to-have functionality), your subclass should define `get_raw` and `set_raw` rather than `get` and `set`. ", "_____no_output_____" ] ], [ [ "class MyCounter(Parameter):\n def __init__(self, name):\n # only name is required\n super().__init__(name, label='Times this has been read',\n vals=validators.Ints(min_value=0),\n docstring='counts how many times get has been called '\n 'but can be reset to any integer >= 0 by set')\n self._count = 0\n \n # you must provide a get method, a set method, or both.\n def get_raw(self):\n self._count += 1\n return self._count\n \n def set_raw(self, val):\n self._count = val\n\nc = MyCounter('c')\nc2 = MyCounter('c2')\n\n# c() is equivalent to c.get()\nprint('first call:', c())\nprint('second call:', c())", "first call: 1\nsecond call: 2\n" ], [ "# c2(val) is equivalent to c2.set(val)\nc2(22)\n", "_____no_output_____" ] ], [ [ "## ArrayParameter\n**NOTE:** This is an older base class for array-valued parameters. For any new driver we strongly recommend using `ParameterWithSetpoints` class which is both more flexible and significantly easier to use. Refer to notebook on [writing drivers with ParameterWithSetpoints](Simple-Example-of-ParameterWithSetpoints.ipynb). \n\nWe have kept the documentation shown below of `ArrayParameter` for the legacy purpose.\n\nFor actions that create a whole array of values at once. 
When you use it in a `Loop`, it makes a single `DataArray` with the array returned by `get` nested inside extra dimension(s) for the loop.\n\n`ArrayParameter` is, for now, only gettable.", "_____no_output_____" ] ], [ [ "class ArrayCounter(ArrayParameter):\n    def __init__(self):\n        # only name and shape are required\n        # the setpoints I'm giving here are identical to the defaults\n        # this param would get but I'll give them anyway for\n        # demonstration purposes\n        super().__init__('array_counter', shape=(3, 2),\n                         label='Total number of values provided',\n                         unit='',\n                         # first setpoint array is 1D, second is 2D, etc...\n                         setpoints=((0, 1, 2), ((0, 1), (0, 1), (0, 1))),\n                         setpoint_names=('index0', 'index1'),\n                         setpoint_labels=('Outer param index', 'Inner param index'),\n                         docstring='fills a 3x2 array with increasing integers')\n        self._val = 0\n    \n    def get_raw(self):\n        # here I'm returning a nested list, but any sequence type will do.\n        # tuple, np.array, DataArray...\n        out = [[self._val + 2 * i + j for j in range(2)] for i in range(3)]\n        self._val += 6\n        return out\n\narray_counter = ArrayCounter()\n\n# simple get\nprint('first call:', array_counter())", "first call: [[0, 1], [2, 3], [4, 5]]\n" ] ], [ [ "## MultiParameter\nReturn multiple items at once, where each item can be a single value or an array. \n\nNOTE: Most of the kwarg names here are the plural of those used in `Parameter` and `ArrayParameter`. In particular, `MultiParameter` is the ONLY one that uses `units`, all the others use `unit`.\n\n`MultiParameter` is, for now, only gettable.", "_____no_output_____" ] ], [ [ "class SingleIQPair(MultiParameter):\n    def __init__(self, scale_param):\n        # only name, names, and shapes are required\n        # this version returns two scalars (shape = `()`)\n        super().__init__('single_iq', names=('I', 'Q'), shapes=((), ()),\n                         labels=('In phase amplitude', 'Quadrature amplitude'),\n                         units=('V', 'V'),\n                         # including these setpoints is unnecessary here, but\n                         # if you have a parameter that returns a scalar alongside\n                         # an array you can represent the scalar as an empty sequence.\n                         setpoints=((), ()),\n                         docstring='param that returns two single values, I and Q')\n        self._scale_param = scale_param\n    \n    def get_raw(self):\n        scale_val = self._scale_param()\n        return (scale_val, scale_val / 2)\n\nscale = ManualParameter('scale', initial_value=2)\niq = SingleIQPair(scale_param=scale)\n\n# simple get\nprint('simple get:', iq())", "simple get: (2, 1.0)\n" ], [ "class IQArray(MultiParameter):\n    def __init__(self, scale_param):\n        # names, labels, and units are the same \n        super().__init__('iq_array', names=('I', 'Q'), shapes=((5,), (5,)),\n                         labels=('In phase amplitude', 'Quadrature amplitude'),\n                         units=('V', 'V'),\n                         # note that EACH item needs a sequence of setpoint arrays\n                         # so a 1D item has its setpoints wrapped in a length-1 tuple\n                         setpoints=(((0, 1, 2, 3, 4),), ((0, 1, 2, 3, 4),)),\n                         docstring='param that returns two 1D arrays, I and Q')\n        self._scale_param = scale_param\n        self._indices = np.array([0, 1, 2, 3, 4])\n\n    def get_raw(self):\n        scale_val = self._scale_param()\n        return (self._indices * scale_val, self._indices * scale_val / 2)\n\niq_array = IQArray(scale_param=scale)\n\n# simple get\nprint('simple get', iq_array())", "simple get (array([0, 2, 4, 6, 8]), array([0., 1., 2., 3., 4.]))\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb6340cb51ae1047cc84fe76982abe732672efd7
1,476
ipynb
Jupyter Notebook
index.ipynb
marceltoebes/nbdev_test
d59a050453c1a89037f33b4ef7dde43045b4fbdb
[ "Apache-2.0" ]
null
null
null
index.ipynb
marceltoebes/nbdev_test
d59a050453c1a89037f33b4ef7dde43045b4fbdb
[ "Apache-2.0" ]
null
null
null
index.ipynb
marceltoebes/nbdev_test
d59a050453c1a89037f33b4ef7dde43045b4fbdb
[ "Apache-2.0" ]
null
null
null
15.702128
81
0.477642
[ [ [ "#hide", "_____no_output_____" ] ], [ [ "# Deck of Cards project\n\n> Bs project to check out nbdev functionality.", "_____no_output_____" ], [ "This file will become your README and also the index of your documentation.", "_____no_output_____" ], [ "## Install", "_____no_output_____" ], [ "`pip install your_project_name`", "_____no_output_____" ], [ "## How to use", "_____no_output_____" ], [ "Fill me in please! Don't forget code examples:", "_____no_output_____" ] ], [ [ "1+1", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
cb63494338e5d63f2002a6904c0ee91af1706123
6,900
ipynb
Jupyter Notebook
Discussion Notebooks/Econ126_Discussion_Week_02_blank.ipynb
t-hdd/econ126
17029937bd6c40e606d145f8d530728585c30a1d
[ "MIT" ]
null
null
null
Discussion Notebooks/Econ126_Discussion_Week_02_blank.ipynb
t-hdd/econ126
17029937bd6c40e606d145f8d530728585c30a1d
[ "MIT" ]
null
null
null
Discussion Notebooks/Econ126_Discussion_Week_02_blank.ipynb
t-hdd/econ126
17029937bd6c40e606d145f8d530728585c30a1d
[ "MIT" ]
null
null
null
35.025381
431
0.403478
[ [ [ "import matplotlib.pyplot as plt\nplt.style.use('classic')\n%matplotlib inline", "_____no_output_____" ] ], [ [ "# Discussion: Week 2", "_____no_output_____" ] ], [ [ "# Import the NumPy module\n", "_____no_output_____" ] ], [ [ "## Exercise: Capital Evolution in the Solow Model\n\nSuppose that capital per worker $k_t$ evolves according to the following equation:\n\n\\begin{align}\nk_{t+1} & = 0.12 \\cdot 100 \\cdot k_t^{1/3} + 0.9\\cdot k_t, \\tag{1}\n\\end{align}\n\nwhere the first term on the right-hand side implies that the economy has a 12 percent savings rate, that total factor productivity equals 100, and that there is no growth in technology (or \"labor efficiency\"). The second term implies that the rate of capital depreciation is 10 percent (i.e., $1-\\delta = 0.9 \\Rightarrow \\delta = 0.1$). Assume that capital per worker in the initial period $k_0$ is given.\n\nThe *steady state* quantity of capital per worker is the number $k^*$ such that if $k_t = k^*$, $k_{t+1} = k^*$. Find $k^*$ by dropping the time subscripts in equation (1) and solving for $k$. Obtain:\n\n\\begin{align}\nk^* & = \\left(\\frac{0.1}{0.12\\cdot 100}\\right)^{3/2} = 1{,}314.53414 \\tag{2}\n\\end{align}", "_____no_output_____" ], [ "### Part (a): Simulate 100 Periods", "_____no_output_____" ] ], [ [ "# Create a variable called 'k0' that stores the initial quantity of capital in the economy. Set 'k0' to 400\n\n\n# Create a variable called 'T' equal to the number of periods after 0 to simulate. Set T = 100\n\n\n# Use the function np.zeros to create a variable called 'capital' equal to an array of zeros of length T+1\n\n\n# Print the value of 'capital'\n", "_____no_output_____" ], [ "# Set the first element of 'capital' to the value in k0\n\n\n# Print the value of 'capital'\n", "_____no_output_____" ], [ "# Use a for loop to iterate over the additional elemnts of the 'capital' array that need to be computed.\n# Hint: capital has length T+1. The first value is filled, so you need fill the remaining T values.\n\n\n# Print the value of 'capital'\n", "_____no_output_____" ], [ "# Print the value of the last element of 'capital'\n", "_____no_output_____" ], [ "# Plot the simulated capital per worker\n\n\n", "_____no_output_____" ] ], [ [ "### Part (b): Simulate 1,000 Periods", "_____no_output_____" ] ], [ [ "# Create a variable called 'T' equal to the number of periods after 0 to simulate. Set T = 1000\n\n\n# Use the function np.zeros to create a variable called 'capital' equal to an array of zeros of length T+1\n\n\n# Set the first element of 'capital' to the value in k0\n\n\n# Use a for loop to iterate over the additional elemnts of the 'capital' array that need to be computed.\n\n\n# Print the value of the last element of 'capital'\n", "_____no_output_____" ] ], [ [ "### Part (c): Evaluation\n\nProvide answers to the follow questions in the next cell.\n\n**Question**\n\n1. Why is the final value of capital computed in Part (b) closer to the true steady state than the value computed in Part (a)?", "_____no_output_____" ], [ "**Answer**\n\n1. ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb635e4c72fdbf5244df4816290536ab0e74c89c
25,580
ipynb
Jupyter Notebook
nmt/Kabyle-English_kab-eng.ipynb
WangXingqiu/machine-translation
ba6e9556645c777d8a15dbb3bec11521a75744a9
[ "MIT" ]
3
2020-12-16T03:58:09.000Z
2021-06-06T07:25:35.000Z
nmt/Kabyle-English_kab-eng.ipynb
WangXingqiu/machine-translation
ba6e9556645c777d8a15dbb3bec11521a75744a9
[ "MIT" ]
null
null
null
nmt/Kabyle-English_kab-eng.ipynb
WangXingqiu/machine-translation
ba6e9556645c777d8a15dbb3bec11521a75744a9
[ "MIT" ]
2
2020-12-20T03:18:06.000Z
2021-06-06T07:25:55.000Z
29.134396
342
0.542885
[ [ [ "# 基于注意力的神经机器翻译", "_____no_output_____" ], [ "此笔记本训练一个将卡比尔语翻译为英语的序列到序列(sequence to sequence,简写为 seq2seq)模型。此例子难度较高,需要对序列到序列模型的知识有一定了解。\n\n训练完此笔记本中的模型后,你将能够输入一个卡比尔语句子,例如 *\"Times!\"*,并返回其英语翻译 *\"Fire!\"*\n\n对于一个简单的例子来说,翻译质量令人满意。但是更有趣的可能是生成的注意力图:它显示在翻译过程中,输入句子的哪些部分受到了模型的注意。\n\n<img src=\"https://tensorflow.google.cn/images/spanish-english.png\" alt=\"spanish-english attention plot\">\n\n请注意:运行这个例子用一个 P100 GPU 需要花大约 10 分钟。", "_____no_output_____" ] ], [ [ "import tensorflow as tf\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom sklearn.model_selection import train_test_split\n\nimport unicodedata\nimport re\nimport numpy as np\nimport os\nimport io\nimport time", "_____no_output_____" ] ], [ [ "## 下载和准备数据集\n\n我们将使用 http://www.manythings.org/anki/ 提供的一个语言数据集。这个数据集包含如下格式的语言翻译对:\n\n```\nMay I borrow this book?\t¿Puedo tomar prestado este libro?\n```\n\n这个数据集中有很多种语言可供选择。我们将使用英语 - 卡比尔语数据集。为方便使用,我们在谷歌云上提供了此数据集的一份副本。但是你也可以自己下载副本。下载完数据集后,我们将采取下列步骤准备数据:\n\n1. 给每个句子添加一个 *开始* 和一个 *结束* 标记(token)。\n2. 删除特殊字符以清理句子。\n3. 创建一个单词索引和一个反向单词索引(即一个从单词映射至 id 的词典和一个从 id 映射至单词的词典)。\n4. 将每个句子填充(pad)到最大长度。", "_____no_output_____" ] ], [ [ "'''\n# 下载文件\npath_to_zip = tf.keras.utils.get_file(\n 'spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip',\n extract=True)\n\npath_to_file = os.path.dirname(path_to_zip)+\"/spa-eng/spa.txt\"\n'''\npath_to_file = \"./lan/kab.txt\"", "_____no_output_____" ], [ "# 将 unicode 文件转换为 ascii\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn')\n\n\ndef preprocess_sentence(w):\n w = unicode_to_ascii(w.lower().strip())\n\n # 在单词与跟在其后的标点符号之间插入一个空格\n # 例如: \"he is a boy.\" => \"he is a boy .\"\n # 参考:https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation\n w = re.sub(r\"([?.!,¿])\", r\" \\1 \", w)\n w = re.sub(r'[\" \"]+', \" \", w)\n\n # 除了 (a-z, A-Z, \".\", \"?\", \"!\", \",\"),将所有字符替换为空格\n w = re.sub(r\"[^a-zA-Z?.!,¿]+\", \" \", w)\n\n w = w.rstrip().strip()\n\n # 给句子加上开始和结束标记\n # 以便模型知道何时开始和结束预测\n w = '<start> ' + w + ' <end>'\n return w", "_____no_output_____" ], [ "en_sentence = u\"May I borrow this book?\"\nsp_sentence = u\"¿Puedo tomar prestado este libro?\"\nprint(preprocess_sentence(en_sentence))\nprint(preprocess_sentence(sp_sentence).encode('utf-8'))", "_____no_output_____" ], [ "# 1. 去除重音符号\n# 2. 清理句子\n# 3. 
返回这样格式的单词对:[ENGLISH, SPANISH]\ndef create_dataset(path, num_examples):\n lines = io.open(path, encoding='UTF-8').read().strip().split('\\n')\n\n word_pairs = [[preprocess_sentence(w) for w in l.split('\\t')] for l in lines[:num_examples]]\n\n return zip(*word_pairs)", "_____no_output_____" ], [ "en, sp = create_dataset(path_to_file, None)\nprint(en[-1])\nprint(sp[-1])", "_____no_output_____" ], [ "def max_length(tensor):\n return max(len(t) for t in tensor)", "_____no_output_____" ], [ "def tokenize(lang):\n lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(\n filters='')\n lang_tokenizer.fit_on_texts(lang)\n\n tensor = lang_tokenizer.texts_to_sequences(lang)\n\n tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor,\n padding='post')\n\n return tensor, lang_tokenizer", "_____no_output_____" ], [ "def load_dataset(path, num_examples=None):\n # 创建清理过的输入输出对\n targ_lang, inp_lang = create_dataset(path, num_examples)\n\n input_tensor, inp_lang_tokenizer = tokenize(inp_lang)\n target_tensor, targ_lang_tokenizer = tokenize(targ_lang)\n\n return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer", "_____no_output_____" ] ], [ [ "### 限制数据集的大小以加快实验速度(可选)\n\n在超过 10 万个句子的完整数据集上训练需要很长时间。为了更快地训练,我们可以将数据集的大小限制为 3 万个句子(当然,翻译质量也会随着数据的减少而降低):", "_____no_output_____" ] ], [ [ "# 尝试实验不同大小的数据集\nnum_examples = 30000\ninput_tensor, target_tensor, inp_lang, targ_lang = load_dataset(path_to_file, num_examples)\n\n# 计算目标张量的最大长度 (max_length)\nmax_length_targ, max_length_inp = max_length(target_tensor), max_length(input_tensor)", "_____no_output_____" ], [ "# 采用 80 - 20 的比例切分训练集和验证集\ninput_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)\n\n# 显示长度\nprint(len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val))", "_____no_output_____" ], [ "def convert(lang, tensor):\n for t in tensor:\n if t!=0:\n print (\"%d ----> %s\" % (t, lang.index_word[t]))", "_____no_output_____" ], [ "print (\"Input Language; index to word mapping\")\nconvert(inp_lang, input_tensor_train[0])\nprint ()\nprint (\"Target Language; index to word mapping\")\nconvert(targ_lang, target_tensor_train[0])", "_____no_output_____" ] ], [ [ "### 创建一个 tf.data 数据集", "_____no_output_____" ] ], [ [ "BUFFER_SIZE = len(input_tensor_train)\nBATCH_SIZE = 64\nsteps_per_epoch = len(input_tensor_train)//BATCH_SIZE\nembedding_dim = 256\nunits = 1024\nvocab_inp_size = len(inp_lang.word_index)+1\nvocab_tar_size = len(targ_lang.word_index)+1\n\ndataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)\ndataset = dataset.batch(BATCH_SIZE, drop_remainder=True)", "_____no_output_____" ], [ "example_input_batch, example_target_batch = next(iter(dataset))\nexample_input_batch.shape, example_target_batch.shape", "_____no_output_____" ] ], [ [ "## 编写编码器 (encoder) 和解码器 (decoder) 模型\n\n实现一个基于注意力的编码器 - 解码器模型。关于这种模型,你可以阅读 TensorFlow 的 [神经机器翻译 (序列到序列) 教程](https://github.com/tensorflow/nmt)。本示例采用一组更新的 API。此笔记本实现了上述序列到序列教程中的 [注意力方程式](https://github.com/tensorflow/nmt#background-on-the-attention-mechanism)。下图显示了注意力机制为每个输入单词分配一个权重,然后解码器将这个权重用于预测句子中的下一个单词。下图和公式是 [Luong 的论文](https://arxiv.org/abs/1508.04025v5)中注意力机制的一个例子。\n\n<img src=\"https://tensorflow.google.cn/images/seq2seq/attention_mechanism.jpg\" width=\"500\" alt=\"attention mechanism\">\n\n输入经过编码器模型,编码器模型为我们提供形状为 *(批大小,最大长度,隐藏层大小)* 的编码器输出和形状为 *(批大小,隐藏层大小)* 的编码器隐藏层状态。\n\n下面是所实现的方程式:\n\n<img 
src=\"https://tensorflow.google.cn/images/seq2seq/attention_equation_0.jpg\" alt=\"attention equation 0\" width=\"800\">\n<img src=\"https://tensorflow.google.cn/images/seq2seq/attention_equation_1.jpg\" alt=\"attention equation 1\" width=\"800\">\n\n本教程的编码器采用 [Bahdanau 注意力](https://arxiv.org/pdf/1409.0473.pdf)。在用简化形式编写之前,让我们先决定符号:\n\n* FC = 完全连接(密集)层\n* EO = 编码器输出\n* H = 隐藏层状态\n* X = 解码器输入\n\n以及伪代码:\n\n* `score = FC(tanh(FC(EO) + FC(H)))`\n* `attention weights = softmax(score, axis = 1)`。 Softmax 默认被应用于最后一个轴,但是这里我们想将它应用于 *第一个轴*, 因为分数 (score) 的形状是 *(批大小,最大长度,隐藏层大小)*。最大长度 (`max_length`) 是我们的输入的长度。因为我们想为每个输入分配一个权重,所以 softmax 应该用在这个轴上。\n* `context vector = sum(attention weights * EO, axis = 1)`。选择第一个轴的原因同上。\n* `embedding output` = 解码器输入 X 通过一个嵌入层。\n* `merged vector = concat(embedding output, context vector)`\n* 此合并后的向量随后被传送到 GRU\n\n每个步骤中所有向量的形状已在代码的注释中阐明:", "_____no_output_____" ] ], [ [ "class Encoder(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):\n super(Encoder, self).__init__()\n self.batch_sz = batch_sz\n self.enc_units = enc_units\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.gru = tf.keras.layers.GRU(self.enc_units,\n return_sequences=True,\n return_state=True,\n recurrent_initializer='glorot_uniform')\n\n def call(self, x, hidden):\n x = self.embedding(x)\n output, state = self.gru(x, initial_state = hidden)\n return output, state\n\n def initialize_hidden_state(self):\n return tf.zeros((self.batch_sz, self.enc_units))", "_____no_output_____" ], [ "encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)\n\n# 样本输入\nsample_hidden = encoder.initialize_hidden_state()\nsample_output, sample_hidden = encoder(example_input_batch, sample_hidden)\nprint ('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))\nprint ('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape))", "_____no_output_____" ], [ "class BahdanauAttention(tf.keras.layers.Layer):\n def __init__(self, units):\n super(BahdanauAttention, self).__init__()\n self.W1 = tf.keras.layers.Dense(units)\n self.W2 = tf.keras.layers.Dense(units)\n self.V = tf.keras.layers.Dense(1)\n\n def call(self, query, values):\n # 隐藏层的形状 == (批大小,隐藏层大小)\n # hidden_with_time_axis 的形状 == (批大小,1,隐藏层大小)\n # 这样做是为了执行加法以计算分数 \n hidden_with_time_axis = tf.expand_dims(query, 1)\n\n # 分数的形状 == (批大小,最大长度,1)\n # 我们在最后一个轴上得到 1, 因为我们把分数应用于 self.V\n # 在应用 self.V 之前,张量的形状是(批大小,最大长度,单位)\n score = self.V(tf.nn.tanh(\n self.W1(values) + self.W2(hidden_with_time_axis)))\n\n # 注意力权重 (attention_weights) 的形状 == (批大小,最大长度,1)\n attention_weights = tf.nn.softmax(score, axis=1)\n\n # 上下文向量 (context_vector) 求和之后的形状 == (批大小,隐藏层大小)\n context_vector = attention_weights * values\n context_vector = tf.reduce_sum(context_vector, axis=1)\n\n return context_vector, attention_weights", "_____no_output_____" ], [ "attention_layer = BahdanauAttention(10)\nattention_result, attention_weights = attention_layer(sample_hidden, sample_output)\n\nprint(\"Attention result shape: (batch size, units) {}\".format(attention_result.shape))\nprint(\"Attention weights shape: (batch_size, sequence_length, 1) {}\".format(attention_weights.shape))", "_____no_output_____" ], [ "class Decoder(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):\n super(Decoder, self).__init__()\n self.batch_sz = batch_sz\n self.dec_units = dec_units\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.gru = 
tf.keras.layers.GRU(self.dec_units,\n return_sequences=True,\n return_state=True,\n recurrent_initializer='glorot_uniform')\n self.fc = tf.keras.layers.Dense(vocab_size)\n\n # 用于注意力\n self.attention = BahdanauAttention(self.dec_units)\n\n def call(self, x, hidden, enc_output):\n # 编码器输出 (enc_output) 的形状 == (批大小,最大长度,隐藏层大小)\n context_vector, attention_weights = self.attention(hidden, enc_output)\n\n # x 在通过嵌入层后的形状 == (批大小,1,嵌入维度)\n x = self.embedding(x)\n\n # x 在拼接 (concatenation) 后的形状 == (批大小,1,嵌入维度 + 隐藏层大小)\n x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)\n\n # 将合并后的向量传送到 GRU\n output, state = self.gru(x)\n\n # 输出的形状 == (批大小 * 1,隐藏层大小)\n output = tf.reshape(output, (-1, output.shape[2]))\n\n # 输出的形状 == (批大小,vocab)\n x = self.fc(output)\n\n return x, state, attention_weights", "_____no_output_____" ], [ "decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)\n\nsample_decoder_output, _, _ = decoder(tf.random.uniform((64, 1)),\n sample_hidden, sample_output)\n\nprint ('Decoder output shape: (batch_size, vocab size) {}'.format(sample_decoder_output.shape))", "_____no_output_____" ] ], [ [ "## 定义优化器和损失函数", "_____no_output_____" ] ], [ [ "optimizer = tf.keras.optimizers.Adam()\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction='none')\n\ndef loss_function(real, pred):\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss_ = loss_object(real, pred)\n\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n\n return tf.reduce_mean(loss_)", "_____no_output_____" ] ], [ [ "## 检查点(基于对象保存)", "_____no_output_____" ] ], [ [ "checkpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(optimizer=optimizer,\n encoder=encoder,\n decoder=decoder)", "_____no_output_____" ] ], [ [ "## 训练\n\n1. 将 *输入* 传送至 *编码器*,编码器返回 *编码器输出* 和 *编码器隐藏层状态*。\n2. 将编码器输出、编码器隐藏层状态和解码器输入(即 *开始标记*)传送至解码器。\n3. 解码器返回 *预测* 和 *解码器隐藏层状态*。\n4. 解码器隐藏层状态被传送回模型,预测被用于计算损失。\n5. 使用 *教师强制 (teacher forcing)* 决定解码器的下一个输入。\n6. *教师强制* 是将 *目标词* 作为 *下一个输入* 传送至解码器的技术。\n7. 
最后一步是计算梯度,并将其应用于优化器和反向传播。", "_____no_output_____" ] ], [ [ "@tf.function\ndef train_step(inp, targ, enc_hidden):\n loss = 0\n\n with tf.GradientTape() as tape:\n enc_output, enc_hidden = encoder(inp, enc_hidden)\n\n dec_hidden = enc_hidden\n\n dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)\n\n # 教师强制 - 将目标词作为下一个输入\n for t in range(1, targ.shape[1]):\n # 将编码器输出 (enc_output) 传送至解码器\n predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)\n\n loss += loss_function(targ[:, t], predictions)\n\n # 使用教师强制\n dec_input = tf.expand_dims(targ[:, t], 1)\n\n batch_loss = (loss / int(targ.shape[1]))\n\n variables = encoder.trainable_variables + decoder.trainable_variables\n\n gradients = tape.gradient(loss, variables)\n\n optimizer.apply_gradients(zip(gradients, variables))\n\n return batch_loss", "_____no_output_____" ], [ "EPOCHS = 10\n\nfor epoch in range(EPOCHS):\n start = time.time()\n\n enc_hidden = encoder.initialize_hidden_state()\n total_loss = 0\n\n for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):\n batch_loss = train_step(inp, targ, enc_hidden)\n total_loss += batch_loss\n\n if batch % 100 == 0:\n print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,\n batch,\n batch_loss.numpy()))\n # 每 2 个周期(epoch),保存(检查点)一次模型\n if (epoch + 1) % 2 == 0:\n checkpoint.save(file_prefix = checkpoint_prefix)\n\n print('Epoch {} Loss {:.4f}'.format(epoch + 1,\n total_loss / steps_per_epoch))\n print('Time taken for 1 epoch {} sec\\n'.format(time.time() - start))", "_____no_output_____" ] ], [ [ "## 翻译\n\n* 评估函数类似于训练循环,不同之处在于在这里我们不使用 *教师强制*。每个时间步的解码器输入是其先前的预测、隐藏层状态和编码器输出。\n* 当模型预测 *结束标记* 时停止预测。\n* 存储 *每个时间步的注意力权重*。\n\n请注意:对于一个输入,编码器输出仅计算一次。", "_____no_output_____" ] ], [ [ "def evaluate(sentence):\n attention_plot = np.zeros((max_length_targ, max_length_inp))\n\n sentence = preprocess_sentence(sentence)\n\n inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]\n inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],\n maxlen=max_length_inp,\n padding='post')\n inputs = tf.convert_to_tensor(inputs)\n\n result = ''\n\n hidden = [tf.zeros((1, units))]\n enc_out, enc_hidden = encoder(inputs, hidden)\n\n dec_hidden = enc_hidden\n dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0)\n\n for t in range(max_length_targ):\n predictions, dec_hidden, attention_weights = decoder(dec_input,\n dec_hidden,\n enc_out)\n\n # 存储注意力权重以便后面制图\n attention_weights = tf.reshape(attention_weights, (-1, ))\n attention_plot[t] = attention_weights.numpy()\n\n predicted_id = tf.argmax(predictions[0]).numpy()\n\n result += targ_lang.index_word[predicted_id] + ' '\n\n if targ_lang.index_word[predicted_id] == '<end>':\n return result, sentence, attention_plot\n\n # 预测的 ID 被输送回模型\n dec_input = tf.expand_dims([predicted_id], 0)\n\n return result, sentence, attention_plot", "_____no_output_____" ], [ "# 注意力权重制图函数\ndef plot_attention(attention, sentence, predicted_sentence):\n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(1, 1, 1)\n ax.matshow(attention, cmap='viridis')\n\n fontdict = {'fontsize': 14}\n\n ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90)\n ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict)\n\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n\n plt.show()", "_____no_output_____" ], [ "def translate(sentence):\n result, sentence, attention_plot = evaluate(sentence)\n\n print('Input: %s' % (sentence))\n print('Predicted 
translation: {}'.format(result))\n\n attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]\n plot_attention(attention_plot, sentence.split(' '), result.split(' '))", "_____no_output_____" ] ], [ [ "## 恢复最新的检查点并验证", "_____no_output_____" ] ], [ [ "# 恢复检查点目录 (checkpoint_dir) 中最新的检查点\ncheckpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))", "_____no_output_____" ], [ "translate(u'hace mucho frio aqui.')", "_____no_output_____" ], [ "translate(u'esta es mi vida.')", "_____no_output_____" ], [ "translate(u'¿todavia estan en casa?')", "_____no_output_____" ], [ "# 错误的翻译\ntranslate(u'trata de averiguarlo.')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb6363f3c6df8fc3124cd82415aa10af8857c754
545,918
ipynb
Jupyter Notebook
hafta-11/462_ders11.ipynb
yasarkucukefe/YBS462
b89e30d708011e6fba844bb8d75155462fe1cd97
[ "Apache-2.0" ]
1
2022-03-08T23:32:22.000Z
2022-03-08T23:32:22.000Z
hafta-11/462_ders11.ipynb
yasarkucukefe/YBS462
b89e30d708011e6fba844bb8d75155462fe1cd97
[ "Apache-2.0" ]
null
null
null
hafta-11/462_ders11.ipynb
yasarkucukefe/YBS462
b89e30d708011e6fba844bb8d75155462fe1cd97
[ "Apache-2.0" ]
1
2022-03-15T18:31:38.000Z
2022-03-15T18:31:38.000Z
356.80915
223,565
0.891269
[ [ [ "# Bir makina öğrenmesi modelinde amaç: Genelleştirme (generalization)", "_____no_output_____" ] ], [ [ "![underfit_overfit.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAp0AAAGLCAIAAAAK0L5yAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAFzaSURBVHhe7d2HX1Nn4/fx5z8STSAgqDiotc6qrfrT1rZaV62tbbWtvWtlo8hQZO8pQ/aQJSB7CbL3kDCFsAlZhzxXch1CRFBURjh836/zuu+cK8NoIZ9zTs74f0oAAADgCnQdAACAO9B1AAAA7kDXAQAAuANdBwAA4A50HQAAgDvQdQAAAO5A1wEAALgDXQcAAOAOdB0AAIA70HUAAADuQNcBAAC4A10HAADgDnQdAACAO9B1AAAA7kDXAQAAuANdBwAA4A50HQAAgDvQdQAAAO5A1wEAALgDXQcAAOAOdB0AAIA70HUAAADuQNcBAAC4A10HAADgDnQdAACAO9B1AAAA7kDXAQAAuANdBwAA4A50HQAAgDvQdQAAAO5A1wEAALgDXQcAAOAOdB0AAIA70HUAAADuQNcBAAC4A10HAADgDnQdAACAO9B1AAAA7kDXAQAAuANdBwAA4A50HQAAgDvQdQAAAO5A1wEAALiDg13PLyiQyWTsDAAAwEbCta539/T4+vmVlpWx8wAAABsJp7rOMEx0TAzpun9AwNjYGDsKABucvCXd65Gri8sCk1tEuYhhH/Ze8vZMH9fI0qF3PkHekeHjEV7y7gcBrBxOdb2uvp5EnU4ZmZnsKABscJLcu0c/++IzM9W0a6tAb5PxDvVtMn1x9XGngn3Ye0mybu/knXZvkrPzC5Lk/M/U8KRb8zsfBLByuNN1iUQSHBKi6TqZenp62PsAANRkVY7HePzf4qfY+Q8jHuzq7B19z+47qgf1vO9BACuGO10vLCrSjjqZomNiGAbbwjYEqVQaFxOD/SXhvd7uumLg5bP0ih5Rc3Z4UHR2w6BUNTjVVZoa7uvp5uoTHJvfOsqu0Sv6q5+lFrSMkY8VxUBVFnmWTNSYGeHn4RORXNwxTj9tyD2ZafktqjnV49Mqe2TDjZlRPh5+4ckl7arnaoiFJYnBXr4hCYVtY+Othem5jSMLfmIpRlpyooM8PfwfJ1f0SOhQX2V6WlHbpHqGUPRUZKQWd4hVNwdeZmZWdg83P4sMisypfFGQmlrcrrUcI++uTE8tbqVPZcbIS/u7ewVEZbzoVj0b1j+OdH14eNjP339e18lUV1/PPgI4bWR4mK+3CTtVwHu93XVJxl87DU9d+d5EX2+TvsnVuNfTLeHX9uvr7z3+/ZVL3580M9Dnm/2Z0KNKu9Z2eEnWP3v0T1z7aa/pni/PnPjClL/J6NCdHNVX9Vrb4VWP55++emW/ye6vTn192ITH1z9omUW/z1f0pt0+Zsgz3X/6wtnDZtsPX/7uc8MjjtVvLZoyoyUe35sKDPecOnfxwtEdAqNjd3MGGaU4/cZWw298O2a/Q5hOvm7M/y64m8xLMv82Mf6/C+e36fH19Hb89NDuCG/n9cRh9oHK6XyL/fwD98vJ8svYy4CrBwWGX5w8f+W7I2YGxif+jW/HsvH6x5GuP336dF7R6RQcEiKR0MVb4DJ0HZZo4a5v3rz317B60Vhf18CUKPGPbbyjNoWjdNV5qtL5GM/wXHCvKtlvdn2z3p6rYW2qlVxmNN/yS57gWrSI3PNm1/X0d/4U0co+yPrQFuOr0STsjCj11m7e/puJQlVHmaFihzNGmwQLdF1a6XBIYHzWq0b9s63ojvt1p/ph7+m6/pbdvz2uE431CgfGW9xPGpLlFbIwoDKZ/b/dhsce1smV06W2X/JNLgU0qP8tGFHJ/TOGhheCO9UPg3WMC11/9erVvJxrT4VFRezjgLvQ9VUmV2NnZkmmJeNj4+zMrKmpqW5hNzsza2J8or6+XipVb/KeNTo6Wl5WLhKRNM4ZHh7Oy81raW5h599EXiQpMWlgYICdX4KFu84zs8ybfTPyse7G6vbZneTloy1BFwSCE6405m90nbfbInd2tUGc+rcJuauZ3PNm17fs/e+55kHpN40FJ91a5Mrx5N9N+ad8WmezrBxLu7Ftga7LXjgc4u3777nm7cqEFbmlrcOK93RdYHYnf/YvpHgVeF5geDWyX/VXGn36l6n+t97kD5YWmJsJPrMonPuPIIr5SWD4XSA7B+vWuu/6zMxMRmZmtBbS8ojISHYmOjo2NpZ8srCPBo5C11eIWCz28faxsbKpq61jh5TKF+UvbG1s79rdJRlmh5TK2ppaaytrizsWiQmJ7JBS2dbWZmVhZf6fuaeHp0LBJqi/v9/S3JIM3rO7R7JNB8fHx60sVY8kr6CpuEwqu3f3HhkkE6k7HdTo7u42v6O6y/6ePaNgM/xeC3edf8JTa/91SU9JpOOta9+fOPaZyVYe39hgs+DrR41vd53/lXvD7LMkz27v4J1ybSC33uy6alDzoOx/tqtfStHmdUqw9Y+0uS+05fUux43e7vpU4h+GvB/8X2n6P+t9XVcvPbCY/ieXjYzPh/YwzFD8L6aG50LJw5ihqEs8/haesaFAMxnxNxl8Ycs+C9Yt7uw3pxEUHPz69Wt2BjYGdH2FFOQX0Kw63Hdgh5RKZydndtB+bjAoMIgOkml6epoOJiclawY1x6cUFxVrBmuqa+hga2urZvB5znM6SP6Dagbj4+LpoIbmKWTRQS7TROw9Fun63Kqzoifhphlv26Ertu4h8RlF9T2i1BtGi3T9hKfqptriXaePpw/SdF0YcFbAvxg1d5C7rOLegQW+Xxen3DTinXxUN/sKJNJ0Z2DSdWPDMz6ark/EXTXS6rrh/3m2zd6l2saeeH2n4GxQZ2/0FaMdV5+oN8mPRF/hC45Zp5dXVFbMTS9ruuhzYP1C14EL0PUV0tHeQdetI8Ij2CGlMikxiQY1JjqGHVKvxNNV89DgUHZI/R2Z431HMujn6zfDzNDBsdExf19/Mujj7aPZFC+Xy8mKPnkFchdZd6eDRElxyUPnh2ShYWhwiB3SQpYA/Hz8mpua2fkleF/XmaHIi0b8Y661syWVN3me5AnUX0gvX9eVsvK7R/imf6XMbu6XVD08yltgO7y82fMUj5R4YHYBYDTp+m6Ti+HCqaxb2wVHnWffprzO5ahg0a4rleOZ/+4yOOf88LLR9pvJI+ohWbXjYYHJ1VjNSzP9mQ9vWQWWsrOwbqHrwAXo+srpetVVWVE579t0oVBIkj8zw6aaIj1++6QRCrliaGiBJIun1uagqveur0+k3di1ZdsVv9oxRqkYbUm7d3rn5k2Cg3dfkOIuX9eVis7Iy9sFe847Rj8vLUjx+e2gMflTvnyr60pFd/TV3fy9vwWV9IyLRXWxtw/xd/wY2qlQCIN+MOZ9fjO8QtjTUhZn9/1O/ru6rh
QXWOwVGOgbmf0vZ/bYOEVH2JVtW/ZecH3eNjI91V3ic+VzvsmVx9hvbt1D14EL0HVYovd2XSlrTzY/uZu/yYCvbyTYdfZOeF7AJRPDi5FkvXYZu05Wj0cqQv/37SETfeOdB380f+z/+3b2WfMwozWP/zphspn8hPM3Cw5ffZTXp36rYxV+Vw9s523i6+mZfPGTZ6jlccE7uq6UVtgf5W0+aF2sdYAQM1Tq9fsREwF5Zb1NBsYHr7sXsHvNw3qGrgMXoOuwvGTjfZ3tPWPz156XDTPaUvmi+fXcNoux+GuCrRfDF82qRCRsbe0ZfeMIAkI+3tfR0T/5ZsI/kHS0p7Wlo398xf6ysMrQdeACdB3WF0VXwDl9wQ+edeqN4orhcpezWw3Oes/tww7w0dB14AJ0HdYZZqTU7fw+A94OswNH95kabxEcuR7WsDa7HADXoOvABeg6rEfTvTWFGUlJKZmlLSKsqcNyQdeBC9B1AAAKXQcuQNcBACh0HbgAXQcAoNB14AJ0HQCAQteBC9B1AAAKXQcuQNcBACh0HbgAXQcAoJav68xAcdgjj8S6+ec5XHXo+gaErgMAUMvXdXnNo2M8o6sxc1dYXCPo+gaErgMAUOg6cAG6Du+g6MsPc3WNKFngoirjlTEebuFlQ4tcbkXekubpGv1ihNwtb8/wW/iRssYkV4+YlxPs7LtMT4nVl2iRd2T4eISXLPbHAnyK1eu6YqQxNy7E1ysgJr1cOHeNRKVc1JATE+Tj7hEYlVk7MLcVf7Hx90LXNyB0Hd5FXGy7b4vxOfVVTLUxg3E/GwuOOFYtdikzcepfxrzvfDtUVz+dd2HWOVOJv/ANz4f0vSfSktqgq8euRw2rb89dyxVgua1O15nXOfbf7eSZmB0/f+70l9t52w7fiFJf4kDe+vjabt6OA2d+unrp7D4jA+MTLmWqF1hsfCnQ9Q0IXYd3ktU4fynQ/zaw842wMz1hlw15ZzwWv4qaVteV4kFhZ8/oAksAS+y6KPoy3+hypLrr5MW6Fn4xgE+2Gl1nRKm3dm/ZfSmokV6ScCDv/gmB0dcutTJZxb0DhofsyulVjKYqnI4bHTDPFisXG18SdH0DQtfh3eStnt/yed96tWiFXdHh/+1Ww/Nh6rX4qa7S1HBfTzdXn+DY/NZR9mFaXVcMVGWl5reMs/UWdxXFB3r5h6dWdg/N6/pCLyXuLIm1PsETnLCMfVrQOk5eLDNt7sUW3pyp+hPTK3pkosbMCD8Pn4jk4g7NE96mGGnJiQ7y9PB/nFzRI6FDfZXpaUVt6s9dFUVPRUZqcQf5JFX0V2Wmv+wRtWSFh0RlV77IS0st6tDejKp6ZCH7TGasJScq0N0j8ElaRfcSP4ZhLa1G1ydSfjflf/mwem7ZdOzpH7tUI+JGt68FgsN/+6VV9Uxq/b7JFxlfEnR9A0LX4T0U3WHnBYKvXRs16+byRo+TPNNfYgcZaUv4tf36+nuPf3/l0vcnzQz0+WZ/JvSoPnkW3g6vEKb8c8yQZ3rw9I/fHNy9/ejJL7bMdn2Rl2JEadZnDpvo6ZvsP3P23zjhlNZ2+EU3Z6r+RP0T137aa7rnyzMnvjDlbzI6dCdHtEDamdESj+9NBYZ7Tp27eOHoDoHRsbs5g4xSnH5jq+E3dGuDynTydWP+d6pvIyQZ/2wTfHPxu52bN/E3G197aH2cb/JHkoh9nFJcYPGZ4ODdF1LVDgiBP+8zNjI7ff7CuS9Nt5ocuxPfhs0MOm4Vuq7o8DktMLyeorWcJ298dIpvcitDwoyUel/ZZ0x+tvT4e46ev+0SV6P+qV1sfCnQ9Q0IXYf3YQbjrpnyj7rUsFWSVTsd55v+kz7GiBL/2MY7alM4Sj9jpiqdj/EMzwX3ktmFus6IUv/exdt/M7Fb9Uryvox/j/A20a6/66Xe2A4/9/364psz1X/iZr09V8PaVB+ezGi+5Zc8wbVoTX01pJUOhwTGZ71q1D//iu64X3cKjjhWy97ddT2DPb9E1InGertejzd7nuLt+DmG3bFw8tntPbwTLrVypbjM7qBg2/nARvW6PCMqdThpbHQu9M2vM0DXrEbXhQFnBQZX47Q+cmVVjsf5eyxy6d5wirGOktSQhxZXvzLj6237xnt2x5TFxt8DXd+A0PUV1d/XX1JcspRpWES/PlYZHRmdd+9iU29vL/sc0p7p6Xn3Lja1t7ezz1misbRbO3lH71eowy4ps/1CsM+ymCRTPtbdWN0+u+YgH20JuiAQnHBVfeAs1PXJ1Bs7+cfd6mY/jxTdoedmt8O/46UW6frimzNlqj+Rt9sil25UV72Zv014p92b2VkN2QuHQ7x9/z3XbEeXCStyS1uHFe/u+pZ95nmzOyQrugK/Mza69GRA9c5Hn97YbXDat1WhlOZbmfG+sCiY229Z9OSaIe9coJCdBZ20GtvhpQUWX/B33UrTrHErhIHfGxt8H/JKVB5o/p9PyeyPo7zF/aRA/1L04OjC428vpy4EXd+A0PUVVVxUbP6f+VKm+vp69jlKZXNz87x7F5vy8/LZ55BwDInm3bvYlBCfwD5nqcQFFnsFX9iUklBOPbcw4x1zfElzKukpiXS8de37E8c+M9nK4xsbbBZ8/Ui1xX6Brks6fc8IBD8nzH3SSYut926d/X590ZdauOvv2pyp/hO/cm+YXYCQPLu9g3fKtYGd1ZhK/MOQ94P/q7dWot/dddUiwuxLK5mByKtbBRdDuxlmKOFXExP1sQPMUMQV/iYDvoGxoWB24gv0Nh+yLWWfBjppmbsuOGEeFRcbr5niE0uFCqWszvu0oeG+X4KKhGOTg40p9t/t4O27mTLAyOvdvt5qdOJuWvOIVD4pLHQ/Z2J07EGtbLFx9o96N3R9A0LXV9TLypcPnB4sZWptaWWfo1R2dHTMu3exqbysnH0O+U85MjLv3sWmzIxM9jlLJq28f4z/mXW+eDz9TzODU97qHeEVPQk3zXjbDl2xdQ+Jzyiq7xGl3jBavOvSvpAfDA2uxs11XfL8X1Mjddff9VKLdP0dmzM1WwjYOxbrujjlphHv5CPNBgRSaUa9jEG6bmx4xkfT9Ym4q0ZaXf/Gi6ySz2JESddNjL8L6OyN/Hmr8bXo16pXGIn6WZ93wib1RUVFpdZU24VfNJ22zF0nn61vTvq/J6kWRBWvSwN+P2LKVw8a7PnuTlQD/a2QNEf/fWwnHdfj7Tn5T3SjesF1sfElQNc3IHQdlkTe5HmK/4V5WtKfO7adDyGBI0EbirxoxD/mWjubRfKYkzzBsYeqTi60HV5abneEv+d25uwPm7zR4wRPvR3+nS+lHIm5wje8FKnebjm3HX7RzZlCxVK7Lld/O36VbkRXGU26vtvkYrhwKuvWdsFR59m3I69zOSpYrOuk+pm3PhOcfejy47Ydvz8dUQ/Jqh4c4Zn+HK15ZWYg49E/5sFl+EXTacvX9SVgxEPCtleDk7M/prNkYz3tTc0dA+Pz7lhs/N3Q9Q0IXYelUQiDvjfZfejodpPf4ti9xCbSbuzasu2KX+0YWeEebUm7d3rn5k2qvcFlC
3ddKW8N+sF466GbUS8HJkbbs5y+3bWZ3W/uXS+lFD/9zVCw72ZUUY1wcq7ri2/O1PoTqcW6rlR0R1/dzd/7W1BJz7hYVBd7+xB/x4+hnQryl/3BmPf5zfAKYU9LWZzd9zv57+i6Upxv/TlvqwH/83+zZg+NU3SEXdjJ23XZLad9RDzVU+z30x6ByYUI7Den21a166sDXd+A0HVYImbgyTXjTfq7/srUbEiXtSebn9yt+iJZ30iw6+yd8LyASyaGFyNJWhfsOsndwHPXS5+rD9jZsuuUuf0vu9nv19/xUkqmL+XWYcEm/hYzq/zxua6TV1tkc+aSu05ee7Tm8V8nTDarXmGz4PDVR3l96vSOVfhdPbCdR15Zz+SLnzxDLY8LFu+6Ulppf1CwZZ9t8eyeegQzVOb96zGTLapX1tu849DPngULnI4XdAq6DlyArsMnko33dbb3jC1tJx41+eRAR1vn0DQ7O+cdL6UQiwaHxhf8QxbbnPkBJCJha2vP6PyzbsvH+zo6+j/4TCDapKM9rS0dAx/yzwNrZ8N1vbKiMiY6pr+vn50HTkDXAQCojdX1jvYOeoSM/V37GWZmbHSs4kVFW2sbezesW+g6AAC1sbre0txCu25rbcswTFhoGLkd8TiCvRvWLXQdAIBam67LW1K93SIrVBc1Xn7v3g5fkF8QEhTS2dlJbpeWlKoab2PLKLAjyPqGrgMAUGvT9em0Gzv4Z/w1p0tYVkvfb25YNExX31+9esUOwfqErgMAUBu668QDpwek6znZOew8rE/oOgAA9cldF7eXpKYWt2tfube7Mj21uJWe2GBKWJYS4efu4eYZGperuaixVtcXv0KwGjPW8vyJn6eHf3T6iyVe+XcpXZ+ZmaE3EuITSNf9ff3pLKxT6DoAAPXJXZdVP/iSt/N6ouYqTtP5Fvv5B+6XS5XS5ohf9hoZ7D7xw8UrPxz/XKAnMPsjSX1RY62uL35lAtWVf/1/3Wew7bMTFy+ePb5Tf8fxfxOXcOXf93Z9aHDokcsjx/uOXa+6aqprSNctzS2l0vkHfcI6gq4DAFCfvh1edbE1Q5Ors+dknMz+325D1fmQmeHE6zv5B+3mrkTscILPuxDSQ24vqeviknuHeDt/9J+98m+J0ynBtnNB7z2D4Xu7np+XT79W9/H2mZqcKiwo7Ovt06zBw3qErgMAUMvw/briVeB5geHVyH5Vv0ef/mWq/6236vSEqisR17QNs3uay0dbAi4b8k67NZKZpXRdmm++j29mrXXl3+HoK8b8s8HC94T9vV0nIaddDwkOYYdgnUPXAQCo5dhvjul/ctnI+HxoD8MMxf9ianguVL0VXamU9JaGP7j10/mTh77Ypi/QN9i6hT258RK6LhuKuGiot9lQoLnur8BYn8ffsu9u2Xs2mL+36wqFIiY6xsfLhx7tBhyArgMAUMvRdSUjSry+U3A2qLM3+orRjqtP1JvkFb0Jv33ONzz6k5VnSGxmUV2vKOXPrQt2feErBI+QhQX+Ubu0cu3r/lZW1gjH2S0Ai1nKfnPAMeg6AAC1LF1XKscz/91lcM754WWj7TeT1VfuZYaeXOILjrtorvUvb3I7zeedcKklt7W6LlnsCsGyKsdjfJNfo9Wb91WYgYwHt80Dy8eWv+uTE5O1NbW9vb3s/FKNV8W4eoSXDL35jhSdmQGuXhmtH3b9BlljoodbTJXmGlNLxohexjj/d+vmLbvHz3JCXQOzNMfii6fEs4tL5N+cnZF3ZPi8/Z7XO3QdAIBapq4rxQUWewUG+kZm/8thD1kbT7+5zWD7hQD2SsSpDmdM9PW2HLlXTu7T6vriVwhWdIRfNBHs/tEjp0195V+fa2Y804thn77fnIZMxu5d7+PlY/6feUpyCp1dKqYv7DxPcMq95c2CS3L/28vb9r9nWtc6XIKp+GtG/PNhqms9fpDRjL93GQj2fnftN4vgaOejvH3m+aphSXXI1cN/PBGpHyOpDbp67HqU+pgFrQs/cwi6DgBALVfXldIK+6O8zQet567cK2tPtDy1Q6C32dCAb7z7tEX488DLApNL4eQura4vfoVg1Tp/ie+vh01Vd23ib9l69Jp74RKu/LvErle9rLK2tH7g/GBiYiIzI5N03fWRK3vfEulC12UvHA7x9v73XH2xSOlwT6dwSH2Yvyjyqj7/ahTtuij6Mt/ociQ9FlE82NXZM8qx6y2i6wAA1LJ1fRGqKxF3LOGixu+4QrB0tLutpWNg4WsWL2CJXQ/0D6R7xWekZ7S2ttLbpPHs3Uvx/q4rBqoyMyq6ZaKGZxE+Xj6Pnxa3v7F7gLirOC7Q1zc8vaJ7aF7XmbGWnKhAd4/AJ2kVc2fkUQy8zMys7B5ufhYZFJlVVpKT7HJl+5Z9v3qnpBa2jY805abm1AwqxZ2lsZan+bzTltGpBbWdJbHWJ3iCE5axTwtax1VvKS2/RfU2FP3Vz9Iqe2TDjZlRPh5+4ckl7W98ySEWliQGe/mGJBS2jY23FqbnNq7MGf2XAboOAECtdNfXwBK7HvE4gra8qLBIJpVZWliS2zXVNezdS/H+rqs2ehud/Ona5yZ7j548vd9EoMf/0jxLpK6jQphy+7BAYHLg7A9njppuO/HVPoGm6+OVgT/vMzYyO33+wrkvTbeaHLsTT8/II8n828T4/y6c36bH19Pbfu7v3785/Blfz9js6Ldnb8d3Vrmot8MzolTbb/abbtYzPXDyh/8Fp1mfOWyip2+y/8zZf+OEU3Pb4SVZt3fyT1+9st9k91envj5swuPrH7ScfXe9abePGfJM95++cPaw2fbDl7/73PCIY7Wuruaj6wAA1Mbt+tjoWFRkVEx0zOSkao8Af19/0vWE+AR675Isrev6m02vPW5VbSdnRgssDwoMf4ol6WREaTd3CvbeSO5WpVLem3Hn4BY+23Vxmd1BwbbzgbNn5Cl1OGlsdC5UtWeBquv6W3b/9rhONNYrHBArpXmWZrxjjlWqV5FV066rnrXodnit79dVXdfT3/lTRKtqewAzmm99aIvx1Wj1u0u9tZu3/2aiUPW6zFCxwxmjTQJ0HQBA523crs+TnZVNuv7A+QE7vxRL67pgz+08clNNvWPBCU8S1cnUv7bxTj7SHC6g6A7+gd0OL823MuN9YaF1Rh7Rk2uGvHOBQtp1gdmdfM19n9r1LXv/ez777sTpN40FJ93IX2c8+XdT/ikf1emFqLG0G9vQdQAA3Yeuszo7Oulm+ZER9XF6S8GInlziCb5+1DCv69n/M+PtslDlUt11rQdIsm7t5n/l3ihXdPp+xze4Hj93WJu0yGq/garrzFDEFf4mA77B3Al5DPkCvc2HbEvJC6i6rk4v61O7rjqjgObdZf+znbxb8u7avE4Jtv6RNnehHXm9y3EjdB0AQOeh68rxcVVcFXKFtZU16XplRSUdXwJx2g0jwedWxW+eAW809qox75BTJWmgVkTVNF1neoMv8PV/iZ3rkCTnXzN99fr6SNTP+rwTNqkvtE/IU1FR20UerOq64f95tmlWpD+566fdm97uujDgrIB/MWru
IHdZxb0D+H4dAED3bfSux0THkJb7ePsoFIrgwGBym4yw972fosP3jIB/9EHV7JZsghlJ/3Ongcn1FNXSwqJdV0rL7Q/yPv8nczZE8ia3r9n95mRVD47wTH+OHpitKjOQ8egf8+CyD+n6yBOycHAlku4ENxJzhW94ic4spetKWfndI3zTv1Lo08k9VQ+P8rAdHgBA923orpOWW1lY0c3vjQ2Nebl55IajgyN79xIwvfF/7NpsuPeCY0RmWWVVZcHTEMtvP+Pxv7IvVe/ztnjXlfK2gO93GBy8FfHy9fhoxzOnH7brze43p+gIu7CTt+uyW067+ow8fj/tEZhciJjdb25JXRcn3zDacuDPiOIaIZl5+puhYN/NqKIa4eSSuq5UdEZe3i7Yc94x+nlpQYrPbweNN28SfImuAwDoug3d9RlmxtnJmXa9t7dXKBTS20ODQ+wjlmCi4Yn1D/u2bVGfPEdvk8D02O+uOb1sKt/RddLO/jyXiwcM9VTP2n7S6u61verv11V3MUNl3r8eM9mifs3NOw797FlAz8iz5K4zvan/7DfW22RgZk5m+lJuHRZs4m8xs8ofX1LXyXNGKkL/9+0hE33jnQd/NH/s//t27b+IrkHXAQCojb4dvrenNyY6pqiwiDSeYRhbG1vS9bLSMvbupWIkIz0dTY2twsHJDyyffHKgo7WTniNuPuloT2tLx8D7z+qzCIVY9Hpo9nw+ZGZQM/M+zGhL5Yvm13Pvaiz+mmDrxfAlnO5vbaDrAAAU9pt7Q3hYOOl6WmoaO79RKboCzukLfvCsU5/sXzFc7nJ2q8FZ73kH9OkQdB0AgELX3zA0NDQ5wV64ZkNjRkrdzu8z4O0wO3B0n6nxFsGR62ENC25U0A3oOgAAha6ryGXyvr4+RqGrW5nXyHRvTWFGUlJKZmmLSGfX1Cl0HQCAQteVJOcPnR+a/2ceFhLGDsF6g64DAFDourK/v5/uBk8m0RA9kQusM+g6AACFriunpqbu2t0lUbe0sFTIFeNj47ExsQ73HaYm1Yegw3qArgMAUOi6SterrpTklI72DnJbKpFamn/4NVthTaHrAAAUur6AAP8A0nWy1s7Og85D1wEAKHR9AfSEsg72Duw86Dx0HQCAQtfnjAyPdHZ0KuSK3t5e0nUy9ff1s/eBbkPXAQAodJ01NDRkZam6BkxiQuLMzIz9PXtyOz9Pfa510HnoOgAAha6zql5W0XV0Kwsr0nV6/dbAgED2btBt6DoAAIWuswZfD9L1dQ93DzJLM29pYSmVSukDQJeh6wAAFLo+p7OzM/d5Lm3D5MQkXX1vamqi94IuQ9cBACh0fVGe7p6k6ynJKew86DB0HQCAQtcXlZmRSbru8tCFnQcdhq4DAFDo+nxtrW1NjU0Mw3S0d9BN8SPDI+x9oKvQdQAACl1/Q11tHW3585znCoXCxtqG3C4vK2fvBl2FrgMAUOj6GzLSM2jXPT08yWxaalpmRmZ/P85Oo+vQdQAACl1/Q0dHh/kdVdcT4hPYIVgP0HUAAApdn+/Vq1dVL6sUCgU7D+sBug4AQKHrwAXoOgAAha6/X39ff2FBITsDOgldBwCg0PUFzDAz5WXlpOUyqay7u5vuSTfQP8DeDboHXQcAoND1BbysfElbnhCfMDMz42DvQG7nZOewd4PuQdcBACh0fQFZz7Jo193d3MlsUmISuU2vBwO6CV0HAKDQ9QUMDg7SM9KkPk0ls62trTTzOPGczkLXAQAodH1h42PjXV1d9LZCobCztSNdLyosoiOga9B1AAAKXV+SmOgY0nV/X392HnQMug4AQKHrS1JfV6/aFH/HfHJikh0CXYKuAwBQ6PqiZDJZRnpGXGzc6MioTCqztrQmaX9R/oK9G3QJug4AQKHri6qprlGto/9nHuAXQGbDw8LJ7dDgUHov6BR0HQCAQtcXVVRYRLvu8sCFzNKD2i0tLCXTEvoA0B3oOgAAha4valo8/fDBQ9Ly7KxsMisWiy3MLcgsWY+nDwDdga4DAFDo+rvMzMxMTU6xM+SVA4JI1yPDI9l50BnoOgAAha5/gPKychtrm6TEJHYedAa6DgBAoesfQEZIZewM6BJ0HQCAQtffg2GYhPgEJ0en4qJidgh0D7oOAECh6+8xMjxirt4rnkyjo6PsKOgYdB0AgELX32N4eNj8Dtv1ifEJOjg1OSXsEtLboAvQdQAACl1/v5LikkcPH6Wlps3MzJDZihcVluaWTo5OM4xqFnQBug4AQKHrH6y/v5+uvre0tLBDsNbQdQAACl3/GN6e3qTrkRE4kF1XoOsAABS6/gGGBocmJ1XXcysvKyddt7SwpLOw5tB1AAAKXV+q4qJi0nIbK5ue7h6JREJukNnCgkL2blhT6DoAAIWuL1VYaBgJOZni4+LJbFxsHLnt6uJK74W1ha4DAFDo+lJlZmTSrpMbZLarq4vOvnr1ij4A1hC6DgBAoetLxTBMfl5+TnaOTMaeStbtkRvpemxMLJ2FNYSuAwBQ6PrHoxdot7a0Fk+J2SFYI+g6AACFrn88sVhsbWVN0k7W49khWCPoOgAAha5/MKFQ+CzzWW9PL7mdEJ9Auu7s6MwwDL0X1gS6DgBAoesfRi6TW1pYkpbftb07Pj6uOfdcfV09+whYC+g6AACFrn+YyYlJGnIydb3qIiOB/oFketWJveLXEroOAECh6x8sKjKKRN3xvuPEhOrybprd42ENoesAABS6/sEUCkVfX9/U1BQ7DzoAXQcAoNB14AJ0HQCAQteXB8MwdbV1tTW17DysLnQdAIBC1z/SxMSEv6+/s6Nza2srmU1PSzf/z9zJ0QkHvK0JdB0AgELXP1J1VTXdK97B3oHMDvQP0Fkc8LYm0HUAAApd/0j19fU05Pft79ORQP9AMhvgF0BnYTWh6wAAFLr+8dLT0t3d3KteVtHZhvoGWvr+vn46AqsGXQcAoND1ZTPDzDg7OZOu0wu0w2pC1wEAKHR9ORXkF5Cu4wpvqw9dBwCg0PVl0Nra2t7eTm5orvCWl5tH74LVga4DAFDo+qcqLSmlX6uXFJeQ2aTEJHL7/r37OL/sakLXAQAodP1TPU15Srvu7+dPZoeHhy3MLTSZh9WBrgMAUOj6p2pubra4owp5bEwsHYmJjiGzTg5OCrmCjsBKQ9cBACh0fRl0veqqrqrWVHzw9aD5HdUa/IvyF3QEVhq6DgBAoesrIjIiknTdw82DnYcVhq4DAFDo+oro6+vLzsrG0W6rBl0HAKDQ9WUzNDTk4+Xj6+07MjLCDsFqQdcBACh0fdmkpaaZq3eMDwsJY4dgtaDrAAAUur5ssrOyadfnXfplcnKSvQUrBl0HgHeYmZnp7u4uyC9g5zkNXV82Uon0cdjjRy6PWltUV2QnFHJFclKylaVVc3MzHYEVgq4DwNvkcnlzU3NiQqKDvQNd7xoYGGDv4y50fWX5+viSnyTyv+w8rAx0HQDmycvNs7G2oTmnk5Ojk2a9i8PQ9ZVFFhXpz1NHewc7BCsAXQeA8bHx0pLSoMCgvt4+MlteVk4/fr08vJ7nPO/rUw1
uBOj6imhsaKyprpmZmSG3PT08yQ9WUEAQvQtWAroOsMElJyXTipMp61kWGZmcmCwrLduAHwvo+vJ7WfmS/myRnzMyW1dXR2eFQiF9ACw7dB1go+nt7VUo5s7VXVJcQj5mbaxsIiMiN/guTej68svLzaMhf/jgIZkla+2uj1zJbGhIKH0ALDt0HWAjmGFmXnW+Sn2a6uzkTD5Utb8sn5iYaGxslMvk7PwGhq4vv/7+fjtbO/IzFxbKHsheXVVNS49v2VcIug7AYQzDtLe1JyUm3be/Tz9L6ZSels4+ArSg6ytieHi4ublZcwl2ssru6a76lt3Lw4t+6Q7LC10H4B5GwbS2tiYmJNrfs9fOucsDl4z0DKFQiI/TBaHrq6S9vZ3+RFa9rGKHYPmg6/DxmLGWrDA3m//9/cdNcxvnoNSaIXaBfPkwoqpoZ4u/bty2CcvOCvXwf9al+V5YPCWe+454dkbenunjGlk6xKhHNxpS65aWloT4hHt372laTiY3V7ec7JyBfu4fgP6J0PWVpVAoSGwYRvXrGRYSRn40nRyd8A3QskPX4eMwrwtcz+0S6PF27f/6wk9XLp7au32L3vaj/8S1LOdFm0bT/9q7xWD/Nz/f/C8o1vGooZl5gVQ1LqkJ+uXI9WiR+kGS6pCrh/94op6RZN3eyTvt3rRhPynI56Qm5x5uHs9zng++HmTvg/dB11eQXC738/UjP5ek6IyCIe/K4o4Fmc3LzWMfAcsEXYePIWnw+87EYPtpu8Tm2bM9K4ZfBv+xjyf44naOaLnWlmUV9w4K9tzOVS8qSEXdncKhafUdw1GXjPQvsV0XRV7V51+NojPiwa7O3tFl326gixRyRVNj07PMZ+y8Wlpqmqe7J/moHBoaYodgydD1FTQ5OalZ5HzV+YqMJCYkktu2NrbTYvp7DcsDXYcPxwzG/7KTt+vP5NfzAi6udDqhzztq/0KiFHcUp6YVtU+x9xDynoq0tMJWuhzAjLU8f+Ln6eEfnf6ie3YNXzHw8ll6RY+oOTs8KPpZWUl2kuvFbQKzX32TU4tbx0Yac9OyagYZ5XRnSZzl1wL+19bRTwvraktjLU/zeacto1MLWpSK/upn5P/HyBtTDFRlkVeTiRozI/w8fCKSizvGtd+vWFgSH+rtFZpQ0D421laYmte4Pq4nSVZ7GhsaY6Jj7GxUexmTSTvhdBsnfBx0fQUpFArH+470R7arq4uMTE5M+vn4bYQTGa4ydB0+3GjSr0YGZnfyJez8HEVX0Hd8wcF7FTJZjdMRwbbryXQtmhDnW+/lHblXLlUqxyv9f91nsO2zExcvnj2+U3/H8X8T21Rr2JKMv3YanrryvYm+3iZ94x/+9+uZY7t4+oZmJ05/ax7XWf2AbodnRKlW3x0w0d9scvjUt/+FhNp+s990s57pgZM//BurvR1ekvXPHv0T137aa7rnyzMnvjDlbzI6dIfdlqDoSf/vsDHf5PDpH344YrrryI/n9vGOOer0HjxymbyhviH6SbSttS39bKSTj5cPzvCxXND1lTU8PJySnFJZUcnOw8pA1+GDyaofHt1ieC6kb4E1Q2mx5WcG+pejRUp5s/sZvvGvMWQFW2Uy65+9/KOutXKluOTeId7OH/0b1evyjKjE6ZRg27mgToW665s37/01rF401tc1MK2U5pubCY44VquiL6tlu6561qLb4ed1fbPenqthbartAcxovuWXPME11XOY4bQ/9/L3/p0kVL0wM1TieHKb3hbd7DrJeX19/ZOoJ/Ny7uvjW1RYNDo6yj4OlgO6DlyArsMHk5bY7dts9FP0Qj80sirHQwL++TDSfMWr4O8Ntl2OHFCFfTT1xo6tp73aFKpW7+ObWdM+qw1HXzHmnw0WKlRd55lZ5mnu+uSu83Zb5M5uVRCn/m1C7mpWKsee/mFi+H+e5M2wxlP/2q5z6+vkt5LkfN71V/x8/YqLisdG8Qu7ItD1NTMzMzM5gUuzLw90HT6YvMXrFE9w2rtj7jAzDXH2P6YGxr+nqtbFmYHIS9sNzz/uYZihuN9MBBdCuhVk7TjioqHeZkOBwNhwdtLn8bfsu1smVXWdf8KzWbMz+yd3nf+Ve8Psq0me3d7BO+XaoFS0+vwfb/uN1LlddeR1rl/xda3rUqnU2tJaO+fjY+PsfbAy0PVV0t/X7+nuGegfSLc4dQu7vTy8vD29cV6FZYGuw4eTvXQ4JDD8MbznrbCLS+7u37L1XDAJOMGIEv/YZnAuoLMv6vJ2k59i1LvZjTy5bMQ/apdWXllRMTdV1gjHGXXXT/m0al7207t+wlN1U22u612BZ3mGlyLmDnKXvXA4uJbr6zKZrK6uLioyKiY6hh1Sy87KLikuQc5XDbq+Sp5EPaFLrNFPoslsW1sbna2uqqYPgE+BrsNHkLcG/GjM++L32M65rekk42OVj85s55n9k64p5vizWzuNv3N2vWC4+49kuru5rMrxGN/k1+j+2ccwAxkPbpsHlo99QNdHosnCwcUndCe4kSc/6/OuRKpnltJ1kvF7BwQ7b6TOHo8nqXL+mr8G36/LpLK6WlXObazYje1WllYSydu7I8IqQddXSUZ6Bv2J9/f1pyOhwaFk1tnRWS6f/Y2Fj4Wuw0eRtUX9sp+/effpO0HJeVWNdeXPol1/PWi8WfC1dbb2yd7E+Rb7+fpb9ff8l6U50L0j/KKJYPePHjltI+KpnmKfa2Y804th7H5zS+u6OPn6Nt7ntyKKaoWTZOaG0ZYDf0YU1wiX1nWlojP86g7e3vP2MbklhSmeNw4J9PW2HF+trrM5j5jLuWq6Y+7v519aUiqVaC8rwapC11fJtHia/AJ4eXjRA9mJgYEBepqanOwcOgIfDV2HjyXvy/e5fdbMWG8T+RHi620xPXzJNrJqVCvqKtIKh0NbDPZZlWithTJDJb6/HjblqZ+4ZevRa+6F6r3ml951pjfl9gEDvt7mfeb5ZCb1n/3kbRiYmS+x6+QFRiqDb5/dZ2og2HPoe6tw35s7VM9S37VSNDm3tmK/Nac5D/ALIDmfGJ9gHwdrB11fS8lJyeRXwtLCcr28YZ2FrsOnYaRjfR1NzV2vJz9065l0tLutpWNgXBXtj6EQD78emmCfrRCLXg8t9aWY0daKiuaBuf3mxmKvG+pfCV/BD5P6+vr5OfdHznUOur6WJNMSeuIaPx8/7ED3KdB12HgUwoCzRoZnvevUXw0oRC9cTm8XnPZt/dAlk3eRSqVTk3Nn2xsWDWtyXlZaNjGBnOsidH0NtDS3eHp4xkTHSCSSxsZGuuRLlnnZu+HDoeuwATEjJR7nzbbyt35+8ODBnQIDwwN/PG5YluvVkJzX1tRGhEdYW1qnPk1lR9VqqmuQcx2Hrq8Bd1d32vLnOc/JbFRkFLlta22Lky59NHQdNipxX01BZnJi6rOSFtGnrqlLJVKSbZpz+hlFJmdHZ/ZuWCfQ9TXwOPQx/YWJi40js5MTk3ft7pLZsJAw+gD4UOg6wEdjc/74jZxb3LEICggqLyufnMTps9YZdH0NDIuGA/wCvL28hwbZ6xdVVl
TS36WXlS/pCHwQdB3gQ9Gchz8Ot7K0eiPngcj5+oau64qQ4BDyS2VnY4et8R8BXQf4UEmJSfNy/qL8hfZecrBOoes6YYaZGR8bv2t3997dex3tHewoLBm6DvBuIyMj8y610tbWZmFuERwYjJxzDLq+xhrqG0jO3d3cyXvu6OjAYaAfB10HWFB/X39Odo6HuwddLydpZ+9QKhmGmZpCzjkIXV9jgf6B9PctNiaWHYIPh64DaMwwM52dnWmpaQ+cH9CPFzrZ2djlPs9lHwTcha6vscyMTPorFxwUzA6pMQpGodCchxLeA10HkMvlTY1N8XHx9vfsNS0nk4O9Q1JiUktLCz5SNgh0fY3JZDKS9pjomMHBQXZIqRRPiQMDAhPiE9h5eB90HTa46CfRNtZa11/5z/zRw0cZ6RnCLiHOZbnRoOu6KD8vn/5mlhSXsEPwTug6bDTT03MnhidCglQH1JDJ29M7LzdvvX8GwqdA13VLb2/vy8qXMqksLCSM/IpamFu0t7ez98Hi0HXYIMiHG8m2t5e3rbWtXDZ3grnmpubSklL8CgCBruuQgYEBSwtLknM3VzeyMP7I5RG5fc/u3vDwMPsIWAS6Dhw2MzMj7BJmpGc8eqj6TNBMjY2N7CMAtKDrOuRV5yvNbyz5KwwNDtnZ2pHb7q7uUqn6cs2wCHQduIcs3NfV1sXHxTvYO2g+Gchkf9eeDJKoa6+vA2ig6zqE/JZ6uKkOM71/7z49iWNLc4v5HdVvckR4BHZ+eQd0HThjYmIiLzfP38/fwtxC03IyPXB6kPo0tbOzc4bBRwG8C7quWxQKRWtLq/bZaQryC+hvdU52DjsEb0HXgTMGBwc1Lbe4Y+Hn45f7PLe/r5+9G+B90PV1ICY6hv6S19fXs0PwJnQd1iNS6/y8/AC/gIz0DHZIzdfbNzYmtramVixelsupw8aCruuujo6Oh84Pfbx8uru7vTy9SNdtrG0G+gfYu0ELug7rxfj4+MvKl2Rh/b79fbq8TqZHDx+xdwN8MnRdd9FD3cgUHhZOikU/BZydnFGvt6HroMtkUllLc0vq01Q3Vzf6S62Z7t+7T1fN2YcCfDJ0XXfl5ebR33x3N3cy29XVZWWhukxyYUEhfQBooOugmypeVAQGBNLfXM1EZoMCgvLz8nt7e7E/LCw7dF13yWXy7KxssrIuFArpSGNjY9azLHobtKHroAtIpAcG3vimTPsa52QBPS01rbWlVSaTsXcDrAB0HbgAXYe1wjBMT09PYUEhWQS/a3eX9Fv7RFKk4rExsdVV1RMTuAQzrBJ0fd3o7emNeByRmZEpk7IL+x3tHZMTqsPcAV2H1aRQKLq6uvLz8kODQ21tbDVr5HR6WfmSfRzAWkDX1w0fbx/6qUE3xbc0t1hZWrk9cqNnsNng0HVYaXK5vLOj83nO86DAIBurN66cRqb79+5HRUSVlpT29/fjK3NYW+j6uvEk6gn9BAnwDyCzdbV1FndUp6Nyd3Wfmpyij9mw0HVYac3NzfQXUDM5OjhGP4kuLyvXvsgywJpD19cN8tnh5+Pn7OTc2tJKR2qqa2jaPdw8pqY2dNrRdVguEomEJDwzI9PX27estIwdVY+TX7cHzg/iYuMqKyqHRbgaE+godH19q66qZtPu7rGR19rRdfgUoyOjtTW1qU9TvTy96C8UnSLCI9hHqGF3FlgX0PX1amhwKCoiKjkpuaSkhF4bhqxJbJwFmnnQdfggcpn81atXBfkFpNwO99+4Whqd3FzdyC9XU2MT+wSA9QNdX68iwyPpB1D0k+ia6hp64gs7G7u21jb2ERsJug5LNzMzQw9I054sLSx9vHzIKntDfcMG/1YL1jt0fb3KSM+gn0dODk5k9lXnq3t375FZC3OL8rJy+piNA12Ht6lWyjvZlfKoyCh2VC3QP5D8sjjed4yMiCwsKOx61SWX41rmwBHo+no1OTlJVtmdnZyrq6rpyLBo+JHLIxp7stqxoS7SjK4DNTI8UlNdk5Kc4u3pbWluSX8dyGRjZcMwDPsgpXKgf2B0dJSdAeAWdJ1TpsXTQQFB9IMsIT6BHd0AFApFc1MT+V92HjYMmUzW2dmZn5cf/jhc+wppmkmzUo6zt8IGga5zxNTUVFBgEFlfJysrpOhk7aS3p5e9D4C7oiKj5oVc9U25t+qb8rraurFRbMKBDQdd54iiwiL6oWZnayeXyUVDIvYOnTBeFePq6uIyOz1yc3P3DYpMf9EtZh/wDvK2dC+PiDLR3DbUTyaeEi+4Ys+IqqKdLf66cdsmLDsr1MP/Wdfsw6YXeQasDplU1tnBrpQ72DtMT0+zd8z+5Ds6OEZFRJHbwi4hvimHDQ5d54ie7h7adfM75iPDI+yoGsn8Gl/dmekLO8/jG+459vVXp+h0/NB+Ex5fT3DcPPP1e4ItyfzbxPD/PNuWqauSmqBfjlyPXmixZzT9r71bDPZ/8/PN/4JiHY8ampkXSMmwpDbo6rHrUTgLySojy6bVVdUpySleHl4W5nPHlJNJc2omYmpqCvtVAGhD17mj6mVVSFDI2zvDJ8QnkI/CmOgYqVSVqTWg7rrglHuL9nqUVPj01hcC3kGHF+/+1nOZuz4cdclI/9JCXZdV3Dso2HM7V70NQSrq7hQOqdcKRdGX+UaXI9H1lSaZlrS0tORk54SGhNKDO+ZNTo5OUZFRxUXF2OUN4B3Qdc4iq+l1dXVDg0MhwSH0Y9HVxXXexaFXyYJdJyGttD/K410I6WXX2BUjTc9jw7w9g6LTXnRpDiBmu94yUPMsyt8/JCG/STT7Moq+yvS0ojbNOcAUPRUZqcUddOO+XNSQHRPq5eblH/msZoAu0Ux3lsRZfi3gf20d/bSwdVw9psaMNecluV7cJjD71Tc5tbh1bKQxNy2rZpARd5bEWp/gCU5Yxj4t0H4GfDpGwSjkc8trZMFUk3A6WVla+fr4pqWmkZ/k8TH86wMsCbrOWYEBqiN0Le5YdLR35D7PpWfHtLayrnhRwT5i1SzcdYUw/OKOLSZ/pao+r5mBHMdvTAWGZie/P3f20DZDo0N/RTSoA63qusDs6zN7tx04ff7C17uN+aYXvKvU2Ren39hq+I1vh+Zb8OTrxvzvgrsVSnlrxE+7DE32f3fpp5/O7Nu+ZevpB6UTSkaUavXdARP9zSaHT317J1bIPo1QtEX9debYLp6+odmJ09+ax3VWP1Bvh58WpVmfOWyip2+y/8zZf+OE+JL9U5FV7braOpJqP18/a0trzVGaxNDQEPkRdXnoEhMdU1pS2tPTgwMcAD4Cus5NM8yM5pifiMeqc1x3dnRqzpcZGR4pnlrCLmvLRd11g8+vPgoKCg1WTcF+bo63vtsv0Ntxxr2avBFGlPbnLoHppZBG9bq3YiD/3tfG+l+51sho1/W37LoW1qx6x4yo3PGkieC0r2oZYdGuy17cO8I/aF9G/45TlQ7Htu+7k6OeW3w7vDTf3ExwxLFa9bWArJZ2XbWaj+3wn0QqlZIly/y8fPJz+PYZW1OSU9jHqa3qjyUAR6HrnEXWeOhHZ3FRMR2ZnJgMD
Qmlg473Hdvb2un4iqP7zfENd+003bNz23Z9Pb7e5l3Hf7kb8KydbkMfT7lpwvvKWZ1Uauzpn9tVI+z6+iH7Ss19ouhrAt5Zn453dF3e6Pp/fINjN30zXnZPvrnKh66vAqlEWvGiIiE+wd3NXfsyKnSyNLf08vRKTkquelk1bx9PAPh06DqXjYyM9PX1sTNqcpm8pLjEylJ1Mnkypaelr8bJOt7cDq8YrY34fZ/h1mN3swfpN+uKDp+zfMEfyVrravIG9695pn9l0K4bXQxnH0pIi2w+5+23KnrXdnhmpNzz8kFDsgCxyWjnl5f+cUmoZg+UQ9dXwtTUFPlhY2fIfxixWFNxOjk7OkdFqo5DU52xVfbmfhYAsKzQ9Y1CIVf4+/mTVaUnUU8GBgbIipTmA7e+vp590Ap5+/t1cbXHaUMD0wuhzapyKoQB5/j6v8RqHa0kq3rwJe/zO7ls18+H9s11vcDajHfUvkLddWPDMz6ark/EXTWiXVdTjHWUpYS43vnp1E6evtEZ30bVH4+uLwuFQtHT3UOWEaOfRD90fkh+kMjPFXufmrend2BAYGZGZmND48TEBDsKACsPXd8oOto7aMjJVFdXJ5fLyco6e1jwHfPB14Ps41bCQvvNSRs8zxrqbTvr00w6SlL9GW/vn6nDs/FWdAX+KND/MUg4u98ce/gZIW9yPc03/iNxlNyVdWu74KhzLfuy8jqXowJV12WjZYFW//qUzu5TL292P8Pn/xSlivlI9GUj/sUnC5zmZrGuj8Rc4RteilzOE+OsT2NjY+wubz6qXd40P050euD8gH0cAKwpdH2jICEnq+b0I1hzVemB/oEA/4DEhEQ6u1IW6jqpcs2jE1s3G18MbpMrZfWep7fxP/8toEg4NjnYmOLwzXaB2Y3UAdJS9X5zevyjf0dW942Jmp7e+z8Tw0O2Jaov5hXCoB+MeZ/fDK8Q9rSUxdl9v5Ov7rpCXud62sDwtE1qy4hUPiksdv1hp/7RR6q98JTi5OvbeJ/fiiiqFWqOj6MW67r46W+Ggn03o4pq5j9jYygtKY0Mj3S876hJuGa6Z3cvNCT0ec7z1tZW7XPAAcAaQtc3kOHh4dznudVV1TMzb1zqTfu8mwq5oqG+gZ1ZLgt3XamceuF8jKe//VJUl0KpeF3u99tx1UnoNvH19D//5r/oBnq8sno7/Dl7n+v7TbZs4m/m7z17N/OVqrcqYxV+Vw9s55Gn6Jl88ZNnqOVxAd0OL2l58veJbfTVNgl2nrgdRY+aUzK9KbcPGPD1Nu8zz1cPaCzWdaYv5dZhwSb+FjOr/Nk/l6PID8bQ0BCJNDuv5ufrpwm5apc3D3aXNx07VzEAsND1jWtqcsrf1//+vfs52TnskFKZl5tHPr4DAwJfD6zFvyEjHupq63o9OW8BgJKKuruHxG9vDpeP93V09M/b711FNtbb1tTS3j8x7+UU4uHXQxOqfi+VQiwaHBr/kGesG2NjY2RJ7lnms6DAIDtbO/Jf39baVvsiv/l5+WR9vSC/4FXnK1wSDUD3oesb18vKl5r1sM7OTjoY/jicjliYWzxNeToxjj2euEY8JW5paXme8/xx2GMH+/kHlJPJ2tJ6WITdBAHWK3R94xoZHrG1saUf5ULh3MnX6urqnByd6LiVpVVyUvLoCE7HzQXFRcV03/V5k8UdC3c394T4hPKy8t7eXpzlDWBdQ9c3tG5h97PMZ3W1dey8UtnX25eZkUlGsrOy7WxUW2XJZGluGR8Xj+9T1wvVQWg9PWWlZeS/mvbubAX5BZqWuzxwiX4SXVRY9OrVK5kUW9cBuANdhzlyuVxz/FJLcwtJQu7zXM2VtchaHT32nX20LmEYprenh/wvO7/BzMzMkJ/5l5UvU5JTfLx8rCzY8w6RSfusgv39/eQ/aFtr27QY+64DcBa6DnOmpqZsrdkt85qD36RSaWFBofYXsRGPI6YmNddb0wkjw8N8vU0b6jrcIyMjdbV1GekZgf6Bmu9TtKe7dneDg4I1e04AwAaBrsMbSCocHRydnZx7e3rpiFymOoNNQnxCdlY2PQL+gdMDRqFba8YbrevVVdXaCaeTtZW1n69fWmpaTXWNSIQvTQA2KHQd3iPrWRbNhpOj08T4RGVFpfb38URRYdGaf/XOya5LJJKO9o6C/ILIiEiyREVqzd6hVJKfcPJfxNLc0tPDMykxqeJFRX9//4b9GgIAtKHr8B70iHY6ae8YL5PJFHLV/ln0Ln8//6qXVWt1fDM3ui6Xy4VCYUlxSWxMrKuLq/kd9p+dTqlPU9nHqb9QJ4/UPqEQAACFrsN7zDAzT1Oeent6k8CzQ0pldlY2KY2drR0ZvH+PvdC7asTGjqw+ktizj1st67frkmkJCXlCfAJZ8ybr35p/Sc1E1tTJ+np+Xr7mmxEAgHdA1+FjaFru4ebBKJiG+oawkDDtK22TcZL8wcGVvJyMlnXRdbKENDQ0VFdbp70v2/T0tOYfjU72d+1DQ0LJklNTU9PkxIY8JT0AfAJ0HT5GRnoGjVBsTCw7pFSmJKfYWtvOu9LXI5dHmRmZ3cJu9kErQze7TtbFX3W+Ki0pJavjPl4+NlY29N9k/iVNvbyDg4LJPylJvvZVzAEAPgK6Dh+ptbW1oaFBc26yyclJTcvJ6mZ8XLzmwHcykXXQeRebWV460vWBgYHamlqyqh0eFu7sxF49b95kaW4Z8TiCfQIAwHJD12F5yGVyDzcPmq5nmc/ICMMw7a3t9Pqed23vaq+JRj+JJquwxUXF7W3tZIGAHf0Eq991uVz+euB1X28fO6+24HHk9+/dDw4MTk9Lr3pZ1d/Xj7O0AsCKQtdh2UyLp/Pz8ktLSjVHt/f29mryRsJGB6cmpzSDdLK/Zx8UEPQ05WnFi4puYbdU+sGXQ13RrpO/ztDgUFNTU1FhUXJSclBgkOo4fvXO6v6+/uyD1Hy8Ved6I8s3MdExBfkFra2tExO4cA4ArCp0HVaQRCLx9fGl8dYc9S4UChfc8Vt7umd375HLo9znufQpVH19fUdHB/mPOzo6OjkxKZmWaA70+vSuzztHOllAiY+LDw0OffjgoYX53P6A8yayLs4+QY28KxxEDgBrC12HlaWQKxobG7WP0erpZg95J1NaalpnZydZxU9MSNScwlYzaR+xLZfJ592rmcgq8p3/7pCu29nYuTxwSUlOYZ+jFhwY7O3p7ePlQ1amfb19vTy83F3dHz189MDpgeN9R/u79uRZZDnDz9ePfYIaecy8P4WsoDs7OZNXI6vsxUXFzU3NQ0NDunbePQAAdB3WQEF+wV3bu57unuNj43Skq6tLU9CoyKjKikrymPb29oz0DBLg6CfR5L+pleXc5UzmTbf+ukW6/s/f/5DbkeGR9DWpu3Z3tR+52EQWCNgnqD1NeRoeFk6WNsjbaGhoeD3wGieBAYB1AV0HnTA9Pe3t5U0T29zcTAdJTTXdpV/PSySSYdFwUWFRcFBwcmJyW0sbWW+ur6svLCgkXc/JziF3NTY20qdTJMzZWdlkynqWRSbymLzcPPL4kuKS8rJysgBR
XVXdUN+w0kfiAQCsDnQddIVCoWhpbhkaHGLnlUrxlPi+PXsCHFJiOjgxPqG5DinpNB2k368nJSZpn0QdAGADQtdBp01OTpI1bLJWPcOwh7+LxWJLC3a3O81X6QP9A5rt8GTVnA4SZOWerK8j9gCwcaDrsP709vSGhYYlxCeQFXo68nrgtabrURFRdJBhGJeHLnQJQHsXPLLGX1ZahtgDACeh68AFdDu8s6NzoH/g4Gv2pPQzMzOak9oGBgTSQcLXmz30jiwZsEPqzQAVLypeVr7UbBgAAFiP0HXggsWOX29rbXN1cfXy9BJ2CdkhpdLB3oF23d3VnR1SKh+HPaaD2rvTSyVSUvrysnKcJA4A1gt0Hbjgg85L097e7vLQ5dHDR01NTeyQUunm6ka7TqrPDimVcbFxdFD7vHJymbzqZVVxUfFaXWweAOAd0HXggk8/3xxZoXd75Obs6EyazQ4plQH+AbTrVpZWmu3z6WnpdNDVxZWOEGSFnqzZ5z7P1XzlDwCwJtB14IJP7/qC+vv7vb28nRydykrL2CGlMjIiknbd/I75tHiaDpKi08H79+5rTknLKJi83LzYmNje3rnT7QEArCh0Hbhghbq+IJFIFOgf6HjfkTSbHVIqk5OS2dj/Zz48PEwHiwqL6IiFuQV5Fh0ksU99murn60fW7+kIAMAyQteBC1az6wuamJiIioh64PwgJzuHHVKf6k4Te80Z8iteVGgG29ra6CDDMNFPosmyAkn+vCvQKOTYZQ8APgC6Dlyw5l1fkEKhyH2eG+AfQFbc2SGlsrGhUdP1jo4OOlhfV68ZJOGngyT2oSGhFncsyCuMjozSQWpYNPwRV7MFgI0AXQcu0M2uL6apqSkuNk57O3x/f7+1FXuofUtzCx3s7OjUxF5zEr0ZZsbf15+MODo4dr3qooOETCbraO/QbO0HgA0LXQcuWF9dXxBJck52TnMTe80bYmpy6oHzA9p1zcVshgaH6AiZ4uPi6SCJPT3ZjqWFZWVFJR0kpqamql5WaRYUAGAjQNeBCzjQ9QVJpVIS5p6eHnZevdtdSHAI7XpDfQMdlEqkdIRMIUEhdJDw8/Gjg2mpaeyQUkn+lfJy8wryC3DlWQBOQteBC7ja9cUIhcLxcfbS9VTWsywLcwsrS6v6+np2SOva857unuyQUhnoH0gHgwKC2CGlcnBw8EnUk7DQsIH+AXYIANYndB24YKN1fUGTk5PzVsFJ4x3sHZwcnBoa2DV7wsPNg3bdydGJHVIqQ4ND6eBd27vskPr6Oi4PXWysbbT3+yMk0xIckQ+gs9B14AJ0feleD7wma+o+3j7a3+VHP4mmXXe476A5GT5Zg6eDZCILDXTw1atXFuYWZCQoMEj7kDwyXlxUPDE+wc4DwBpB14EL0PVPRFb0c5/nJiUmdXd3s0NKZVlpGY26s6OzJuFpqWl0kEyaq+m0tbXRETtbu6HBITpIkBX9sNAwnIEHYDWh68AF6PoKEQqFFS8qxkbn/mFJ+J0cnEjCXV1c5TJ2s39pSSntOpnq69gv+Nta2diTSftq9zHRMdZW1qHBodovyygY8mdNTGB1H+BToevABej6apJKpQP9A9oXqheLxbExsTZWNuFh4ZrN+J2dc8ffa66mQ+KtGXyW+YwOMgzj5elFRmytbbX3++sWdsfHxZOHSSVvnIRHJpPhtDwAi0HXgQvQdd3U1NRESp/7PHdmhl0ImJqacrjPXv++uqqaDpIVdzpCpqjIKDpI1uAdHRzpYMTjCDpIlBSXWFla2drYvih/wQ6pj/Qjs4UFheg9ALoOXICuryOSaUlZaVlHO3sOXSohPoH0mwRbe2+++/fu065rH5T/wIk9V4/2lfKTEpPo4KOHj9ghpXJoaMjH2+e+/f3iomJ2SG3w9WBtTe288/ADcAa6DlyArnPAyMjIvNYODg6S1ffYmFjtffEy0jNowqOfRLNDSuXj0Md00MrCSvMidFmBTpoT7JPlCTri5Oik2XufYZjkpGRXF9fsrGzNTgPE2OhYZUUlWT5g5wHWA3QduABd31B6eno6OztJjNl59RJAcFAwWVkvLytnh7T257e/a//2lfLJ1NLCnmG3rrZOM6j5dmBqauqurerEPpYWlo0N7Hl8CfIKDvYOQQFBZL2fHVKfxa8gv4AsBGi/K0IikYjFYnYGYFWg68AF6DosqLOjs6iwSHuFm6y4B/gHkFoH+gdqdvHr6uoyv8N2vbWllQ52d3fTETKlp6XTwYmJCc0jY6Jj6ODMzIynh+e8QYIsZFhZWFlbWpPks0NK5fjYeHJSckR4hObSvdRA/0BtTa321gJKs0QCsEToOnABug4f5O3d63p7e9NS07T3xmcUTOrTVEtzSzdXt+HhYTo4w8w4OaoO8yOT9rX27e/Z00FvT292SKl0e+RGB21tbDV7DsbFxtFBknzN+QE1V+91cnCanmZDLp4Su7u6k8GwkLCpqSk6SBQWFPr5+JE/XftrC5FIRJYeWlvZhRKNYdHw5AR7TiHYINB14AJ0HVYIqTt7axZZJigpLqmvq1fI2dV9ggTVy8PLx8uns6OTHaLb/NUr95qd/AnNLn4k9pp18fS0dDpIpq4u9vK7mu8RyKT5dkD7QEHN+X3JW7pvz+5jmJ+XTweJ+Lh4MmJtZa197AB5qx7uHmTSfA1BkHeSl5v3NOXp21f6b2psevsfQbOpA3QQug5cgK6Dbhp8PUhKrFlZJ8hK9vOc59FPotva2tgh9f4Bvt6+FncsIsIjNA/u7++/Z3ePhNnSwpL0lQ6SR9J+k0mzM4Hq24HZQc0XAQzDWFlY0UEvTy86SNBtAGRyuL/AAQV2NnaaXQSam5rpoMsDF4lEQgenp6dVFwW+Y/4k6olm0wJBFh3CQsLIEo928skbq6yo1L4gIUUWRHA5wZWDrgMXoOvAATLZ/EPvSDjr6urmnYavu1t1up7SklLt1WjSeGdHZ39ff+1PP7L+TTcYaF+5Jyoyitbaz8ePHVIqY2Ni6SBZuddsRVjwnMHaWxFqa2rpYE93j2aQvDE6KJfJ6akJyUSeRQeJZ5nPyJKKg72D9t6Ifb195I2RZQWy4MIOqTeWtDS3VL2smrd5YIaZ0f5iAuZB14EL0HWABZEV/fGxNy7pS1bHX1a+JCvW2t+7T4xPJCYkBvgHaFJNDPQP0Kv/hQSHaE4vSFa+baxtyKCFuYVmh8T+/n7abzJpuq59uiHNVgRSaBJ1OkjW++kgQU84SCbtMxBkZ2XTQXdXd3aILAH09ZFlAjJIXlOzZENukMUF8v4rKyq1z4QoGhKRRQqRSMTOzyLLLvOOXOASdB24AF0HWAmkkW8fp0e3rmv2JaTaWtsiwyNzn+dq73ZAZslCgOsjV7I6zg4plRHhEbTWmhMJE/QgBTK5ubqxQ0rl4zD2tATmd8w1byMlOYUd/M9cc0wBWVLRDJJ3QgdHR0atLFXfRJAlCe09CqMiVFssHrk80r7KEXkW+eOSk5LnbR2RSqRVL6vWV1PQdeACdB1AN2nvW6DR0dGh3VSCZDszI5Nk9fXA3Kc3eYynh6e
djZ32oQfNTc3WVtYkzI73HTVHBNTX1dOok4m8OB3UvvIQWcKgg9o7KMTHxdNBuVxOd2Ugk/ZJiwl/X38yaGtjS09XsOBfR9eg68AF6DrAxjE+Nt7U1DTvYMWG+oboJ9HaFwWWy+Rk5d7KwsrXx1fzpQOjYNzd2D0HNUcZEJqTFkdGRLJD6u8sNKcroNckbG5uLiwq0uxFqJvQdeACdB0AlmiGmSF51t5BjxgZGUlKTMpIz5i3O0JBfoHFHYtA/0C6GNHY2Ojr5xccElJXV6ez39Cj68AF6DoArBDtHfFo1+kUHRPz9iF8ugBdBy5A1wFgFWh3nU4ZmZm69smDrgMXoOsAsAre7jqZ/AMCSsvK3j79wFr54K7n5OQEBgXp8kT+lQMCA+cNYuL25OHpQbru6eU1bxwTJkyYlnEiCdcuuvYU9vixjqxafHDXx8fHh3Qb+advaWlhZ2BjaGtrI13v6Ohg5wEAVsCLFy/m5ZxOiUlJ83bEW0PYDg9cgO3wALAK3t4OHx4R0ap1qn9dgK4DF6DrALAKtLseEBhIVt918AI26DpwAboOAKtA0/WsrKx5Z5zVHeg6cAG6DgCrgHQ9Ni6ur2/udPc6CF0HLkDXAWAViKendf8U8eg6cAG6DgBAoevABeg6AACFrgMXoOsAABS6DlyArgMAUOg6cAG6DgBAoevABeg6wCqRt6R7PXJ1cVlgcosoFy39kuTyjgwfj/CSoXc+Q96e4ecWXvbuB30Y8ZRYofp/eXumj2tk6XK+tI5A14EL0HWAVSLJvXv0sy8+M1NNu7YK9DYZ71DfJtMXVx93qpO5JJKc/5kannRrfufp2iRZ/+zhn/BsWqZzukmqQ64e/uOJSH076/ZO3mn35XppHYKuAxeg6wCrT1bleIzH/y1+ip3/QOLBrs6e0fdc3FQ8KHz/g5ZMFHlVn381St119Z/fu2wvrUPQdeACdB1g9S3QdUV/VWb6yx5RS1Z4SFR2w6BUNTjVVZ7yOMDd1cMrKD63ZZRdpVcMVGWm5beMM6pbWekVPTJRY2aEn4dPRHJxBxmdfVBWqvpB6sdnVHTLRA3PIny8fB4/LW6ffZSKWFiSGOzlG5JQ2DY23lqYnts4Mm8Lu7izNNbyNJ932jI6taCFvNXqZ+T/x+hLq96AVFSfHubr7vk4tWaIvEm5qDk3LsTbLzq7WfsPYsZanj/x8/Twj05/0S1mB3UKug5cgK4DrL4Fui7J+Geb4JuL3+3cvIm/2fjXmNfTzY9/+5y/dc+xHy9d/PGrPcabeftuJPSq0j63HV61sV3/xLWf9pru+fLMiS9M+ZuMDt3JUX9Vr7UdXvV4o5M/XfvcZO/Rk6f3mwj0+F+aZ6kfpehNu33MkGe6//SFs4fNth++/N3nhkccq99cFWdEqbbf7DfdrGd64OQP/8Zqb4dXv4Gvr14y27bz0Jmv927fzP/yTljojYN7D5w8+5WZ8WaD4/eKJtUvMl7p/+s+g22fnbh48ezxnfo7jv+b2KZza/zoOnABug6w+hbpup7Bnl8i6kRjvV2vJ0VJ100Eh6yLR+kK79RLx6MC/g+hPWT2za5v1ttzNaxNtfrLjOZbfskTXItWbS2f13X9zabXHrdOqx9VYHlQYPhTrIghwb61m7f/ZqJQVVhmqNjhjNEmwVtdV9HeDj+v65s37/s9Tv0Kk4WW+wz0eEctnqvW2hlRxt+7Bbv+fS4ha/wl9w7xdv7o36j+GzOiEqdTgm3ngj5gp4JVga4DF6DrAKtvka5v2Weep97+TsjHuhtq22b3kpePtgT8aML/2qOR7fRc13m7LXJJOdXEqX+bkOI2k5vzui7Ycztv9lHTaTd2kLua5ePJv5vyT/m0auo6lnZj2wd3nWdmlc++6/HYq0b8o6615B5C0eX/rUD/58QppTTffB/fzLpg9i+nVA5HXzHmnw0W6lbY0XXgAnQdYPUt0nVVkmkSVSQ9ZeGO/1797vRhs50GW4wE+gb8r9zf7joZbJh9kuTZ7R28U64Nqpvzuv71o7lHZd3arXopSZvXKcHWP9LmvuqW17scN/rQrrPvSmU68RcjdqMCwQyEnTfU/zlhkhmKuGiot9lQIDA2nJ30efwt++6WzZVeF6DrwAXoOsDqW6zr33jNrjsrepJ+3yMwOnjN2i0sLqO4rkeU8sf2hbuudTDb4l2nj6c0XRcGnBXwL0bNHYguq7h34O3v11Xe1fW5N8B2vXde15UjTy6T9Xi7tPLKioq5qbJGqL1bnQ5A14EL0HWA1fferjNDkVf0eSdc2C3aZE262f2EgN3EvVxdl8vK7x7hm/6VMru1X1L18Chv4e3wI09+1uddiWR3yfvQrqv+vnyTX6P7ZzPODGQ8uG0eWK7ap16HoOvABeg6wOp7//r6eOrf2zfvuuhbR8qnGG1JvXd2mx6fd+B+OUnusnVdqeiMvLxdsOe8Y/Tz0oIUn98OGm/eJPhyoa6Lk28YbTnwZ0RxjfAjuq5UdIRfNBHs/tEjp21EPNVT7HPNjGd6MQz7za04dH0DQtcBVt/7u66UdSTdOWPK42/hbdU32Hvmv8hc/6uGgivh/dqd/tSuk/aOVIT+79tDJvrGOw/+aP7Y//ft2o+cw/Sm/rPfWG+TgZn5x3SdzAyV+P562JS3iXzg8LdsPXrNvXBQt1bWCXQduABdB9BdsrG+zvbesbdXn5cHM9pS+aL59dx+c2Px1wRbL4YvElyFWPR6aPyT3ox0tLutpWPg015k5aDrwAXoOsCGpegKOKcv+MGzTr1GrRgudzm71eCsd8vbq+sbBLoOXICuA2xczEip2/l9BrwdZgeO7jM13iI4cj2sQSfP8Lo60HXgAnQdYIOb7q0pzEhKSsksbRFt2DV1Cl0HLkDXAQAodB24AF0HAKDQdeACdB0AgELXgQvQdQAACl0HLkDXAQAodB24AF0HAKDQdeACdB0AgELXgQvQdQAACl0HLkDXAQAodB24AF0HAKDQdeACdB0AgELXgQvQdQAACl0HLkDXAQAodB24AF0HAKDQdeACdB0AgELXgQvQdQAACl0HLkDXAQAodB24AF0HAKDQdeACdB0AgELXgQvQdQAACl0HLkDXAQAodB24AF0HAKDQdeACdB0AgELXgQvQdQAACl0HLkDXAQAodB24AF0HAKA42PXSsrKJiQl2BjYGdB0AgOJg12EDQtcBACh0HbgAXQcAoNB14AJ0HQCAQteBC9B1AAAKXQcuQNcBACh0HbgAXQcAoNB14AJ0HQCAQteBC9B1AAAKXQcuQNcBACh0HbgAXQcAoNB14AK5TFZaUiKXy9l5AICNCl0HAADgDnQdAACAO9B1AAAA7kDXAQAAuANdBwAA4A50HQAAgDvQdQAAAO5A1wEAALgDXQcAAOAOdB0AAIA70HUAAADuQNcBAAC4A10HAADgDnQdAACAO9B1AAAA7kDXAQAAuANdBwAA4A50HQAAgDvQdQAAAO5A1wEAALgDXQcAAOAOdB0AAIA70HUAAADuQNcBAAC4Qqn8/1/a1csWTWMXAAAAAElFTkSuQmCC)\
n\nUndefitting (az öğrenme), Optimum öğrenme, ve Overfitting (aşırı öğrenme)", "_____no_output_____" ], [ "![overfit.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAssAAAEsCAIAAAC6w2krAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAIWkSURBVHhe7b2HVxRb9v79/jXzm5k7850rSs4ZiQoCYg6YEQQURQRRQMCEAiomVIIBBUSCqAgiiJKDRMk5iZIzzLu9dW5P3+pAQ1d3Vzf7s57Foqprd1dX19nnOVWnzvn//osgCIIgCMI06DAQBEEQBGEedBgIgiAIgjAPOgwEQRAEQZgHHQaCIAiCIMyDDgNBEARBEOZBh4EgCIIgCPOgw0AQBEEQhHnQYSAIgiAIwjzoMBAEQRAEYR50GAiCIAiCMA86DARBEARBmAcdBoIgCIIgzIMOA0EQBEEQ5kGHgSAIgiAI86DDQBAEQRCEedBhIAiCIAjCPOgwEARBEARhHnQYCIIgCIIwDzoMBEEQBEGYBx0GgiAIgiDMgw4DQRAEQRDmQYeBIAiCIAjzoMNAEARBEIR50GEgCIIgCMI86DAQBEEQBGEedBgIgiAIgjAPOgwEQRAEQZgHHQaCIAiCIMyDDgNBEARBEOZBh4EgCIIgCPOgw0AQBEEQhHnQYSAIgiAIwjzoMBAEQRAEYR50GAiCIAiCMA86DARBEARBmAcdBoIgCIIgzIMOA0EQBEEQ5kGHgSAIgiAI86DDQBAEQRCEedBhIAiCIAjCPOgwEARZmquXL/n5nC788oUsIwiCLAU6DARBlsZyvdm63//z7OkTsowgCLIU6DAQBFkadBiIgjH+tXq0uJgsIJIBHQaCIEuDDgNRJPqfJRT99u9SNY2Zvj6yCpEA6DAQ+aam/euByD2lTSVkGZEM6DAQxWBhdrb5tE/h3/4OKtfVn6irIy8gEgAdBiKvDI0OhTwP1PfWBO0K27qwsEBeQCQAOgxEAZgfH6/btYeyF/DP7I8f5AVEMqDDQOSP+fm5hLwnlv4mlL3YeXVLaSPeT5Us6DAQeWd2aKjabhNlL9pDLy5im0TyoMNA5IySxiKwFJS3sPA3BqsxNz9HXkMkBjoMRK6ZHxuvNDGj7EXvw0dkLSJh0GEg8sTM7LTdBWvKXgQnBA6NficvIBIGHQYi73Rculz0z399T00ly4jkQYeByBlvyl4fiNxT0/6VLCNSAR0GogBM1DeQ/xCpgA4DkTMWFxexU6f0QYeBIMhyQYeBsJfWvpaOwXaygMgUdBgIgiwXdBgIGxmbGotMvW7oo+N+14WsQmQKOgxEvphqbZ2oqycLiIxAh4Gwi8XFxYySNNsgS6o7p0PIxoHhAfIaIjvQYSByxHR3d7mufsk6lbHycrIKkQXoMBAWUdtRc+iGM+UtTM7o3Xtze2pmiryGyBR0GIi8MDs4WGFsWvi3vxf9+z8jhYVkLSIL0GEgbCH8VRjlLUA+j7y6h7rICwgLQIeByAVzw8NVlla/7MU///XzQy5Zi8gIdBgIW4jPiQFvsePK5sKGz2QVwhrQYSDsZ358vNrO/tewWn//59DrTLIWkR3oMBC2MDs3+/JzEg7QyU7QYSAsZ2F6unbbdmrUzoEXiWQtIlPQYSAIsjToMBCW0//kKWUveh/FkFWIrEGHgUibxcXF1MKXN9Kuk2VEHkCHgbCf7ltRXRGRZAFhAegwEKnS0tfsEnWQ6s5Z0VxG1iKsBx0GgiDLZdU5jMnJyeKiwvdZWfkfP3a043iR0mNmdvrumygjHx3KXvjFnRZ/oAvOr5n3MbexsZGsRSQAOgwhzM7ODg8Pz81hFyJFgPyas7NkGRGDVeQwent6wq5c0dPSNDbQt7fdaG1hrrpW6fDBA/l5eYuLi2QjRDKUNBZtveRAeQuHkI35tXnkhZUC7vBicLCmqoq68jotdTUtNVUVpTUOdrZpr17NzMyQjRDmQIfBCxjcZ0+e2Fpbw5GhtGv7tjeZmWg15JGxsbHHcbEWZqacX9PGwiLh2VP4lckWyPJZLQ7jc8EnDRVl8BNv374d/P6dUn19/aXQUH1tLR/vUziZluRIL06lvIXhae0badcnpyfICyvldUY6+AnwFpxcwBH8ynYbbH7+/Ek2RRgCHQaND9nZ0FyBtkpcbGxtbW1La2tlVdWNyIj1xsYWpibfGnAOT4kzOzQ08e0bWRCPlORkdWVlaLFwJxMQrAFlZmSQ7ZBlsiocRklxMbRxH8fHcbwFt8BnWJmvP3fWD69kSIjRyZGNgRYHIvY0dDEwTQDYC9W1SrREwC015XVoMhgHHQY3+Xl5UPE8ffKElkxAff390G4x0tdraW4mWyMSYGF6usZhc/GatcN54l4QfZmUJDylwKuQdsjWyHJQfIcxMjKiraH+IDqalgi4VVNTs97Y6El8PIlZTUAevBER7u/n63fG5+rlS2WlpeQFRmnvb5tfmCcLYgBNQ+G5gBKYjH17dpMYhAnQYXCoq6v7ZS+e8rEXHAVfCDI1NBgeHiYxqwlIuR+ys1+lvMxIT4N8IomWG7xn4zF36tnUwZRXZO2KgF0VJaXANgWfPpEYRGQU32HExcRs3exIK/+8ehwft9HaalVdxij88uWAs7PaurXHXI9eDA25fOniKS8vHQ31zfab0l6JVWglx9kzZ0RJByAVpTXYiGQQdBgcznh7+/qcpiUQXm1z2hzz6CGJWR001NefP3tWQ1XFxsJi57ZtkHj1tLQ2WFnGx8aOjY2RjZig82oYZS+6wiPIqpViv3EDd94Qou1bnEgMIjKK7zDANwi6P8Ktru5uKAxQ6ZIwRSczI0NDRTnsypWGb9+4j0N7R0f0/XsGujo3IsLJpsuhvb/t2B0XCU0pAm0j8EO0Yi9IsGXQ+XMkEhEbdBgUP3/+hIJTWFTIXWr46snjx1C5rp5Gy8ukJDgyp7xOfPr0iXMQunt6IP06brKzsbTo7+8jm4rHQGISZS+aPI+TVSulqrJSxBYLCBot2L1muSi4w+hob1de8zu4B84ZL0Q+p05duXSRRCo0lL1ITX1FOwIcFRcXG+rqRIYvw2RAJn2R/8zUV1/fW/No1CGyllGy3r7l7YolRPAVSCQiNugwKJISXzg5ONDKC19B5aqjoV79tYpEKjRgL7TUVN+8yaQdBEr9AwMnTxwHk9HXJ67JGPlSWPTPf4G9qHHasiD2U2NBAefBN3AnDSECL3L18mUSiYiGgjsMKN762lq0012QLl+66HfGh0QqLmNjY9rqaklJibSvTxOYDHUV5bq6OhImlP6ffR73XKkHRuwuWH+pLyAvMEri8wTIYrRiL0Rq69aSSERs0GFQ3ImKOu7hQSssgmS3weZDdjaJVFzq6+ugxSLIXlACk+Hl6blr+zYSsyJmv38vWacC9qLCyGT2xw+yVgxcDh3kzhhLysvTg0QioqHgDqOuthaaEbRzXZB
Cg4MD/P1JpOLyJD7ecZMd7bvz1SmvE+fO+pEwwbyveGfpb0LZC//4M8MTkurdRrWTaGVeiDRUVUgkIjboMChu3Yg8eeIEraQIkoOdbda7dyRScTnn53fKy4v23XnV0tqqqapS/fUrCVsRPffulyirTra0kGXxOLjPmTtjLCl3N1cSiYiGgjuMwYEBOC0aGhpo5zpfHT1y+NaNGyRScbGzsY6NjaF9d74qKCiASnpkZIRE8iP0eRDlLSzPmb4rf0PWSoZP+fnQVOIu8MJlaWZGIhGxQYdB8Tguznn3blpJ4auBwUEjPd3iokISqaAMDw9Dlvj8uYD29fnKx9vbz+c0iVwpjFy9oPDxPsWdMZZU0PnzJBIRDcXv6XnA2Tn82jXaic6rhm/f1NatbW1tJWEKyuTkJJST+vp62tcXJEiRFeXCZg959D4a7MWxOy4Dw/1klcSYm5sz0NHmLvBCBF4k+t49EomIDToMio72dtW1SnX1dbSSwqvMzEw4XRV+hNns91kbrCxp312Q3mdlQUohkSwAdl70Rou68jp8YHW5KL7DePf2zXpj496+Ptq5TtO1sKv7nfeSGOnS19d3KzLS3fXoAee97m6ut2/dHBwQd8IOQQwODkJR6ezspH19QbKxtMgXOqDN/MJ8ZmmG1EZEhYMjYkaAagAH3WIQdBgcDu3fF3b1Cq2k8OrokcNhV66QGOkyNTX1JjMz5uFDMNkvEhK6uiTybBfFy+Tk3Tu20767IJWVlUHBJJEsYH5+3lhfjztvCJG5qcmqGs6AERTfYczNzjrY2Xocc+vr76ed7hxlZKRDvfWlQCL9E4UA7aHj7u5Q5A7uc7518+ajhw/g7749u9XWrT154nh3N/N5YWJiAorKt8a/PKEqRMYG+uVlLJoBdWhoSF9bW3nN79wln1fqKsqXQkNIDMIE6DA45H74oKup8fnzZ1ph4VZSUiKkFOnPrdjb2wu2Rl9H28bC4ujhwx5ubru2b4MM43bU5cvnz2QjRklLTd3u5ET7+oJUWFiouZzeUYsLC5NNTWRBMoAP4zv/AE2wTcKzpyQGERnFdxgANNw3WludPO7Z2dVFO+NBqamv4KRPT00lW0uL9rY2MyNDL0/PiooK2i6Vlpa6u7lamJlKovEBVcXzhATaJ/IV7AZ4HajUSeR//9v1vZP8JztaW1r0tbWEmAywF35nfLC1wSzoMLi5e/u2gY52cXExrchQgpQC9iIn+z3ZWlo0NTUa6ekePrD/7Zs33PtTXVMTcuGCtrpazMMHZFPmKCkuhvLY3dPD/YmC9PTpr4niSKQItPj6Ff/f7z9zP5JlCQCJ4uyZM5A0uHMITWAvQoICSQCyHFaFwwD6+vo222/S09KCklZUVNTY2FhdXf3s6dMtjg5Q8FKSk8l20qKzowPsRcC5c7QSyNHA4KCvjw+YjN6eHhLDEA/u39+xdQvt4/jK38/X28uLipqfn4vKiDQ8rV3SWEStkSFgMuDI8LY8IK2rKK25EBiA9oJx0GHQuBERDi0TvzNnysvLOUXmw4cPbkddwJdLf64syl5cCg3l7AxNeXl5upoajJuMhYUFGwvzJ48f0z6Or7ZvcYL8QyKXojPsGjWyVus5yfavpEyG6tq1vGNjwBrVtUpoL1bManEYFF8KClyPHIYzhjp7rM3NYx89GhsdJS9LkVMnThz3cKcVP5rAZLi5HPH39SUxDEENSvju3Tvax9EE7R4tdTWqm2ffz94jN/dTz4ycfOBJvY9sgaSQn5d3cJ8z52IGNKQirl8Xf0gfhC/oMHj5WlUFFhz8hLmJsd0GGxNDA2iuXAwJlv7NESgOdjY2FwIDaaWYpvz8PCj7jA8CFvPooZODff/AAO3jaKI+XcTeUb0PHlL24ttR10WpdPMqLSnxcHMFSwHGUUtNVUNVBf738vSsqqwkWyDLZ3U5DA7j4+Nzc3NkQeoMDgxAViorK6OVQF59/vxZ9DIpOk8fP4ZUCO0t2sdxBPbC0sz0/NmzsHF+bZ71eTPKXlxPuTozx7q+8RMTE3Ozs2QBkQzoMAQBxbnwy5cPOTklxcUyaa4A0HbS09ISZfDiM6e9z3h7kzCGGB0dgdaa90kvISajsrLSzNgI2gAkRig9d+5S9qJu527xB+5cFvBrFhUWfsjOLi4q5L5BjKyMVeowZMud21EiPlIP2r7FSRJ3Tx/Hx4HJePTwAa1vSl9//8vkZAsz03Nn/WbnZiNTr1PewvKcae7XHBKMrD7QYbAZd1fX4KAlLmBQKioqUldex3jd2dvbCybj5InjbW1ttE8E5efng70IDb5AthYK5+ZIjdPW+fFxshaRT9BhyAAohxHh12mFUJAuX7x41vcMiWSUjPS0TRs36Gppnvc/e//e3QfR0ZdCQ81NjM2MDKPv31tcXPSK9qDsxaEbzj1D3SQMWZWgw2AzupoaeXl5tNQhSBusLCUxljmYjB3btmqpqfr6+GRnZ5eXl4ObSXj2bMfWLRoqyiJevRgtKyNXL3bvnZ+cJGsRuQUdhgxwPXL47p3btGIvSDciI054uJNICVD45Qs4mCMHDx7av8/by+tNZibnjkN+bR7Yixtp4XPzMrujhLAEdBisBRoD8NNUVlXRUocg7dy2NTUlhQQzzdeqKh/vUwY62ipKa8BYbLS2gubKz+WMwtkb/aDh8BEp3xxBJAQ6DBlw6sSJ62FhtGIvSBdDgv39GO7sKTqtfcyM/4/IO+gw2AzU5YIeneWV46ZNb99kkkhWAp6J/IfIOegwZEDMwwfbRB6jxsHO9tkTTOuIjEGHwWY2bdzwOD6Oljr4qq29XUtNtaa6mkQiiCRBhyEDqLmCCgqWnisoJydbW0N9XCrdnSanJ8h/CMIDOgw2ExcT4+RgT8sefHXvzp0tjg4kjAXMCZ1YEZF30GHIhnNn/Q7t3y9kIHNQb1+f8+5dInbAFpP2/ratlxwS8rD+QPiDDoPNjI6OaKmpvs/KouUQmrp7emwsLF4mJZEwWfM9NbVEad3PDx/IMqJwoMOQDYODg7Y21p7uxwSZDLAXri5HHO3sltVJamUUNXyx9DfR99Y08dUfHBkkaxGEC3QYLOfB/fsGOtpfvnyhZRKOwF4c3L9v62bHqakpEiM7ptrbG/YfpB4bqTA2XZyfJy8gigU6DJlBmYz9e/fm5GTTcsG7d+/27NoJ9uKH5Id8eZH/zPC0NtgLmwDzipZyshZB/go6DPZz+9ZNMBnpaWkDkFz+mlKqqqog1YC9GJH1XYmpjo62oAtFv/2bshe1W7fP9PeT1xCFAx2GLPn+x6MiOhrqDna2QYEBYVevBAWct9tgo6eleeXSRSlcvbiRRgbU2h22DUe8QISADkMuiI+NBZNhY2lxOyoqMzPzfVbW8+cJhw/sV12rdNzdXbb2Ym50tH7fgcL/9w/KW5RpaA0kJeFjI4oNOgzZMz4+/vzZswuBAT7ep4IDA5MSX0xKZaiZiNRrlL049cBzArt5IkJBhyEvTE9Pp7xMdt69y8p8vZmxEbRerodd7elmRfuhytIKvEWpumbn9fD5MRyvU/FBh7
FK6RzsWO9nCPbi3GPf+QW8CYosAToMRHTmodn0lc8DsT+y3g++TMHRtFYP6DBWLxXNZaHPg9BeIKKADgNZkrmRkcHk5IaDh4r+9X9lGlp4BwRBh4EwRl1tbcA5/13btzlustu7a9etGzf6+3EudQUBHQYiiLnR0f5nz+r37C3657+oPha/9Pd/TjQ0kC1WytDQ0PNnz6Ju3rgZGRkfF9vZ0UFeQOQEdBgIA5QUF4OrUFmrpLpWCeohShoqyipKa44ePtTb00O2Q+QWdBgIX2Z6e4vXKHGMBZiM+r3OAwnPZ4fE6qhe/bXK0/0YJBBNVRW1dWshscA/ymt+d969K//jR7IRwnrQYSDikvcxF1IAx1jQBDnCxEC/s7OTbI3IJ+gwEEHUbHYq+u3fDYcODya/ZGSMzvdZWWApIHVwZxKOINs8jL5PNkXYjQwcxuzs7NDQ0NjoKN6lkybXUq48yY0nC8wB7Qnu6xZ8BZnCQFeHJb3ZkZWBDgMRxGRj4+z372RBbDIzMpZMKWAybt+8SQIQFiM9hwHGAk6dvbt2cc4SXU2N0OALzU1NZAtEYjzPf0o9mPq57hNZxQTgEa3Wr+f8oEIEGSHA35+EIXIIOx1GVWWlv6/vdicnuw0227c4BZ4/V1dbS15D5JDR0RF15XXcqUOQoN3yTex+HoikkZLD+FpVZWygb2FmGhkRUVlV1dbe3tTc/O7dO49jblD3eHt5gf8gmyJMU/ytkBq10/vhiYWFBbKWCQo+fVITLR2A4IeW+XiCyIphm8MoLytzcrDXUlM9c9o74dmz1Fevnj19CplEQ0UZrEb1169kO4RRfubkjBaXkAUJ8OjBAw1VFe68IUiqa5XgpydhCFuRhsMoKy3V1lC/eSOyf2CANpYtqK6+zt52o5vLkRl8SFoCdA52WJ0zA3uxK2wr32G1Jicn377JjI+LjYuJSU1JWdY45e5urrRiL0SaqirPnjwmkSxmbnb2Q3b208ePYx89epmcjDd3KFjlML58/gze4lrY1ba2Nlo+aWltvRgaoqOhXlFeRrZGGGKssrLo3/8BDX8Sdil0cXGxq6urrra28du34eFhslYEINBYX487aQgXmIyfP3+SYHbT29tbV1fXUF//nbnbSXKBxB1GR3s75ILo+/dpiYBbLS0t0Bw5fdKLxCAMAZZi59UtYC+sz5t1fe8ia/8EssDFkGD4daDupwT/Q6E9deJ4TTWf0XJ4AWtIK/NCpLzm92tXr5BIVgKF/2ZkpL62FrSDNdVU4S8cEBWlNS6HDhUVFpKNVivscRilJSVwrsbGxtDSCLfu3bmjra5W/bWKxCBiMz85WWFkUvi3v5fr6s/09pK1f2VsbOxxfJyFmSmcKmrr1kLZgVLveuRIcZFIxWd0dISTLkSRuvK6yooKEsxKqPFV7TdugL2F1Er1L9m7aye0YZi9nMxaJO4wQi4EQUuXVv55VVVVBUcfnzhgloAnZ8FeGProlDbRL2xC8oU0zfeWJ/XQaUZ6GtlUMButrWixwnX5YiiJZB+dHR1GerrqKsq0fQZBloR0GX3vLtl0VcIehwEJ+urly7QEwqugwACXQwdJDCI2becDqFEuxir5+7ay0lKqucJddkBU8YFfbWqpyRD6+/toscIFn/Xl82cSzD5ampsNdXX43vSBxGtrYy2FiS1ljmQdBnhSOOdyc3NphZ+vjh45fCmUvTWQ3NE+0G7iqw8O48mHOLLqTyh7QTvpaRLFZOzZuYMWJURQed+/e4dEsgzKXgh6Oo7SKjcZLHEYtTU1GirKTc3NtOzBq7r6OvjJWltaSCQiBpMtLdSMZZ1Xw8iqvwL2An4X7vJCE7y6e8d24SZjYmKCFiVc8PuytsMN2As9bS1wV7R95gh23sbSQuFNhmQdxru3b6CZSyv5gpSelmZhakIiESZo7m26lnKF9lTw3OysgY427XTnK6hxu7vp91a4ef7smYjdskDwblDqSCTLsLfdKNxeUALXVV62Su/us8RhhFwIOuXlRUsdguTu5no97CqJRMSgxecM2IsyLW2+U4r0dHfzvfhHEzTcjx11ITECsLOxoUUJkba62pLXRWQCWCV9HW0h9oISmAwHO1sSo6BI1mFADbRvz25asRekwsJCLTVVEslKpqenO9rb6+vroMkrvw+/ZGZkCG9tcAQZIezKZRLGDyjeUMhpUYIEZwIJYxmVFRXU/dElBS7Ew82NhK0yWOIwwDRE3bpJSx2CFHb1io/3KRKJrJSZ/v6i3/4NDqPnHv9xriBL8L3fyiuodIWP/J2akiJqdlJRZm2/roRnT5e8SEwJTIaInVTkFMk6jOTExN07d9CKvSDl5+fraWmSSJbR1NQIjSddTQ0oIVp/1KmGujpwfsvjOPlbHB24T3Hh0tZQB19FIvlx++ZNdeWlMwJU4SXFRSSGZXh5eopyAYMSbDnQ308iVxMscRhuLi53bkfRUocghV+/durECRLJMoaHh2MfPdri4GCkrwftXRsLc9YODvTjfXbxf9aUrFXmO9/61NSU6M0MMCKXQkNIJD+g5QZHgxbFV1AS+/pYOu2RtblIowSBoEJxPXKEhCkiknUYXz5/NtTV7entpZV8vop59Giz/SYSyRoWFxcvhYZCwTju7p6Tkz0wOAi72tffn/n69dHDh6DivHeHpX0L+DIzM0M7xYULLPbXqiU65IcEBQlvwcBRepOZSbZmH3paWrQdFiItNVU2fxfJwRKH4e/ne/6cP3feECKfU6cuhgSTSNYAKSXsyhVoqW9z2hwbG5Obm5ufn5f66pWn+zEobgecnVn4QOPsjx8jBfz7VOZ9zBXxqgMlaJuRSAHU1tSAZRF+iwEOVE72exLAMqA5KnqLBQTfVIFHapCsw1hYWABv/jg+nlby+WrThg3Pnsg4f9GA/Q88529uYlxRUUHbW0oFBQUGOtqR4eEkgPVA8qKd38IFFWqB0GffKSKuXYNCxZtoNP94CJa1uYACshVtt4UIDkji8wQSuZpgicOAs1FfW6uru5tWEnnV1tampa7GtlEx5ufn/XxOw8GE1EHbYVBjY6O7m+tGayvWts55eZXyUvRrGCBob5BIwXxraNDT0uRrXKAxAwU2Py+PbMo+SoqLRLxFwhG0W0mwwiFZhwE8jovdtHHDkpcxUlNfwSk1ybJuO/fv3rEwM62uqaHtLbeKi4uN9HSTEl+QGJnyrbvhRtr1sclRsszD2Ogo7eQWLijkJcXFJFgoA/39t2/dNNTT5cTabbBJTkxk22/KC5gGzj4vKUimqSkpJHI1wRKHAUA+efggmlYMeXXr5s3tW5xIDGsIOOdvY2lRV19H21uO+vr7T544bmW+fmxsjMSwGygOjDsM4OePH48ePDAzNgI/ASUUzCJ4C10tzfCwMOHdz2UOJMxlXdQBocNYORMTE1scHQ7scxbS7MjKegcn0IsEdjUNZ2ZmjA30U1+9ou0tr2IePbKzsaY9siET3O4c0ffWPHxzP1nmAXZyWelAec3vyy3PU1NTkB3m5KcnrK2NNe1bC5Ga8joRLZeCwR6HkZmRAedwd
nY2rRhyK/P1a2hHfsjOJjHsoKurC+rXyspK2t7SNDA4uNl+U3xsLAljN/l5ecuqUI309UikCEC++lpV9SEnJ/t9VmlJiVxklZbm5uXeJVHgSTMk7jCA0dGRHVu37NqxPT8/n1aWOjs7H0RHQy5gm70AXqW8tLYw5zvSOU3dPT0Gujoyv3CX+zUH7AUorzqXrOLH1UuXhHeb4NbeXTtJmOLy7MkT0a9qmpuasMFKSh/2OAwg4ekTaNempLyklUQQVM/PnyfADwrll2zNGi5fDHV1OULbYb56+uSJlfl6uRj2EVpiupoa3GVEiCDzXL0s7PE0xWCjlSXtiwsSeBEPN1cSpohIw2EA4+Pj5/39odg7OdjfiIyIj4t7+CDa389XR0N908YN796+IduxiZMnjosydCAlvzNnmO1TNjEx8aWg4O2bTDDvtTU1ZK1gZuZmtly0B3vhfneJJ86hIaUsmsWGdMC2VqAkgEMt4qge0FZ7HCcfLUvGYZXDAFKSk3U0NOw22Ny/d7e4uLi6pqaoqOh2VJSNpYW+ttab16/JdmzCUFcnMzOTljr4qrevD75FWWkpiWSIqampoaEh+EuWGSI8LEyUB8pAkHlYfo+DERJfPBex0aK2bm1piQRnkpM5UnIYFGOjo0/i491djzrv3nVo//5zfn5sPriwhw+il77dSyns6pUz3sxM9NfW1nYp5NfUTWZGhpvtN4EDg+aag50dnLVChpd58iEO7IXBaa3Gnm9klWCCAs4vOUIOpIyd27auksHzH0VHL9nfEzawtjCXl7vjjMM2hwFMTk4mJb7Y5rSZukoPNhHO2FcpLxmvQRlhfn4edrK6upqWOgTJcdMmpppekHjjY2O5x/jfYGUJa2A92UIw/c+edYZHjHz+Qpb50dfXJ0qFCjnnuLs7iVFoIFEb6estea9ETXndFkcHEqOgSNVhyBfHjrpAk4hW7AUpNDg4wN+fRIoBtL2gJjvmevT9+/ecN+/o7Iy+f9/W2hpabHz7BI1NjVn4G4PDuJQo0nWUxcVFfz9fISaDshfs76TJIHdu3RJiMih7sdrmReSGhQ6Dm7m5OfIfW6EcxtevXznlWrgcN9kx4jCoZz2cHOwfx8fV19e3d3TA38fx8U4ODr/uNCUnk+0EUO+8r/Bvf2/1O0uWBVBVWSn8QiBkm317dgsfXEeRaG9rM9DVEWIywF5APl/W3LPyCDoMgVy9fEmUOdso7d6x48F9/gPeiQ7YC2iNCepb2tff7+HmxtdkJOQ9AXth4qs/NCpqFQgmI+zKFXXldbTGB+wAlIoTHu6ryl5QxMXEQC6mHRDwFsprfofkuJrtBcByhyEXGOnpZr5+TSvXfNXT26unpSX+EPVgIOB8FpRS0lJTwWS8TEoiW/MDvAU4jLqdSw/ICyZDW0Od92IG5BPVtUoH9zmvHntBASbDxNCAbzdYsBcOdrYKby8AdBgCaWtrg4JRW1tLK5a8+vLlC5xGYs5h09TUCPW98EdXwGSA6dm7axeJ+ZOLicHgMEKfB5FlkaEun9rZWOtra+lqalitN4u4fk2OnsVnnKmpKUi40OAz1NXR0dRYb2J8ITCgtbWVvLyKQYchPtBocTl0iFao+epxfLyNhbmY9yg/5uZCff/mjbCeH2/fvAGTIaS7VV9cPDiMUlX1RRF2ZmpyMvHF8w2WFnCqUM33X80VT4+K8nKyxSpjbnY2MyODGkYZGiog+OfQgf0Fnz6tkg7j6DCE4XrksK+PD61M0jQwOOjqcsTf15fErJSggPPHPTxob86r5uZmcDN1tbUk7E9a+1p6hrrJAoIwDToM8enu/vW0anl5Oa1Q09Q/MAAN3Mfx9CmRl8uOrVtv3oikvTmvom4JGzhkor4BHAboZ04OWSUCA/390GSCRvz4OJ+BxlchQ0NDLc3N0FYZGRkhq1YH6DCEAYXESE83NDiYViY5glzgfdILkm9vTw+JWRFjo6PQkoA2B+39+Qo+UXxDgyDLAh0GIwQFBFiZr68RPIhfX38/tDRsLC3ErJtrqquhKdLS0kJ7f161trVB8hEyOUCN42ZwGPV7nckygogMOowlAONpamjg6X6ssKiQVjI/fvx4+MD+DVaWvb29ZOuV8iE7G/IO7f0FKSvr3bJGrUEQ8UGHwQgLCwvnzvqZm5pA9qCVa1B9fb2ry5Fffa0GBkjASrkUGnryxHHa+wvS6VMnQ4MvkEgevqelUZcxRopYOnkhwlrQYSxNZ2fnGW9vdeV1O7ZugXIbGR4eGhzsaGcHxj/wnL/4uQBIeZm8c9s2WrEXpPLychFH3kUQpkCHwRSLi4sR165p/jE40MMH0e/fv8/NzU1OSnI76gLl+vDBA2L26KIAexERfp2WOgTpRmTECQ8PEsnDwuxspYkZOIwmD0+yCkFEAx2GqPz88SPm0UNof5w87nn+7NlnT54wODRCelrqdicnWrEXpKKiIg1VFRKJIFIBHQazUIMD7diyxczYyEhfz9bG+sqli21tbeRlsTnh4Q6+gZY6BOnWzZvuQkeWnGxq6rh0eXF+niwjiGigw2AFxUWFBro6Ik5zn/jihY2lBUQ19zbNzK6uB8AQWYEOQ74IPH/u3NmztNQhSAHnzkGriUQiCHOgw2AFCwsLkMETnj2jlXy+2rNzx93btxcXFx1CNm4ItPhUy96JjBGFAR2GfJH/8SM0Wrp7emjZg1fQsDHS0/2YK2wyI74sTE0tKO6UXQgjoMNgCw+jo7dudhwYHKSVf5oKCwvVldcNDQ2VNZXq/zHPmSjDhCOImKDDkC+gBWJjYf44Pp6WQHj19OkTa/OVjL3RHRVVpqXdcsZvKOP13M+fZC2CcIEOgy0MDw+bm5j4+/kKMRk1NTWQ6C9fDIXtqVG29lzbToUjiERBhyF3PHvyxNTQoKKigpZGuFVZVWVqZLiCsTcWFxbKtHWpZ0x+6f/9o8LIpH6vc+u5gNFiPrNNLUxPizJmF6JgoMNgEZ2dnWAyfH18Oru6aIkAVFJaAlne39cXWhszczOW/ibgMB5/WKVTfSJSBh2GPHIpJAQMhCCTAfbCzNgo5MKyxwKmmGxq7rweXuOwufAfv/3Pavzt7/1PnpIt/mRhaoq8+v/+UfTbv4vXKJWqqJVpaJXr6k/U1ZGN/gTetnqTA7xtjdOW2q3ba3fsrNu5u37P3sZjHrM/fpCNEDkBHQa7AJPhaGenq6UZFHC+oKCgtq6uqqrqVUrKfue9qmuVQkOCqYuZuV9zqFsk/T9XxQjf8K1HR0dW4VQp7AEdhpxyKTRUU1XF1+d0cXExx1vA/9CS0VJTFTIMhujMj439yM7uuXuv5Yxf7fYdo6X06VTmRkc5/oOm8ZoastGfjFVW0rbhaJ4nA0B4q/+5gcSk6U7FnxReHkGHwUaKCgs93Y9xpsyBdsbNyIj+/v+ZCd9Yb7AXbneOkGUFZXFxMT8v74DzXmo8f5CWutql0JDOjg6yBSIt0GHIL9Vfq6gRfYwN9DdYWsBf+N/H+5SQcTyZZXFubqTg88/cjz+y3g9lvvmemjqYnNz/LKEvLp73ssRM
X1/ntesdV652XLrcHnqxPTikLTCo9VxA8ylvsgUXPfejOf6jysoaFueWGpa7pbn5QmCAk4O9jYXFZvtNAf7+jd+wK5ukQIfBaqanp+d5nkEfmxw1OaMHDiO1MIWsUkTa29qgVoNUSHkLjmANGA7Ij7xHBpEc6DDknR9DQ2WlpfkfP8JfRgb1YgODL1NqnLYW/2cNx2eU6+hNNDSQl/9Kc1PTnp07VJTWqK1by8knquvWwpodW7bU19Pv1yDigw5D/ugYbPe452rqqw9Wg6xSOMBeGOrqUNMz8pWGirLnsWNoMqQGOgyEtSzMzg7n5zd7ny767d9gMkqU1o18KSSv/QnYCz0tTc7VUF5pqavxziiJiAk6DHlFge3F8PCwoZ6uEHtBCUzG2TM+JAaRMOgwEPYzVl5eqqpWqqI2kJhEVv1BW1ubcHsBglfBZHwTcP0DWRnoMBDW8ejBA04fFOECF9LXtyr6usocdBgKz8Ls7Ghx8cDzFx1Xrn5PSyNrueh/ltB9K2rkSyGbhw+f6ujgHZzjhKfHki0WEJiMwwcOkBiECVjhMAYHBm7funnAee/WzY57du4863umsqKCvMYc8/PzdbW1XwoKiosK25kb/18QIyMjFeXlBZ8+wXdhcAYTuaOjvb2kuAgOe21NzdzcHFkrmIWFBVNDA1rJFyQwIhHXr5FIRJLIl8OYmZl5nZ5+60Zk2JXLd2/f/vL5M3lBAsxKcVzLxcVFUQrRchmvrW09FwBNf05vhka3Y+Q1LqrtHalXS1XVmr1P/8jOXpiZIa9JFzgIog8RNjg4KIq9oKSstKarCx9LYQwZO4wfQ0PeXl5q69bu27P7QXR04osXj+PjwGFoqattdXQs/PKFbCce8Cn3794xNzHWUlO1MDM1MzZSXau0Y+vW9LRUSWSHlubmwPPnNFVVDHV1rdabGehoa2uoXwpZXU9AzM3OZmZk7N6xHQ61mZGhpZkZ/KZw5O/cjvr+/TvZiB+lJSVqPL07hchIT5dEIpJEXhzG0NBQxLVrhro61hbmJ48f9/Xx8Tjmpq+tZWdj/fTxYzgtyXbiQZ3ee3buoHoiw0nu5GCf+DxhSjIPVA8PD0ffu2tlvp6qKXU1NfzO+DTU15OXxWCmv7/eeT/HWIBKlFWrbe06rlwlW3DRdOJkhZEJ98bFvys1e52S2pOiVZWVPt6ndDQ04CAor/nd1Mgw/FpYb08PeVkAcOhEvCYKgh80PCyMRCJiI0uHARW/ve3Gwwf2l5WVcR7UptTW3n4jMgIq6byPyx4tn0ZJcTEYi13bt4F96evvp96/paXl1o0b4DY2bdwgvMJbLo/jYsEwnfD0+PTpE+frfPjwgZqaOeXlS7KdQvPz58/N9pvA0kVGRDQ1N1MHAQ5+clISJGUNVZUvBQVkUx4yX7/WVlejFXshgkRDIhFJIhcOY6C/f4OV5d5dOzMy0rnHxu3q7o6NjQHP4eHmKr7JKPj0ycTAwNzUBHJUUVFRTU1NaWlp9P37tjbWelqar1IYLuMxjx5CJtyxdcuL588rKiqqq6shn3if9FJXUXY9cnhiYoJstyJ6ox9QXqHcwLArPGJKhFbQZHNLd9Ttajt7js/4kZ1NXpMY8GNCDodM7utzOjc3Fw5CZWVlWmoqVB+QV8+f9RNyacfn1CnujLGk4KiSSERsZOYwRkZGwF64ubhwan1ePXv6FIoWlGcSs3zAXkB19SA6mvbOlOCjj7keZdBkvEhIgJY6lH/aB1HKzMyErwNNH7L18ukZ6p6Zk81lSdEBe+G4yc7l0CFBU8XGxcZCphBkMtLTUpflMECiXy9FVgz7HQZlL8Dc9w8M0E45SmB2IeeIaTIgHUEpBj/B91NeJifDqy+T/tLNUBxiHz3S1dJ8//497YNAjY2Nu3fs2Ldn99TUFNl6+SwuLjZ5eA68SFzBkN4zfX0996MbXd3IssQAe2FnY+Pu5tra1kY7CCBwXRutrU4e9xRkMtxdj3KniyXlvHsXiUTERmYO487tqG1Om4XYC0q3b92y22BDYpYJZByoyQTZC0qwA25HXXZu20ZixKC8rAySy/usLNpHcAtMt4aK8oq7K7vcOmDhb/zqSzJZZiX7nfeCvejt66N9d26BydBQVenu5nNxNT8vT/RLmiCwIyQSkSTsdxjH3d2hLAuyF5TAZNhYWDyOW+FY+1+rqqCMP46Po70tt968+dWQgDxAYsQgPS0NTu+PHz/SPoKjjs7O7Vuc4FuTAEVkenoa7IWHm5uQyuJb4zcwGWd9z5CYP5gfH5/49m2mv9/fz5c7Yywpj2MS90yrB9k4DDCbZsZGyUlJtBOFV51dXXpaWivrkHHrxo39e/fQ3pBXbW1tWupq4vctPXni+Hn/s7Q359XJ48eDAs6TmOXwfWSQGik8r5p+5+j7r+s9T25ERESGX4+Liens7CQvSJ26ujrwBy0tLbRvzavDBw9cD+Nzr3dychJ8Ia3MC5LqWqUzp/mM9IcwDssdRm9vL5wMlVVVtNOMV/FxcRusLKHtTiKXw7GjR6Hw0t6QV/fu3nWwsyMxYmBrbR0bG0N7c5rAM0GJY3wgh9nZ2eqvVQWfPpUUF60gn/AO771iXiYn21haLNkW/fr1K5wA3H3dKtdbFP7t791RUSkvk6E9w503hOiXg1ypAUV4kY3DyH6fBQ5jyZOGUlBggJenB4kUGSghxgb6qa9e0d6Nr/zOnDl90otErgio49XWrS0pLaG9M68+5uZCu2R8fJxEikzKl2SwF2Z+BtOz02TVHzW6l6enitIaKBhQwEDwj/Ka3w/uc2aqn+yy8Pf19T7pRfvKfJX5+rW+jjbfC7wXg4N5h/LkK/i+cARIGCJJWO4wIsPDD+7fRzvH+Kq7p8dAV+dTfj6JFJnu7i4436Amo70hrzo6O3U01EuKi0nkiigqLITGVVd3N+3NeXXK60SAvz8VJT79/X0R16/ramlCQtNUU6Xyyc6tW7PevhXxMZaBF4nlBoaTTc1kWTy2ODrcu3OH9pX5ytXlyJVLF0nYf/9bv2cvOIxWv7PT09Oi33iFzLOaH/1jHNk4jAf37x9zPUo7PwTp5ctkBztbEikyZaWlBjrawi+ZcpSfnwcml0SuiNSUlM32m2hvK0hW5utzst+TSJE5cf8YOAzf2P812aGFAVlA0INYkA2fP6NPcihpwDRkZ2fTvi9fDQwOmhoafC7g08mmo70ddp72dXgF333H1i0kBpEwLHcY+/bsfvTwIe0cE6QTnh63bkSSSJG5d+f2of37aW8lSP5+frSL9ssFzPq5s0tfEwXl5uZqqakyMr7th+xsKHp8b1PCSid7+7HRJQb6mxseLl6zFqr2Mg0t8U1G47dv8Llt7e20r8xXb95k6mtrkcj//rf5lDfsxrfDv24hhV25LEqjBVJK4DnGvBoCyMZh3IyMBN9NOz8ECc4ba/P1JFJkst9n2W/cSHsrQWpoaIDTS5wnzeJiYlwOH6K9rSDt3rljuX3BxqfGjXx0wGG8KXtNraHsBXfx4JWUTcbCwgJ8aJUIV6opbd3s+Do9nQT/ldw
PH4SbDPjuNpYWCjO9AvthucNwcrBPSkqknWCCdN7/7KWQEBIpMkEB54ODAmlvJUgPoqMPHdhPIlfE4YMHhPch46i1rQ1+muHhYRK5FDN9/YOvUnmrf8pecIoYr6CShnbUkiZjtLikeI0SIyYjPy8Pijnt+wpSfX097CTnsmjTCS/YB2pgj/Hxccg2wk0GvLpp4wbRDyMiCrJxGDEPH4peH794/nyLgwOJFBkoLbY21rS3EqTa2lo4w2bEGD0mPi728IEDtLcVpJ3bti33kbbP9QVUJ4zh8V/D1YEZEvG6n4rSGqmNw7G4uKi85veKigra9xUkyFZvMjNJMA+UyeDfnFJV2WBpifZCmrDcYezesf1xfDztBBOk06dOrmCgtnNn/S6GhtDeSpBiYx7t27ObRK6I/Xv3xjx6RHtbvurs7ISfZnBwkEQuxWByMlS9xf9Zw/38SEtzs3B7QQmqYVGc02hp2f9MRnMLWbt8PuTkiJ7GG5uaYA85BqjR1Q12oOnESWpRuMlAeyEhZOMwKsrLoIJs7+ignSJ8ddzdPTgwkESKTPXXr/ARotzFBL19+5b78toKgJpyg5Ul7W35qn9gwNTQYLm3gW+/vgn2Yvc18sxL4ovnIvZdgpJzKXTZzbUVY2pkmJ6WRvvKfNXb1wfHvLSkhETyo7e3NzwsTFdTQ23dWi01VU01VTBMDna2aa9eiWMHpUN3dxd8u6LCwsZv31bWr5BVsNxh+Pv6+vr40M4xQbK1tk55uewHssKuXDnlJVIfI9D1sDBPdz7DYooO5D14E9rb8lVlVRX8NKKXiK6ISKh6v9psJMt/EHjOf8lropSgFdEmwpjIoyWlxb//YTI0tVdsMsrLygx0tLmHNhGiwqJCMEmcsla/7wB8eouvH7UITExM3Ltzx0hPF9ot1Jel2jD6OtpRN2+Mji4x7TsbkLsn82XjMAAnB/v79+7SThFeNXz7BqdCc1MTCRMZOM8gj8TFxtLekK/c3VwvhgSTyBUB5662hnpOTg7tnXmVlppqpK+33CfyLyeFGProXEu5Qi2Cm+Eu88IFdbOEhhrk5drVK0cOHaR9Zb5KePYMKi1RCgykTrCkuR8+FHz6tIIzQcrAiQe7Ck1qSMSQvDRVVcAVWZiZJjx9MimtX0ESsNxhVFVWwnkONR/tNOMV1ZxYQYkoLirU1dLs7OykvSGvoEaEEvoyWaynypMSX4jYfAcjcsDZmYSJQFfkjV8OY8P/OreNjY0JatzzCrYMDhKpyffrdskfJqNcR29hRYN2QNkHQ5Cenk77ynwFe+Xu6koi//vfSlMz+Oium7fI8p/Mz89nv8+6GRl5KSQE/r57+0b8QdgkTWNjo98ZH+q6NSQWMyPD6Hv35OKKi8wcRnJiIqTdb43faGcJTT6nTu133ktilsnj+DjHTXa0N+RVXX0dONnW1lYStlIuBAYc9/CgvTmvDu3fH3H9OolZDpMzkz/HfsA/4+Pj3AV+SYFFq/76lXoTSUP1txelK8Y2p80Po6NJmKIACXHfnt18kzVYDXCWMnyQWExY7jAAOKPCr1+jnWY0Qd1/wHnv1cuXSMwysbfd+OjhA9p78iozMxOaxdPT/3vmawWAH9XRUF+y33Rff/96Y+Ost29JmAh034qCqrfK6n/jDL3PyoLzk/t0FS7Rh+ofLS4uUVYdyiC9x1YAZEtRnhLq6u4G48gZx29hdrZEaR18zR/vJT7eqESB0+Dwgf3QSqFdYaIuwzx68IBsx1Zk5jDm5uZOHvcEky7EZJz3P2tmbLTiWcrAmK83Mfb386O9Lbda29qcHOxhT0iMGLQ0N4PHjI0Rdus06tZNPS3NJQfSF05fXx/3qbakIHdIdOYnGr6nTzvY2Tb/OVg4X10IDDQxMPjJMwWiXAMtoUMH9vPtOEIJ0gR8697eXhIgV7DfYZQUF2mpqcbHCRwOC+wFtFiszNcPDgyQmGWS+OIF/IKVlZW0d+ZWU3Oz3QablbUiaFwKDYXsJPxJiiuXL0GWE/E5UoqeO3d/OQwLK7L8x/USOHTc56pwQd1GIkVgXryHP6G8QJlKePaM9sW5Bb+sr48P+D/u25Hw/1R7+/zyxwVgD2BSOXPf8BX8EDEPH5KtWYnMHAZAmQwo8LGxMbQOEzk52S6HDoljLyg6Ozr+MBm+fEewBnMDBfjIoYNitjY4FH75AtU531szUAbu3I7S1lD/WlVFtl4pw8PDtPNMuOAErSgvJ8GSZ3Z21t3VFUxGfX097SCAevv6ggLOQ5pubVl5/y92cjE4WIi9oKS6Vmmj9f8yuxzBfocBQBMWCmBE+HXeWrmmpua4hztkm57ubrL1iggNvgB5SZDJAHuxaeMGz2NujFx4h7x0cJ/zjq1bBd2auREZoa+jvdwxgnvuR4PDqDT73wN6L5OSluUw1FWUSaRUyPuYCz+rIJMBqfWs7xkLUxP5vUAoiKOHD4mSUtJevSIB7EOWDgMAk/H08WNba2t9bS2v457nz/mfOe0NlRPUxFCSxWzrU4DJgJxurK93+dLF8vLyjs7OltbWj7m5J48fh6rX0/0YU/aCIv/jR11NDXDT4DPAwYBzamhoeBAdDftgqKtTVlpKthODhYUF+AjaeSZEymt+F72fOSOAyTh14gT46+MeHjk5OXDAofBDUg67cgW8hbW5ueLZi4mJiSVzASU4LMVFhSRMfpALhwHAsd3i6KCtrgbtCqiTXqWkPI6PO3rkMCRiV5cjYtoLCkhNUADBKHMPIQrF/Orly5BnmLIXFFOTk3t37bQ0M4X2CadrPNSpGRnp+533wm7UVFeTTUWmLy4eHEa5jh5ZXv5Q/dBsI5HSAkwG7CHUuG/fvOEc8+6ensfx8dBKVEh70VBfryLapPPmJsbcF29YhYwdBgUcnc8Fn25GRkLRvR4Wlvji+QqGvBQCVMlgKY4cPMj5ScARB57zX/H8IMKBygZsE3dnTDAcSYkvxJmgiEZ4WBg0IzjvL0RgL2Q1VWBTU+OFwADuttGh/fs+ZGfLXXdoUUh8ngAnFeebChH8Ih5u/+uPJi/Ii8OgqCgvO+Ptvd3JCYre7h3bwy5fZrYGAh8DjRPqitQ2p812G2zgfwmd3uDXoZEKtklLXQ1aX1scHU0NDaC5EhkePtDfTzZaDj/eZRX9818VxqacagksEbTxuM9SIYKa/nYUvfvkshirWkm3MPgF4XeE/TQzMoSD4LjJTldL08bSIj42dskhOuSRAH9/OKloB5+voNEizfvgy4IVDkNqQFn9/v37z58/GRn/bknAJ0EKkMQTBL29vSLaWzj5igpl2WKGhDs8PDw4OMj+50vF4YDzXtqRFyJIHCRMfpAvhyEdoBjmfviQkZ6Wk/1elAc4xaS2pibr3bvMjAxoj4k1n+r8PO9MqjciIkS8jAGZR5zJqPufPSv8f/9ouxC8smb31OQkfH04CO/evgEfydq2u5jA9xL9qhI0WsSc9UJyrC6HIY9kVbw999j35edEsvwn169eFdIDiBKco24uR0gAIkkcbG1pB1+4JiYmSKScgA5DsRkcGNDR1IC6iv
ss5RWklADxxtVu8vAs/NvfQY3ungsSaHWMFBbOj8lx706KsdFR2pEXrl3bGZgeXBKgw2A7FxOD9b01T9znM3rP5YuhQkwG5ALXI4fZ/6i3YuC4yY52/IWLwVtm0gEdhsLzraFBR0NdiMmAlHLC00PMC8ALs7PUeN6g2i3b5hgd1OFHdnbRP/9VrqM3Viluh3rZ8vPHD9rBF66tmx1JJMtAh8F2DkbuBYcRlcF/oqboe/d0NTVo43tqqqqA8wi5EIT2QmqAmeP+CYQLfiASJj+gw1gNgMkw1tfjvT6vprwOnMeZ095M3V/uvHadMhmVZuunO7vIWvH4+TGv6Ld//3pPU7NZMe7jsIG5uTnaTyBcRw4eJJEsAx0Gq1lYWDDzMwCH8bZM4PwdMzMzaa9eOe/aZW1hDtXANqfNj+MUs+sTm3n7JpM3L/OV6lqlc37/G8lYXkCHsUqAui3r7dsdW7dwzlgwxKHBF8QcOICXgecvCv/xGxiCUnXNscpKsnZFLM7NdV4NK/x//4B3KzcwnOnrIy/IM3t27uT8BMIFP9CLhAQSxjLQYbCajsF2sBegpp5GsgphJaL3xldRWlNfX0fC5Ad0GKsNaLoMDg6OjIxIrjflcF5e8Rqlon/+a0KMx/rGKiqqNzn8eUXEfLpDQR5bzXr3TsRGC2zG2n5d6DBYzYeqbLAXRj46c/PLGLNPsYF8B1lvcGCAbV0ZHty/v2TfW9hAzCk3ZQU6DEUCavSuGzc7rzEw8KiYTNQ39D95ShaWz3RPD3XpAtR61n9ls5+wk7m5OWN9vSX73kJKuRAQQGLYBzoMVhP99i44jD3XtpPl1c33799v3bihp6XJKV3btzi9z8pizwAbwYGBQkwGvORkb8/sWC9SAx2GIjGQ8Byq5FI1DbLMPqa7u0W8dtJ4zKPSzPxnTg5ZXg4zMzOvUl7u3rHdar0ZCP6BRfY8V/+toYGa7UyQIKXAPrO52zg6DFbj88gLHEbQs3NkeRXzOC5O9Y+plnnLmDlrRvSDnBgUcB6aHWp/naaIWrN1s6NczBDNF3QYisSPt+9+tfv/8RtZZhkz/f2F/+8fxf9ZU21r1+h2DPTN5Wj9Xue6XXvIFlzMjY7yDu8hCk+fPIbmCq2bPGQYXS3NJ/HxZCNZU1Ndrammyvd2CayElMLyGZvRYUibLwUFHm6uVEMcTm4HO1s4mwW1az/V5kVlROZ+XYk9VySeP3sqZHi7P2YU02dkjHlGaG9rA5/BfTHj8IED8jhSODfoMFhLR3v7tatXjh11OeC81/PYsYfR0UvOKThSVETdWRBzWjIJ0RsTS+0eTUX//NcCQ8/HPXr4gNYM4Ba89OgBW2Z+HujvDw8L09ZQB0uhra6mpa6munbtRivLlJfJ7B/GEB2G9Gj89s3OxkZPSys4KPDjx48VFRWFRYWxMY8c7ezg7ImLiSHbIX/lY27ukqPnwgbmpiasKm9zc3NDQ0OQHZid+EZWoMNgIQ319S6HDsLJ73bU5c7tqEcPH968Eblj61aoivzO+MDpR7bjYeLbN6rOZme/yMW5ufHa2sGXKR2XrzQdP9F04mTzKe9Wv7M996PnmWiyJz5/vmRKAZPxPOEZCWABU1NTn/LzM9LT3r7JFH/6TKmBDkNKgL0w1NUJvhBEm0WW0ps3mbpamtH375GtES62OW2mFX6+gqwKxY/EIEyDDoNt1NbU6Glpnvc/WwuV8V/zyefPnw/u32dvu1GQyfh1G+IPh7GyKULkGmiH6Gioc6cOQYKGn2I0D2QIOgxp0NPdDfbi8qWLtETArfz8fDAZCZjB/0pjY6OIM7CAwIuQMIRp0GGwirq6OrAXkeHhtDTCUV9/v9tRFzAZP3/8IDFcLMzMUA5jOC+PrFo1pKel0vpeCBJslpaaSsKQFYEOQxpcvhjqcugQLQXw6mVysrGB/qysB+L8MTT0taqquKiwrrZW5r2Ub0SEC7ldyqs+hRhsh4Wgw2AV+/bsDgo4T0sgNIHJ2LVje8S1ayTmrxT/rgQO4/vqq0F3btvGnTGEa8eWLSQMWRHoMCTO5OQktDbeZ2XRyj+v+gcGLM1MZXipv7SkxMPNVUVpjbryOk1VFdW1StrqalcuXezqYmZY3xVw9swZWpkXIvAiNdXVJBJhFHQY7KGxsRFK6LfGb7QEwquMjHRDXR2+l/q/HXZp2H9w+NMnsiwZerq7w65cNtTVhayivOZ3XU2NAH//piZZjh9oqKfLnTSEy0BHm4QhKwIdhsQBx2BrbU0r+YJ0IzLi4D5nKnBwZJD6RwrMzc15HnODGpr3lgTkMlj5/NnKR8URB9/T3rT9ESLYVTnqAyVfoMNgD8FBQSc8PWipg68GBgdtLCxkcql/dnb21IkTyn80V7gLKZVk9u/dI6uZDYSPMEGTlpoqCUNWBDoMiRN9794x16O0ki9I6WlpG62sIGpodEjfW9PS36Sx5xv1PpID7IWHmxvfR645Ul2rJBOTEXblCq/pESRoJHW0t5NIMVhcXBwdHfn+/Tv7HwaTGugw2MO+PbtjY2NoqUOQfLy9wwXcKJEcYC+OHDwoJKWA7XDcZCcTk2FqaEDbGSEyNtAnYciKQIchce5ERYnY4AC9eZMJqRyiyppKwWGARiclPkbT5Yuhwu0FJTAZXwoKSIy0KCstFb0fhrWFOQlbKY2NjUEB5zW5OoJt3+L0Oj1d5p1jZA46DPawdbPji+fPaalDkOB8DrkQRCKlBbSplkwpYDI2229iarJW0TnldULERgtsdvK4JwlbKcPDwzGPHm5z2my13gwS1N6dO18mJ0+xe5gsBkGHIXGePn7svHs3rdgL0tOnT7Y4OkBUUsFzsBe2Qb+uZ0iU8fFxUewFpf17+YypJ2mgWNJ2g6/AFogzEh94CA83N8gpvIYGjo++ttYqv/+CDoM97N+799HDh7TUIUjeJ70iw6U6/0hDfb2IVbi6inJO9nsSJi0qystV14rUaIE2VUV5GQlbPtPT02fPnIE34W6xgGBRS001MjxcclPKsQe2OIzR0ZG4mBgHW1s49PCTGOnpnjpxorxs5b8ue2hra4NvVFdfRyv5fHXA2flGRDhEhb28BA7D4+5R6k0kx7MnT2gFQIiYug2xLBKfP6fdyuUrOHNWfNEVcsHBfc7CnZaGqkpZaSkJWH3IncOAxnH2+ywf71NHDh50OXTQ74xPwadPipHTw65cOXp46WfTQH39/WbGRlnv3pFIqQDVqujXHffs3EHCpMhGaytIZbQ94dUGK0sSsHwmJyfhq4GFor0nR5DTfE97s2dOJQnBCodx784dqB6cHBzi4+K+fPlSVlaWnZ3t7+erpa4GDXrpV2mMc/jgAeGDYVCqqKgAL0I9b+l25wg4jOspV6l3kBy7ti/j2S2ogx89fEAipUhIkLAZxUCwY+JcYzjufkxILuAI9qGzo4PESBhoCKanpb5ISMhIT2tpbiZrZYd8OYzHcbEWZqYmBgbBF4Ki79+7f+9u4PnzBjraULW8TEoiG8ktnZ2dkCiqa2poCYRXSUmJ602M5+boMzPPj0lq+r252
VnR7QUIanrpj/e/5IxiINgAyiAJWCbgG3Zs3aKuvERKgawFJoPESJ6pycnu7q72trYlB5VnENk7DGiyG+rq5ORk08oGqK293dfHB0qIvJuM/I8fdTU1Pn8uoH1BbnX39Ozbs/vkieNUyIZAC3AYLz9LPBvaWFjQznshgnRwPSyMREoRaHqe8/ODrMp79RVKKVT84lxdANMgSoMGpKa8LjgokIRJBqgMXqenb3Gwhy8LDhucN/yFbw1GULazyMqRw7h8MRS8xfPnCb19fbQiBm0YPS0taNKQTeUWV5cjp7xOcH87XoERcdxkd/f2bRLzJ3PDw8X/93vd7r1Tra1kFXMMDg5yF5klBYW3orycBEuRXzOK/fFAPm1/QLASXhLnuXcoqsJbRBxB0W5uaiJhEqOyosLL05M7ecKJkZaaKoXuZTJ2GA/u3wd7UVxcTCsb3PL38wOTIWSMfbkAyrm+tpYgkwG574Dz3u1bnKi5N4fHf4K9AFU0S/w+0QYrS85pt6TgHI24Lu1+6RzKy8qOHXWBfQBXoammCk0lOKS3b90U89wICQoC60D7poIEiUNyHeChwB86sB++He1DKcFHgwGVfs84CnlxGFcvXzI1NCgvL6cVMY6gDILJkPcR+sEWmxkbnTt7lvbtOAJ7sXvHjr27dk5MTJCYP+m6cbPwb38v+vd/wGqQVczR29vLfdIuKTjbS4qLSLB0aW1pOXPam/ITkEyof+AvrBTzqiFkctrXFCTVdWv9/XxJmASYmpqCmgW+FLe9oARH3kBHZ8XXaURElg4DTn1o2b99+5ZWNni1Z+cOqEhImNwCJgOapHA+lZaWcr5ae0fHvbt3bSwtOPYCqGqtoBzGzzE+I/4yy/69e2hnnhBBCXwcF0ciZcRAf39Zaenngk/VX78y4sGXNQIPHAEJ3dUWbi8owauyMhly4TC+NTSADxNiLyjl5eVBzpX34V872tvBZLgddSko+Eu7pX9gAJqnWxwd+NqL2e/fS1XUwGG0njtPVjEKfCL3GbukoOaDX40EywJon8BZfTMyEgT/iN+UFf2aKCUwN3OSuZYA9gLOAeH3f3U01CVqMmTpMBKePbXfuIG7bAhSSspLaJdI6GeQJhXl5d5eXnBK2dpY79q+DbKAlrqak4P9y6Qk2vjcQ6NDFS3SuHgIyUhDRdSenpAOBgcGSKSisKzbxuARE188J5GMEh4WJkpfEDAZMbLoCiMXDiPwnL/XcU9a9uArTpdquaa7u+v82bMaqirbnJyCgwLDrlw5f87fyny9kb4efDteewE0urqBvSj+XWlaYr0fRJyqkJKxvp6sLstJiJLiYuHtBF59//6dBDMKuE9RUgqYDMndIpClw4BW+4PoaFrJ5yuqR3Tuhw8kUs6BSvp9VtbL5OTMjIzqrzJ+BnJmZkZXS5N2zvEV2AvPY24kTIHQFPlRGhA4jJSXySSSOaanp7VFm+8RZGJgIP0OGex3GGNjY/BT5ufn0bIHX4GxhmqYtwukPDI8PBwfGxMUcP6Mt3fIhSD4aoJGiht6+xbsBagvVoJXIt9kZopYxcLvFX1P0SaULi4qBM9H+6bCNdDfT4KZo62tTcRLKfBj3b0dRcKYRpYOw8RAPycnh1byBWn/3j0JT2UzcLWCUVJcdO3qFX8/X0hJULx7e3sf3L8vSr8kaOtXf1XAuZ43LqcnipryuqLCQhLJHOBaRDc60C75kJ1NIqUF+x1GQ309+D9a3hCkzq4u+DqKd0FOCFMdHaWqv+6P1DhtZfap3e7uLjgh09NSc7Lfd3Z0gG8z1tcTpXqDtMN36le5pqlpGdNBg+BASWLs4OCgQNGvzhrq6kjIbcvSYejraIvY4AAdOXggPi6WRCIrIjkx0cbSAk47TgEA9wr/ux45cvrkSeEmA6LyPuaSN1Isnj15Inqbw9zEWBJjKhx3P0b7IOG6EBBAIqUF+x1GRXmZkZ4uLW8IEWR2aOeR4FVAi68f2IsSpXVMPUICBaHg06fDB/bDkQRvp/3Ho0/wv/PuXU8fP9bRUBduMiDhFH75Qt5LsRBxkEAQHCI3FxcSxii6miJdmaYEv0VxEfMNJ0CWDsPGwjw19RWt2AvSFgeH1JQUEoksn+BAgUNKwFkOL5068WswXd77dtC2BimqvQAmJychM9K+NV+BIYPUScIYZe+uXbTPEq7j7u4kUlqw32H86uapokzLG4LU0dkJX2dwUHqTC8qcxbm5Vr+zTE2mCvbC1+c05A2+NgLWH3B21tHU4Hu7BF6FFoui2gsgKfGFiI0W1bVKkhjHb2FhgfZBwgXuMPP1axLMKLJ0GCEXgjyOudFKPl+Vl5fDGdnfL999v2WIEHvBEWyQkZ5+7eoVaHxwVoIZT3j2lG+XMUXi+bNnUNQ535qv4Pg42G4EO0JiGAUagrSPE64z3tIbqIeC/Q5janJSV1PjfVYWLXvwVeKLF+Ymxgo/oqKEIPZCaGcL8BZHDx++feumoa4OlB0tdTWoxmAl/L16+XJ3dxd5L0Vkenp6vbGRKCll947tJIZRlusw4NfJzMggwYwiS4fR2tICvqGhoYFW+Hl1/py/+DPQyAuf6wtSC1NaehkbxhHaCnCcaacUX1F34+bn538MDfV0d3Oenl0NPE8QZjIgF2xxdJDcSBjBQYFL5iOOYGdu3bhBIqUF+x0GcCk01M3FhZY9+GrX9m3378r9uFuyIj42VpSHFGCbqJs3IaUUFRa+Tk9PT0v9lJ9Pe2hOUenr67MwMxWSeMFs7dy2VXKNN9F7joNgZ758/kwiGUWWDgM4fPDA0SOH+/r7aeWfW7m5uVpqqmyYo6Szs/Njbm7m69d5H3Mld0HlTOwpfW/NwKf+ZFlsjh4+JPyGKEdwnr17+4aErT7gu1ubr4ekwN1RS1NVBWr0c35+Er2QU1dXJ3rvMGWlNV1d0m4CyoXDaP9jDqD8/HxaDqEpMzMTflOw0SRMdiwuLoKPhz2Ro6daoH1sZmTIfUIKkZ6WphQGjmQng4OD0CyBE5LWeIBzDxKyq8sRiZqt82fPqorc01NyP5OMHcbQ0NCmjRuOuR4VZDLAXuhoqD+Ol+UoT5AFwFK4HDoIJ8pGays4aWwsLeD/4+7ukugd43RxEziMJ7krnyaUm97eXtGrLtCOrVtI5GqltKTE74zPnp07tzltPnLw4LMnT8bGxshrkgTOK9pvwVeQmw4fOEBipIhcOAwg+v49XS1N2iBU3Hr37h20WBJfvCABMqK5qSk4MFCba3YMONmgAcPgjZufH3IlMegF7CTUkZzdFi5otLxOTyeRq5Lqr1/PnPamfmgovPra2pdDQ6XQxbixUdRHWuA3igyX1NgwMnYYAGUyQDGPHnV2dXESQWFRIfww8OVlay/m5+dPnjhuoKtzKTS0traWs3uVlZWB58/ramoEBwWRTZlgfGqcGs2z6Bsz3uVDdraI3RgpQQueRCLSpfDLF1pbh68guctkDBV5cRhA9L27YDLu3I6CPM4psKDGpqaI8OsytxfQWDx14gQUNGii5ORkNzc3t7W3l5eXX7540UhPF9owjEzD
NFLwuei3f5fr6E02t5BVDBEUcJ77hFxSq+cGt3DAO0JjlSxIhf1794jiBaGSldwledk7DGBiYgJainY21pAXtm523L1zh621NRwa39OnZTsAA2UvoMw3fPvGnao4qqyqWm9sxKDJ4IwXPjTKzCXctNRUraVmEaRJji7YKhg52e+FmwwoFJIYjUMU5MhhAJmvX0MmATMB5ffK5UtQeXseOwZHb8+undAEJxvJArAX7q6uDna2dfV1tGQC6unt9ffzMzM2EtNkjH+tLl6jVPi3v1cYGM0wPebHcp+s3rdnN4lEpMvY2JiTvb1wkwH2oqJcgj0QWOEwOMBXzUhPe5mc/D4riw13SaNu3txgZSnIXlCqqqoyNTJMeMbMaGDJBS/AXmwMtCDLYgNHclkOA2o4EonIgg85OYa6OlDsab8LpAk4zST0zLooyJfDoKisqAi7fNnP5/RZ3zPh18IkPcmTKJzx9gZ70dzcTEsj3DrvfxZMxor7WU+1tZWqqoO9KNPQmpLArNTeXie4z8wldeSgDG7qIRRgMrY4OvA1GZqqKmDBJWovAHY5DFYBrQ0jfb201FRa+edVbGyMrbU1I1fAriSFgsPwvMfY4NytLS3L6odhZ2NNIhEZMTc39/ZN5s5tW7X/mLddR0Pdefeu3A8fZPtopTw6DLbR2dEBDr66upqWQHjl5OAQ++gRCVsOi/Pz1Xb21MhaE3V1ZC2jRIZfF/HZNBB836AAiUyxhogIVEz5Hz8ecN4LPwf8cOA2lNf8bm5q8jg+Tgo9zNBhCCQ9LdVqvVn/wACt8POqq7tbX1vrS0EBiRQDl1sHwGHcSLtOlpkA6iruMi9EGqoqSYnM36KGqlEBZq1b5aDDEJ+rly8dPXKYlj346snjxzYW5ivwlN1RUWAvQD9zcsgqpmltbRXx2TQQOIzamhoSiciUwcHButraqsrKtrY2qfUIQYchEG8vr8uXLtJKviD5+py+FBJCIsXgWsqVfeG73pUz+cho1rt3wm/FcaSlpjrF3IhSfX19EdevGerqUG+urqLsecxNEgPYIVIAHYb4GBvov87IoKUOvuru6dHV0lzuFeyJb9+Kfvs32Itm79NklWTYu2snVaiX1Gb7TSSGCaampl4mJR05dHCLo8P2LU4nj3sWfPok5e6TyLJAhyGQwwcPRN+/Tyv5ghR25Yr0h1kUkfn5efguvLf2aYLWxquUlyRGPKDtdd7/rIrSGtqHwhr4lE0bN/RKbOZoREKgwxATKIZwAKuqqmipQ5Ac7Gyz3r4lwaKxMD3dHhxSrm8wL+Gr39BOEOVGCRT2/Lw8EiMec3Nz4deuaaurcU8QqPzHdAfmpibpqalkO4RloMMQiKvLkbt3btOKvSBdDA3x9/MlkexjdnYWTIaQYfggF0DjgGwtHmAvvL28hH+WmbERmgz5Ah2GmEAdCQewuqaGljoECVr/bzIzSfBymJfKGP8fc3OFmwwo5kwNRA2HzsPNTV1ZSEpZ+zhOloMaIIJAhyGQoIDzvj6nacVekI4ePnQrMpJESoDJycmkxBc7tm4xNzE2NTK0t914+9bNgf5+8rIIgMm4evmSlpoqrREAacLG0iLr3TuyndgEnju35IjCv0yGkaF0RrJaGS3NzXACbLC0NDHQtzZf7+XpwYZRZWUIOgzx0dHQ+PDhAy118NXA4KCxgb6k5wYrKiy8Hnb1nJ/fhYCA6Hv3erq7yQuikfcxV09Li/fiqIaqio6G+srsES+Li4tgL0S4BLv2yWNmRimUECMjI0/i40MuBPn7+oZdubJK7u+gwxBIRXm5lrpaW3s7rfDzqra2FqpMRsbJ4QXOwhsR4WALuJ0BCIqcitIaz2Nuy6qnpyYnE188P3Lw4BZHx13bt50+dbKkuIi8xgT9/X0iProCaUi2Y6kJAnyb865d8C24m2h/3N9Za7fBpq62lmy3ykCHIT5+Pqd9vL1p2YOvMjLSDXV1ZmZmSCSjQEqBqo6aNYNTWiGfKCutObjPeVlOGtotr9PTnRzsIQHCm8BfaPy8Snk5PT1NthAbqIlFuSMDgs2gFidhbKK3t/es7xnYPU4Op5p28BMwNcwBa0GHIYytjo63o6JohZ9XocHBLocOkhhGWVhY8PfzFdJPE17abL+JPeUqMjx8ydYGRxamJmxz8WAvoNhT6ZKvwHSuzr7x6DDEp6a6GuqYltZWWgLh1aED+yOuM/lAGYf5+fnTJ72EFFKo+XKy35Otl4OEnheDQ0HbQ0GCYxvz6CEJYw093d2mhgaCUoq6srJiP82LDkMYH3Jy4KzNfP2aVv659TwhAbaR0CX0c2f9lqywwWRA21pCzZ3lYqSvR9s9IYJcVlFeTiJZwPj4uHB7QQlMRmtrK4lZNaDDYIS9u3a6uhwRPtdjXGwspBRo+JIY5oAWy3H3Y0umFCgCTN3jEJPenh7l5QznY2ZsRCLZAfyIQuwFJUjgFwICSIDCgQ5jCV6lvITSnpyURMsCoP6BgdiYR/AqI+MQd33v3H55s1/c6eHxn9Sazo4OEZ87h5TB1GMg4rC4uCj6g/IgqKpZNZXrk/h4jb/eiuIryBdnz5whMasGdBiM8GNoyN52o5uLS29fHy2fUIqNjdFSU13WVNpzP392RUT+yHq/sNS9iez3WUIuiHJLW12NDdOsfykogARL2zfhYtXQO2dOey/ZYgGprFWqqa4mMYoFOoylSXv1Sk9L09bGOvr+/fLy8rr6upLSkqhbN63Wm0GTnalpDnK/5uh7axr56MzNk2lBQi4EqYmWDkCwe1SUDKGexxNdkMXYM+8i2CMLM1PaHgqS2rq1w8PDJHJ1gA6DKcBkONjZWpqZ3rp5k3PHpK+/H5oxe3bugAp1WfYCGKusokbZmlmq6/eOLVu4T2Mhgt1IfJ5AwmRHTvZ78Fu0fROuFY+2zjg/f/6EREHbPb4CF+Jz6iQJUyzQYYjE5OQklLfN9psoQwp/d2zdmpaayuC9iYdZ98Fh7ArbSpb/+99lmXfYpfp6iQwSvCyWtc8aqiqf8vNJpKyp/lolSmuD0q/8++I5iVwdoMNgkNnZ2Yz0NMghUAOZmxhbma8Ht21mbHTndtT379/JRiLzIzv7l8P4f/9YFDoGqOjTeVOysWBsdqQVU1RYKHq/LkrQziHBsiY+NkaUa6KUIPmMjY6SSAUCHcaykdDFw7PxPuAwzj0mg2qAp6GdgsIFTj//40cqVoZ4eXqKnsWgnoavSSJlzbJaS1Ax3L51i0SuDtBhSILW1laoRAs+far++nXFcxoP5+VR1zAWhDZ4st69W9YkiFCQSaTs+Pnzp+i+H2S/cQOJZAEB5/xpuydEcLS/NTSQSAUCHQZb2HHVCRxGzPsH1OLExATtFBQuqB1lOy01RWVFhYgZQU15XcgFxma9F59lTUILDiPq5g0SuTpAh8FaxmtqyF2Svj6yih/paanay3EYoBWbHgY5fdJLxJSioarChu5oHGDPaXsoROrK66oqK0mkAoEOgxXMzM0YntYGh5FX/T+XIOI9PEpQCKu/VpFImbJ5k50oGQG2aWtrIzEsoLy
sTPReL5qqKqutrkWHwVpmensphzEudLAWaIEs6yYm1HkkUqbUVFeL6DB0NDUYHIdDfC6GBNP2UJjW/K6QT6ihw2AFjd3fwF6Aur53klUi90OmtN7EmCVjS/T19ZmbGAvfc3iVVU+RAPPz80Z6urT9FCQVpTWDAwMkcnWADoO1LMzMUA5D+HyqIyMjojdalNf87nrkCImUNRHXry35CAx8NTbcJubmQ06OiE/ugIwN9NnTg4RB0GGwgrdlmWAvzPwMuOdr/tbQIGKfBmiaPIln0Yi5lMng22BSU17HQntBce/OHVG6lcGP4ul+jMSsGtBhsJnK9RbgMNpDQsmyAAL8/UU0GVBIS0tKSBgLEGIyqPEx8z7K/h4xDUjmpoYGtL3lK0g7D+7fI2GKBToMVjA4Mviu/E1SAf3xBNcjh0UZHsfE0GBCKtMdiQ7sT8LTJ1brzaDwa6mrgeCLgOe4GBzczqabI9z8/PFDX1trySE94ButwmE90WGwmbYLwVVWNr0xsWRZAK0tLcIvLlKCbRzt7EgMa8hIT7PfuAH2jfMVIKUoK61xOXSQtT0Y4mJilpykCQTm6edPMgySgoEOg9XMzMwcPrBfiMlQ/eNpN0kM/8cU1V+r3mdlZb5+/bngE3ueHBFEc1OTnlCTAfaCPU/YShN0GGxG9Duk2e+zhJsMOMOtzdcva1ZFaQJm4kZEeIC/f3BQ4P27d5c7W5uUWVhYOHPaW3grEewFs5NDsQp0GGxnbnbW59QpFaU1tNMU0oTKWqWtmx1XW4cASQMmw9LMDIo9zWfA8dfX1lqd9gJAh6Ew5OflgY3gW+3BSltrqx9DQ2TT1YTwZ31XDJgM39Onf6VrnlvecLQV214A6DDkg97e3hsREaZGhpAa4EzV09L0O+OzOqfgkgLQIvxc8Onwgf3a6mrgMzRUVcDJZWZksGpAYimDDkOR+P79+707d4wN9H+d3irKkFXgx929Y/u7t29W20k+XlPTHhxSpqXTdZPPCDctPr4/3r4jC2JQUV523N0dUjfl7eAfIz1d+AkU3syhw0AQZGnQYSge0LxuamosLyur/lrVJ3QsDcVjcX6+/8nTSjNz6jEcEJgJ8tqfLC4sFP/f7/BSjdPWyaZmslYMfv74UVNdXVZa2tjYqJBPjvCCDgNBkKVBh4EoDD8/5lWZW3K8RbmeQcflqxPfvpGX/2R+fLx263Zqm6J//+d7Whp5AREZyTqMgf7+W5GRG62sdDU1dDTUbSzMw6+FsbxvjvQZnhju/dE7/+eEZ9Lkx9DQvTu3Lc1MNVVVNFSUjQ30LwYHd7S3k5cR5E9Y4jCmp6dfpbz0PX3azcXlhIdH2JUrrS0t5DXkj2b3SJEs7+vPzc6+Tk8/fdLr4P59rkcOXwwJaeSpuWXL0Nu3HG/R6Oo2WlwsvJ/sj3dZZVra1PadV8NYMuyQvCAphwHn2fmzfmrr1u7bszvxxYuCgoLPnwtevkw+fPCA6lolby8vNkwNzBIS8p7oe2tuu+xIlqXCwsLChcAA+C3AWEDNwZHaHz0cXQ4dHBsbI5siCAscBniLiGvXDHS0bSwsLoWG3rkdFRkR4e7mCknm0P59FeXlZLtVzHRnV7m+AVSE47LooTU3N3frxg19bS3ulEL1cNy+xamosJBsJ2sW5+YqTc2q7exHS8vIqqWYHRyscdhMmYymE15oMkRHIg4D7IW7q6u97caysjJqbmJuVVVVbXPafMDZWXImA85mL08Px012G62sdmzdGnXz5gpmLJQaEanXwGEcu+NCliUP2IuTJ47TvAW31JXXOdnbo8lAOMjWYYC9gMaJo53t64yMgcFB7nzS0NBwMSRYS0218MsXsrUE6Ghvv3v7dsiFIND9u3e6u7vIC2wCar4KAyOoBVt8zpBV0gLsxbGjLkJSCliND9nZZGtZM9PXv1yXsDAz03Ti5C+T8Y/fxhRxAhEJIRGHAbU72Ivm5mbuRMCttvZ2cLXQ8iABzFFWWgrGQldT40JgYHJSUnp6enxc3N5dO6HKPO9/llWj1nPwjfUGhxGcEECWJY/fGR8huYASZTJWSXckZElk6DB+jQpz8MAWR4fWtjZaJuEoNjYGTIYkGspfq6rcjrpABQn7EHj+fMC5cwf374NFT/djdULnAZEJPXfuQi1Y/H+/z0lxKnDIEsLtBSVWmYwVAKak9az/j/dy/BWkD/MOo7amBk61b43faCmApra2Nj0tLWabHRXlZdrqauHXrnV0dtI+rrCo0MnB3vXI4Vn2PYt1IGIPOIz7b++QZQkDrTEhI0pxC37HrHcMPKmFKAAydBjJiYnw6ULsBaWoWzc3WFkyewUbEpSmqkpQwPnqmhruz6qqqvL384NsU14m6pV26TA79KPot3+DyVhyfE8GyXr7Fhok3KlDkPR1tKX/NKw0zRZCg3mHcdb3zOlTJ7lLoyBdCAz0cHMlYWLTUF+vo6F+985t2qdw1NLa6mBne9zdnQSwhg2BFuAwUgtTyLKEuRgcLPokoju3bSVhyOpGhg4D2gb37t6lFWdedff0GOjqMDgkGrgH8BCxsTG0D+Lo3p07kHNYMqcxh6bjJ8BhVJqZk2XJs2PrFu6kIUTQaMl8/ZqESYX24JBKUzPh89ojkoNhhzE1OQnnUEFBAa0o8hW0A1SU1gwxNOSIp/sxMDe0j6CpsbERUkZFOYuaHTOz02AvQEUNEryLzAFaeFpqqrRiL0TKa37v7PzfdK/IqkVWDqOyogLKbHtHB60s81VocLCbCzP9maCk2FiY34iMoH0ETVcvX7a33Uhi2MFoadmv7gJ/+/tYpTSsT3NTE6Rx7qQhXNucNpNIydP74CF1KMBnkFWIdGHYYXR3d8E51NffTyuHggS5o66ujgSLwUB/v+papYqKCtr78wpcyBlvbxLGAjoHOyiH0dbfSlZJktHREe7SvqQ0VVVYPqgt1ARjY2MzkhnxF+EgK4fxIiHBec9uWikWpLdv3liYmpBI8Sj49MlAV6e7p4f2ETSB/9bRUC8rLSVh7KDcwFBq1Wrm69da6mrcSUO4IKWQSAkz0dBA3TBqOHBwcU5SYwEwe1dO8WDYYbS2tqqsVaIVQiEy1NVlZFq86Ht3nXeLlIYKiwrVldex5ymJ0qYSymFMTktjctTv37/TCrxwaampQqolwWxibm4u+33W4QP7Oe0nI329sCtXOjs6yBYIo8jKYcQ8fOh65DCtFAtSfn6evrYWiRQPT/djwReCaO/PV/5+fqdPepEwdtBx+Wq1veNAUhJZliTJiYnLvSxKIiXJwszMV+sNYC/KtHXnRkbI2qVoqK8PDgzcu2vnFgcH5927LoWENDU1ktd4mBsdpY7z4sICWYXwwLDDGBoagnOopaWFVgj5qqe3F+wIIwPmnD979mJIMO39BUlt3dqWZgaGgGWK0cmR5t4msiBhZmdnuUv7koJj9bWKXbeZgbraWnMTE1NDA7AUJSUljU1NtXV1GRnpRw8fUl2rdNb3DPgPsinCELJyGInPEyDX04qwIP26hmFmSiLFY72xcVbWO9r781V6evoGK0sSxg
6k2bB+k5mpvZxrGGBHSKQk6bx2nbo/MvypgKwSSvXXqp3btqqrKHsd94yPi0tKTIyNjQGXCQlw765d4DzIdlyMFpdQHzGY/JKsQnhgvqfnZvtN0ffv0QohXyU8e2Zlvn6BCQMIbYhrYVdp7y9Iulqaq3nOMNG7ZYHgWLHtBkRVZaWelmbY1St8b8ZVV1fb2lhDamDhQ0NyjawcBqR+qJPalnqQhFJwEGOdx3U01D9/Fqk/2YcPH4wN9EnY6uPXdevl9MOQQufx2R8/iv+zBur+tvMiDQFQVlqqraF+KTSUd4SFxsbGoMAAfW0tvv15G/YfhE+psrQiywgPzDuMpMQXttbWtN+Jr7Y5bX704AEJE48LgQGB58/T3p+voFpSXvO7hAbG7uzoeJ+VlZqSkvXuXXtbG1nLMt69fbPkk+uUYLOI69dIGDvo7u6CXBB16ybtZ+VWY1OTg52t3xkfEoMwgawcBgCJ4s7tKNqvzKuu7m6oCb58/kzCxMPU0CAnJ5v2EXyVmZlpbb6ehDHN3Nzc0NBQb2/vxIQ07qKujD07d3DnDSH69QD827ckTGJ0R0VBxQ8mA6wGWSWYivJysLDR9+/TflZu3bpxAxwn7/AnI18KqcsY49XVZBXyV5h3GFOTkwY6OsLrAFBcbCz8rsPDwyRMPNLTUq3M19MG++OrlJSXpkaGzF5FX1xczPuY63LooOpaJfuNG8Gk29tuhP8P7d/3ITubkYs0DALf3Vhfj1by+QqaJmybcfHyxdCjItyVr6yshOPPzoEX5RQZOoxXKS/NTU2EjOBHKSL8up2NNYkRm/3OeyMjlniQhNKVy5eOHj5EwpijuakpODCQ+wbEru3bXqenS388iSXJyX4v4ngYRnq6UriDuTA9PZjyqi9WpBFB9u/dI8od9vPn/F1djpCYP4HMT3WqbT0nvfES5QvmHQZQWVEBjk9IsyM+Lk5TVeVLgUh3yERhZmbGQEc78/Vr2gfxChIHGFISxgRgIPx8Thvo6lwKDa2treV8UH19/dXLl6FEnTxxnG0jY1Jjh9AKP01QQ7/PyiIB7GB8fBx2+/3795yDLEQuhw5dvXyJRCJiI0OHAXXqsaNHHe3shJgMaINCZczgMx2ZGRmWZqb9AwO0D6Kpr7/fxMAAqlgSxgRQB5/x9lZbt/a4u3tOTk57R0dPby/klps3Ii3MTM2MDNk2lijkQK/jnkteGYVvxOBoJYwAmRC80ZLjQ4Lg+ENKbOO5Mt0VHgEOo1RVnSwjf0UiDgP4w2RoHDl08M2bTO7fKTs7293NlVl7QRF25co2p81d3d3cH0cT7AycJQy2yyl7YW1hXltXR/ssSg3fvm20tmKtyeCbFFTXrWWhvQBgl+BQ046wIKWlpjL14CICyNBhALN/mAy7DTYpKS9p/W+qa2oCz59n1l4A8InQPHjx/Dn3Z/HqcXzcehNjBks32IuTxz1tbay5myscgeO5FnZVX1tLRJOxuLAwx9B1YuHAEQCToS7YZIC9yM/LI1uzhkshIV6enrSDLEhuR13Cr4WRyD8Z/1pN3SiZapXGcANyh6QcBtDd3QW1vr6OtqWZmfPuXfv27IbqAWq1iyHBkuijMDExsXPbtv179wp6hB3MjZaa6ktGn+CKefQQGjqC7AUlMMgbrCzv3I4iMX+lqrUivzZPOoNh0Bjo74+4fl1XUxN8BhwZkKaaKjj682fPCnlGS4Ys67GCwsJCqT15vxqQrcMAoMq/E3XL1NDA3NQkKDDgRmQEpBdow4AbhtRfI4Eb4empqVAowNfSTi2OMl+/hnPs3ds3JIAJggICwF4Ib1WHX78GJmPJllJ31O3i35Xqdu0hyxIGTMaD6PtGenrc7RYwFsprfodiy7bh1Sk8jrndunGDdngFKezqldOnTpLIP1mcmyv693/AYQy+SiWrEC4k6DAopqamcrLfQ93wIiEh6907ifZXGhsd3erouHWz48uXydyXNxsaGi5fuqilrpbAaH5cWFiwMDN9npDA+SBB+tX5w9CA7w1U74cn9L01ryZfJMtSZ2ZmJvfDh0RorD17BrlydFTUZ8elD7jD3Tu2046tIH369AnsLIlExEbmDoMCChHU6wH+/ic83H28T92MjJDomLNQKLTV1eLj4np6e7nPLmjGPHr4EPxH6ismB/sfHByEKrmkpIT7s/jqgLNzZHg4CRNAz737UPNVmkmqFypfwGeAJzvn5+fm4uLl6QGN/lYWN+6PHXW5HbV0J2JKEeHXT544TiK5GM7Lm+7AgY/5I3GHIWWggrxzO8rEwMDSzBT8qfdJr4P790GhPbR/H+MjR33IzjY20O/t66OdiLzq6++HhldmRgaJ5OLkA09wGGEvL5NlRDBfCgqM9HRFOeAgqBUcbG1JJCI2LHEY0ift1Strc3Njfb2LoSFwUsXFxoYGBxvq6m60tnrD9BQbt2/d3CvaVbr0tDQoC8KfJO+LiweHUa5nQJYRHvz9fEV8CBHk63M65EIQiUREQ9EcBsXs7OybzMx7d27fiIiIffSosVEi1/yDAs6f9z9LOwsFCbKS7+nTJJKLE9Hu4DCup1wly4hgFhYWrMzXP336hHZs+cpxk93jOOlNL6nwrFqHAcCJB80JaJHv3bUTBA3ZvI+5khjVClzLkj0/KPUPDKw3Nv6Ym0si+dH/5Okvh6GjR5ZXB/OTk5NNzeOi9VPJ//hRlLHhQe0dHToa6iyfQoGFKKbDkA6QaCLDw2knoiDduR3F+7ATcPz+MXAY4a/oHYgQvsTFxDja2S15GSPz9WtIB+Pj4yQMEZvV7DCkhrbIw3yBdu/ckZyYSCL5IZO7JDKn585d0b812ERba+uYR49ox5ZXd+/cdnKwJ2GIyKDDWDk+3qfCrlyhnYiCdPNGpOexYySSC497ruAwbqRdJ8uIUMA0gMNwOXRISLPjw4cPYC8ex8eRGIQJ0GFIAS011cLCQtr5LEjOe3YnvnhOIvnR+ceDlNW2dmR5dTCYnAzfukRpHVleirRXr3Q1NXJzc2mHl1tZWe+01NWy3r0jMYjIoMNYORHXrrkddaGdi4J08rjnpRA+Ux0eu+MCDuNmegRZRpbi58+fWx0dnXfvKioqoh1ksB3xcXGQC9BeMA46DClgYWaamvqKdlYLko2FhfBHylt8fKGurd/rTJZXB8P5+fCtQQvT02TVUsTHxmqrq2VkpNOOMKWXL5PB+SU+F2bmEEGgw1g5ra2tauvW1tfX085IXjU1N6urKNfX85mn3u3OEXAYURmRZHmZ9PX1ffn8OSf7fVFh4Y+hIbJW0RkdHfE9fVpded2OrVvu3rn9PCEBLEVQwHkDHW0bS4v0tDSyHcIc6DCkwJVLF11djtCyB1/l5ORoL3UfsMZxM1S0bReCybLITE9Py+8dxon6BsphTHcuY0jfFwkJYCMcN9k9eviwoqKi4du38vLy6Pv37TdugOOc9uoV2Y4fQ68zRz5/mRsdJcsIF+gwxOLQ/n2XQkNphZ9X4dev7dm5g8T8Fe+HJyzPmd5/e4csi0zhly8uhw6pKK3RVFWBsgF/Vdcqn
TpxnIVToUqInz9+PHrw4PDBA9u3OO3dtfP0qZNfCgqkOavkqgIdhhTo7OiAUsx3rC2aPNzclnyuYaav7+fHvImGBrK8FO1tbaHBFzjjlENuOXr4EDRdyMtywuzQEOUwxioqyCrRGBsbe/bkiYOdrfKa36mvv8XRIfF5wpIjLJQorYOPG3j+giwjXKDDEIuy0lKo2hOePaOVf269TE7WUFFmcAzThYUFcBLUUDZULuBIZa0SZKgrl3C0bIRh0GFIB0/3Y9Aa6ezqoqURbj17+lRDRaW1pYXEiM3c7OypEycgn6j9dXqRX2vWrd1obdXT3U02ZT2Lc3OUw/iZ+5GsWibQSpmcnBSxrQI2bmWGZpWADkNc8j7mgsl49PAB72Ti/QMDj+PjwF4w2EUI7MXJE8f5jvbNkbryOjQZCLOgw5AO4+Pju3ds/2UyOjtp+YQSZS8g7ZAAsflj2hcXISkFGi2mRoZyZDKK1yhBlS+dQTZ/ZL2nHMb82BhZhXCBDoMB8j9+pAYzjoyI+Pz5c1VV1ZcvX27dvGlpZmqkr8fsBB+3b90SMvg/R2AyUl4mkxjJMzY6OtDfz+YJphExQYchNcBkgMMw0tO9fPFiXT2ZkQAaMElJibt37AArwKC9ALy9vIS3WEBgMsyMDOWlc0a5rj5U+X1x8WRZkrSeOw+f9XXj6npgR3TQYTADNZjxnl07qbuYWmqqu7ZvS09LnWV0qmV4Nz0tTe6SL0QbLC1ImMT4+fPng/v3TQwMuD808XnC5OQk2QJRFNBhSJOFhYXcDx8OHzygorTGUFd3vbExmAAzY6M7t6O+f/9ONmKCzo4O3putfKWpqvLsicR//bm5uXdv3+zfu9fafL2FqclWR8e4mBhowJCXRWN26IfoD5KISYWRCTiMjis4ZCJ/0GEwj+Q6G4JlWbK1wdGvCQ6Ki0mkBEhPTYWWDe/+aKiqgM3Cwe8UDHQYMqGvr6+yogJKU0N9PdS+ZC1zXA4NVf9r3wshslpvRsIkQ+qrFENdHVpKAWcDe3gpNARcF9mONUx1dFC3SEaKMN3xBx2GPOHh5sZd9oQLmiaS641B2QvaJ3ILkoJE/Q0iZdBhyBc/st6PVVbNL3XjUldTg7vYCpfq2rXVXyX1qNqL5wlCUgrYjpMnjrPNZPTFPwZ7UbxGaYHRa9WKBDoMGTMw3P8kN/7em9vzC/NklWB2bd9GK3jC5cMz1zAjlJaUCLcXlMBkdHcv45F0hM2gw5Ajfj1P8Y/foPL78U5YJ7C52VnuAruktNRUc7Lfk2BGyUhPWzKlgMnw9eEztZMMGXr7tnbb9kZXN7KM8IAOQ8ZUt3/V99YEDU8Mk1WC2btrF63UCdfZM2dIJKMcOXhQlHu34DCu4iMtigI6DDliqr2dunovfDCMqakp7gK7pLTV1bLeviXBzLGwsGBq+L++XEIEaae9rY2EIfIAOgwZ09bfSjmM7qGlm/tnTnuL2C0LBBX8rcgVDhUqhJ7ubmWlNbTPEiRo9EAWI5GIPIMOQ44YLSmlHMbcyAhZJQC1dWu5C6xwQUopLmJ+AK7cDx/gnWmfxVdqyutCgy+QMEQeQIchYwZHBimH0dBVT1YJBoq36BlBQn4f6hhNNVXaZwmShory54JPJBKRZ9BhyBEjBZ8ph7GwlL/3POamInKDQUdDfVoCz2gcO+pC+yAh0lJXI2EC6Lp5q+d+9ExvL1lGZAo6DBkzNTNFOYyyplKySig2Fha0IidI+/bsJjGMcvf2bREbHCBIB5kZGSQSkWfQYcgRPz/mUQ5jcX6J3l1lpaWqa0VqtKirKF8Pk8gzmY6b7GifJVxCLosuzMwUr1kLX7zn3n2yCpEp6DBkj5GPDjiMj9UfyLJQ3mRmitLLErYpLSkhMYzy4P593idUBenXjVs5n/K4uKgwODDQ45ibp/uxi8HBq2faFxroMOQIzjWMeRHGyHKwsxUlpaitW9vb00NiGMXBdiPts4RLyHA7P95lUV98msUjkHZ1daWnpUJRSnmZXF5WRtYqKOgwZI9NgDk4jIwSUWcETXj6VHhGgFcl1OUbyMzI0FRVoX2iIMGe1FRXk0h5IzUlxcbSAhIrp+8LNe0LZGRmx2mVC9BhyBHTXV1URTsuQukb6O+3MDURnlKgFHzKzycBTHNo3z7axwkR7AkJ40eThyd862p7R7IsGRZmZr6npsJfsiwyBZ8+Hdq/D/KJlpoqCBpg8HWszdcnPH2iqKMUrgqHMTc3J4nbh0yx7bIjOIyEvKdkWQRePE9QV17HW9PDGjhrJWcvgKmpKfgI2ucK0kYrSxImb9yKjKTNAsUtyAtPHz8mm64O0GHIEYuLi0X//k+pqvqwaLaAMhka/FoOGirKYD4kZy8AsPIiXhaFPTl90ouE8bAwPU3NSNIb/YCskgyDr1LhU0rVNefHljGM+pVLl7ibK9yCr+9gu3FkqW658ogiO4zenp7wa2GcMa2h/Jzw8GDhWJOBT/097h7Nrlxes3hsdDQ+Ntbc1IRzmtrZ2LxMSpLCsxtXL10SpSsGFJukRLmc0Tjq5g3IBbSvQ5PqKjMZ6DCAqcnJxOcJri5Hdm7bumfnzlMnTuR9zGXhWJOAKPdHuIG8kZyYuNHaSkVpjeYfLWwo43pampHh4X19fWQjyTAzMyPiwF/Cr4n2P3lKXbmRdDfPGofN8Cn1zvvIsgiAvRCeM+FVhTQZiukwoMwHnT8Pp+Oh/fvTUlOrq6vr6us+ffoUcO4cnMqb7Tcp0khQc7Ozo6Mj80t16WKQwYEBEwN9FaGXVX8VGDtbeXxUtbW1VcRHgiEXMztJBJtZ5Q5jbm4OmitQ49raWN+IjHj69Mnj+LiQCxcMdXVtLMxTXr4k28k/DfX1+R8/fsjJKS8rY3ZaJSFE370rvAIGwQbOu3eRAB7mx8ZL1TWh4m90O0ZWSYYf77MpH/Pzg6jzz2W9fbvktwNBq8bzmGR3XvoooMMAe+HjfQqKfXl5OWfKY446OztPnzppaWaKw02KAxy9XyZDwHNuUFTAXoDvIVvLFcFBgaKkA5CGivLtqFskTNFZzQ4D7MVxd3fwFu/evaPlk+6ensfx8Toa6jGPHpKtkRVxKTRESLmDl7ZvcRIye/P39Ayo9Yt++/d0RydZJQEW5+cr11vAB9U4bSGrRGCb02ba1xEkyKgD/f0kTCFQQIdxITAA7EV9fT0tF3A0MDjo4+0NJmO5U/Yh3PR0dx89fAiKBHde0FBVUV2rBA5PTu3F5OQk37vRgmSgoy3Nq0cyZNU6jMXFxRMeHnYbbBobG2mZhKOPHz+CyXgcF0tikBVxPSwM8gmtTwbkE1h5cJ+zEHtBMVZZ2f/0GVmQDAMJz6kLGKMiPwPSUF8vqCXGK/juNyIiSKRCoGgOo6+vD87I0tJSWgqgCUzGZvtN8bGYEcQFfEZ4WNgB573Qwjh0YH/0vXs/f/wgr8khjd++iZ4OKA0ODJBghWbVOoyCT5/ARwqxF5TevXsH3nR4eOmx/2XCHFt3jMb3799vR90yNTKkiqGupsY5Pz8oleRlmbIwNVWmpQP24tuR
o2SVCMQ8fEjzTMJlb7uRRCoEiuYwbkSE73feSyv8fPU4Pm6DlaXkZlpH5JGK8nJ15WWkg3Vrfm9tbSXBCs2qdRhuLi4hFy7QsgdfbXNyevRAsk8xrIzvqanFa9YOfyogy3IC25LzQFLSrwsY//htsqWFrBIBqJKg0fuXpCFU602MSaRCoGgOw9zEOCXlJa3k81V3T4+ulqbCD3iCLItvDQ3LvYahYPdNBbE6HUZvby9UD9U1NbTswVfPnj61NjcnkaxhYWamwtAYqsai3/499OYNWYusiB/Z2d1Rt8mCaNyJilrywTRuWVuw7hQSB4VyGOB5oXrg28GTrzbbb3qTmUmCZUro8yCPe65VrRVkGZERExMTInbzpKSvrTU3N0eCFZrV6TC+fP5sYWZKyxuCVFNTA4dIas9fiM5Ue3u5geGv9vff/9n/TLI9FaSJXNz6SU1JEX2IQtCh/ct4CJb9KJTDmJ+fh1+oqqqKVvIFaZvT5ox0UUfSlCgbAy30vTXTil6RZUR2BJ0/J2KbQ0NF+aYEZq9lJ6vTYXzIzrbbYEPLG4LU0tICh4idQxrM9PdXWVr9Mhl/+3v3rSiyVp6ZHfpRYWjceu78knOvyJbJyUnRHQY0byQ6XqL0UbS7JHpamh8+fKCVfEEyNzHOz8sjkTJlX/gucBgPs3C2HtnT0tws4o0S2GyVdPMEVqfDKC4qNDMypOUNQaqsqoJDxNpni+ZGRmqctlAmoy3oglx3QZtsaqo0Ww9fpOi3f4/X1JC1bCXkQpCIjRZjA30FezZN0RyGj/cpXx8fWsnnq/fv3+tqarBkNPjTj7zAYVxKDCbLiEy5HnZ1yXslkDJiHz0iAauA1ekwhoeHNVRUPn8uoGUPvrpzO2rrZsnOiCEmC1NTDfsP/qqY//V/Ux0dZK288T0tvfj3X6ODF/7jt8EUObju29PdDU3fJcfxU12rlPZK0S5jK5rDqKqs1FJTbW1roxV+XrkddbkUGkrCZM3V5IvgME4+8CTLiKy5dlWYyQB78eghG58akByr02EA/r6+3ie9aNmDVwODgzYWFq9S2D645+LcXLPXqd4YuXxQf358vPnkKeoyTKm65khhIXlBYvx4l9X78JH413uamhqFmwywFwlPlzE1lbygaA4D2Lltm6f7sb7+floK4FZSUqKGinJ7WxuJkTUx7x+Aw9h7fQdZRljAi4QEcxNjbp8BCQK8xQZLi9fp6WSjVcOqdRh1dXXqKsqFhYW0HEJTzKNHhro6bJ5hUd6ZbGmpMDKh7EXtlm0zEp4tBZgdHCxV04CPa7vAwNVlMBm21laQQGg3YTVUVXQ01JOTEsl2ioUCOozBgYGN1lbHPTwEmQzKXmS/Z9EE3K9L0sFhWJ83I8sIO4C2y6f8fGjFHjl4wOXwoYBz/qUlJeS1VcaqdRjA7Vs3DXS0hZiMp0+eaKqqsKRTl6IyPz7+y2H847fuW1GLkp9tbnFurnbrdrAXxf/3+1R7O1krNmWlpR5urvraWmA1wFhstt+UmpIys/yJ4OUFBXQYAGUybG2sH0RHd3Z2chLB2zdvjh4+BK1SVtkLoPhbITgM0MwstoEQNrKaHQYQdfMm1Aq3bt5saW3l5BNQZWXl+XP+YC8kOr+5FGgLDIKae4HdUxWOf60eq6wkCxKmPSSUul4ymKw409pJH8V0GMCveZZfPHewtdVSVwOr4bjJzthAH3JE2OXL4DnIRqyh/2dfZOr1px/jp2bkbzJSZDWwyh0G8PZN5o6tWzVUlD3c3IICzp87e3bfnt2qa5WOu7tXf60iG8knP96+o2rTMm3dgcQkKVwhYDlDmW+oA9Lqf46sQlaEwjoMDrU1NR+ys9+9fVP45QvYDrIWQZDlgA6Doqa6+taNyOCgoEshIQ+j7/f29pIX5JnZoR+tZ/0L//EbVa1WWdkMy/qSzEBS0lh5OVmQLpNNzcVrfj2rUm3vuKC49y+kg+I7DARBxAcdhsIz2dzScOgwZTJAdbv3ymSoiYn6hhqnrZTRkcloWjUOm+HTS9U0pNCZVOFBh4EgyNKgw1gljBYXV9vZUyaj555UxwCcHfrRHhzCuZTSsP/g3M+f5DUpMtXW9tVm48jnL2QZEQN0GAiCLA06jNXD4uLi97T0KitrqPLJKgkzUd/QfNqn6F//R3mLcn2DH1myHDxbrgc8ZRXoMBAEWRp0GAgw/rW6KyJytKR0gdEJ3ipNzChvUfyfNZ1Xw1j+SAsiOugw2MX8gkINSo8oDOgwEKDJ8zixAr8r1Tvv67kfPV5TI36Lvy82tlzfAN5tjpXzxiErBh0GW8iuzNoQaOEYYkuWEYRNoMNAgFb/c+V6BpTJ4KhEaV1f/GOyxZ8szs2NFhf/zMn5npra//RZz737ndeuN7q6VRga816iWJiZkeEUqRPfvuEDuhICHQZbyKvO1ffWNPTRWcBzHWEf6DAQDlPt7WAaGo+5l6qqUyZjIOE5ee1P5sfGOBaEptGSUrIRCxivri7+XenbYZcFHPFdAqDDYAt1nbXUsJ7fRwbJKgRhDegwEF4WFxcn6uoHEpN4x9WGlyg/AfV3mZZ2pdn6ajv7b0eOdt++M93dTTaSNdOdXWUaWrCT8Jc9e6VIoMNgC4Mjg5TDaOiqI6sQhDWgw0CWy/zYGJvvPsz++FFp+quHafEaJZmM/LEaQIfBFuYX5g1Pa4PDyK/F+ZMQ1oEOA1Ek5sfGq+0dwV4U/fNfMh/AVIFBh8Ei7INtwGG8/JxElhGENaDDQBSG+fHxms1O1E2cweRkshaRAOgwWMShG87gMO69uU2WEYQ1oMNAFIZmr1OUveiLjSWrEMmADoNF+MZ6g8MIeR5IlhGENaDDQBSGmb7+SlOz3kcxZBmRGOgwWERN+9eib4W9P3rIMoKwBnQYiCKBw4ZKB3QYCIIsDToMBEGWCzoMBEGWBh0GgiDLBR0GgiBLgw4DQZDlgg4DQZClOeDsvNHaKiM9jSwjCIIsBToMBEEQBEGYBx0GgiAIgiDMgw4DQRAEQRDmQYeBIAiCIAjzoMNAEARBEIR50GEgCIIgCMI86DAQBEEQBGEedBgIgiAIgjAPOgwEQRAEQZgHHQaCIAiCIMyDDgNBEARBEOZBh4EgCIIgCPOgw0AQBEEQhHnQYSAIgiAIwjzoMBAEQRAEYR50GAiCIAiCMA86DARBEARBmAcdBoIgCIIgzIMOA0EQBEEQpvnvf/9/LyWfCSLPJOUAAAAASUVORK5CYII=)\n\nOptimum öğrenme ve Aşırı öğrenme (overfit)", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ 
"![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA9MAAAJPCAYAAAB/3ofJAAAgAElEQVR4nOydZ1RVR9eAH+6Fi3QUQaSICioCVlBQ7D3GxMSe3usbk5j2JbGXGFOMLRo7scVookk0JioWxEIRFUQiiqgIItKL1Fvm+wEoKOAFiSXOsxZr6T1nZvbM7Nl7zzlzZgyEEAKJRCKRSCQSiUQikUgkeqO41wJIJBKJRCKRSCQSiUTyoCEn0xKJRCKRSCQSiUQikdQSOZmWSCQSiUQikUgkEomklsjJtEQikUgkEolEIpFIJLVETqYlEolEIpFIJBKJRCKpJXIyLZFIJBKJRCKRSCQSSS2Rk2mJRCKRSCQSiUQikUhqieG9FkAikUgkEsn9TAmn/1zJ/ou6qi8r7en+9Ag6Wt3Z8/niggKUpqYYouHinjXsSvdi5BhfGt+tx/7FBRQoTTG9k8hIc4G9a/4mw2s0o3xt9Xpjobmwh3U70/AcNZautv/ddxya+N2s3Z1F+zGj8bH5t+tZTEGBEtM76kyJRCK5Pf9dqy2RSCQSiaQeKOLsrgAC1mzkj+3b2HHz344jxOffSf7FxKwbzzOfbydbB6Al4cB6ArZGkF7N/L2+KT61jgnjPmNH1h0WqL1I8IbV/BaRgb45aS8G8dPqrRzLuEuVvUdoL+xn/erfOP5v17M4mg3vjmXy9iy9+0AikUjqinxkJ5FIJBKJ5LYonUbx7eYJtDOq75zzORMSymWtf31nrDcFsUcIS9LS7U4zMvZj/JotqC2a6h1gGfu9T8CvJZg3lSFZvZAfS2hoEvdQnSQSyUOEtNwSiUQikUjuGO2VSIJOFtKiRzdampX/mELU/iiKWvTA19UMbWoUB6J0uHexIG53CBm2XrRUJRCbKRC6MwTvPkx7/87X8xTXLhB2OISYpCKs2/ZiQA83LCuuqdPlcuHoEY6fTiTfvBlevj3p7GxaXjipUUFEC3e8zePYE5KGrXcfunnYoqqQRVFCOMGxmejQcTZ4N0c6dMfX8sJNcnrTy9+DxiooSDrGwcNRJGQWoTBvShvfvnRvbY0SQJvLlbhYcp0b0sRahUg9SXCUljZ9WnDt4G4Ox+Vh5uyBTw9fWpZVRJuXTFxsLs4N7bFWphF9IAqte1+a5wWz59A58syc8OjSky6ulpWWExYmhbMvKIqrNKFNt950MDhNSKIl3j09aKTQkfnPQY4mmeDesysuJtX3W431ubWXa2hTLdlnD3MwIo5UtRXNPLvS3acZZjdnocvg9L6DHL1QRKPWPvh1c6exof46BKDJjOVIcATnUvIxtHGlc+9eeNmpoDCBo8FnyBKgPXOAwMMdGezvVn3lJRKJ5A6Rk2mJRCKRSCR3jDr6J2ZOSuK5n7vS0rVsKlYSzebpE0l6YTO+ri3RnPyZWVMu4emZRNixHDDx4fH+xURc1qIjnN9+LsHAoxONALJDmP/Sz5wtdsDZJJ3zy5ay7vHZrJgyoPQ76rxI1k78nOVHNTT3aoFJ5kaWz19O/4+/YuJwN1RoiN40jamXvPBMCuV4Dph2h18Wjcbu+qxUR0FsIL+HJaPTQsSWjagNPPC2vElO0+7w2zw6BH3K+3MPo3b2wtXWgKz4GJYvWEzPz1fz9YhmKDX/8OusiZwduR7fVpZoYzYze2I8nr7ZRJ5ugHNjNZfjF1FoN4yZATMYYKdAE7OZ2ZPOMGKdH24tStOf9/QjO/IfGjg3Rp0Uz5JCO4bO/pGpA+1QoOXK7hmMn/onGTZtad04n03LV+LqpuFEai++7+ZBI5WG89u+YsoWRz7Y4YOLSVVf9amJ3/xJzfW5JU01bbqgN7ELPmTWz3Eom7vTzCSTjYu/xarXe8yY8RSe5c83RDoHZrzApqxGtHLUcmnlPOa3eYWvF71NZws9dSh+Ex+++Q1Rxu54tbAg/8Jalsx3ZNzclbzbOpa9W0O5rNVC+BY2lxjIybREIvlXkZNpiUQikUgkt0WXeZSN337J3xV/VJjTftRbDHGtRThRfJo4sw/5cc9jNCooxMrZiL/fHsw32ueY98M4GiuKOQDoMpIxfSWAv8e60YBcwmY/y3vbfiXof/0YZVtMxJIpLIl05O2A+Tzvbga6bE4seYf35sykhdcqXnQtLa7k9FnMPljPzsdsKCyyqjCRBlDQaPBEvstL4tGvtDy9YCljbBUU76lCTtUepn1/ENXwRaz/tBtWCqDwFEteepG1v+8hcfjLNK+qvppYTmk/ZOX2Z3EzgbyIr3n5rV/Zsm88A8Y1qSoBZ6I1vB/wJ0+7mUDeUea+8BZbf9nL//o/RePsPXz/9Q4Ke0xh7RfDcVbpyIpYzAfvr0ZnXZ6HIa4jJvOVbwPcqtsYLns3y29Xn6pfT9/UphYY7Z3B7I2XaTvhR2Y/444ZWjJC5/He+3OZtsyd9RM6lSbUZXDF/D1+WPkibUx15JxYxDtvBfDd+n6sf8u9BqUpR0301g2ENxjBgk2f09UUKIxmyctv8OeOUF71G8yn83O5POQrtM8t5PuxtnrkKZFIJHVHbkAmkUgkEonktoiiFM5EniCy4t+JaC7litplZGBNtxGj8LCxxN65CdWtQFbY9GLUE240AMCSjn7tMNemk5qqg5Lj7NqbTKMBLzDWvWw9sMKaTs+Ppasihv37L1Uozp8nRnliY2mPk10N651vJ6d5T95bEcC3b/pSPj/VqE2xsTWBa7lcq263KwNr/EePxq2saIt2XfCw0JGZmlpdAqx6jmHk9QTt8fG0QJuRSpoO8kMDOZLTksdefRRnFYCChj6v8FxvawxutB4NW3ejT99OOBlXU0xd68PNbaojfM8Rsl2G89oY97Jl3Ups/F7nhT4WJO7ZxamSsoRKZ4a98TRtTEtltOr0AmP8jDh3YF/1hd3UNmbmJoi0cH7ftJuopDy0Ju14e+MR/po5CHM9c5FIJJL6Qr6ZlkgkEolEcluUDo8xbX09bECmaIxdk2peeVbEyoaGFW4zUKlQoUGjFejyLpOcpSFj+4f0/9ugQiIdmmIdjZKvAI6l6Ro3wU6P4m4rp6ElNhaFbF8zmfnR8VxOTiYlvQAMBTQVVPtIQdEIO9uKFTHCyBC02upSKGhkZ1thibUBhkZGoNOhE1rSLidTqGyKs1PFEE6Fk5Mdiqha1K+u9eGmNtVmkJxSiNK5BS0r6YYZbm4OsD+FFA14ASicaOGmqnSPk2NjiL6qr9C4PfUxr/8zlYBFnxC40BCr5p3w6/MIo58dTgebunS0RCKR1B05mZZIJBKJRFJPCCpPwzRotDffo0Cpz7o4A0W1y+cUSgVKA0Nchk/j06H2t9xn2Mi1ws2KajbTuh2V5dSl/MnUF6dxSOXD0EeH0c/NlVaerbmyeAQT/6GGyacCRS2jLUUNCVQqFWgLyC/QgVm5gDrycvJqdRRU3evDTW1qiKEhiJISSgDT678LSkrUoGpAg/KbRQlFBTquvwpHoNNpwbjiBLtmHVJY+/Dyom2MSjhOyOEwjoYc5OCGmQSFpbBszdt41qINJBK
J5E6Rk2mJRCKRSCR3jIGhEYYUkn/txkRIk5RAsvp235QpMDAARM1vQyth1ho3R4hOKcChQyfsywrQZQSxfF4QJo++i5dLdeubq5BdYQCIGmaQOrIO7uBglgtPBSziHa+yV7Dai4Qn5oNOq7/sd4SSJu29cGQn0ZF5jBloVfqz+hxHI6tbNl4V9VgfpR2tXW3QHTjBibxR9LUoLyKVqKhLGLgMpEV5tKm7yJnYImhaNuVWnyfixFVUrqXfS99Wh3Q5HFu/gO3qIXzySlcGu3Rh8NNvcWHlCzy17DgnM3R4NlBQqk53p0ckEsnDjfxmWiKRSCQSyR1j2LotroaJBK75lcikK5yP2MaiL38l/rYpjTE1NUIknmB/eDRJeXoUZuTJ42O8MQhZwoxlQZzPLqTgSgQbZszmx6AEFHaWtZK9gakpRiKRE/vDialSAAXG1paY6FI5FRpLtha0ufHsXTiN9TEaRHERRXdp7mbUfhzP+GkJnDOB77YEczRkF6snfs5PcWWvbw0ANFzY/i1Tp6wiLLeqXOqzPio6jxqJe8Eu5k5aTcilXPIzz7J30URWHDOjx5ihuJS/mdals3vedH6NvEJe5jn2LJzFpnh7howZCOihQwozTPNjCFw5lwU7z5JVrKEg+Rh7whPBwZ22jRRgbIaJkSDx+D6OnkqqWyNLJBKJnsjJtEQikUgkkjtG6fQ4b7/hjyr8W1577BHGjf+B8z4v80R1W0Jfx5hOA/rTNDuQb96eQEBksT6l0Xzsl3z9ZnvSfvqQsX270Xvoa6y82IoXvpzD07XZXRxQdR5IX4ds9s55iw9XnqjyHvM+b/HBcCfOLXuOwX5d6TnwNdZmD2byO91QZZ3nXNot69n/HZROPDFrPu/76ziw4CMmfL6YIyajeXVQYwyMTco2bNOREbOPnX+HcrGo6sXf9VkfI/dXmDPnRVpeXM2E4b3o038MU/5U0+PjBUwe5nhjSbihD6MeV/PT24/Sr/8oJu8wYMis7/mwp2VZ1W6nQ4a0fXkWH/bVsmfyGAb5+dD70bfZlNOND754i44qwLgT/fs7kL1nDuMnrKhbG0skEomeGAi5DkYikUgkEkl9oc0jJTEDhY0zdhb6f62sLcwkq8AIKxsLarXHWUkOyUlplJja4WhvWbu0lQUgK7MAI2sbzGvIRJ13leQMDdYOjlipqr/v30KXE8/JeB2ObVthe31z8nx2fTyYaRlvsGXlczjU4lVJ/dZHR1HmZa7kGWPraId5dc80SrJJTtXSyMGGBlXJqocOqXOTSUrJx8CyCU72ljd9t6ilMDOTAiNrbCzudMc8iUQiqR45mZZIJBKJRCJ5QNBeXsdbIxajfXkVC97wxBwdOZHL+Wj8KgqfXsePb7nLDXEkEonkLiEn0xKJRCKRSCQPCrocji77iKk/niDf0p7GqnyuXs2nkf94vpz9HJ5m91pAiUQieXiQk2mJRCKRSCSSB4yilBiiYhJILzbG1q0jnVrb1H2Ju0QikUjqhJxMSyQSiUQikUgkEolEUkvkbt4SiUQikUgkEolEIpHUEjmZlkgkEolEIpFIJBKJpJbIDR//bXSphG36hWMZ5avpDTBQGmHW0AG3Lj3wdbVG/4NDStGmhrN57XZOpmqx9nuW/xvhUd9S35biggKUpqZ3pECaC3tYtzMNz1Fj6Wqrz3MdDRf3rGFXuhcjx/jSWD4KekjRkRayiS3HMig/PdXAQIGRWUOaunWhu58bDevbsmkusHfN32R4jWaUr209PYW83/RZS1rEZtZtO0GqxoouT3/CSK8qvsAsLqBAaYrpHbVx7etee3shqSu6nGi2rdnC0ctqLDr0p5tBLOdtH+GlAS3K7iimoECJaTVKcD/0lT4y3A9ylknCuZ2r2JvfhXFPdsaqFqLUhy+uD6psywq2ov7bWsO5XavZm9OZ0aN8aCRNwh2g5syOFey9UPV55Cjt6f70CDqaJfwrfrB++rFmm1Rtqn9r/Pyrun+P0MSxa1Ug17zHMdKnkT4JHpox+h+u2n2CLpWILQEErN/Mn9u3sWP7H2z/bSOr503hvXFP8vbyE1yrVX7JbJ36PvO2niBdXYRW3O3tRoqJWTeeZz7fTnY1dldftBeD+Gn1Vo5l6JuRloQD6wnYGkH6HZYteZDRkRrxKz+uWseWbdvYsX0bf277nV9WzWPG+LGMeX0Jx/PquUjtRYI3rOa3iBsT+HrI9L7SZ13yFqa//zW/n0hDXaxGGBncck9JzHo+eOozdmTdqcC1r3vt7YWkbuQS9M0HfPlTCJcLiyjS5HDyt9X8dOBi6eXiaDa8O5bJ27OqHQv3Q1/pI8P9IGcpas7vW8OP2yLJ1XsXm/rzxfXBzW1ZfGodE8bdsBX139ZqLuxdQ8Afx8mWO//cIWriAgMIWLORP7aX+tRKfzuOEJ/Pv+QH66EfS2L46b1xNdqkKhLxz4Z3efZfGD83+8n7x87cIerz7F+7mm2R2fomeGjG6L1+mPnQYNR5PBuWjKZh+Q+Fl9j5xVtMW/kdvw74kRdb6vl+WpNI/MUSnEbMZMlHne7Bzp35nAkJ5bLW/45zMvZ7n4BfSzBvqq8aGtP1vbVsKrFA7ySS/y5Gnfnfzz8w4voD0kKSdn/JuxNXM++ngQS80ar+DJyxH+PXbEFt0bQejeb9pc+axDguFjvw2NTlfOxTtWUpPhtKaGIJfndcWu3rXnt7IakT6vOcPJVFw0FzWTqjNw0oIXtAZ55U2ZVez48lNDSJmlzAg9JXD4qcVVN/vrg+uLktC2KPEJakpVs11yX3H0qnUXy7eQLtqg0s/w0/WA8UnyUsJBF199okKiIuNISkklol0lOc+vKTkgeF+2o8PFSYNKPfEz35YdffnL+ogeuTaR15549y5Ng/XC4wx9nLD39vZ0wBXWYsh0PCuHgNdOmn2LOrhLa9fGluAtrss4QERxB3VY2Viydd/H1wLj9rUnuVk0EnEW27YHZ2N2HptnTu7U9bWxXocrlw9AjHTyeSb94ML9+edHY2rUboQhLCg4nNFAjdGYJ3H6a9f1cszwcTLdzxNo9jT0gatt596OZhi4oCko4FE3LyIplFCszt3ena159W1qV11eYlExebi3NDe6xVgtSTQZzUutO7xTUO7T7EuTxTnDy64u/riqUCQEte8lnO5Dajob01yvSTBEdpadOnBdcO7uZwXB5mzh749PClpaWiktxJ4XsIPpkCdm3x7d0BxekjJFp2oZdHI9BlcvpgOEmmbRnYxeXf6G3JXcEEp75P0MNhB9viz6PhxmRal3eBiMMRxCblY+7sRZeePtys5oVJ4ew7EMVV0YQ2fr3oqDzDkQRzOvfyxEbkciUullznhjSxViFSTxIcqaZ1X1cKDgVy6GwOpk4e+PTww7Vsjab2tvdU1meVuEr0gUjUbfrRMv8gew/GkWPqSNuuPfF1taq0jKhGWatbb6TL4dzhAxw7m0KxlQseXbrj42IB6MiKPURI+EXyEWTE7Ga3pi3+fi2peFxtUUI4B2Iz0KHhbPBujnTojl+LQk4FRaFz74p53G7C0h
lkmQSC6XG1wDUyaTY/daQXobGxt8fDfj7jYIqVSaUYInJ+7du8fECeMZMHDQv/Jw2NvZ51n+SxTBzs4+9wvfgLNzG+rWrcfVK5cNqu2XjkqjwcrahvETJpo89odM+fLlkUqlCAI8fPSIAoUKmtxXiRIliTZS90DQCRkLWalUSq1atbh16xZ16tShdp3amfK4QJ/X1blLZwASEhKY+e23XLl6JdOujNLSElUuOgVqjQaNTstgAz/QRYoUYcKEiUx4x35nUqmUUaNGs2jRQnSCYJSyt07QoRNFxo4dm69zqlChAt9//wPTpk3n8OFDPH36lNSUFGxt7Shbtixt27XLeD+bMZ3ExESGDR2CKIr8+PMv1K/fIMs1w4aPIDQkhLVrVnP50iXWb/DK0XmUn0RHR2P5Sli1MUgkEuRyWaY83jp16lCxYkV+27+P/gMGGtXf0SOHsbe3p0WLlqSmpmJtbY2dnR3de3SnRMkSuXdgAJEREQhaHWQjOGYIEonEpJ13hULBgAED2bplc+4XZ4MgCnTr1j3Lu9ZM/iIIAr4+mxgydJhJ7fv1H8DQwR5Mnz4jUwmrvJJd5Ejz5i2wsrJCnZaGhQk6QlqdDgsLC1q31juYpTIprZ1b47/Hn7CwMCZNmsL2HVv5YvIkSpYsSe++LlSuVBlLK0uSk19wPjSEAH9/5HIZX3719b9mJDZs2JBChQqRGJ9gkviaVqtFRKRDh44mz0GpVLLBy5thQwdz/fp1EEQUFlnLdAmCgEanRScIjBgxkkmTp5g85rvOB78zvdHbi27dexhsSKdjYWHB8OEj2bXLL0cvdFpaGtu2bMswpKvXqM7ETydmWqjlVI9SIpXQoGEDXPu5UrZ8GY4dP8ylixcy5Y60aOlM9Rq1cg1VSw9frFEjawhO5cpV8N28lQD/PUyb+gUXLpzPpgf94mP9urWMGzOKAQMHMvlfejiq16hBah5zlpFIqF/fdJVFmUzG+g1e1KxVC83L8NucEEXxpSFtxU4/v3wJ//kQUSgUFC6sF9m5c+t2LlfnjIXCwuDohLjYOHb57cJvZ+Y8e2dnZyZ+OpEWLVvkurhzcHBgzty5ODo5ZnhxraysaNOmPSVKvvmdk25Ir1ixiooVKxo033eZocOG07t374wPqyHoBB1qrRY3dw+jDRNDsbW1pU+fvowbN54pX0xl9JgxdOrc2WxI5wPx8fF4eLhhZWXFr8uWZ2tIg95Aa9S4MctWrASJhCGDPUlMTHzr8xNFUe+EzkPIoSiKpKalZjo2dOgwNnlv5P79N2sYvM69e/c4cvgInp6DkUgkWFtb4znEk5GjRuabIQ3od/7zEGEpkUCqiWlm7h6epKlURusnCIKASq022cAzYzgXL14kISGB9iYaWhUrVqRGzZoc+P23fJ6ZHlWaiidPngD6dbO7h6fpz69EL6alUPzjTKtRowYlSpagabOmFHMsxvTpXxJ0+iwDBg5ib2AAkyd9ztDBnnw1YxrXrl5l1uzZnPjjZL4b0hcvXGTL5i38/tvvWactkTBk6DAkUtPuW5BAXxfXPJensrS0xHuTL7O/+55SZcqQokpDo9ORplah0mjQCjpepKXSqHET1m/w+k8b0vCB70xHRETw5x9/sH2naaJU9erXp0TJkuzetYvBQ7LP57O0tMTdw50Dvx/AuY0z5SuYHqbUt68LC+bPIzE+IUOIq2jRYtja2WFrZ4eTU3GuXLlIdFT2dfI0Wi0FCxakRYuW2Z6vUqUK/gF78d7oxZfTp1HM0ZFGjRpjZ2eHSqUiIuIRp06epEGDhsybv4C2bduZfC/GUrFiRWrXrs3NGzewVBifW6f/IKvynHeZ7pEbP24sQUGnUCoUSCXSTMJogiCg1moQgcKFC7PJx5eyZf97pQD+TcqUK0tUVDRRUVHEx8VTwERV9IT4hFx1DlQqFX/+8SchISEZQoARjyIoXaY0YFwtV9DnC27dtp1BAwfw8GE41SpURG5hQd16DRAEgaevlHsRBAGNVotW0LF8+Uqc27Qx8g7fXb7/YS6CCIEB/mh1+tIk2UV3iKKIWqNBQMTN3YMvv/zq/zBbM3ll2bKlWCot+WX+gkwL1jdhZWXFwoWL+HTiBFavWsm06TPe6vwkEok+TzsP4ZISiTSLQ61Hz57cuXuHiePHsWzFSipUyNkZdub0aUKDQ6lcqQq1atfJOJ6fO3vpFCxYIM81Yu3tTUtVKlGiBG5u7uzcuQMkEoMiVISXaRidOnWmWrVqJo1rxnCio6IoXrx4npyJpUqV5vkb1qB54fq16+zftx9rG2smTpyIRCph0CA31q5ZjSAIKI2IMFFr1GgFATd390zHJVIJw0cMz/RdsrW1xcPD818rJQn6d9O9sHvI5XI6dOiQZc3h4uLKr0uXoFKrURrwbk0nI9pt8OB8madCoaBPn7706dOXy5cucS74HIkJCchkMgoVKkyHjh0/mE2kD3pn+tzZs3xcrRolSpj+j92hQ0eCgk7leE3xEsUZNmJYngxp0IePdu/eA0H8Z2cnJiaau3duIQgC1jY2fNK0BRUrZi15I4oiSCUMHTY8x/DkkiVL8vU33xJ0+qw+/0MUiYyMICkpkcqVq7B332/4+G7+Vw3pdIaPGAkSiUmLAbVWQ7169fIl50qpVLJ+gxcBgfvo3KUraq2G5NQUVBoNKao0klJeUKt2HZYtW8GJP05Srpw5zyuvNG/enNu3b1G7bh2TDenYmBiuXbuaaw3IbVu2ce7sOQSdgI2NDV27dc2kqvn48WNCgoM5cfw4wcHnePQo97BHOzs7tm3bzpChw7gbdofExASkUin1GzSiUJEiqDRqNDotL9JSadCoEVu2bqNN27Ym3ee7zJw5c5k5czbFHJ1IVatQqdWoNWrUGg1qjRqVRk2KKo0SpUsxZ+6PZkP6PSU5KYndu3YxcvRogwzpdJSWlowYOYqdO3eQmpqae4M88vHHH6PRmZbLL7zcla6ejcr7F19MxdW1H2NHj2Ld2jVEZWNcxMTEsHrlKi5fvIyFhQUKhSLPgkq5UbNmbWRGqHhnQSqlbh5q6H47cxY9e72MUMllh1onCGgEHa1at2bhosUmj2nGcDRaTd5+H+ijNrV5ULR/E0WKFEGtVhMfF68PLX55bKO3D4IootIYFnGWbkiv3+BF8eJZoz5eXR+npqSyft16bly/kT83YSAVP9I74LRaLWFhYVnOOzg44Lt5K0glRty33pBeuWr1W1mT1qlblzFjxjJt+gymfDGVocOGfTCGNIBEzI8iqO8p69atJSQkmJ9+nmdyHwcO/M7ewAD8/HZnOn7p4iWKFCmSsZuVX4SHP6Bnjx6IOl0mj5S9vQN16tbHxtaWc2eDiHtFGE0URTQ6LfYODvx+4NB7K4Kl0+no3asHD8PDjcpZTn+J+PhuoUGD7EMN80JyUhKPIiJITkrC0soKR0dHHB0d832cD53vZs/iydMnzJn7k0nt161dw80bN/DdvCXH6+7eucuOHTto2rQpzZs3R6FUIAgCf5w4gY+PN8HBwRQqVBhrG2tSU1KIiYmhXv36eHo
Mpl379rmmW6jVavbt3cvlS5exsFCQkprCgwdh1KtXHzd3D6NTTt5XQoKD2bZtK+Hh4bx48QJbW1sqVKiAm7s7devW+39Pz0we8PbeyO5du9i4ycek9u6DBuDhORg3N/fcL84DBw8cYNrUKW+MksgJlVpNuQrl2bvvzSGtJ44fZ9OmjYSEhNCocROKFi2KVColKiqK4HNn6dC+EwULFqJy5cp06dblrecEp6am0qRxQ6Rizull2SEIAi/SUjl8+ChlypY1eQ6iKPLj3Dn4+vpgqbQEUcikqK/V6UACaSoVPXr05KeffzFNbd2M0Zw8+SfffP01ewJMq7MM8OWM6dSpU4fx4yfk48z0bN28lbCwMGxsbBg3YVyGqn1ISAjDhg5GJpVmiRRMR6fToRMFtDod6zd4GVSBxn+PP9euXgOgVOlSNG/enMpVKufvTb0B743ePHr4iDJlyzBkaPYRlVeuXMHDfRASJNlWpgD9fWsF/X2vWLnq/yJC+iHwQRvTXl4bCDp1inkLFprcx759ezl88ADbtu/MOJaWmsaihYvQarV079GduvXq5sd0M7h+/RpugwYi6oRMBrVUKsXe3oH4l2VHILMh7ee3G6c8KPi9CyQlJTFo4AAePXyIhQF1K9MN6VWr19DKXOP5vebBgwd07dKJzVu3UaaMcYu55ORk+rv25aeffsm046vVarl08RINGjTIlIOUmJiYsbB9+DCc4cOHkZyURM9evenVq3dG+RrQ73gHBPgTGOCP3MKCtWvXU6VKlVznlJiYyL7AfXTr0c3oEntmzLzLjB83lgoVKxosnvc669et5dnTpyxesjSfZ5YZrVZLs6ZNSEtJNUrMRxRF1Fotc+b+SI+e2Zete5X79+9z5PBhYmJiAJGChQrh7NyGCuUrcO/ePT6u9nEe7sI45sz5QV8SzMhSk2lqFXXq1svVGWkoz58/Z+uWzWze7JspR97a2poBAwfi4TH4g9rZehdITEykRfNmLF76K7VqGV/HPDExkV49uuHju4U6derk3iAbLl++zI3r10lKTkKpUOJU3Aln5zZYWlrqq86sXI2dvR39+vfLtGkRFhbGhvXr2Ls3EIuXorwSJIiIyOUWqDVqunfvwYiRI6lUyTCDOCoqikMHD3H/3n0AijkWY8zY3AV684N79+5x8fxF2ndon2M03sOH4WxYv449e/Ygk8nQvXbfKrWKTp06M3LUaHOqxFvkgzamAwMCWLV6JZu3bDO5j3Vr1/A4MpLlK/4pK3Xu7DkOHzqMTC5j0uRJeU70z47r16/h4e6mF8ESRCzkmT+MoiiiUFpibWuDWq1ix06/bENa3kfSDerw8AcZKoKvlvp4tU6sWqMxG9L/Ib77bjZ/nDjOsKEjKV22DDVq5l7PMjEhgc8/m0ihQoXw2rhJrwyuE7h06RKnTp4iMTGRTp07ZSq7kc7Dh+G4DRpIi5at+PSzz3PMJdNqtaxbu4b9+/bi47vFIIPajJn/Ih7ubrRydqZXr94mtd/lt5PQkBA2eG3M55llZf/+fUyb+gUKuQUWBuzW6h3UOj6qVIlt23cYHMZ+7eo19u3bh6OjI8OGmS4glFeePXtG7149SE5ONtigVqnViBLYvmNnvtcQFgSB+Lg4kpOTsbG1wcGhgNG75mbyj7xEgG329SHo1El27fY3qp1apWLf/n34+vpw/959atasib29PWmqNMLDw0lKTMK1nytubh7otDpKliz5Ru2SxMREAgL8eXD/PvHx8Tg4OFC+fHl69upNgQKmpYjdvXOXoFNB1KlbJ2NzLC01jSNHjlClahUqVqiITP7/jZ5ITkoiMDCQsHthJMTHY2dvT5kyZejdqzeFXpbpNPP2+KDfWM5t2jBz5jdcu3aVmjWN98JptVr279vLN9/OzHT8wvkLAFSvXj1XQ/rKlSvs3RtA1PMoVGoV9nb2VK9Rg759XXIMx65RoyanTp1mz57dbNiwnujo6JdiKvrz9g4OtF/C6VwAACAASURBVGrpjIWFBSNHjcTR6b8TdmxnZ0fg3n0cP36M9evXcfnSJaysrDLyzTQaDdY21gweMpQB/QeYXyT/IWbOnIVElPDgwQMePHiAUqGgUg5hV1FRUUyZ9BklS5ZixYqVSKVSHj18RGBAIHFx+ggOiUSSrSJ/REQEboMG0rJVayZP+SLXucnlcsaOG49MJsPTw40tW7fz0UcfmX6zZsy8pyiVSlRpKpPbp6Wl6b9n/wLdunVHo1bz9ddfASLyHAxMQRDQCgIVP/oIH9/NBhnSgk7g8OHDhASHAPpyeQkJCSZrP+QVR0dHdvrtxtWlDy+SX6CQv/l+xfRcVIkEX98t+W5Igz6irlDhwubv9DuC5+AhdO3SievXr1GjRk2D20VFRbFzxw6++vpro8ZLSEhg8GAPEhMS6eviQtel3TOVxhNFkZCQYHb7+dG5UweWr1hJ2XJlM51/9fdrb2+Pp2f+CGylU6lyJSpVzqxFdOnSJS5d1P+nUCqoVKkS3bt3R6E0rdRebhw5fARLS0uat2ie7fNqa2eXRVDNzL/HB21M29vb06tXb3bu2GGSMX38+DFkcnmmem3JyckvQ7mgdp3ab2wbGBDApk3ehIXdpU3bdpQtVw6lUkliYiJ7du9m8aKF9OzZi+EjRrxRCdrWzg7PwUPw8BxMaGgoT/7+m5TUFGxtbXF0dOLPE3+SmppKYEAgI0aNyLR7+74jlUpp16497dq15969e9y69ReJiYlYWlpSrGgxGjdpYvZu/weRSCRMnzGDefPmIQoihw8dJjQ0lJ69e2FjY5Nx3eXLl/Dfs5s/TpygffsOzF+wMGNX2dbWlviEeACqVatGK+dWFC1aNMtYy35dSo0aNQ0ypF9l1OgxxMTEsHDhfFatWmNQm2fPnnHu7DkqVKhAzVqGL2Cy48GDB9y4cZ2kxEQsLBQULlyYT5o2xdLSMk/9mjFjKKVKleLOHdPL2N29eydftANeX2i/id59+oJEwnezZ6ERdIg6ISPaS0REEAQEEVRqFU2bNmXZ8pWZ3jc58ezZM86H6ktNlilTBpd+Lv9aHe03UapUKXb67WbUyBE8fBiOXCZDLpVlrBEEUUTzsiKFg0MBlq9YQZ26+ZuuZubdpHz58kyePIVJn33KgoWLqW1AuPbTp0+ZOGEcn3zyCZ07dzF4rHRD2t7egZWr1mTrnJJIJDRu3ITGjZtw8MABJk4Yz7LlK2jZohWnTp3iyd9P6D/w36nx/CpWVlYUL1GcJ38/Qa1S8zjycSZDOjw8HEdHx4y87rxw9cpVzp45C+gjXJo1b0bNmjWRyv47a/r3nQ86zBv0uUzdunbml3kL+KRp7oIE6URFRTFu7GjcBrkxYuSojOMqlYpbf90iOjqaZs2aYWmVdQH73exZHDx4gIGD3OjWvUe2oiPXr19jx7ZthIaG4uO72aRch7C7YWzdshWAjp060rhJY6P7MGPmXeRF8gs2em0kNjYWlVrFkSMHsbG1RSaVIggCTo7FqVK1KsWKOpKSmoK7h3um/KrQkFBKly6NU3GnbPuPjY2lRfOmrN
vgReXKxodrR0ZGMmhAP44cPW5Q3t/2rdu5c+cOlStXZsCgAUaPJwgCx48fY8P6dVzKFKkhotXpkEql9OvXHw/PwZQunb+iiGbMvM7Nmzfp59oX/8B9FCxY0Ki2UVFR9O3dk737fjM6siMhIYFdfjvx89tJZGQkarUaa2trqlevjpu7Bx06dMzRyZqWlsa+vXtZv34tDx48yDiuVCrp3acvQ4YMpUKFCkbNCSAkOITY2Fg6dOjwzi2Az58/z4b16zhx4rg+bewl9Rs0YOSIUbR2dv5POeLNGIbPJm8WLlzAhImf0qlzl2yNQp1OR1DQKZYuXkyzZs2Y++NPRv1W+vdzRWlpyc+/zDM4XeLggQPM++UnFi5czNkz5wDo0LEDTT5pYvC4+UlCfAJ//fUXcrmcBg31ArdarZb5v8xHq9NSpnQZKlepTI0aNbAzsaycRqMhYE8Af/31l/6ABMaNG0eRokUAEAXx/5Y2YkbPB29MA/j772Hmt9/w3Q9zaNmyVa7Xp3vh6tWtx7z5C4xSmvxu9iyOHDnM8pWrDfK8e21Yz84dO0w2qPfs3sP1a9cpXrw4I0aNMFqx1IyZdxVBJxAaGoqFwgK5XE58fByCIPDs6TNu37qT6dqmzZrSrr3h5dxWr1rJ0aNHWb12ncnz+2Ly51SvXoOp06bnem3wuWAOHTyEjY0NU6ZOMWqcuLg4Bg3sT2RkZA4aAv8o5E6aPIXRo/8dERUzHy5ugwZQq3YdfUlDI1i1cgX374Wx0dtwJXBBEJg37xe2btlMhQoV6eviQuXKVbC0siI5KYlz584S4L8HURSZNn0GPXrkLhqmVqlITEpCqVRia2tr1LczNCSUUqVLUfw9EvzU6XQkJiYiCgL2Dg7myC4z7N+3jxUrlvHs2TM6dupM3bp1sbG1JTUllbCwu+zftxdBEPH09GT0mLFGPSMhwcGMGTMK/8B9Bkd5pPPLzz8i6AQ+adKMmzdvIpVJGTZ8GCVKvB1doHQn3aFDh4iPj0MikVCwYCG6du1K7z59s0SaREZG4u3lnck51X9Af6pU1TvmBZ1gklMtMiKSoKAgZFIZrv1dM46vXb0WhVJBsWLFKFq0KBUqVKBwEXPaxL+J2Zh+yW+/7WfG9Gn06z+A3n364uSUdccqNTWVw4cO4r3Ri2bNmvPjTz8b5YXb5O3N2rWrWbFyNaWM2B3asH4du3f5ceKPk0a/dJKTk7l08RKfNP3E/HE080Fw4vgJwsLCKOBQAIcCDhkCJMUci+Xe+CX9+7nSvmNHkwWUAI4eOcKmTRs5cOBQrtdGRkTitcELgE8//9RgoZS4uDj693Pl2dMnBpWL0+p0qLUaJn76GWPGjDVoDDNmTOHs2TOMGjmCb2fOok1bwxxZ6btOXhs3GVzGUBAEZkyfRuj5UGbP/p7qNbLP69XpdBw5fJh5v/zErFnf0dfFxeB7MRRREPn999+5cP4CDgUcGDVqFFbWeQ/zNGPm/0lISAjbt20lLOwuL168wNLSihIlSuDi4krbdu1MWltOnDAeewcHJk02znkMcC8sjGFDB3Pk6DH8du4iIT6BatWq4dIvf5/pqKgoFi6Yz759e7GQy/VGsFSCKL5MIZHpI+F69+nLlClfZKrKoUpTERYWxu3btwkPD2fipxMzUs1OHD/Bndt3qFuvLjVr1jT6HfGqMZ6UlMTihZnrsHfr3o169fXlJf9+/DcXLlzAyckJJycnHJ0cDY4CMGM4ZuvqJV27dqNokaKsWbOafi59aNLkE+rUrYudnR1paXpFwSOHD+Ho6MjoMWNxc3PPduH6/Nlz4uPjsbG1yRTeKQgC3t5ejB03wShDGmD4iJH8ceIE/v57cHf3MKqtra0tLVq2MKoN6BUR/XbuYMeO7URHR+tFvaytqVatGsOGj6B58xbmXW4z7yTObZxxbpO3Worx8XEUKVIkT30UKVqEhPh4g651Ku6kVxkXBB5HPjbImE5JSWFAf8MNaSCj/uayX5eiVCoZOnSYQfMzY8ZYPvmkKYsWL2HK5EmkpqbStVv3HK8P8N/Dr0uX8Ouy5QYb0gBffTmDkNAQVqxcnSmV43VkMhmdOnemYKGCfDl9GlKZlN69+xg8Tm7odDoC/AO4cf0GAMWKFTOHXpr5T9CoUSMaNcpa7cJUYmNiOHbsKJu3mlZJp+JHH1G9Rg32799P3759iYiIyPc0xqioKFxd+hIbE41CbqH/dmYThKpFS8CePZwPDWHb9p0ZBrXSUkn1GtWpXqN6Ju0GURC5fPkySYlJHDxwkCOHj1Cvfj06d+ls8Nxe3dXWp6D0JiIiguioaKKjozOtXSIjI7l08VLGn/W6M9Mz8rtN3SU3kxmzMf0KjRo3plHjxkRERLBz5w5u3rhBUlISVlZWODo6snrNulxfKKGhoVw4f4GPKn3EILdBGcf//OMPUlJSaNvO8FDTV+nr4sKWzb5GG9OvIugEnj17RvESbw49S0lJYc6cHwgM8MdCboEoCEilUiykMtSpaVw4f57Q0FAKFy7MF19Mo3uPHibPx4yZdxWdTsgwPE1FJpOh0+kMulYul9OpcyccHBwoXcYwZ9vu3bt4+sRwQzpjLJkMURRZvGgh/fr1NzraxYwZQ2nfvgO/LlvOpM8/Y9u2rfTq3YeOHTtlVKpITEjgwIEDBATsITYmhlWr19KsWTOD+79w4TwHDvzOlm07cjSkX6Vx4yZ88+1M5s6ZQ+fOXfJNmC8uNo67d+8CULdeXbp165bJmA4NDeXWrb9ITk7G2sqKMmXK0qJlS3PEmJkPjojISKxePgOmUqNGTR6Gh1OqdClKlc67WOGrREVF0c+1L7GxMSjkFjl+X+UyOTKpyOPISAYO6JfJoE4nU3sJ9OrVi0uXLvHXX3+h0+ooVKiQyXNVKBTUrFXzjcKldvZ2VKhQgSdPn5CakopDAYdMQmnbt21HrVZTrXo1atSs8VZK+X4ImN/i2VC6dGmmGKngm86bHrqdfjvo1r1HjnVqc6JT5y6sXLGcS5cuUrduPaPbR0ZGsnvXblJepDBp8qRshdFSUlLw9HDnzp3br3jiMhsUcvThLfGxsUyfPpWk5CQGDXIz6Z7MmHlXsbe3Iz4+IU99JMQnYG/vkPuFL0kXLzEUrw3rwUC14texkMtR67QEBPjj5mYup2Hm7dG6tTNBp8+yb99etm7ZzKIF81EqlYiiiFqtpnr1GoweNYau3boZrXy72deXTp27ZJuWlRPObdqydu0a9u3di2u/fka1fRNFihZh4MCB3Lt3jzZt2wD61LDAwADWr1vH338/xvLlfUskEjRaLdbW1ngOHsLAAQPNpaHMfDC8eJGcZyeujY0N0dFRmY5p1BouXLhA48aN8xQVMm3qF8REx+RYNu5VJBIJFjI5kZGRzJ41k8VLluZ4bfkK5SlfoTxpqWncvHmT2rX/qfxz4/oNHj16hLOzc7brdGP5+OOP+fjjjwF9xOmrZUBTUlK4d/8eoiDy6NEjjh87TucunXOsRGQme8x7+/lMRijHa6nojyMjqVK1qsn9WlpaUrpMGb3IkAkUKlSIp
KQkNBoNd+7cyXI+JSWFwZ4e3L1zB4VMnuOunEQiQWGhQCm3YM4P37N16xaT5mTGzL+NVqslLi6OpKSkLM/oq9Sv34A///wjT2P9+ecJ6tevn6c+3sSZM6d5/vw5FnLTnHMAoiDoDXIzpKamEhsTg1plem1kM2/GxsaGAQMGsnffb5w8dZpdu/3ZvSeAU0Fn8A8IxMXV1WhDOjo6mkOHDpqU+yyRSOjTpy+bNxsucpYdGo2GmOiYjD+XLVc2w5COj4+nT++e/DR3Ds+fPcVaaYlMIkUulSGTSFHKLVClprJuzWo6dmxPWFhYnuZixsz7grW1DSkpKXnq48WLF9ja/CP8lZaaxqqVqzh86DAhISEm9xsREcHZs2ewkMmMclRLJBLkEikHDx4gNiYm9waApZUl9erXQybXr7d1Oh3Hjh4jNCSUdWvX8ezpM5Pu4U3Y29tnEmlTKpS4uLhQrXo15BZy1Gp1ll11M4ZhNqbzmfSQsddfFC9SUrDOY705G2sbXrx4YVJba2trypcrD8Ctv25lOb9i+TJu375l1AtELpejlFvw/Xezefgw3KR5mTHztklJSWH79m307NGNah9XoXGjBtSvV4fq1aoyftxYTp8+ncWwdnN353TQKZ49M+1jlpAQz5HDh/Hw9MyPW8jC2TNnkEmN+9i/jkJuQUREBM+fP8/Hmb0/hN29y8yZ31K7Vg1q16pBkyaNqFGjGo0bNWDJ4kU8ffr0/z3F/yROTk5UqVKFKlWqGByanR3Xrl3Dyak4FSpUNKl9y1at+euvv1CZ6EARBZE9u/ewfv16wsMzf/8SEhIYOKAffz9+jIVMjtJCkeVZlUgkWMgtsJDJUaWlMaB/P+7du2fSXMyYeZ8oXboUL168IOLRI5P7uHnzJmXK/hMmbmllSclSep2i48eP8yLZtLWyr68PlkqlSeXgZDIZSoWS7Tu2mzS2KIjUrlMbqUxKXFwcGzZseKtONplcxsfVPsbF1YXPPvuMzl06U658ubc23n8ZszGdzxQvUZyPq31MpUqVMh23scknT9xrEvzGUPVj/c542L0wNGpNxnG1SsW2bVuRIjF6cS6Xy1Eqlfj45M3Db8bM2+DXX5fSrGkTvDd60blLV/x2+3PoyDF+P3iYdRs2UqhwYT77dALt27fldFBQRrty5crTuEkTdvntNGncAH9/qn78MdWrZ68snB379u5j5YqVnA46neu1cXFxCIJh+dhvQiKRIJVKSUzIPpw9Ojqaa9euEhx8juvXrxEbG5un8UwlMTGRmzduEBx8jitXruTZyH38+DEuffvQrVsXAv39kYpgZ22DvY0tdtY2qFJT8d64kdatWjB2zOhMYXFm3h2SEhPztIuSLvKXmJhoUvsDBw5w+9ZtVGkqIiMyR4yNHDE8w5A25JuqtFCgVqUxcEB/0tLSTJqPGTP/T/7++28WLVpI/36udO7cke7duuDp4c7mzb5Z3qGFCxehTZu27Nmz26SxHjy4z9Url3Fxcc10vFOnTlgoLNCoNVy6dOkNrXNmb2AA5KHGkSgI+Jt4X3ILOa1at2LwkMHY2tmiVCrz5HA0BhtbGxo2apjx59iYWEKCDdvhf/z4MQvmz6Nnj244t25Ju7Zt6NfPJdt/+/8q5pzpfKZq1apUzSacu1y5cly/ft3gEiGvk5yczIMHDyhb1nTBhspVKnPmzBmKFC5C8otkCioKAvDb77+h1WpRyEz7OUhF2OW3ky++mGp0qJ4ZM28DURQzarr/Mm8B9bIJty5QoABVqlRhzNhx+O/Zzdixo1m6dBnObfRhmhMnfsqQwZ5UqFCBzl26Gjz2HydO4L3Ri1Wr1hg158ePHxMdpVfO/38hiiKng4LYsGE9Z86cRi6XI5PJ0Gq16HQ6Wrd2Ztjw4TRu3OStz+Xq1av4+Hhz4Pffkcvl2NrakZqayosXyTRq1BgPT0/atm2HzAihuEcPH9K/vysvkpOxsbTKsvuQ7mAAkFpaceZ0EIMG9mfzlm3Y29vn6/2ZyRsKhQK12vSwfJUqLaMfYzl75iznQ88DUK9+PZq3aJ5x7saN61y7dhVrSyujnNMKuQUqVRp79wbSr19/o+dkxsz/g7t37zB//nyCTp2kYcNGdOzUiQIFC6LT6nj27Cnbtm5h/rx59OzZky+mTst4j3p4ejJu7FhGjR5j9Lpxl58fHTt2ylJxw8bWhpo1a3L71m2T9YkSExOxtDC9dJRUKiEuLu6N5zUaDUmJiVgoFG+sX1+6dGlGjR5FUmJShlhjfpKUlIRarcLOzj7b91/Eowh8fHzQaXU4FXeiTJky2fZz585tfpw7l7Nnz2BlaYUo6DLu5/nTp9y+dYuff/qRXr16M33Gl2/lXt4VzMa0iSQnJxMQ4M+xo0eIi4tHp9Nib+9Aw4YNGTBgIE7FMytmDxgwkIkTxjN69BiUJqiH/rZ/P+UrlKdmzVomz9nOzo6Jn07Mctx/zx5EQUAiNy1kVC6Xo9ZqOXXqJB06dDR5fu86giBw+9Zt4uLiSE1NRafTERUVxfXrV7l58wbJycnY2dlRrJgjrv360aZNW6MW+mbyB1EU+eqrLzkdFMTK1WszlajLDqVSyYCBgyhUuDCffjqBBQsX0bFjJ+rVq8/yFSuZMH4cSCR07twl17H/OHGC72bPZP6ChTRvYXhJOlEUM3Z+CxsgRFSwYEGTwtBeH1MQBOxf7u6Fhz9g2NAhPH/+HCkSbK2sM8ZQyi3QCQJnT58mKOgUpUuXxmvjpkz5V/lFfHw848eN4dq1a3To2IkNXt589EqkT0xMNAH+/syaNZO5c35g+YqVBr0XHzx4wID+rqSmpOSq0Aogk0qRSiQ8DA+nfz8Xtm3faXD9bzNvH0cnR548eYJKpUKpVBrd/mH4QywtLU1a4FWsWJFz9ucoXrw4Xbpmfi9s9PJCIbdAamSUl0QiQdQJbFi/zmxMm3kvuHHjOkMGe9LauQ3bd/pRokTWb+2AgYO4du0qq1etxMPDDW9vHwoWLEiTJp9QoUIFZn37DXN/+tlg4/fI4cP8/tt+tm7LPpS6bbu2dO7S2eS1lyAIkKeyrxK0Wm2mIyqViv3797Fh/XrCwu5mHJfL5XTt2o2hw4ZTrVq1TG1sbW0zRaLevn2btNQ0k8XBbt++ja+vD/v2BpKamppxvGrVqrh7eNKjR8+MNNUSJUtQsGBBoqOiORN0hjKDshrT169fw91tEDqt9p+1QjZ/51ok7N0byKVLF9mydft/9hsqEXNS4TGThdiYGBYvWcTewL2UKlWSjp064+joiFQqIyEhnpMn/+TC+fO0bu3MmLHjqFnzH7n6jh3a0X/AQHr07GXUmKIoMrC/K2PGjMPF1TX3BkbSqWMHnjx+jMJETx6AThSZOn06AwcOyva8KIqkpaX933euU1NTsbS0zHUhrVapuf/gPoJOoFp1/UtOFEV+nPsjOm3m8Nrdu3YA+vdvw4ZNcHIqzv0H94h8HEH//gMYPWas2aj+F/nzzz+YMnkS3j6bjVb5PXDgd5YuXkzQ6TMZH5aTJ/9k3Ngx1Ktfn74urjRt2izT
70cURUJDQtjlt5Nz586yaPESOnUyvGYk6HMsly7WK4COGDmCEiVzNlLPnj3DiOHDsFIoTc6bVmnUFHN05OixE4SHP6B/P8MMTVEU0Wi12DnYs9Nvd74a1PHx8Xh6uFG4cBG+nTU7x91grVaLzyZvdmzfhvcmn1wNag8PNy5fvIilwjjDSxRF1FotAwYN4quvvjaqrRnjEASBU6dOcuzoUeJf1mgvUKAAzm3a0qpVq0wOJFEUad++LZ6eg+nStZvRY303exYODg7MmTPXpLkmJSZhaWmJheKf76ZaraZO7ZooLRQmldYTRZEXaans8Q/MUOD9LxMdHc2uXX7cvnWLxMRE7OzsqFixIi6urhQvnv+OOjP5x+XLlxk+bAgDB7kxZOiwXK9Xq9V889WXPHv2DB9fXwoXLkJcXByeHm4ULebI3B9/ytWgPnrkCHPnfM+Spb/S1sQoz9yoW6cWCAJyEyM1NVottvZ2BJ0+C8CuXX7MnfODvkymIGLxUiFcREQQRARRQK3RUK1adVauWp1tWPdff/2F304/ZDIZw4cPx6m44euamJhoPv/sMy5evECbtu3o1as3ZcqWRaFQkJiYyOmgU+zevYu42DgmT5mSUZ3nfOh5fv/tdxQKBdOmT8tUi/rypUt4erqDIKI0ILJHFEU0Oi2OTsXZvn3Hf7JygdmYNoKoqCj9Qq9IEYaPGJnt4k0QBFavWAXAmbNB/PTzzxmlrPz8djLvl5/5dfkKKlWqbPC4ixYu4NTJPzl67IRJHvhXEbMppdOurTPReVQG1okCn0+egqfn4IxjsTExbN+xnS2+vkTHRCOKIlKpFCcnJwYPGYqLi+tbD/tITk5mz+5dbNu+jfAHD9Dp9GEoTk7Fce3XjwEDBmYKFQoPD+fypcv8dfMvNBoNhYsUZvyE8RnnV69aTVxsLH///XeGaFXIuTMZ/9+ydRscHPSeN0EQiHwcgVKpYMnSX00OOzJjHEOHeFLxo0qMGTvOpPZuA/szZOiwTI6hp0+esGXrFnbu2I61tTWVK1fB2sZaX1oiLIz4+ARcXF1xc3OndGnD6kS/yqOHj/De6A3A9BnTUVrm/py3cW5FTFQUChND0tQ6LTNmfEmzZs1xde1LWkqqwQ61Vw3qXbv2ZInEMYWEhAQ83AdRuHARfvz5F4OfF59N3mzZ7IuP7+Y35qjfv3+fLp07ZhvabQharRZBAmfPhZjrcL4FNBoNGzd6sW3rFl68SKFN2zYUK6ZfVEY9f87x48ewsrJiwICBDBs2HMXL76CX1wYCAvzx2rjJqPFiY2Pp3bM7u/cEZJuWlVO7nGrCPn/+nObNPsHeJvvwTUPQCDqWLP2V1q2dTWr/PnDz5k2WL1vK8ePHsbS0RKfR6g0MUUQml5GmUtG0aTMmfvqpSaVAzbxdRFGkQ/t2tGvfnuEjRhrcTqvVMnXKZEqVKsVPP/8C6PU/PNzdUKlVuLr2o3OXrlk2XS5dvIif307OnA5i6a/LDDKkdTodcbFxFClaJNdrX2XkiOEEnzuL0tTvqlZD+w4dWbBwEdu3b+P772ajkMuR56CfIIgCWp2OAgULstNvdxaDOjUllTWr15CYmEihQoUYOXqkQbZATEw07m6DKFW6DFOnTc/x3RV06hSzZ33L55MmM2TIUGJjYlm+bDlFixVl4KCBGTvKoijSqmUL4mJjjHJMpzulO3fpwi/z5hvc7n3BbEwbyNMnT3D3cKNy5SrMnDUbufzNXquN671ITU3FxtYGX19v1q7bQKNGjQBYumQxmzf7smz5ykyhi29i8aKF/HHiOFu2bqPcSzXuvLA3cC9Xr1yleo3q9O7TG4DevXpyP+yuyYtyAK0oMGv2d/Tq1Ru1SsXMmd8SGBiAUqFEFARkMhkS9LoOOp0OiVSKVqdl4MBBfPnV13kOWX0dURRZtGghvj6bKFmqFC4urtSqXQcbGxtSU1K4G3aXPbt2ce3aVbp378Hs775Hp9OxeOFifZgPgESvPDt02NCMhb2X1wYWzJ+H0sIiW8+llZUVZcuVp2y5CigUCjRqNUePH6FatWqsWr3GpPw8M4YTdvcuPXp0Y7d/IEWLFjWpj4AAf3bt3MGBg4ezfPzUKhVHjx0l4lEEyclJ2NjaUrJESdp36JCxk20qWo2+bFfRYobNe/NmX+b/8ovRJTxA7z0XgHPBIUyZPImgU6ewNPK3KYoiKq2GLl265svHccniRfx58k9WrlpjtONp+a9LuXXrFtt3ZC8Y9+03X7M3MBBFDu/t13ZWrQAAIABJREFU3FDrtHwxdRoeHm9Hof1DRa1SMX78OB4+DGfIsOE4O7fJ8u+v0Wj4848/2LhxAyVKlGDVqjVYWlqSmJhIG+dWDHJzx3PwEMPGU6v5csY0EGGTj6/B87xw/gIHDhygTZs2fNL0k2yfuYiICNq2aZ0nY1orCvz88zw6duqUcez27dts9NrA0aNHefEiGYlEgp2dHV26dmPw4MH5sjZ4E2q1moMHDrBly2bCwu7y4sULrK2tKVeuPAMHDqJ7jx5GvfuCg88xfNgwpBKQy2TIpFl38AVBQKPVohV0LFu+4q3tQpoxjRMnTjB92lT8A/cavcFz+/ZtRo8czsmTQRk7lOn12H18NvH0yRNq1a6DnZ0dKpWKh+EPeP78OX37uuDu4WHQb/1e2D22bdsGInz97ddGPYtnzpxm5IjhJkV9CaJISloqu3f7c+PmDWbN/Bal3CJHeyGd9N3bAgULsmu3f5b1S8SjCLy9vREFkQYNG2RJL3mduLg43AYNoHSZMnz3/RyD5nD1yhUmT/qMKVO+wMNzMMnJyVlEj08cP87EieOxzKZCQW7odDrSNGqCTp/N0bB/H3mvjemoqChSU1Kxs7OjYKGCb3Ws4cOGolAq+O77Obkafju2bScmOoZGjRtx7/49Nnl7cSroTMZLZ+mSxWza5E3vPn3p09clixdKEATOnDnNzh3beRgenm+GNMAuv13cvHGTmrVqZhjTP/30Izu2bcXCxLAWQRB4kZbKb78fpHSpUowcOYJLly4il0hzDG/W6nToBIE27dqyaNGSfDOo03Nmz5wO4ttZs6lTp+4br71y5QrLli6hQAEHVq9ZR2BAILGxsdStW5eaNWtiY2uTce3NGzfo06cXVkrLXEP4ZDIZpUuX5cWLZJ49f4ZGp2XsuPGMNXG31IxhrFu3ltNBQSxYtNjkPlRpabRt05qDhw6/1UVqXklJSaFvn15GqQaD/rlTazVMmz6DTp0606plc6wtrZCZsmP7sq8zZ4PzpKysVqtp1bI5U6fPoGXLVka3j4+Pp1ePbvjt2pNteGyTxg1JS0nFIg/GtEqtpkatWm/M1TNjPGlpaYwZPZLYuDiWLF2Wq8hbUlISkz//DFtbW9asXYe1tTXXrl1lyGBP3D088XglMio7NBoNX86YRlxsLJt8NhucvxcbG8uaVWvQaDRUqVKF/gOzz2mOj4+nUcP62NnYIJWY9j1TaTWsXrOWpk2b8ejhQyZPnsT169dQKhRIkGTkYguiiCjR52M2atSYRYuXZBFkyis7tm9
jydIlKCws6NPXhbr16mNtbU1qago3rl/PCA8dPXoMI0eNyrW/oFOnGD16JHKp1CDnvUarQa3VsmDhIroYIQJp5u0y2NODKlWrMmr0GJPajxs7mpYtWzFhQlYNn5DgYG7cuEFiUiKWSkscnRxp374DNjY22fSUPQ/DH7LJWx+tYmik16s4t25JTFS0QSHMr5KmUlHho4ps2bqdT5o0QiaRGvXNSd+97evqyqxZs7OcPx10mgcPHtC7d+9Ma9PsWLBgPsHB51i2fKVBhnQ6oaEhTPtiCidPnaZgwax21YD+rty4ft3odKl0NDodI0ePZvz4CSa1f1d5r0tjHfz9IN4bvfNUoN0Q7t27x9mzZ/j0088NMvjSH/oXL17Qr39/7B0c2BsYmHH+s88nsWTpr4Q/eIBr395M+2IKvy5dwprVq1gw/xf693Ph5x/n0rBhQwIC9+Xrgj697Marnn83N3fSVKp/dmSNRK3RUKdOHb0C4agRXL50EYVMnmuesFwmw0Im4/jRY0ye/LnJ47+KKIp8/fVXnD1zmuUrV7/RkNbpdJw6eYrTJ4OYNfs7EhISGTN6JF26dmHsuLE0+aRJlpfVunVrURiYC6fT6QgPv09U1HOkEglS9CJygQGBubY1YzpxcXEUzuOCUvlSlCg+Lj6fZvV2sLa2Ztv2nRQvUQKNTpulVnZ2pBu/kyZNZvDgIWzbugVLpdIkQxpePsNyucklxNI5dPAgFhYKmjc3XLTtVQoUKEDbdu3w8ck+3DcpKcloQajXkUgkxMX9f8qD/VdZs3oVUdHR/LpshUFq6XZ2diz5dRkJCfGsXLkCgJo1a7HR24fNvj58MWUSwcHnsjwLqrQ09u0NZOgQT6MNaYDjR4+j0WiwtrGmW48352cXKFCA4sWLo9WaVrZOp9Oh0WioWvVjHj16RL9+Lty5fQsbSysUcgssXqrry14+dwqZHBtLK65cvkQ/175ER0ebNG52bNzoxc8//8SkyVPw2+2Pm7sH1apVo1y5cnz8cTVcXPuxddsOZn/3HevWrWXRooU59peamsqECeMMNqQBLOQWKORyvpgymZiY/Ls3M6YjCAJnz56hQ8dOuV/8Bjp07ETQqVPZnmvUuDFDhw3js88+Z/SYMfTq1dsoQxr0a2/Qb2oolMZHA37/wxy0gg61EZU1VBo1ogRmzpqNv/8epFLjDGnQf2NkUgm7d/llW0q3abOmuLm75WpIq1Uq/HbuwMNjsFGGNEDDho2oVLkyftl803U6HRcvXsxTSqgg6Dh29IjJ7d9V3mtjOt2wfdub65u8N9LauQ1FixUz6HoHBwfsHRz+x95ZR0dxtXH4mdU4CfbhULy4a9HiDlHcrViRunuhUKC0RYp7SHCH4K6lUKw4JARCgHiyuyPfH0tSQrLJSoI1zzk9p2TuzNxNdube134ver05TcTX15+FCxekGtO4cRPmzV/Alq3bqF69OigKMdHRuLu58+6Ysezff5CxY8fbnaqaHqIocuf2HQAKPlXjWKxYMerWrYdRtL0lj6IoIAgMHDSYVasCOX36dIa1Ic+iUqnQqtWE7NzJzh07bL7/s+zdu5edO7Yz47eZqT7j00RHRbE6KJhzf50F4ObNW0yd/gvh4eGsWhWY7jmRkZFs27YVjZ1Gh5ubO9Wr1eSvM3+xf99+u66RgxWkowlgD4KgQnGk2aQNPHz4kMuXL2M0GG0+19PTkxUrV1GkaFGSTEaSjGmdYsn1zSZJJNGQxNix4xg02BxF2rx5E4rs2OeUZZmNGzc4dI2dITto07atQ9kp7dt3JGRn+u+QrFojXuFErpcOo8HAihXLGTx4aJpUwoxwdXVlyNBhBK5ckeIcrlKlChs3baHCmxX44rPPCPDz4dNPPuLbb77mww/ep3OnDqxcsZyePXqyfEWgTYa0yWgi4kEEAM2aNct0Y9+v/wAElX3vIEmWadOmLQkJCfj5epMYn4Beq8vwuTCvoRoeRkbi5+vNo4cP7br30yxZvIjp06YyZep0mjd/2+L9BUGgXv0GzPj1N1auWMH0aZYzgtasWY2iKDaXk2k1WnRaLcuWLbPpvByyh+S+7I6k6ebOnZuo6OxzVieLF3p4eNi1H2jUqDG//TYTkyRiNBkzfO8rioLBaERWFObPX0jNmrWYP28u2BkcSt4/r1u3Ns0xQRBSfZ4b12+ke43NWzbj4upK/QYN7JqDj48vy5ct5cjhI8z8fSZr15jnEh0dDeCQY1olCCl/n9eJV9uYfqIu96wMfVazY8d2OnbsZPX4Rk0a06tPL+o1qA9Au/btuXbtKjdvpv3ilyjxBqNGj+Hrb75l4qSf+OTTz+jcpUuKwEpWEhsbazbOBSjzTL32uPETUACjyfoNfXKNR+kyZWje/G3zC0RRbH7QVCoVAjB37hybzkuPpUsW07lLV4uG9OPHj1kdtJrIB2Yvd7Xq1WjQsAFubm707NWb5cuWpvvi3LtnD056J7tVuQ1JSdwLv/vkWnu5eOGiXdfJIWM8PT0djmAYjUZiYqLx9Mze0pFkzv51lsAVgfwx5w+7zvfy8mLjpi3MmPEbVatVIz4pEVGRkRQFSVEwSiKoBAJ69GDX7r0phjSYNx0qOzf+yagEVYZ9Na3h8aPH5LfSWWmJ/PnzExMTk+7z6+bmjuygIawoCl5er1ed14tk48YNuLi40vCttzIf/Ax169XHyyt3qg1noUKFeP+DDzl46DDvvjuOqlWrUbxYMerWrcv0X35l2/ad9O7T1+Yol1anZdjwYXj7eFOterVMx/v4+JqdVzbuS2RZxiiaGDBwIFMm/0RCfLzVkS1BENCqNURGRjJ7jm397Z/l8ePHTJz4Iz9MnESVKta14ixVujTTZ8xg9uxZ3L51K81xRVH4Y84cuw0MFIXFixZiNNrucMwha0nOzHNk3y2Kol1q99ZSt15dAnoEUK9+Pbuv0ax5c3799Xc0Oh1GSTQbzLKMoigpbSWTjAaMoglnVxfmzptPnbp1iYiIICwszLHorSSxf99ei8dFk0jwqmCWLF7C6VOn0xw/cvgwzZo1tzuw0Kz524SHh3PvXjgPIh7w+JF5fU+OcjsSaFAUbI6Wvwq80p8oJZ06Lj7b7qEoClFRUfyvQFq5emtxdnbGwyMXjx49eqE1mF5eXgwaMoiEhIQ0irRVq1Zl7rz5DBzQH0zGTL3HyYZ0yVKlWLJ0GSdOHOf+/fs421lHodNoOXfuHFeu/GOT0vnT3L51iyNHDjNuwnsWx7i6uuLp5cWjhw9p1aYVxYoXTznWomUrfp0xg3379qZRUjWndzq2GT958jh58ubFxcWVdWvX4eXlZVOLgxwyp3GTpkyfPo1HDx/a3X5h+/ZtFClalBIlSmTx7NLn2tVrAJR4w/77qVQqmjVvTrPmzbl58wYXLlwgJiYGnU6Hl1du6tevn65IkCRJgOORfEcdmqJoQu3gAqvRalEUBVEU0whYNW/enK1bNjt0fUGtolXr1g5dI4d/OXToEC1atrRrwycIAi1atuTQwYMEBHRPdczJyYmOnax3fluDSqWiYqWKVo11d3
fn/Q8+ZOKPPyAIglVGgyzLmCSRDh06UrhwEbZt22qzAJIgCKgQCFy5grFjx9ktiLhqVSDlypWndu06Np1XpkxZGjRsyLJlS/nomRZy9+6Fc/duGB6u1mcgPI1GrSEhIYF//rlMpUqVMz8hh2zDzd0dJycnQkND7Y5Oh965k6VZl8+iVqspW9a+feTTNH/7bY4cOcaWrVuY+8cc/vnnnxTVeYBKlSozeMgQWrZslWIgxsTEmANEDkRvBUHg0SPLJUUarSZlzd2xfQclS5bE0+vfbJuYmBiKFkvbG9patFotbm5uJCSY+1EnZ9q4u7uj0WiRZYV0dAOtQlZkhx3nLyOvdGS6YMGClCxVkoKFHG/LkhGKoiA4uOFUqQRkB9MpswpLrV3q1avP3HnzkRSzSIDRZEoT5TF744wkmYyUKl2GJUuX4+bmxo7t21E9k4JiCyqVCie9nh0OpHqH7AqhRo2a6fbpS0an09Gxc0e6dOuaypBOPtayVat0082zIr1TlmWuXb+Km7sbxUsUz1H2zgbKly9P9Ro1WLNmtd3XCApcSd8+fbMkXTwzEhISuPskY6FU6VJZcs0SJd6gXbv2BAR0p1s3b5o1a2ZxY+3q6ubwd1tRFNzdM693zQh3D4+U9EF7iY6OxtnZOV0l8EGDBqdEFuxBlERkWcbXx9ehOebwL9HR0ekK3FhL7ty5U9IOs4sL5y9gMtpe/tS3bz/GT3gPg8mYaYRakiRMkkiLVq2YOOknVgWuRK/T2ZUFpVGrUWSFTRs32nwumNeo5cuW4e1r3/fc28eX1auDSUxMTPXz6OgYc0cPO9+pgiCg1WqJjnbsHZFD1tC+Q0fWrV1j17myLLNh/To6d+maxbMyI9mpV2AJnV5Ply5d2bR5K8dPnGL7jp3sDNnFiZOnWbN2HW3btksVaVWr1SnRa7tRyNS53KFjB5ydnTEajaxfvz7V/dRq9RNHuf1IkpTyHCeXxQiCQPv27ZEV+9ZRRVEQVCr8/AIcmtvLyCttTNeuU5tevXvRuEnjbLuHIAh4eHgQaUPq6O1bt1i6aAkrli4HzGmj0dHRNtVpZSVxsXGsWrmK6KjMNx716zfg8JGjjBozBvdcHiQZjSkpo6IsEZ+USI2aNZk5azar16xNqXWLiIhweJ6iKDqULvro0SOLHq/QO6Ep/6/Vai32HsyfP3+6c8jl6QkOGleyIuPh4cHwd4bTo2cPcufJSRnNDvr3H8Ca1avtEuPZvSuE+/fv4/2cjKbr166DYnYmvfHG889aqVmzpsPpzwhQp05thy5RrWo1Dh886NA1Dh08QJUqVdM9VrpMGarXqGG3LoQkK/j4+uHm7u7QHHP4F61Wa5PAz7MYjcZsdUjGxcURHBzML9N/4cGDBzafP3DgID786GMUgZQ0UUmWkZ/8ZzSZMEkSiUYD3bx9mDz5Z1QqFYcOHUSW7NusCoKALEkcO3bUrvPDw8MJD79LkyZN7Tq/du06SJLElStXUv1cq9U6LDAqy3KOA/oloX///uwK2WlXSdWBA/sxGk3Zos5uSDLwyy+/ELIzJEVPISvx9PSkRIk3KF68hMXuFV5eXg4b07KikC9vxpF7N3c32ndonzKvp50IXrlzc//ePbvvHxsbS0JCAuXKlaNO3TqpykIHDR5it2NalEScnJxo2y7jtl6vIq+0Mf28aNasOVs3W58iaDKJxMTEpHjNd2zfTuEiRShVKmsiT7agKArr1q3j0qVLLJi/wCqvnYeHBwMHDmL/gUMsWLSY777/gU8//5yfpvzMjp27WLR4CY0aNU7lZRalrKlbF+3Y7CYjSxJqTVpv/rWr19iwbj0hO0Iy9dap1ep0P0vjxo1JTExEsnNDoCgKKpWatm3b4ezsnOZYDllHs2bNadiwISOGD+X+/ftWn7dv316+/eZrvvjyK4vZG1lN4SKFqVe/HuXLl7e5X2dW0K//AEQrlcDTQ5ZlDEYjffr2d2ge/gEB/P33Oa5dvWrX+aIosnbtGnr17m1xzNdff4NOr8dgsy6ERP7/5WfkyNerlceLpmDBgty4ft3u82/cuM7/CmRfmczFCxdBAYPRgGcu+xzhffr05eixE3z8yacULFyI+MQEYhPiiU2Ix9XdjaHDh3Po8FG+/OrrFJGv6OjobE0RzYiYmBicnJwcehflypWL2GeyTJJTeu1dP2VFxmg0Zph1lsPzo2zZctStW4/vvv3Wpjr2e/fuMWP6NPr06ZMtjpGDBw8SGxPLsWPHMBgMWX59a/D09KRy5Sp2OW4hOXor0L6D5a4ByVSoWIFBQwbRuUtnNNp/I9mtWrUiJGSn3b+DzZs2UqlSZRo1bkSbtm2oVLlSyrFy5cpRq1YtTLJk075Bls1BuYEDB6WbPfaq81oY04mJiXZ7cq2hX/8B7Nix3eqo6bMLYdCq55c2+iwhO0PM0S+gWfNm6RqbllCpVNSuXZt27drTrZs3LVq0pJiFOow8efI4bBSq1WqHRJ88cuVKt53RyRMnAIiPj8s0dS4qKopcHmk9jgULFqJZs2Z2Ow3EJ4Z+p6dq+WJjYlmzeg1bNm+x65o5pI9KpWLKz1OpVas27wwbwoULFzIcL4oi69au4cvPP2PipJ/o1Knzc5qp2YvdqnUrfPx8nts9n6ZatWqUKPGG3RFCo8lE1apVKV26tEPzyJMnL23atCUw0L4ezrt370Kr0dCiRUuLY8qUKcuqoNU4OTub25hk8r5KrmMtVLgQq4JWkydP1vbw/a/j7ePDrpCdRNuh6hsTE8P2bduzNe0++b1RtkxZtDr7N3/Ozs74+wewfUcIly5f4c8zZ7l46R8OHznG8OHvpKk7NRsZDgj8oNhdL63T6TA5kC0A5neC9hlDycPDw6H102gyUblyFYv7jxyePz9PnUZsbAzvTRiHwYoocFhYGO8MG0r9Bg3t7k+dETExMRw7egyAOnXqWIwcPw8GDR4MT9VW24JJFHF1dc1wLXuaQoUKpfy/JErEx8fTqFFjcnl62tUhR1EU1qxenaFj+tffZlK0aFFMknUGtSTLGCWRdu07MHjIUJvn9CrwShvTSYlJfP/d9/w08SdupKOUnVVUqFCBKlWqMmf2TKvGJ0dX1Wo1W7duITw8nG7ez3+zfPTIUY4cPgJA5SqVqVot/RTIrKBe3fqontSK2IOiKIiSZLPoydPUrl2bEyeOp/QYBHPdx8NIc6uQypkokyqKwt69e6hTp266xwcPGYooSTbXoiiKgoxCr169U6m0//XXX/x97m9OnzpNbEysTdfMIWNUKhU/TpxEu/btGTF8KAMH9GPTxo08evQIURQxJCVx5/Zt5syeRbcunVi0cAHTp8+gbdvXL/0oMyZMeA9RlmxWHzaJJiRFZuy48Vkyj8FDhrIrJIQ1q4NtOu/C+fNMnjSRESNGZuosK1WqFEHBqylWvDjxSYkYRRPikw1B8n/JbcTikxKpXqMmKwODHGoDk0P6VKpUmQoVKrJ2je21lxvWr6Ns2TJUq149G2YGRoORe+HmNMkKFStk2XUFQcDV1TXD72nBQoUcKr0QBMFiN4vMy
Js3L7IsExYWZtf5MTExRD1+TP78aVNUBw0emvKs2UJy+82hw7LeAMvBfjw9PVm8eCkmo5EB/fuybt3aNLXyAA8iIpj7xxwGD+xP8+bN+f77HxxqgWiJUydPIYoiTs5ONGrUKMuvbwstW7bCxcXFZie1oigoAvTp289mzYS4uDgWLVrE8mXLkUSJXr16s2TxQpvbUK1ZvZq4uFjKlinLlX+upLvf9fT0ZGVgEMVLlMCYQfswWZZJMpgVz7t27cqPP07Mlr/9y4CgvOI5pr//+juRkZE0aNiAFi1bZNt9bty4Qe9ePXirUSPGT3g/wyjzubPnOLBvP1qtlhUrlzJ12nTefjv75maJ6KhoFi9eTN68efHz97O7rZM1GI1G6terg2QSbW5UD+aWXF558rLXwR7MnTq2p137Dvj4+gHmljsrlplr17v37IFXbsuR7+PHjvH5Z59y8NBhi579n36axKKFC9CpNVb9PpPTRMuULcuixUtStWUxGU38POVnDAYDTZs1zdba//8y0dHRrF4dzIrly7l162aqYw3feouePXvRrFnzdP+ecXFxrF2zmqDgIO6GhT1RwnelaNGi+Pn706VL1zRp+9agKAqxsbF4eDgm3JVVrF2zmk8++RidRmvV82sSTRhFkanTptO6dZssm8fxY8cYPHgQI0aOtMoBeeHCBd4dPYphw4czxEaP97lzZ1m8aBGbN29KpUbu6uqKr58/PXv2pHjx56Po/l9l//59jBzxDt99/6PVPVGPHTvKRx98wLRp02n+9tvZNjfRJHLl6hVKlyrtUGTaVvbt28uId4bjpNXZnM0mKwoJSYmsXr2WChWtUx9/liGDB1G4SBFGjBxl87nLly1l3949rF6zLt3jfr4+XL50Ca2VYmTJXUMKFCzE1m3bX9uN+KtMYmIiwcFBLF++jHvh4dSpWxdPTy8kSeL+/XucOnmSOnXq0KNnL1q1Sr8bwvHjxzl9+pS5A4VWS958+Wjduo1Nit+KrPDXX3+hKArVa2SPk80WTp06Sb8+fRAAvRUp7cnf9cpVqjJv/gKbSy0uX7pM4MpAACpVrkTHTh0ZNWoEd27f5pdffyOXFaUqGzesZ9rUn5k1ew6nT/7Jw4cPqVe/Hq1at0p3fEJCAqtXB7NwwXzCw8PRabXmUlJBQK1Rk5SURI0aNek/YAAtW6Z/jdeFV96Y3rplKyeOn6BgoYIMHjI4W+91+/ZtevfuScWKFRk0eAjFihVPd1xUVBTBQavYtGED3//4I40bN8nWeWVEfFw8eif9c+nr9vOUySxauADtk6bz1pL8Epnw3vv07tPXoTmsWhXInNmzWLRkGXq9nsgHkezbu5eYmFj69Otj0QBWFIWx746mUqXKfPjhRxneY8Yv05k1ayYalRqtxvJnFSURWVGoWLky8+YtSNfoSv7+euTyYMyYMSktCHLIHuJiY4mOiUGj0ZArVy6LThODwcCkST+yOjiYwkWK0K2bN2XKlMXZxZmE+AQuXbrE2jXBPHjwAF8/f8aNG29TDdj169dZtmQZpUqXwsfX56UQ1lm7ZjUff/wROq0WlaBKt6WP+TsNJpOJqdOz1pBOxmxQD6R69ep4+/hRr379NM/YtatXCQ5exbatWxk1eozNhvTTiKJITHQ0CYmJuLm54eHhkbNpf46sW7eWzz/7lC++/Iomz7QkfJaDBw7w+Wef8MUXX+Ht82JKI7IbRVFo9FZDYqOj0dlYW2gwGnijVCnWb9hk9/0PHNjPuLHvsm7DJps29LIs4+/nw5gx79LFglJzXFwcfXr34trVK5nuE5L3BQULFWL5isCc7JBXgBMnTnD8+DFioqNRazTmMqZWrdJ1SiYlJbFu3VqWLllCWFgotWqbU7NFk4nQ0FAuXDhP69Zt6Nu3X7ZloGQ3J0+epF/fPgiKYl5XLawroigioVDFTkM6mb179rL/SUDq7RZvU6duHUaNfIdbt24zesy71K6TfuZnVFQUQasCWbliOTNnzaZE8TeY+8dcAPr270vx4unbOk9jy9/+deSVN6YvXbzEqsBVIMB7779nV5TIFu7evcv3333L7t27qFa9Om3btiNf/vyo1Wqio6LZt28vu3eFULFiJSa89z51LHx5o6OjeRARgcFowN3NnQIFC2bJhvqvM3+hUqmoXOX592J89PAhXbp0IjoqymqDOnnBLFS4MMGr16aK3NpDUlISPXt0x9nFmYmTJlv1UlIUhYk//sCJE8cJClptVQ+8VasC+WX6NKKiolAhPOkraG5IL8syglqFoij4+Pjy4YcfpUrvfpqI+xHMmjkLNzc3+vbvSx47eyPnkHUYDAaGDhnEo0ePGPPuOKpWq2Zx7KlTJ5n2888UL16MX2b8ZvUzHBwUzIXzFyhQoABDhg3Jqqk7zJUr/7BwwQLWrVuLVqtFkWQkWXrS1kaFJEv4+PjSp2+/bFUfv3v3LsuXLWXVqkDc3NypXKUyrq5uJCUmcuv2LS5dvEibtu3o06cvVatmX/lKDs+HLVs2896E8VSoWJGu3bxp1qx5ikjD8WH+AAAgAElEQVSNKIrs3bOHtWtWc+7cOX6cOPG56hq8CJYsXsSkiRPRajSorXTsSJKEQTQxecrPDpWrKIpC+3ZtKFe+PB9/8pnVjvHffp3Bzh3b2b17r8X1DszRrAH9+3Hu3FnUggqNRo36qaa1sixjFEUUFN54oyRLly1/ofWvOWQ9cbGx9O/fl8ePH+PrF0Dbdu3SiH5ev36N4KAgtm7ZzPvvf2Ax0JKYmIhWo00lwPUycebMGb7/7hvOnj2LXqdDAAQEFFLvFf38/Hnv/Q8cEv9TFIWgwCAuXb6Ej48PFSpWwGg0Mn3aVAIDV5I7dx46d+lKsWLF0Ov1REdHc+jgAUJCdlKpUmXGT3iPWrVqsWDeAkJDQ/H08mTUqFE5QR4reOWN6aTEJKZMmUK5cuVo3aY17s+pdcn9+/dZtSqQnTt3EBUVhSSK5M6dh5o1auEX4E+FCunXWR06eJDFSxaxd88e1Go1er2e+Ph4cuXKhZ+fPz179U4lKGALZ/48w8YNG1FQ8PH1sTiH7OReeDi+vt5ER0WjyyBqC6k9zysDg7KsdVhsbCx9+/TG1dWVb7//HldXN4tjTSYTUyb/xIkTx1m2bAVFihSx+j6KonDw4AEWLVzI5cuXSUhIQK/X878C/yMgoDsdO3ayShX6+vXrlCheApU6Jxr2okk2pKOiopn2y4yU1m8ZER0dxeiRIyhatKhVBnVCQgJTp0xFkiTatmtL7WfaSt2/f59ly5ayZfMmoqKikGUZV1c3ateuTf8BA6hcOePa/6wgNjaWTZs2En73LnFxcbi7u1OkSFHatW/vsMPLFgwGA9u3beP6jevExsbg7OxC/vz5ad++fY4g2GvGw4eRBAUFsXLFch4+fPikB7VAVNRjvLxyExAQgK+fP3nzpv27nz//N8uWLeOffy4THx+Pi4sLJUq8QffuPahVq5ZN80hMTGTJ4iUUKFCAps2avrBSjM8/+5S1a9egVWduUCcb0sOGDWfU6DEO3/vu3bv07BFA
9Ro1+ejjTzI1qH//7Ve2btnMkqXLrRYjPHr0CIsWLmDPnj1oNBo0ajWSJGM0GWnQoCH9+venUaPGOVkirxnJhrRWp+Onn6agz0Qs768zZxg/7l3Gj5+QrkG9edNmLl26RNOmTalZq2am9//zz9OE7NxJZGQksiyTJ29eGjVqTMOGDe3+TNbwzz+XWbhgAcePHyM2NvZJi9Z8+Pv507lL1yzrIGI0GLl2/Rpvvvlmqp8nJiayYcN61q9by/2ICIxGIx7uHlSpWpXevfuk2AuSKLF7926OHj2Kf4A/ZcuWzZJ5ve688sY0mA1qJ2f71Cuzitu3b7Nxw0YeRj6kdOnS9OjVI9XxmzdvMHzYUB48eECHjp3o2s2bwoULA+aF8MSJ4wQHreL4sWN07dqNr7/51qYa531797Fv7z7A3Cu5b7++OLtkb5TeEuHhd+ndqyf37t1DhYBWo0m1IEqybE5rUWRKly7NwkVLnmycso6YmBiGDhnMxYsXaNe+A94+PhQsWCjF0Hnw4AFr16xmw/p1eHjkYv6ChTYZ0jm8nvw0aSL79u3j199nWmVIJxMdHcU7w4bSoWMnRo0aneHYo0eOsmP7DjQaDeMmjEtJNb996xY//vg9u3fvxknvhCLLqFRmP3ayOJZRNFGqVGnee/99u3vB5pDDy4wsy1y+fJmoKHP3jFy5PClfvny6RtWRI4eZOvVnLpw/T8tWrahRsxYuLi4kJSby97lzbN26heLFizNy1Gira/bu3LnDgnkLABg3YZxN74Gs5vPPPmHNmjUICummicqyjEkUEWWJ4cPfyRJDOpmwsDB69uhO3rx58Q/oTuMmTVLtSRRF4cjhw6wKXMnVq1dYunQ5pZ/qR2stjx4+5NbtW8TFxeHq6kqRIkWtyg7L4dVk4ID+JCQmMHnyz5ka0skkG9STp/ycSuU6Li6O6VOnI0kSzd9uzluN3kr3fEmSWLduLfPm/sHNmzfRqNUo8hPTRzBrDeTPn58BAwfh5+f/QtpUZhePHz1m165dNGnShHzpCANaIioqKssCXP8FXgtj+kWSlJREyM4QTp86nfKzTp07Ua36v6mht27dpGeP7jRq1JiRo0Zn+AK5c/s27703niqVq/DT5ClWGdQXL14kKDAIgKJFixLQPSCNIR0WFsajRw+RJBlPz1wULVosWwXJFEVh//59zP3jD44fP4ZOq0OtViFKEqIo0qRJUwYOHESduukrZ2cVJ0+eZHVwMFqNjrC7oVy/cY3EhASio6OpV68effr0o0nTpi+N91uW5JwI9QsiKSmJtxo24KtvvqFu3Xo2n79v7x6mTJ7Mvv0HMuyjeP7v8xzYf4D/FfgfXbuZawtv3ryBv58vCfEJGaZ2KoqC0WTEJEn8NHkKHTp0tHmez5PIyEj27dvL48ePkUQJDw93qlStSsWKlTI/OYccMmDP7t2MGTOKXr374O3jk67ATnx8PJs2bmD2rJl8+eXXdPP2zvS6Z/86y7q169DqtHz0ccb6Gc+DkJCdzP1jDmfOnMHZySlFXVetVpOYlESDBg0YOGhItkTWHj16xIrly1mxYhkAVapWw9XVlcSEBM5fOE9CfDx+/gH06tU7pwd0Dply+fJlunXtzNr1G22ugV+8aCHHjh4lcFVQys+Sn1W1Rs2E9yakawRLksSY0aPYv28fPKldfjbTwtzFwYQiCFSqXJn58xfa3V4uu3nw4AE3rl8nLi4OZxdnChcqTDELNc2GJANz5szh8aPHCCqBWrVq8Vajt9Jk8Cqywrlz5yhUuFC6mT85ZM5rZUxH3I/g4MGDdOrU6bnVT8THx/P7r7+TmJhIvnz56NCxA0WLFU05HhoaSvcAPxo3aWp1C5nIyEhGjhhO1SpVmfLz1EzHK4rCurXrMJlMdOvWLeWzJyUlsXHDBubN+4Pr16+jffISMZlMeHl50advPwL8A8idzXW698LDCbtrVkJ2c3OjaNFiz/WBDdkZwuFDh8nlmYsaNavj7OxC4cKFKFjQvnT67GDTxk38c/kfGjdpTK3atqUl5pA1BAcFMWv2TFasXGVXT3hJkvDx7sqHH35E+/YdMh1vMpnQarUphnRSQqLVgkMmUcRgMr60BvXTKZzOTs6k9M0VBJKSkihZshT9+w+gQ8eO2b5pMRgMbNywgaCgQCIiIjAYjLi5uVKhQkX69utH9eo1svX+OWQ9e/bsYczokXz48ScWFYKfxqwA/r5VBnVyllf+/+Vn2PDU7Zhu3LjB4kUL2bx5E7GxsSiKgqurK82aNWfAgIF2K2hbw9WrV9m7d49Zp0MQ8PLKTYuWLSlatGjmJzuIKIrs2hXC1StXUiLIxUuUoHWr1hnWR+eQw9N8+uknREdF8cVXX9t8blRUFF06dWBV0OqUlOSNGzby5+k/KV68OH37p00Bf9qQ1qrVmQZNkksPK1WuzPwFi16aCLWiKBw6eJCFCxdw8OABdDodKpXKrC9gNPLmmxXo338Abdq0SfM8/n3ub0J2hhATEwOQqnPMtavXOHPmDGFhYUQ9jsLLy4uBgwbi4pp+yrksy+zevYutW7YQ8SACWZLJkzcPzZo1p3279v/pd8FrY0zHxcYxffp0JFGibr26tG6T+QJrL1evXqVkyZIpD+a5s+eIioqiQcMGaaK9H7z/HtHR0Xz7/Q823ePBgwf0CPBj9uw/0o3eJsQnoNVqU1p2SJKESlClCAXs2b2bsWPHmNNDJTmVN87c01kEQYXBaGD06DEMf2eEzb+HV4UL5y8QHBSMoBIYP368xRfFi2T5suVcvXKVipUq4u2TefQkh6yne4AfDd9qRED3HpkPtsDCBfO5cP488xcstGq8oii83bwpkQ8eoNfaJkCYbFBv3badEiWyTwzMFiRJYsL4sezYsSNdcSFIjq6bENQqvLy8WLlyFQXs7IubEfHx8fw64xdWrFiBIstp0+Yx97YvUaIEI0eNpl279lk+hxyynsTERBq91YCRo8bQoaP1jqQjhw/zwfsT2H/gUIbO3NOnTnP16lUKFixIo8bmfrWRkZGMfXcMJ04cR6/TI8CT75I5qiM/KcEoX748P0+dnq3ifDnk8CpiMBioU7sm036ZYbfuxzdffUnu3Ln54suvALOBfevmLfR6PeXfLJ9m/JTkDjOqzA3pZBRFwSiJtGrdmsmTf7ZrnllJTEwMfXr34urVK6gQ0DyTuSYrMiaTCVQqXF1dWbZ8JSVLlkx1DZPJxOFDhzl86DC+/r4puganTp5i86bNKeNKlixJ566d00SujQYDixYtYsGCeWYnoiyjEsxzUBQFQa1Co9HQo0dPBg8Z+p8UDHw55e/swM3djVq1anHs6DGOHT2Gk5MTTZpmbUuqK1eusG/PPu7evUvnLp2pWs2sImtJOfvx48ds3ryJ2X/Ms/le+fLlo137DixZsjiNMR0VFcXSJUvJkzsP/t39UalUqYz4Pbt3M2LEcLRqjbl1kzr1n1kQBLQasxEu6PT8+usMRFHM0nqrrOT48eMsXbI4RVxGr3eicJHCdA/oQYuWLTNt+1WqVCnUGjWSKHHhwgW7I7/
x8fHcvHmDmJgY9Ho9+fLlz7KoQKFChbh65SoPIx9myfVysJ3w8HBKlizl0DXeKFmSkJCd6R6TJCmNs+3QwYPcv38fZ53tHl2tRoMiwKJFi/jiiy/tmm9WIooio0eN5ODBAzhpdRY3L4IgoNfpUBSFx48e4ePTjZWBQVmqWRAfH0/vXj1TNiBatRohnfeETqMQevsO740fz927dxk0KHvbK+bgOBs3biB37jw2GdIA9Rs0oEqVqqxcuYKRGfRQrlGzBjVq/putEBkZiZ+vNw8jI3F1ck77vX7yT41Gw7UrV/H38yFwVfBLZ1CfOXOG1cFB3L59m6SkJHLlykXNmjXx9fXL9uy0ZB49ekR0dDRqlQqv3Lmfm2BsDi+ehw8fkpiYyJtv2i+MW658eU6fOpXyb09PTzyrpV/XazAYWLJ4cUq3FWsRBAGNoGLzpk189NHHL1ToMioqih7d/QkLDbXYd14lqNDr9CiKQnxcHH6+3qxYGUiZMv8Kh2m1Wpo0bUKDBg1S/S7+97//UbFSRXLnzk25cuUoVDhttqbRYGDw4EH8+edpBAX0mvTT5EVRYvHiRezaFcKKlav+c/XWr40xDdCiRQsePXxkNnqfpGk9q2hnDw8iHrBxw0ZCQ0NTfnbjxo0UY9oSq1YFUq5cecqVK2fXfb19fOjVozv37t2jQIECwBOl36XLiIuNI+pxFGFhYakMugMH9qcY0takjGrUagRg1qyZaLVahg1/x665Zgdr167h1xm/EB4e/mSeAoIgEEs098PDOX3qFE5OTvTu3YcRI0dZrAHXO+kpW7Ysd+/etatO/OrVqyxauIB1a9ciyRJajQZJNnsDy5d/k0GDB9OmTdsM62QzI1khOSExwe5r5OAYiYmJDov2ubi4kJiQmO6xY0ePcebMGerXr0/1Gua+mfPmzUUtqOxKKwdQKbA6OIj33ns/y9RA7WXC+HEcPHjA6iiAIAho1RpiYmLw8e7Gjp0hWaKcnGxIX7t6FV0mLfqSDXtRkvh5ymSAHIP6JWfpksV0s7PHtLePL9On/cywYcMzdcKC2fjz9/PhYWRkpu0eVU++S0mJifj7+RAcvMZiLePzZOOGDfz22wxu3bqFVqMxV1wI5g3w8WPHmDp1Ki1atmDcuAlpIlpZgcFgYNPGDcyd+wfXrl1DEMyZIQB16tRh4KAhNGnS5KXRLckhe0hISDCrtlvx3FnCxcWVhIR4q8Zu2rgBRZbR2rHnU6vV6HU6Vq5cyYgRI20+PyuQJIkAf1/C74Zb1WpWEAT0Wh0GgxF/P1+2bNmWJuMrOZM1mSJFi1CkqGUnttFgYMiQwZz583SGa6k5QKdBURTuhoXRPcDvP2dQv1ZvL7VGja+/L6VKlyJfvnyUeUpZMjEx/Q1ueiQkJBAZGZnyb61Om2JIFy5SmJ69e9Kla5dMr3P40CFatGyZ6ThLFCtWnFKlS3Ps6FEAbt+6zcIFC4mLjUOr1RLQIyBNZPTzzz5Do1JbXXsJ5heHTqNl+vRpPHr4ckRGf/vtVz795GMiIyJw0Tuh1+rQabVoNRq0Gi1Oej16jRaTwcDcP/5gxDvDzakuFujYqSNj3h2TYsRYgyiKvDtmNB3at2XD+nVo1GpcnZzRabQ46/S4ubhy49pVPvn4Ixo2qMe5c2ft/rzJhpAlQyyH7MfNzY34eOsWakvEx8Wnq/6rKAonT54k8kEkd+7cAcwRr8OHD5k3uHaiedJ+btu2rXZfIyu4efMG27ZttSmdDp5sADRaEhMTWLlyRZbM5aMPP+Da1SvmaLSVTgqNWo1eq+PnKZM5eOBAlswjh6wnIiKCS5cuWVUnnR6NmzQhJiaWy5cvWRwjyzKSKCFLMnP/mMODBw+s2swmo9fqSExIYNKkiXbNMSuZ8ct0Pvzwfe6GhuHq5Ixeq0Ov06HX6nDS6dGq1bjo9ezbsxcf765cuHAhS++/b99e6tWtzddffUXYnVDcXV3xcHXDw9UNdxdX/jpzhlEj36FZ08Zcv349S++dw8uFm6sroihiNBrtvkZ8fFyq9fXc2XOc+fMMUVFRacYuW7YMRZbtdlQrssKK5cvsnquj7NixnbthYTatYwB6rRZZkliwcIHDc5gyZTKnT59CY+X7L9lBfjcsjPHj3nX4/q8Sr5UxDebNZffu3enRs0eKB8xoMDLt52nM+2Memzdt5vix49y5fSflnKTEJHaF7CI4KJi5c+YyZfIUtm75d3Pq6elJnbp16N6zOwMHDaRUKetSQaOiomxWLHyW3F65iYo2vyhEUcRkMuHs7Ezvvr3T9HM8cuQwERH3bTKkk9Go1eh1elYGrnRovlnBz1Mm8/tvv6LXaNFZSG2Bf9PVtWo1hw8fYtjQIRZf1LaKHImiyMgR77B7964UA1rzjIfTHInQo1NrSEpIpFfPHvz99zmb7pNM2bJlGTdhHO9/+L5d5+fgOMWKFePC+fMOXePChfMUTycadfXqVaIem5/j5L7S4eHmTAlHIzICcDcszKFrOMr8eXNx0uvt/yyywvx581KUiu0lIiKCHTu2o1bZHu3XqNVoVGpmz57l0BxyyD6io6PQarV2ZzCo1Wq8vDyJioq2OGbrlq189+13LF++nBUrlqN6khFlC1qNhpCQnTx48MCueWYFkyf/xKxZM9FrtOh1ltdRtVqNk06HaDLRPcCfs2ftdwo/zf79+xg+bCiyKKJ9EulLrrMUBHPqrZNOj5NWx+MnGQA5BvXrS+48efDw8OCvv87YfY2//vqLN54qxdq4cSMb1m8gLDTt+nfvXrhDa6tapeLhCwwuzZ4101yPbIczQFBg5YrlJCTYn+mYmJjIypUrUCOgsmEOgiCgUak5dOhQSuDgv8BrZ0wDqNQqcnn+WwB/+fJlTCYTYWFhnDp5im1bt3Ho0KGU44JK4NDBQ1w4f4G7d++iyAqhd0JTGWZt2rZJFem2ah4qFbLsmL6brPzrWStZqiQ+Pj70H9A/3frC+fPmobJjE5mMIsssXrTQ4Q2tI5w4cYK5c/9Ar9FanZKtUqnQqjWcOHGcRQszFn4STSK3bt7K9JrvjhltjhpaEWlLqQGVZHr26M6lS5ajHpbQ6rS4ublla7uyHDImIKAH69etRRRFu85PTmf0DwhIc+zkiZMAFC5cmIJPUq+S094cRZIkhyPqjhAfH2/uhYt97x0wGx/x8XEW682tZcXyZTjp9WlEz2yZx4kTx7l166ZD88ghexAEFY5qpsopQnQZ8yAiAlmS0jhRrUGtUuOk17NixXJ7pugwp0+fYv68uTato3qtDlkSGTN6pMO/4+PHjzNs6JAn5WaWDXl4Kj01KQl/Px/u3r3r0L1zeDnR6XR4e/uwOjgo88HpEBFxn4MH9tO9e/eUn7m7mWvu04tMGwwG7NwKA+bvpSRJL2Q/fPnyZS5fvozORlHSZJL3FVs2b85kpGU2btwACnbtUVQqFU56PUuWLLb7/q8ar6Ux/SwVKlTAz9+POnXr8EbJN3Bzd0tV36rX6ylZqiQVK1Wk4VsN6ebdjXETxqHT2fdFTsbTMxcPH0ZmPjADFBmcnf
6thSz/Znny5ktfEOHYsaNo7NxEgnkj+fjxY27cuGH3NRxlzpxZaFRqm41KlSAgKDB//lyLL7+7d+8yaeIkFi1clNImID1OnTrJ7t27bE5Z1et0yJLMpIk/2jT3HF4OWrRsiVqtZv/+fXadH7JzBx4eHjRunFr4UJEV1CpzqtbT4neuT9LeHEWtVqebWv68CA0NRRRFu4yOZARBQK1SceWffxyay7JlS8EBQ0ClUqHX6wkMDHRoHjlkD7lz50YUxVRlWLZgMBh4/PgxuXNbFtxKNvyioqKQJMlu57QsyRw5fCjzgdnA3D/moFHbvo7qtToePHjAgQP7Hbr/jz98j1pQ2ZQlZ673NDB37hyH7p3Dy0vPnr04fOgQ4XY4TNasXk3devVSda7Ily8fQLoZIC4uro4sBSiKglars/kZSkhIIDQ0lOvXrxMZGWmXY+r27Vs46Z1siginQVG4c+e23acHrjR3wbD//rBmdbD9579ivFYCZJZQa9SUf7N8utL5yfTq3SvL79ukaTOCg4PsbrVz+OAhSpcqw80bN4mIiCB//vwWx8qyTFJSEu4urvZOF0EQ0Gl1xMRYToHLTsLCwjiwfz8uTvaJQGk1GuLi4ti5Ywdt2rZNczx/vvwkB8+uXrmaSrH1aebPn4fGzvRbnUbD4cOHCAsLo3Dhwlafd/nSZc7+dZY8efPQ/O3mNt83B8dRq9X07t2X32b8QoUKFVNE/6whNDSUObNnMWzY8DSbb0El4BfgR3RUNK5u/z6fhQoWMnu+ZTlVqwtbUYDCNihhm0wmtm3bypLFi58o+ybi5OREwYIF6dWrNx06drKpv2ZsbKxD4nvJSKKUoZMrM4wGA1FRUQ69AwFkSeJGTrrpS0nu3LmpVas2mzdtpG+//jafv3PHDgoVKkzZsmUtjknWr5AdqLcE83oaHf3819Lw8Lvs3r3brnVUEMz5JbNnzUrjFLSWS5cuceHCeVydbb+/GoHgoGAmTHjxgoo5ZD3Fihenbdt2fPjh+8yY8RseVrZQ2rdvL4ErVzB33vxUP8+XPx9Xrl5J1+B9s8KbHDl8GHtXJlGSKFXa+u4ex48dY/78uezduzfl3aEoCvnz56dfv/74+vlb3TIqMSHRoXcPmDPWHEnzjoiIcChNXqVSERMT8yQT6PWP277+n/AF4u3tw53bd+wSpgoLDePP03+iUqlwc3NLSWexhCCY67ocbRquoKTUNT1vdu/ehbOTk92GhSAIICts2ZJ+aotGq+GNJ17NK1eupDvmwYMH7AoJsTvKlpzesnTJIpvOu379OhcvXuT2bfs9iTk4zqDBg2nQoCEjRwzn3r17Vp0TGhrKyHeG0apVa3r36WtxXC7PXKlSpnLnyUOjxo0xSfZHp5Mj261bt8l0rCzL/PbrDBrUr8snH3/ExQvnMSQkIChgSEjk2pWrfP3VV9SrW5ufJk20WijG2dkpS1LhVGqVQxvouCep7o5uQgQEol+QQzGHzOnVuzfr1q6x6zu3OjiInj17ZfgdKVeuHO3atyMhMR5HF1SdHS3vHOXkiZM4OznbvY5q1RpOnz6V+UALLF68KFV9tC2YBRWfpJjm8Fryw48TKVyoMKNGjSDGCmfTvn17+fLzz5j002Tq1q2X6lidOnWY8N4EOnZK2yavX7/+yLJsV2RYURRQCQwcMCjTsaGhobRp3ZJ+/fpw+OAhXJycUkT2PFzdiI2O4dcZv9Cgfl1mzZxp1f3d3d2RFQeiwoBarcHNgdZzJpPJgcKtlLhVhsLArxM5xnQ24u7uTufOnVkwf75NC3/U4yi2bNqMIAi4uLjQq3evTFv2CIKAs7MLigMPoKIomEwmvHJ72XTeoUOHmDVrJpMmTWTatKksXbrErjS8x48fO1xjLggCkZGWRV9KlzGLtt26lX7d9J9/nsbJycnumkswp/Xu329bmty9cLPhllH2QQ7ZjyAIfP/Dj9SvV593hg1l86aNFo3KpKQk1q9byzvDhtKyZSu++PIrm+83YMBAuxd8AEUAP39/nDOJAsmyzNixY5g1ayYmgwGdWoOTTm9uVaJWo9Fo0Ot0aNVqFElmyZLFDBk8yCqDukCBAoiiiOxIShjm333BQmn7XFpLcns5R+s9FRQ8cvrfvrS0atUaRVFYZKNa7fp1a7lz5zbe3t4ZjitYqCC1atci///+h6C2f4skK3KKPoI13Lx5g2+++Rof7260btWCzp07MvbdMZw4ccKm+8bExGRJragtHVCe5tzZvxxyQkiiyKWLF+2/QA4vNTqdjl9/+53ChQoT4O/HrJm/ExFxP9WY5LZt708Yz2effMzEST/Rtm27NNdy93C3uPY1aNCQ/PnzYxRtN+ZESUSr1dK2Xdp7Pk1YWJi5zj/srrnrjE6XUtIFydmeWrN2gEbDjBnT+XXGL5nev1z58iQlJdm9piqKgqzIVKhgf09vNzc3FAceZEVRzC3GbMhye5X5T6R5v0hGjhpNjx4BfPvN13z2+RdWpTvcj7jP48ePcHVzo//A/rh7WLexe/vttwnZucPuuZpEE4UKFaJ48RKZjjWLDq1m3ty5RD6IQKvVIooSKpWASqXm22++pmWrVgwcMIhq1a1vR+UwQsbreIECBShSpAgeuTzSTT+Jjop2ODIvCAIxMbFWj496HJWieljijcx/968T4eF3uX8/gsTEBNzc3ChSpCheXrY5c7IaQRD47vsfWLpkMYuXLOa3X2fQoWMnSpcpg4uLCwnx8Vy6dInNmzaSL18+Ro0aTfce6ZdyrF+3HicnJ2rUrJFS3/U0DRo0pFChwty/dw+dxvr2O2B+XteG09UAACAASURBVA0mE717W46Gw7+G9J5du9GpNRm+g5L7RcqKwp+nTzFk8CDm/DE3Q/2IPHny0qhRY44fP4aTyj6dCVESkWSZ9u3a23U+mLUv3N3dkUyiw+lpxYq9+P7AOaSPRqNh1uw59O3TG61Wm2E2SDIbN27gl+nT+H3mbKujNV26dGXB/Hk2a2eAeSOpAH5+/pmOPXfuLBMn/sjJEyfMG09ZJjmuc+PaNbZv30aRIkV4d+w42lnxfJifVceyM/69ju3ExcU5ZswjpCsolYN9GA0GNm/ZzOnTp4iJiUGr1ZIvXz46duzskLHlCDqdjtlz/mDfvr0sXrQIn25dKVeuPLk8PTGZjISFhhIXF4+vny9ff/NtuoK7z3Iv/B4FCqYuzXrv/Q+YMH4cakG0WkhLkiRMksTY8e9l+Azcv38fP18f4mJirVq7NWoNAgIzZ/6ORqNl2PDhFscWKlSIRo0ac+zIEfR2PIcmUcTDw4O3325h87nJVK9Rkx1b7W+5aZJEypd/0+7zXzVyjOlsJn/+/CxdupxePbvz9ZdfMHb8eHLlstzI/ML583zy8Ye0aNGSIUOGkCePZaGUZ+k/YABbtmxGo9bYJ1ygUjFgYOZpLZGRkfj7+Zijz7KMk05vlsPX/RvNddE7sW/PXnaFhDDm3bEMHTos0+t6enpapbKaEYqskDdv+gJtAEWLFWXAoAEWj6sciEQ8jS2iFbdu3QLB3L6rXNlyWXL/l
xlJkggJ2cnixYs4eeIEHh4eODs7ExcXR1JSEq1bt6F3nz7UqFHzhc1RpVLRp28/evfpy/79+wgOCuLP06dT+lwWLVaMGb/+RoMGDS1eIyE+gbNnz6LICgULFkzXmAaYv2Ahfr7exMfFW21Qm0QTRlFk+i8zKFasWIZjFyyYz+5duzI1pJ9GJQhonqR7Tv15Ch98+FGG44cMHcqRI4dRNPa18pABbx8fh9LSwGy8ONIbVJZlDEYjvn5+Ds0jh+ylUqXKLFy0mH59+3Dr1i269+iZbsvK0Dt3CApaxcYN65k5czYNG1p+Xp8mLDSMmOgY6tatz+lTJ3CyMV3bJIp4enrSpGnTDMedPn2Kvn16g6Lg6uRsfj6fWTp0GoV7d8OZMH4cDyMf0rtPnwyvWahwYYwmI06ZqGhbQpIlPD097e4s4ezswiPF/pZCCgpu7i9OUPF14eHDSBYsWMCqwJW4ubnT8K23KFq0GCaTidu3buPn603FipXo179/ulHf7EYQBJo2bUbTps24efMGf/75JzExMei0WvLkzUvjxk2samkaHx/PqpWruHPnDv0H9qdo0aIpx9q1a09cXBxffvE5kLkytSRJGEQTw4e/Q99+/TIcO2vWTGJjYmxygqvVanQoTJ8+FT9//wxb5w4eMpRDhw6itbHmWFEUEAQGDBzkkFO5X79+bNq4AY3GdntCURQElYqBgzK3J14Xcozp50CBAgVYumwFY98dQ+eOHWjZqhVdu/lQvHgx9HonYmNjOXH8GKuDg/nnn8v07defCRPes/k+lSpVpnTpMty8cd2Oxd+ErCh065ZxCtzDh08M6QcP0Ko1COr0v0LqJ0qiapWKX6ZPQwCGZGJQN2vWnO+/+/bfTYWNJNe5tGmTVnzMWnJ75UaURNQa+wWVZEW2KbpatVpVSrxRggcRD1BrXu/WWH//fY4R7wzHZBLp0rUrn3/xJXny/Ov8uHHjOsFBQfTv15cyZcsye/acVMefN4Ig0KRJU5o0yXhTnB4XL140K3lr1JQrb9lJUrRoUVYFrcbP15u42Di0GajwyoqM0WRCkmWmTf+FVq1aZzgHWZaZP28uKgSbnymVIKBCYMWK5Yx5d2yGG5u6devxxhtvcOfOHXRq2yLsRpMJkyQyoP9Am+aXHr1692HBgvmonVR2vUOMookqVapQpoxlgar/MteuXePixQvExsbi7OxMkSJFqVWrVuYnZgOVK1chMDCImbNmMqBfHypVqkyNmjVxc3MjPj6e83//zYkTx2nWrDlLl62gSpUqVl/74MGDXL50mXr16nHy5HFEG1pkSbKMjMLgwUMz/A4mG9KCAvoM1uvk1otqSeSHH75DEMzfc0vUr1+fXLlymSNm9ggDCgI9e9ovyGp+D9iv/aHRaHMyQxzk3r179OrZnbx58/HpZ19Qv0GDNO/kqKgoNqxfx+effcrf587x3vsfvKDZQokSb6RS6bYFFxcXEpPMJQmHDx7Gv3vqbBA/P38UWeaLLz7HScDc7kmtTvX7EEURWTCr/Y8YMZKRo0ZneM+EhARWBwehFmzvQa9Ra9DrIDBwJcOHv2NxXJ06dWjatBkHDx5AC1atZ4qiYJJEChQshL9/2hadtlCxYiXKlCnDjev22BMieie9VVourwuC4miB2UtEYmIiGzasJ3DlSm7fvk1CQjwuLq4UK1YM/4AAOnXqnGltYXZz4fx5Fi9exKZnajGLFStG0yZvU6t2LVq2amm3V/j8+b/p0T0ARZKtTg8xiSJG0cQPP06kc+cuFsdJkkTbNq25fy/cbEhb+RIRJQmjaOLbb7+jaybG+sAB/c3ponb01zOaTDi7unDw0BGLHkiT0YTBaLDYSighIYF6dWujfhKZsweTJDJq9BgGDhps1/mvK+fP/03fPr3x9fOnb7/+GXqJ4+Pj+PabbwgLvcPSpcvJbUOGxsvCooWLuHXzFuXLl8cvIPNIZ1hYGJN/msS2bVvNfctlJUVdV3lSvWQwGqlYsRLjJ0zIMCqezO5duxgzehR6rdauKJWiKJhkiU8//TzTaG1ERAQB/r48jIy0+v1gNBkRZZnfZ86yy2GRHoMGDuD48WM2G/WSJGGURKZOm07Llq2yZC6vA5IksXv3LhYvWsSpUycpXboMbu5uJCUmcfPmTfLly0uv3n3o1s07pW79eRMZGUnQqkCuXLlCXFwsrq5ulChRAl8/fwrZUYe/Z/ceDuw/gJeXF3onLdOnT0On0WZqUEuyjEkS6dq1G19/863FcbIs81bD+sTGxNi0URUlkUSDge07dmZYjjV//jymT5uKVqW2+RlINBo4cPBwhhleGXHo0CGGDB6I85OMNVuQJIkEQxIHDx2x+/7/dZIN6QoVKvKpFaWFN25cZ+Q7w/H19WPCe+8/p1lmLX+d+Yv169YDMHzE8HSzwMLD77Js6VKWL1+G0WhM6UAhiiKCIODt40vfvn2tMupXrFjOjz98j87OPaJ5r+rKocNHMtzrS5LE2HfHsHfPHjQqVYZjZVlGlGWKFC3K8hUrrVYOz4ijR48woH+/J/3irXPMiZKIwWTiq6+/wdf3v5Ph9VoY07Is88v0aSxduoTcufPQzdubChUqmusbExK4cPECa4KDefToIT179WLMmLEvXKpdFEWio6NISjLg4eFByM4Qzp09h0ajYdjwYeTOYzn9IzP+/vscPXt0R5Yk9BmkeimKgkkUMUki3//wI126dM3wuiEhOxk7ZoxdG3OjyYhXnjzs2bs/w3OPHTtK/359cbKxv5+iKBglkREjRjJ0mOValAsXLhC8KhgPDw9Gvzs63e/BV19+wdo1q9Ha8aJMdhwcPnLMqpdZaGioVfVArzoXLlygT++edO/R0+qWNqIo8vlnnxJ65zZLl63IMCXqZcNgMDD156kYDUa8fbypWKmi1ec+eviQFStXsGnjhpRet+7u7tSuXYd+/QdQvrzlFn/PMnBAf44fO2qzZ/lpDEYjb5QqyfoNm6yae0CAP/fuhaPiSZ1YOs+7JEmIsoSsKMz5Y65VjgFrefz4Mf5+vjY5/ZIN6b59+73QCM3LRlJSEkMGD+Lq1St07tKVLl27psoUMSQlsX37NlYHBxETE8OyZSsoVvzVjyqGhoYyf665Dc/AQQPZuMlcc61Wq9GqNWmUsmVZxiiaUIBu3brx9TffZXj9kJCdjH13DHqN7WupSRLx8fPn008/szgmLjaWFi3eJiEh3mqnkpzsCPD24auvvrZpTs/StEljHj+MRGejU9xgMtKkWTNmzPjNofv/l+napTNFihThsy++tHqfe+PGdUYMH87Hn3yS6T7wZUSSJGZMn0FcXBzevt68+ablWl2jwcDpP08T9TgKWZHJlcuT6tWr29RJYvSokezZtQsnO8W1FEUhJj6OkF17Mi3TkmWZr778gsDAlTjpnVBkOSWyrigKkiyjPImq161bj99++93hcqmn2bNnDyPeGWaVQS2KIgbRxMcff5Jh9szryCtvTMuyzMcffcjRY0f54IOPqF2njsWxJ04cZ+KPP1C3Tl1++HHiCzeok/nnn39YuXwl
AC1btaR+g/oOX/PcubOMfXcM4eHhqAWV+eFTqRAAWVEwiSYElQq9Xs/nn39Jx06dMr1mQIAf58+es+sFoigKiUYDc+bMpeFbb2U4dtKkiSxeuBCdRmOVQS0rMqIkUa16Deb8MTdD9cCli5dy/fr1/7N31uFRXHsYfleTbIjhUpxAWwoUaZHgFEJwCxq8UCpYjbZUgba4u5OgIbi7BwkugeIWJ76brM7cP5akhNjuJgHSy/s893luM3NmDsnuzPnJ+T5KlSrF4CEZt5U+ePAAr9atUNnZWx/QG414tWnDpMlTsj3/1q1b+K/3x93dnS7duvynVQ/79OlFhQoVGTlqtFXjjEYjI4Z/yUd1Psp3QY7BYODO7Tu4u7ujUObch9kWWrX8hIiwMNvaPZ9jNBlR2NlxLsgyu5z4+HhWrljO6tWr0WqTEQXBLOonSVH4lKM36PH0bM1nwz63KjlgKSkBdXhYWJYZ/ZSEokkU3gbSL6HVavls6BASEhKYOXs2jo6Z72MVRZFpUycTeOoUq1ev/U8E1LNnzUYhV9CqdSsqVqzIubNnWbJ0MSeOH8fB3sGstCuKSGVSkrVaataqxZBPh9K8RYtsr92rVw+uX7lq07s0pSX1zNmgLDvtnj59Ss8e3iQmJGSbVEqpqLdp0zZX1kZbt25h7E8/WlTNT0FvMGAUTPhvDKBq1Q9ydP//V86fP8+ngweybccuq7tE/DesZ9/ePRYlTd9Egm8E4+zszDul87440denD1cuXbJJHCyFJJ2WDf4b+eCDahadHx39DH9/f3xXrSQ6+l9NAgcHFd28vfHx6Uv58ra1ymfHkcOHGTly+HP7WQHFC0lAURQxmowgkaLT6xg79uf/u0Aa8nkw/WIgPW/+QooVK5btmIiICL78Yhj16tbjr78nvvaAWhREFixYwLOoZ7zzzjsMHDQQSQ5FuF7k7NkzLFu6lOPHj6WR2a9atSqfDhlKq1aeqe0uWfHo0UM8W7W0eT8zmCtcdevXZ8nSZdmeO2vmDBYvXoRcKkORicCD+UtsQkCkbt26zJu/MEv1xfNB59m9azcA3by78X7VzJUs58+bx7x5c7CTKywKqFP2qhQqXBj/jZuybVEzGU3Mnz+f2JhYypQtw4CBWYtd5Gfu3LlNxw7t2bpth03t2qcDAxk/7ndOnAz8Tycc8oJGDRuQGJ+AwkIl04wwmUwYBBPXrgdbNc5oNHLo0EH27tnDs2fPMBoNuLq5Uad2Hbp288bVNXMhxtwgNjaWv/6cwK5dO1EqFIiCiFQqQYIk9dmBVIKzszOff/ElPj5983Q++Y0Rw7/iyZMn2QbSLzJ1yiQCT51i/4FDFokHvckkaZJQOaavVoWGhnLgwH5iY2MRBQEXV1eaNGmaoQBaRuj1ej6o+h4FVI42e0FrDXoWLlqSraBaeFgYffr0IjIiEhBRyhWp729RFDGZTKl7RXv27MVvv/+RY5/2FJYuWcyMGdMtCqhTdBMWLlpM48ZNcuX+/4+MHDkclYOKb2xo19Zo1HRo15blK1ZSu/br0UHILwwaOIDz587lKJhWJyexddsOqlSxXnQ2OTkZtVqNg4NDplsWc5sXHXzCwkJRKs1dr3q9HhcXF/oPGEjPHj3z5Za83CBfC5Dt3buHY8eOsmTZCosCaYBixYoxb/5CPh08kN27d9GuXXqz91eJIAhUq1aN04GnaeXZKlcDaTALA9WtWw9BEEhMTMRkNOLs4mKxTUAK9+7ew97ePkfJB5lMxp07ty06d+So0ZQsVYo5s2cREx2NVCpFKnm+EMasOCqRSpHL5fTt48PIUaOz/DcJJoFTp04BUKFiBd57P2vJ/i++/BJBMLFgwXwUophOsCLNtQUBg8lEocKF2OAfYNFer7NnzxIbEwsS8GydtYhUfsfP15fmzVvY/JCtV78+Tk5O7Ny5g65du+Xy7P7bqFQqEuLic3QNURRt0pqQy+V4erZ+bSIkbm5uTJk6jZ/G/sxG/w34+/sTExONXq9HpVJRtXJlBg4aTLNmzV97UvVN49Gjhxw8eAD/gM0WB9IA33z7PRcu9GD79m0W2UK9yWQUSIPZtqZ/f9uTnwkJCQC2OW48RyGXExcXm+15xUuU4MDBwxw7epQVK5Zz5oxZT0QqlWI0GrG3t8fbuzs+Pj42C0BlxqdDhpo7FqZNxU5ph1RCGh2SfytakreBdC6g0Wg4sH8/K339bBrv6FiAVp6t2bx5c74PpkNDQ9m/bz/du3fP9HucE4oXL5YTK3UEQcBkMtm8dc3BweGV6z85OjrSt28/+vbtx927d1OTic4uLlSqVMnqmOK/Rr7+1/v5+tLNuzvFixfP/uQXKFasGN7e3fHz9X3twbRMLqNR40bUq1cvT1tBpVJpjgQJEhMTs/VflkqlFClSlKLFiuPs7IKDSsWZwJOo1WbP5QoVK1K9ek0mT5yMSqVCpVJRq3YtPqz5IQBGg5GQ0BCKFimKg8oBb+/udOvmTWBgIH5+q7hz+zYajQZ7e3tKlixJr1598Gzd2iI/TKlMSpeuXdi7Zy/dvLtZlH3/avgIVI6OzJk9C4PJZN6rIpebPTRF8wNRAHR6HXXqfMSMmbMytT96EW2yluPHjwNQs2ZNSpQoke2Y/MzBgwcY+8uvNo+XSCS09mrDgf3780UwHR0dTXx8PO+8847NXq25Rdly5QgNCcnRNUyCQPl8vK/fzc2NoZ8Ny9ZN4C3/smb1ajw8Glr9bpVIJHTt2o3Vfr75PphOQafVERkZSekypbM/2QJyo/ArYpm6L8/Pa9a8Oc2aNyc6+hkR4RFodVqcnZwpXaZMnnb7DBn6Gc1bfMKqlSvYsmUzJtGAXCZHRMRgMODgoKJf//706tX7reBYDomOjsZoNOYoKVK+fHkuXrBsO8+bilarxW+VHzqdjq1bttKrd69cL1K1bdeBbdu2mS2gbPhC640GqlWrbtF68U2kUqVKr3sKbxz5Npi+desWV69eYdyErIU+MqNDx04sX7aU4ODg12Zc/yKva0+lpTioHFL96zKjSdMW6YQPVCpVajCtVJjVPbVaLVqtlpiYmDSWQRGREaxasco8zlFFkSJFcHd3x8PDw2J/UDC3rD1+9JgbN25gZ2eHVxuzVVbp0qUZ/Olgq6pQgwYNpk8fH3bu2M6SpUt4cP8+KTsjHBxUdOrcmQEDBlq1V8XewZ5u3t04duQYzZs3t3hcfiUuLp6iRYvm6BpFihTlfFBQLs0ob7ly+QonT5ykaNGiDPvi9QZwffr4EHjqlM0v/RS7ub79+ufB7N7yJiKKIps3b8pWRCszvNq0Zf68edy4cT3f7329efMmmzdtRiaTMWLkCKtEijLD2dkFiUSCIArIJNa7doiiORC1xn4xhUKFCr9yq8GKFSsybvwEvv9+DBcvXSQ+Lh6ZXEbBggWpXbuORdvM3pI9Op0WpVKZoy4bOzs7tFptLs7q1WNvb0/LVi3ZuWMnd+/eZePGjXTp0gW5IvfCHQ8PD4oUKUJsdLTVInsp7+JPh7x1e/kvkW+D6b17dtOwUSObXigArq6uNGrUmL1
7dr+WYFoQBAL8A/ig+ge89957ubZPKa8oUaIEOr0OmZ19pnN9+PABVT+oRnxcHNHRz0hK0pCYmJh6/MGD+zioHBg5ajRJSUkkJSWlyfbHRMek/v8kTRKPNI9weiE41yZrmTplKiqVCkdHR1SOKgoXLpwaLAMsXLCQqKgoRMEc8KZU/lP2ldjyorGzs6NrN2+6dvNGFEXUajV2dnY5qjpWqlTp/yK7Z96XZ7TZZiwFhUJubgnMBzx+bPZYfRVCKNnRuHET3NzcSIiLt0mEzGgyIZfLadumbR7M7i1vImq1moSEBNwr2+a17ejoSMlSJQkJCcn3wXTp0qWRSqXodXpOHD+RK1tyFAoFHh4enD8XhExpfTBtNJmws7OjVq3aOZ7Lq6SAk9PbNu48xKmAE3q9Hp1OZ3O3gVqtTrPmyq/Uql2LiIgIgs4FERISQnJyMk6K3P13Df50CJMnTUQQBKvWlTqDHpWjY57bLwqCwNEjR9i8eRPh4eEYDAZcXV1o0MCD7j162hw75RW3b/+Dn68vd+7cISlJg7OzMx9+WBOfvv2s7pB6HeTbYDo6JpoSxXPWHlu8RAmiY2KyPzEPuHXzFrdumf83dNjQN/7DUq1adUqUKEFUZCR2zzNxH9asTWjIUyIjIwB49PA+oSFP0enSZzZFUSRJm0T7jh2o8WGNjO9RvRoVKlYgKiqKqMgooqKi0tgGaDQaBEFArVajVqsBc9D9ItpkbWogXbRYUWrWrIldDmyBXkYikdj8sjGZTCTEJ+BW8M16iOUl5t+XM/EJ8eSkSTI+Ph4X55z7Jr4KQkNCAfNC/HUjkUgYMmQoU6ZMtvqlL4gCAiL9fPqifAXCb3qdjp27duK/YT0RERFotVocHQtQuXJl+g8YQN269fJ8Drbw6NFDli9fzpnTgSQmqpHLZRQpUhTv7t3p1KlzvhPiSk5OBsjRnjwHewe0yfm7wgVQoEAB6tevz/FjxwkKCuLjuh/nyiJ00KBPOXv2rG0dIxLo1av3a99C8pY3i8JFilCoUGHOnT1Lo8aNbbrGubNnqVvvzXzOZoTJZEIikWT4XvNq44WzszPu7u44OZvXbKIg5lrLd+/efTh39izHjh1FgWWFGp1ej0QqZcnS5Xm2x9hkMrFy5QqWL1tKQnwCID7XZzALb165fJlZs2bi5dWGkaNGZ2vNldcEBQUxbdoULj9XRxcFMdX26/q1ayxduoSWrVrx9dff5plaeW6Qb4Npo8GI0jFnCzyFQoHmeVD2qrly+QoA5cqXe+MD6RQGDf6UyZMmAlCxkjuly5SlRMlSHDtykKSkJARByDCQBnM2XSaTZVvhcnR0xNHRkXLlyqU7VsCpAN49vEnSJKHRaEhKSkpn/9C8RXMUCgVly5bNE+GJnHDq5ClOnjhJs+bNcsX+LL9Qo0YNTp44YbEFREacPHmCevXe/N9ZclIyRqO5gu5W0M3sX6zXv3KxkBfp268/N4KD2b1rJ0rkFr30BdEsqle/fgNGjByVp/PTarXMnTObtWvXYDIazVZaUhkSIFmjITI8nGPHjlKiRAm+/Go4nTt3ydP5WEpwcDCTJ03kzJnT5oTdC4FRXEwsf//5J5Mm/k2fPj4MHzEy3yjRpyQLExMTbU4EqNWJ/4kKF0CDBg24cP4CSqWS5KTkXAmmPRo2pEiRIsRER6cmpy3BYDSi1evo4+OT4zlkxePHj1mzZjVXr1wmMTERlUpFxYqV6N2nD9WqVc/Te1tDVFQUfn6+nA86R0JCAiqVisqVq9Cvf38qV7ZeJTk/I5fL6dGjB5s2BdgUTD98+JBLly4yY+Ysm+fw9OlTgoLOkRAfj0wup2DBgjRq1DhXnwUXL17Ad9Uqjh8/llpUcXZ2plmz5vTr3z/N59OjYdrtgUePHiU8PJxWnq0olEPVaZlMxsxZsxk1cgTHjh5FKpiQZ2JBl+JDL5FK8Vu9lurV8+Y7ZDKZGD1qJEePHEaCBDtFxj72UiQc2L+P48ePscE/gAoVKuTJfLLj2LGjfD7sM2RSaaZOQVJ7CUcPHyHw1Ck2Bmx+bXPNjnxrjfX3X38SExvLmB9+tPkaUyZPwtnJibE//5KLM8sejVrD9OnTEQWRjp06ZlqpfdPQaDR4NKiPg50drTzbIJPJePrkMZcuns9ynCAIGAQTAwYM5Otvvs3TORqNRg4eOMDq1b48efIErVaLSqWiUiV3BgwYSAMPj9fSUh8ZGcniRYsRTAK1ateiXft2qccCA0+xYvly7t69Q1JSUup8Bw0eTP36DV75XHObI0eOMOb779i6fYdN1ZSHDx7Qr28fjh47kaO91/fv3+fa1askJCYgl8lwcytIAw8PnJ2dbb7my2g0GjZv2syD+/c5dPgAYWFhAMjlCipXqUyf3j6079DhlVcrRVHkhx/GsHvXTqRIsrGbMyKIUL9BA+bNX5CnKp1arZYB/fsSHByMFEmmqvlmD3cDJkFg6NDPrPYrz22uX79G7149EQUhjd3Qi7xo3ffhhzVZumx5vqkmtvykBb19fGjfvoPVY0NDQ+jZ3ZuDh45QsmTJPJhd7qHVaklKSsLJySnLvbuhIaEUKVokV/f33r79Dz17dMdoMFgUUBuMRvRGA5MmTaF9B+v/LpZw48Z1pkyezJkzp7G3s0MwCeYqESIgwWA04O7uzuivv6VZs2Z5MgdLCAsLZdKkiezbuxc7pR3C8wqliDmhpdPrqVmzJmPG/MiHNWu+tnm+asLDw2nerAkrff2oUMEyq7YUJk+aiFabzOzZc60al9JOvHbtGgIDT1G5chVc3dwwmYxEhEcQHh5G23bt6dOnT44S6tevX2Ps2J94+OABrb3a4Nnai8KFCyGKEBUVyZ7du9m/by9VqlTh74mTcHdPu00lPj6euXPmYjKaPytVqlShYaOGlCyVs2eUyWRi0cIFrFy5Am1yMqIoIpVIkUie74+WStHqdNSv34Dvvh+TZ9tKTSYTX48eyZEjR1BIZZZVyg167Ozt8d+46ZVXfQMDTzFk8GDkMplFW9B0Bj0OKhUbAza/9mp6RuTbYHrvnj1MmDCOTVu2WeQD/DImk4luXTvzww8/0rZtlITasgAAIABJREFUu+wH5CJXLl9h29ZtyBVyvv32W5R2li2wkpOTSUxMxM7ODicnp9di53L58mUWzJtPqVLvoNNqOXL4AAaDIdPzUypc9erVZ/6ChXm2MBdFkaVLl7B0yWKSkpJAFJE9r26JiAiCiEkUKFKkCCNGjKJL1655Mo8M5yaILF+2nJCQELOn7ZefY2dnx4ED+5k8aRKhoSHIJNJ/PXAREUQRkyBQqtQ7fD9mDJ980vKVzTe3EQSBlp80p1//AbS1QT1/8qSJ6HRaZs2aY/VYg8HAgQP7WbNmNZcuXqRy5So4uzhjMpqIiIwgMiKCtm3b5UrF5fHjx/z268+cfd5m17FjZ0o9V/RWqxM5HxTEpk0BxMXG0X/AAL76avgrTeyIosjyZctYunQx6kS1+Tsikz1/6f9rN2dnZ0fffv354osvX0kgfTM4GEUmGf2XMZlM6IyG1xpQ37p1i5
0dSrd3c7IyEyo2lqtv4Wr0VcY8e27cTExABgTbUSHR2NXq9HUVW6dO1yz3FHjx7l0OFDgIbD4WDVylXExMTQvEVzT74Mjzh96jSpKanodDqKF898Bj0iPJwNGzcQERGBzWYjKDCQBg0bPjT9t4UQwh2STAshRBYCAgPx8/Pj7Nkz1KtX3+XznDt3lipV7pwh0jSNTz7+iHnz5vLII1UYO+4pmjVvgY+PD5qmcf36dTZt3MBHH37I++//H29PfYf2HTq4+5KyzeFwcPDAQerVr4eXlxcAwYWCeX7S85jMziX28fHxPDNxAocOHcTLYsGo12PxLYBOp0PTNFRVZe/u3ezcsYNSpUoz/bvvKV++fE68LPEACggIIOb6dUJCMp+dVlWV+Ph4AKpWq0q9+vXw8/cnNiaGwLp17zjWbrfz008/snDBfCIjI2neoiVBQUE4bAqapjH9229YsGAuw4aPpGnT+yf5PHL4CABVqlQhIODehQCPHTvKd9Ons3fvXrwsFhz2tKXtOr2O2bNnUbBgQUaNHkPfvv2wWCy5FrsQQuQnUoBMCCGy4T/v/x/nL1zgvx986NL4iIgI+vbuSWhoGKXLlAHSEum3p77F9h3bef8/H1CtWrUMx6uqSuimjXz80Yd88OFHdOrU2aU4nPH333+zcf1GYmJieKzeY3Tq3Mnlc8XHxzNwQH8uX7qIUW/IdAZR0zRsdjsWby+WLV8pCbXIltdeexWDQc/zk17M8tirV64CpO97jouLo2f3rmzYGEqpUmnLuu12O88+O5EL5y8weMgQWrVuk74y5EZcHMnJyXj7+LBu7RqWLF7EuHFPMWr0mBx6ddmXkpzCooWLuHr1KkOHDaVsubJ3HbN8+TLefedt9DodJoPxrvejpmk4FAfodDxSpQpz5y3A29s7t16CEELkG5JMCyFENly6eJEOHdqxYtVPLhX0+fabr7l86RIzZs5Kf2zqW2+yfcd2vv5merb3HO/evYupb07J0YQ6JSWFzaGbOfXzqbQHdNCgQQM6dHRtRjwhIYEB/ftx+dJFTAYjOp0uW+OsdhtmixfLlq+Q4mYiS2fOnGHQwP6sWbfhrsQvNTUVTdMyTAjnz5vL2TO/8cOMtPfnrUQ6OiqaTz//Isu9/BfOn2fihKcZM2bsfZFQA4RfDSek+N2zpKNDbAAAIABJREFU9LcSaZPBiNFgyPQcmqbhUFUeqfKIJNRCCHEP0sdECCGyoXSZMjRv0YIP/vM+NpvNqbGnTp1i1coVDB8xMv2x/fv3sW7dWqcSaYAWLVryxpS3eHPKGyQlZV7MzFWaqvHHH38AUCykGKNHj3Y5kQb4bvq3XHIykQawmMxYU1OYMuXBaCUmclbVqlWpXPkRFsyfd9dzhw4cYtH8hfxy+vRdz0VHR7Ny5QqGDh2e/tgXn3/GtWvXspVIx8bEkpKSwtffTOf7H75n75497r8YD7hXIn3q1KlsJ9IAOp0Oo17PubNneevNKTkRphBC5GuSTAsh8j1N00hMSCAlJSVHr/Pxx5+SkJjAq5NfznZCferUKV6c9ByTXnjxjj2VCxbMp3OXri5VwW7Tti1FixZj9eqfnB6bHT6+Pjz55JO0bdeW0WNGu9UCyGa1smTJYvTonEqkbzEbTZw4fpy//vrL5RgykpqaSkJCArJA68Hxn/9+wLq1a1m4YH76Y1arld9+/RWbzUZqqvWO46Ojo5k44Wk6dOhIs+ZphcRSUlJYtmwpL774cpaJ9Pm//2bJosXs3L6TwkWKMGjQYObMme35F5ZNDocDxaFk+PyMH75Hr9NnK5G+RafTYdDp2bBhfXrRNiGEEGkkmRZC5EupqamsWrWS3r16ULVKZerWrc2jtWpQo3pVnpk4gQMH9nv8mv7+/syfv5D4hHiee3Yix44dzfDYxMREli1dwgvPP8tzz09i+PAR6c9duXKFXTt30qt3H5dj6dWnzx0Jg6s0VePQwUOsXLHyjser16hO4yaNna7U/W/rN6xHcShOfXm/nV6vx2K2MG/eXLfiuOXIkSNMnDie6tWqUKtmdR6rW5sqj1SiY4d2LFu2lOTkZI9cR9zpxInjTH75Jdq0bkXDBvVo3qwJvXp2Z+7cOenFwDyhfPnyLFy0mGVLl/LltC+4fj2aaxHX0m+Y1KhZA0i7AXfw4AEmjn+KFi1a8tZbU9PPsXbNakqXKcMjVapkeb2SpUphMpnQNI0zv52hS5euHDlyJM/6yf/262/89z//ZfbMuxP669ej2bZtKyYX3ot6vR6LxcKyZUvdis/hcBC6aRMDBvSjUcP61H60Fo0a1mfAgH6EbtqEw+Fw6/xCCJHbZM+0ECLfmf7tN8yaNZPAoCC6d+9B4yZN8ff3R1EUoqIi2RwayqaNGwgOLsRbb01Nn3HylPj4eL7+6ktWrVpJ4cKF6dK1G8WLl8BisZCQEM/Ro0fZHBpK+QrleWrc03To2PGO8XNmzyZsy2a++vpbl2NITU2lfdvWrF6zlkqVKrt0jshrkaxbu44rV64A0LNXz/Rkw1OGDR3M8WPH8DK7Xg3Y7nBgMBk5dvyky+c4fPgwr706mfDwcIwGAwa9Ab1OBzrQtLQv+TpDWouuYcNH8OKLL7k0ky7udPjwYf7vvXeIiLhG5y5daNasOf7+/tjsNi5fusS6tWs5dfoU3bv3YMobUzB7qGr0+fPn+eTjj9i9exft23fE3y8Ak8lEtZrVuHjhAmvXrMZmtzNixEhG3Lb9AmDI4EG0atOGLl26Zutau3bs4tdffqGAnx9Dhg3h3XfepkqVKkyYMNEjr8UZYZvDOHjgICVKlGDUmFF3PLdk8SI+/vgjDLj2d213OAguXIht23e6NH7B/HlMm/YFDocD1aFgMBjQ3Xz/KYqC3mjAaDTy3HPPM2ToMJeuIYQQuU1aYwkh8g1N03jvvXfZtGkj7773Pg0aNrzrmEKFClG1ajWefno8q1f/xPjxT/HpZ5/Ttm07j8Xh7+/P629M4flJL7BmzWrWr1vH9evRWK1W/Pz9eaTyI8yfv4Dadercc3xMzHVCimXevicrXl5eBAUFcf36dSpVcn78kcNH2By6GVVVgbSZ6HLly7kV071ERUWj17k3u63X60lMTETTNJcS3L179jBu3Bj0Oh0+Fq+7z6EDg9kMgAMH8+bOIfLaNT748CO3Z+YfZnv37OHZZycyfsJEOnXucleP9MqVH6F1m7b8c/kyH3zwX8aNG8P0735Ib8HmjnLlyvHNt9MJD7/KwoUL+fOPP0lNTeHosUMULVqMya+8SuvWbTDcY5Y2OjqaEsWzv/2ies3q/PrLL/j6+JCcnEzx4sWJjo52+zW4IiI8AoCixe4ukhgVHY1id2BwsVe9XqcjNjbWpbGff/Yps2fPwqjTYzIawXznz/3WyhWHzc5HH31IdHQ0k17Iuiq7EELkNUmmhRD5gqZpTJnyBrt37WT69O8pebN9TUYsXl706z+AQoUK8cKk53Ok+rWPjw8DBgxkwICBTo2z2WyYbiZv7rBYvJwuhnZL4cKFUVUV/wB/OnXqRKXKLmTk2WCz2XB
3glfHzarCDsddCVlWtm/bxsSJ4zEZjJizMdZoMKLXqWzatJHU1FQ+/2LaPRMukbldu3by/HPPMvXtd7NcGVKyVCk+/exzXnt1MqNGDmfOnHkem6EOCSnOyy9PdmqMojgwGrP/9ahQoUIMHDyIwKBAAIxGIyl5tF0gPiFtyXzBggXvek5xOFDdWYyoI/3mmzM+/uhDFiyYj/keLbj+zWg0oldV5syZjcPh4OXJr7garRBC5Aq55S6EyBd2797F5tBNfJuNRPp2rdu05Y0pbzHljddJTEzMwQizz9/fn4SEBLfPEx9/A3//gGwda021oir/+yJctlxZevbqyfjx43MskYa01+rubiJN0zCZTE4n0larlZdeegGTwZCtRPoWvV6P2WBkx47tbN4c6my4Dz1N03jv3Xd56eXJ2d5iYbFY+PCjT7gRH8/qNatzOMLM+fsHEBvrXKGtW4k0QGxsLP4B2Xtfelrz5s3p1LkTFStVvOu5gMBAjEbXbwxpmoavr69TYy5dusScObMxZdFb/nZ6vR6T3sCcObO5dOmSK6EKIUSukWRaCJEvzJ0zmx49exFS3PnK0m3atqVkyVKsWLE8ByJzXrVq1Tl+7Ch2u93lc5w9e4bU1FTKly+fjWPP8u0333LgwIE7Hq9RswZmi/sz5JmpXac2uLlU2qE4XNoXvnbNGhSHgsno/LJWvV4PmsZ306c7PfZht3PHDhTFQdt27Z0aZzKZGDhwEPPmeqbY3O3++vMvDuw/QPjV8CyPbdK0Kdu2bXPpOvHx8Vy+eJkmTZq4NN5dj9Z+lMfqPUaRIkXueq5x4ybY7HaXb26pmkbLli2dGjN71kzMJpPT2yX0ej1mk4nZs2Y6NU4IIXKbJNNCiPven3/8waFDh9yqft23f38WzJ/n0jJFT2v5+ON4eXmxfdtWl8+xcvlyOnfpir+/f4bHJCYksmLZCpYvXU5CQgJ79+7FarVmeHxOGDJkGFar1eWfu6ZpoNczavRop8d+//10NE11uZCY2WTmjz9+5+RJ1wufPYzmzZ9L7z59Xdpv3qp1G+Li4jh48EDWBzth27ZtbAnbwu5du7M8duCAgezZvYvr153b95wQH8+i+QupVKkyJmPO3qRyRdWqValUqRI2h/M38TRNw6EojBg5KuuDb0pOTmbVqpXoXSx4pkfHqlUrSUpKcmm8EELkBkmmhRD3vW3bttGwUSMKFSrk8jnatGlLVFQU586d82BkrtHr9QwaNISVK1a4ND42NpatW7cwNJOKt3a7ne+//54zZ84AUKp0KUaNGoXFQ3tRs6tcuXLUq1fPpS/wkDYrbbFY6NChY9YH3+bSpUtcunQJswuz0rfodTpMRpNbNz0eRmfPnKF+gwYujTUajdStW5ezN/9uPaXBzXjO/X4uy17JxUJCaNGyJXNmz3LqGiazmZiby8NDQ0P5888/XQvWDYpD4e+//2bXzl33fH7M2HHo9HqnZ6dtdjtVq1ZzaoXI5cuX0YHLNQcMhrS64//8849L44UQIjdIMi2EuO/FxMZQqFBht85hMpkICgpyei9kTunTty8R1yL46stpTo1LSkritVcm06RJE6pWrZrhcSaTifr162O2mHmy05MMHzGcQoVdvxnhjmeefQ5FVbE7mVArioJDVXnq6fFO75eOi4vDZDK53d5KVZU8q8ycXyUlJeHrW8Dl8b4FfD1e36BGzRpp+301OHjgYJbHv/76FA7sP8B307PXvi41NZWXX3qBhIR4goOD0VSNzaGb3a4X4KyIiAgWzl/Irp27uHbt2l3Pd+jQkWbNmuNQlWzHZrPbMVnMfPDhR07FkpSUiMGJQm73YjAaSUx0v76EEELkFEmmhRD3PxdbIv2bTqcjl7/bZigwMJBFi5YQFrY52wl1UlISL056Hi9vL6Z9+fVdz6empt7x76bNmjJhwgTq1a+Xpz2TGzZsxMeffIrVbs92Qq0oCjbFwdBhwxk1yvkl3q620bqb7r7YGpCf+Pr6kpzs+tLcpKRkChRwPRm/F6PRSP0G9QkODqZa9WpZHl+iRAkWLV7ClrAwPv34I2KuX8/w2L/+/JMXJz2Pj7cP3377LYMGD0qrkt+5U/rfoM1my5XEunjx4hTwS/vZHT50+K7nDQYDX371NU2bNceuKjgUJcNzqZqG1WbDaDaxbNkKKla8u6hZZnx9C6Bkcv7sUBSFAgX83DqHEELkJEmmhRD3vcCgIGJiMv4ymx2KohAbG0dQYGDWB+eS8uXLpyfUz0wcz57du++ZuCUlJbFyxXJGDh+Kl7cXM2bMwtvb+45jTp86zbTPp91R/dZgMODnf398Ee3UqTOffPoZVrsdm92e4ZdsVVVJtdmwOuwMHTacyS62xgkMDPBIAqPX6wh2Y3vBw6hSpcocO3bMpbGKovDzyRMuFZzLSpMmTRj39DjKli2breNLlizJ4iVLiY+Pp3evHrwz9S327tnDr7/8ws8nT7Jp00bGPzWOp58ay6OPPsr06d9htlgIDApk4jMT77jOju07+PrLrzl44OBdN708SafX0ahRIwBOnjjJtYi7Z6dvJdSDBw9BRcOhqtjsdhyKA4eiYHc4sCsOUqypPFqnNstXrKKiC83sS5QogaqqKKprCbWiKqiqSokS2e/5LYQQuU2n5fYaJCGEcNKvv/5Cv759WL12PYEuJsPbt23ly2lfsHPXHqd6yOaGqKgoFi1cwLJlSzGbzTRp0hT/gAAURSEqKopdO3dQunRpBg8ZSvdu3e/owasoCmGbwzhy+AgAxUKKMXbc2Lx6KVn67bffmD1rJhs3bsBsMuFwOG6uGNAwGIxYbVYaNmzE6DFjaN68hcvX0TSNli2acSM2zqm2WP8+R7I1lblz59OgYUOXY3nYhIVt5qMPP2DJshVOrw7Yvm0rs2fNZFNoWI6vpkhJTsHbxzvrA4Hw8KssWrSIfXv3ciP+BkaDgeBChejcuQvdu/fItGWU3W7ns08+Sy/+ZzKbaNu2LfXq1/PI6/g3h93Bt998S7FixWjXoV2mn5mpqamsW7eWpUuWEBUVicPhwM/Pj2bNmjNs2HBKlynjVixT3nid9evWYjI4/5lrVxx07tqN//u/992KQQghcpIk00KIfGFA/37Ufewxp6rJ3m7cmNG0bdeOp5562sOReY7NZmNzaCgnTp7gxo0bGI0GgoIK0qZNW+rVu/uLd2pKKksWL+Hy5csAlC5Tmt69e6cv87yfxcbGsvqnn/j7/F/ExcZRoEABihcvTrfuPShdurRHrrFg/jw++eRjzC58kQew2mwUL1mC0M1bPBLPw0JVVVq3epynJ0ykVavW2R7ncDh4auwY+vbrx8CBg3IsPk3T2Ld3H3v27GHkyJEULVY0x651S/jVcA4fPswvp39BURS69+hOrUdrAXAj7gZR0VGUK1fO5WJd/5aSknLX6pW8cP78eTo92QFvs8Wp6u6qqpJis7JxUyhly5bLwQiFEMI9kkwLIfKFzZtDeXvqW8xbsIiCBQs6Nfbggf28/tqr7N6zz+WZ7fuRpmosWbyEP//8k4aNGtK2bVv0Bt
m9c0tycjKNGzVAp2lO95rWNA2rw847775Hz569cijCB9fWrVuY/PJL/N/7/83WrL7dbuetKW8QGxfLwoWL8fLyyrHYUlNS+W76d8THxxNcKJgxY8dgNudOK6ukpCSOHztO/Qb101/jrp272LVzFxYvC5UrV6ZqtapUqVLFY9fUVI0ff/wRi8VCk6ZNnP78dNe777zNqpUrMOoN2UqoVVXFoSr06t2Ht6a+nQsRCiGE6wxvv/22fFIJIe575cuX5/SpU8ybO5eWjz+e6bLK2x06dJDXX32V11+fQr369XM4ytyl0+moWKkiJUqWoFHjRuj0eVdk7H5kMpkoXrw4YWFhaS169Nmb9VM1FYeiUKfuY7z88mSPzRY+TMqXr0CFihV59ZXJFC5ShPLly2eYSEVGXuOdqW9htaYye/ZcfHx8cjQ2o8lI8eLF+fnnn0lOTiYhIcGjyWtmzGYzZcqUuWOrybGjx4iKjEJxKEReiyTmegyP1Xss/fmUlBSnq9nfbsf2HRw7eozw8HCOHD5CVFQUZcqWybUbCC0ff5zY2FhOnT4FGpkm1A5FwaGpDBgwkClvvpUr8QkhhDskmRZC5As6nY527dtz6vQpZs78gUcfrZ1p32lVVQkN3cS7b09l6tR36NuvXy5Gm3N++/U3/P3907+Mm0wmChd2r23Yg6xKlSqULVeO0NBQ0DT0en2me3FvteNq1Kgx333/Q64lHA+i8uUrUKN6Db6c9gWLFi4gOTkZ3wK+2O0OYmNi+OWX03zz9VdM+/wzateuwxfTvszxRPqWgMAAdDodFy5cwGwyU6NGjTy7aVKtWjVq1aqFv78/NpuNyo9UpkzZtL3KKckpfPrJp/x+7ndu3LiB4lAo4FfAqViLFy+OxWIhMjISm81GTGwMLVu2TF/FcvzYca5cuYLNbsNoNObI33zzFi3Q6fUcOXIYTQcOhx10gHbz5pXDgabXoWoaEyY+w/OTXvB4DEIIkRNkmbcQIl9RVZUPP/gvixYtpPIjj9CjR0+aNG2Kn58/qqoSFRlJaOgm1q5dg8NuZ8qbb9GpU+e8Dtsj9u3dx7at2yhdpjSDBw/GaLq/Cqndz7Zv28arr75CSkoyqBomkwn9bUm13eFAp9eTak2lZ6/evPvue/ddobr8StM0DhzYz+LFizhx/DgJCQmYzWYKFS5M927d6dO3L8HBuV8xXVM1Tpw4Qe3ate/b7RG33vO3C9sSik6nY+TIkfTu0xc/v+xV7HfYHZw8eZLo6Gg6dOyQ/vjXX35NTExM+r/rN6hPxyc7po1xOLhy5QrFihbD4mW565zOslqtbNy4gblz5nDp0kWsVisWi4XSpcswfMQInnyyExaL+9cRQojcIsm0ECJfio2NZdXKFSxZuoTLt7WDAmjQsCGDBg2mTZu2bi2PvJ9s3bKV/fv2A1CqVCkGDBqQI/tKjx07ypzZszly5DBJSUno9Xr8/Pxo07Ydw4ePoFy5/FsMyOFwsH37NubOmcPRo0fueK5QoUIMGTqMvnmU2AlxO1VVeevNKaxZs5qSJUoRUiyEYiHF0TSNLZs34lAc6AwGQooVp33HDjz33PMuXUfTNBbOX8i1yGskJyUD0K59Oxo1TmuvdeWfK8yaOQuAwKDAtArh7TOvEC6EEA8TSaaFEPmapmncuHGD+Bs3MJpMBAQEZHs/dX6xbes29u3dB0DFihXp07cPJrNnbxJs37aNjz76gEuXLmE0GDDo0pZDa6T9jNFBqtVKnbp1eXPKW1SrXt2j189tycnJ3LgRh91mx8/fn8DAwBxvxSTuXzabjR3bdxASEpJeZTuvqKrK8889y65dOzHq9HfsMfbx8SU5OQlI2/rStv2TmM1mjCYj48ePJ6hgkMvXTUlOITo6moCAAPwD/IG0XtVr16y947iXXn4JH9+05fhX/rmCl5cXwYWCXb6uEELkZ5JMCyHEfe78+fMsW7qMihUr0qNnD4/v7Vy7dg2vvjIZo96A2WTKMKlUVRWr3Y7BaGDBwsXUqpW3SYcQnrJ2zVpOnjiJt7c34yeOz9MbcpOef47t27elVb/O5AaPj48PjZs0x+e2WAcMHEClypU8Gk9iQiIRERFEREQQFxdH5y7/2zYzZ9YcLl++TM1aNWnVuhUBAQEevbYQQtzvJJkWQoh84Hr0dQKDAnMskTYbTZiyuUfYarOhM+gloRYPjLi4OKZ/Mx273U6NmjXo2atnnsTxyy+n6d+vLxaTOdNE+hadTkeZsuWoVDmtGvkzzz1DSEhITocJpM3mf/3l1yQmJgJgtpjp2rUr1apXy5XrCyHE/UCSaSGEuA+lpqbmaK9dgEsXL9KuXRu8zJZsJ9K3WG02zF4W9h84JBWvxX3nwoXzLFywgM2bQ4mJiUHTNAICAnn88ccZMnQY1ardnfAdPHCQsM1hAAwdPpSyZcvmdti8MOl5tm3ZgtnJWg8GgwGfAgXo0asXzzzzLAAXzl+gbLmcfQ2KonDs6DF27tyJqqi0aduGevXr5eg1b/nzjz+YOfMHtmzZkl7fISAgkD59+jBk6DDpciCEyBWSTAshhAvsdjt7du8mPDyc5OQkfH0LULp0aZo0bZppH9XsiI6KZs7sObRu05q6j9X1UMR3e++9d1m5Yjlmg/NVqzVNw6Y4eO+99+nWvXsORJd/xMTEsGf3bmJiYlA1lcCAABo2akzJkiXzOrSHTnR0NK9MfpmjR4/Qtl17nuzUmWLFiqLT6bl+PZrNoaFsWL+eSpUq8cGHH91RUE9VVVb/tJqyZctSo2aNXL9JFBcXR5PGDfE2W1z6DHE4HBgtFg4eOsyRw0cI2xxGvfr16PhkxxyvBxAfH4+qqAQG5XxhsujoaJ59ZiI/nzyJ0fi/+g6Q9jtEr8Nqs9G5S1fef/8/D0wRSiHE/UmSaSGEcEJkZCRLFi9i6bKlGPR6ypYrj4+3N0lJSfz55x94e/swcNAg+vbt51LFW8WhMGvmLCIiIvD28WbiMxPx9vb2+OtITk6mcaMG6DVcbgFltdkoXbYs6zds9HB0+cPvv59j5owZrF+/DrPZnPaFXgN0kJKSQosWLRkzdhz169fP61AfCtHR0QweNIAaNWoy4ZlnM2wZlZqayvx5c9mwfh0LFy25byrUHz16lDGjR2LUuX4zLiE5iV2797I1bCt//fUXAI2bNKZtu7aeCjNPxcbG0qd3T6IiIzEZjJnWd1A0lQYNGzH9u+/zrIe4EOLBJ000hRAim7ZsCeP5556ldu06vDz5FZo1a37HDJLdbmf7tq2sWrmCb77+mpmzZjudSO3YsYOIiAgAevbsmSOJNMDWLVsA1xNpALPJxJ9//sGff/xBxUqeLXp0v/v2m2/48ssvsJjNeJnMGPQ3v6zf/G7v4+XNwQP72bt3D08+2YmPP/lUqoXnoJiYGAYPGsCjtevw0suTM/1Ze3l5MXbcU3h5eTFo4ACWLF1KmTK5v6T73xISEtC7kUgDmIwmEhMTGTBoAGvXrOXUz6c4sP8AxYoVo2atmh6KNGMpySn8+uuvXL58me7du6PTe+5vPjExkQED+hEdF
ZVpIg2g1+vRaToOHzrIc88+w9fffOuxOIQQ4nbufWoLIcRDYsuWMF6Y9DxvTX2baV99TYsWLe9aimkymWjfoSM/zJzN0+PHM3rUSI4cOZLBGe/2zz//sH9/Wi/pBg0bUKFiBY++htuFh4dnq8BRZnQ6HV4WC+ER4R6KKn+YOXMG33zzFT4WL8xG0z1nvQx6PRaTGW+zhc2bQ3nj9dfyINKHx6yZMyhZqlSWifTthg4bzhOtWvH5Z5/d8Xh0dDS7d+1mc+jmnAg1Q97eXmi4t1jQoTjw9vZGr9fTpWsXypUrR9WqVT1e4TsjiYmJbNywkdOnTnM1/KpHz71kyWIiwsMx6g3Z+h3rdDqMegO7du3k8OHDHo1FCCFukWRaCCGysHfPHl6Y9DxvvjWV1m2yt1yyd5++PD1hAqNHjeT06VPZGhMSEkKzZs0oUrQIbdq0cSfkLCUnJ6Eqqtvn0en1JCUmeSCi/GHFiuV89uknGSbR/6bX6zEbjKxbt5YP/vufXIjw4WO1Wlm+fBlDhgxzevZ/8OAhbN++jcjIyPTHIiMj2bljJ0cOH8HhcHg63AyVKFGS1NRUXN19p6gqer2e4OC0ns8Gg4H+A/vTp1+fHC9meEvhIoUp4FcAgPN/n/fYeTVNY97cOaBqTv2OdTodOmDunNkei0UIIW4nybQQQmThk08/ZtToMbRq7VyC27t3H7p07cpXX07L1vEGg4FWrVsxZswYjKac3YXj6+uL3uD+/wJUVaVAgQIeiOj+pygKn3z8ESaDEaMTezD1ej0mvYF58+YSc/16Dkb4cFq/bh3FS5Sgeo0aTo8tXKQIjZs0YenSJemPFSlSBEj7246Li/NYnFkpVaoU1apVx2a3uzReURU6deqMxWJJf+z24luapvH7ud/djjMrt/agnz/vuWR6//79JMQnOPW+u8VkMLJz5w6uX4/2WDxCCHGLJNNCCJGJkydO8Pdff9O9h2t9Z/v1H8DevXu5fPlytscYjDlfLKd06TIoquryLBikJRtWq5VSpUt5MLL71/bt20hOTna6jRik3SixmC2sWLkiByJ7uJ04cZxmzZq7PL5Z8xacOH48/d8W8/+SUZvN5lZszhozdix6g8Hp96WmaTgUhREjR93zebvNzrIly1i6ZCm/nP7FE6FmqGzZshQtWhRfX1+PnfPkyRPowKW6A3q9Hi8vL3777TePxSOEELdIMi2EEJmYP38eHTp2dHn2NSQkhMZNmrBw4YIMj9m7Zy9X/rniaoguadW6NSaTCYeiuHwOm8POo48+el8Ub8oNc2bPdnqZ6e00VWX+vLlp7XuEx8THx+PvH+Dy+AD/AOIT4tP/bXfYKV68OL6+vvj7+XsixGxr06YtwYWCsTvxvtQ0DYeqUrfuY1StWvWexxhNRmz2tBsDGzduJCEhwSPx3kudunUY9/Q4evZy7QbkvcTsqqd6AAAgAElEQVTHx6O48Vml0+mIj4/P+kAhhHCSJNNCCJGJI0cO80SrVm6d44knWnP40KF7PhcRHsH2bduZNXMWf/7xp1vXcYbZbGbQoMHp1aedpWka6HSMGj3Gs4Hdx06ePOHSrPQtJqORqKio9GrtwjMsFgs2m9Xl8Var9Y7Z6IIFCzJ67GhefPnF9P2/ucVgMLB02QqCCgZhz8Z+7VuJdIWKFfn+hxkZHqfT6ejWrRtmi5nUlFTWrVnnybBznMViATcLJt6+/F0IITxFkmkhhMjEjRs3CAoMcuscgUGB3Lhx772XO3bsACAoKIjy5cu7dR1nDRo8BJvd7tIeTZvDTmBgIK2d3EeeX9msVhwOh1vtrXQ6HQaDgRs3bngwMlEsJIQLF1zfn3vhwnmKFSvmwYjcU7hwYVas/JGg4GAcqorNbr9r2bemaVhtNuyqQsVKFVmwcBE+Pj6ZnjcgMIAOHToAEFwo2CMFCO8lJ1ZehBQrdsf+b2domobNZqNo0aIejkoIISSZFkKITOl0elTNvS+Hmqqh09/9cXvlyhX++P0PAFo+3tIjBcGcUbRoUWbMmIVDVZxKqK12G17e3ixYuChbFa0fBPf6/bnq3y3VhHt69OjJ1i1bSExMdHqsoiisW7uGnj17AWl9kq9du+b0ec6dO8d///M+EyektcSb/PJLrF79EzarazPmhQsXZnPYFt5+511Kli5Fqt2GqgNVB5oOkq2pVKtRg88/n8byFauyTKRvqV2nNmOfGkv7Du3d+rxJTEggIiKChISEuxL9E8dP8MF/PmDh/IUun//f2nfoSKo11aVE3aE4CA4OpmbNWh6LRwghbsnZcrFCCJHPBQQGuF2B+fr16wQGBt71+Kmf01pmFQwuSM2aNd26hquaNmvGDz/MZOzY0ag2FbPRlGGypygKDlXBy9ub5StWUrZsuVyONu+YTCbMZjOqprl8F1rTNBRFISDA9f294m7ly5endp06bFi/jn79Bzg1du+e3Xh5edG0WTMAjh07xvZt2ylRsgSjRt+7mNfttm3byjdff8W5c+cwGgxoqga6tN/1lrAw3nl7KgMGDmLs2HH3/AzIjNlsplv37nTr3p0zZ85w6eJFklOSKVCgAJUqVXL5/Xf7LLyqqGnto/RZr7iICA9nyZLFLF68iBs3bqDX61FVFT8/P/r1H8CgQYMpUaIEMTEx2Gw2jxZvK1iwIK1atWb3rl1YnLwZpTcYGD5ipMdiEUKI20kyLYQQmWjRoiWbN2+mUeMmLp8jLCyUli1a3vV4u/btqFip4s2Z6+wtH7ZarVy8eIH4+ASMRiMFCxakdOnSLscGaQn1okVL+Pzzzzh06GDa/lHtf4W2VC2tf63NbqdDh448P+kFt6+ZHzVu0oRDBw641J4H0gpblS5d+r5aUnw/SEhI4MCB/cTFxaFDR2BQII0bN3Gq6N+Y0WOZNOk5atepyyOPPJKtMeFXrzLtiy+YMGECOp2OlOQU9u3bB5CtJcEzZ/zAtGlfoAO8zZZ7bgFwKAqLFixgS9hmli1fScGCBbP9mm5XtWrVDIuLuSoqKorVP62mWrVqNG3WNMPjFEVhypTXWf3TT3hZ0j4b/H0LoNPp0m4QORwsXrSQuXNm06FDRxo2bAxAUEH3tsf82+gxY9mxYzsORZ/t96DNbkNnMKSvPBBCCE/Tae70RRFCiAfcuXPn6NmjGz+tWefSF+EL588zdMggdu7ak96/1hWXLl1iwYJ5LF+2DKvVislkSt8LWLZsWUaNHkPXrt3w9vZ2+RoAly5eZP6C+RzYv4/4+AQMBgOBgQF06NCRvv36u5wMuMtmtRIaGsrp06eIiYnBZDJRqFAhWrVuTd26j+VKDPv27mXs2NEZJk5ZsSsOXnzpZYYMHZYD0eU/v/9+jgXz57N27RrKlS9PoUKFAYiKjOTixQt0696DIUOGUrFixWydb8nixXz22Sd88eXXWSbU4VevMmH803Tv0YMXXngRgLDNYRw8cBCjycgzzzyDn79fhuNvJdImgwGDPvPETtM07KpC4cJFWL7C9YTa09asXsPPJ39Gp9MxcNBAKlSscNcxDoeDZ5+ZyL59ezHq9JluUVBV
FQWNzp26YTabadGyBY8/8bhHY169+iemvPE6ZoMxyy0mNrsd9DoWLlosS7yFEDlGkmkhhMjCoIH9eaRKVcZPmOj02PfefQcd8MW0L126tt1u5+WXXiQ0dBNeFgs6DYy3VZTWNA2b3Y7u5v7H99//L527dHHpWvejf/75h/nz57Fi+TIURUFVFHQ3S5Dr9DocikKJEiUZMWIkPXv1wsvLK8di0TSNFs2bciMuDovJ7NRYh8OBTXFw6NARCvhlnKQ9LBbMn8cnn3zMk50606t3H8qWvbO92vnzf7NyxQpCN23k1VdfZ8DAgdk67+LFi/joww/o3KUrvXv3oWSpO3ugR0VG8tNPP7L6px/pP2BgeiINsG7tOk4cP0Gz5s1o1TrjCv7Hjx9j6JDBmI3GLBPpWzRNw64oPFa/HrNnz83WmJxmtVqZPXM2UVFRWLwsjB4zmuDg4DuOmTD+Kfbt3YtRb8jWDSRN0/APDKJ2nbo8+9yzd/1ePWHZ0qW8++7bGPUGjAbDXQm+oihp+8tVlXnzF1CnTl2PxyCEELdIMi2EeCCdPn2KJUuWcPHCBRITE/Hx8aZYsRB69upFs2bNnZpZPHnyJMOHDeGpp8fTu0/fbI/7bvq3rFu7hiVLl99RqVtVVM6dO0fFShUzrVBrt9sZ//Q4Dh06hEl/95fGf7PZ7dgVB//94EO6deue7TjvVz///DNDhwxOW3IOGA13f6HXNA2bww46HRUrVWL+/IUu9wTPjq1bt/DMxAlYTOZst8lSFAWrw86kSS8weszYHIstv1gwfx5fffUVX371NRUrVcr02HNnz/L8c8/wwgsvZTuh/u2331iwYD4bNqynSpUqFC1SFJ1eT8z16/z880meaNWKIUOGUb9+/bvGhoeHU7BgwUzbKD391Dj27d3j9A0VVVVJsVnZsnU7JUqUcGpsTomNiWXGjBnY7XZ69e5FlSpV0p87c+YMvXv1cHolhqZpWB125s1fkGOrRn799Rd++P57tmwJw9vLC1XVbnbO0qGoCv369Wf4iJEUL148R64vhBC3SDIthHigbNy4gdmzZnLu3DnatmtPjRo18PX1JSUllfPn/2bjhvUEBgYxePAQBg8Zku1q1EePHmX0qBGMHjOW/gMy/1Kvqirffzed9evWsmDhIipXvnPJ6fm/z7Ng/gJMZhPPPvcsvr6+d51DURSeGjcmLZE2GNDrsld0x+5wYHPY+eDDj+jatVu2xtyPjh07yvBhQ9FpYDFnnbTcmvkrXaYMCxctztEiX2vXruHVVyZjNpqyTKgVRcHmcDBm7Fiee35SjsWUX4SFbWbKG6/z5VffZJlI33Irof7o40954oknsn2tuLg4du3cSWxsDKqmERAQQLNmzd1qkRQZGcnjLZvjbba4VJXdrir07z+AV1973eUYPO3ChQuYTWaKl7gz8Xxh0vNs27IFswstqax2G42bNM2097UnREVFsXv3LuLi4jAaDBQqXJgnnmiV7ermQgjhLkmmhRAPjK+++pK5c2YzdPgIunTpir+//13HWK1Wtm4JY86c2dR+tDaffva5Uwn1U0+NpWBQQXr27s2TT3a640tb/I0brF27hp9+XIWiKMyeM/euRBpg7569bN+2ncCgQJ597tl7Xmvjxg28MvllzEZjthPpW27tFTx46Ijbe6jzwpUrV2jfrg16dNlKpG9JS6gdVKxUiVU/rs7BCGHVypVMmfJ6Wnza3bPmDsWBStrf25gxY3nxpZdzNJ78onevHnTq3IUuTt7o+XHVSnZs38aSpcs9HtPxY8cpVboUhQsXzvLYpUuX8NGHH2DAtX7jDocDHz8/9u0/4NL43BAVGYXFYqZRowZO3TQoVLgwVapU49TPJ4mLiyXFamXPvn0EBxfK4YiFECLvSLNLIcQD4csvpzF/3ly++uZbBg0afM9EGsBisdCpcxe+/2EGv/76Ky++MAlFUbJ1jXr16rFnzz5GjR7N+rVr6fxkRwYN6MeYUSMZ0L8vXbt0Yt++vbz00mS279h1z0QaICI8AoCQkJAMrzVzxgzQNKcTaSB9JmnNmpxNKHPK3LmzMegNTiXSADqdDpPByJkzZzh27GgORZemV+/e7Nm7nxEjR2EwGUm123BoKoqmYnXYcagqPXr2JCxsqyTSN50+fYoLFy7Qrl17p8d2fLIT586d4+zZsx6NKS42jg0bNjD92+mcO3cuy+NjY2NRs/l5cS86vZ6EhHiXx+e0axHXmD1rNiuWr8BsMmc7kdbr9dSsVYeggsHUql0HvV6P2WzmypWrORyxEELkLWmNJYTI99atXcv8eXP58utvMkxg/y04uBBff/stE8Y/zWeffsLLk1/J1jhvb2/69etPv379+fXXXwgPDyc5Ka33a6nSpahUqXKW54iISEumM2qRdPbsWX777Vd83ZhV1lSVmTNm0N/Jvrt5LSkpiWVLl7o886fT6TAaDMz44Xse+76eh6O7U6FChXju+Uk8/fR4jp84TmxsLIqiEBgQSO3ataXQ2L+sXLmSJzt1xuJCkThvb2/ad+jIyhXLmfLmWx6Lae/evWiqho+PD+XLlc/yeE3TcGdBn+7mOe5XZ8+exWq1cuXKVVq2fIITx46SkpKc5bjKj1SlQIECaJrG6VMngbQEOzUlJadDFkKIPCXJtBAi35s58wdGjByd7UT6luDgQrw8+RVee2UyEyY+4/Q+u+rVa1C9eg2nxgCUKl0KHx+fDIvjbN0ShsVscWlW+haz0cTly5e4ePECZcp4vqJuTlm/ft3NhNj1/z0ZDUZ27NhBVFRUtpbuustssdCoUeMcv05+d/XqFZo2be7y+AoVKnD82DGPxaMoCqdPnQagUeNGmMxZ7w0ODAjE4MbfpqqpOVogz10tH2+JwWBg+7btBAcXotajtTl0cH+mY4xGIyVLpfWdv3D+b27ExQFpWx38MlghJIQQDwpZ5i2EyNeOHz/GhQsX6NS5s0vjH3usHkWKFGHN6p88HFnGunXvxsjRI+/Z1xXg+vXrKIrDrWvodDpMJhOxsXFunSe3Xbp0CU1V3TqHQa/HZDJx5coVD0UlPMFmtbrVuszi5UVqaqrH4klMSCSoYBDooHr16tka06JlS1Ktqaiaa3+jGtC2bTuXxuaWZs2b0a1HN1JTUzl96uf0x4sUKUr5ChUpVKgwRYoWw98/rcifw+Hg8KH9RIRf5cxvv6Q9pigYDIY7uhgIIcSDSGamhRD52pLFi2nfoaNbsz09e/VmyZLFDBg4yIORuc5mt3nkPDqdDrvd7pFz5ZaEhARUN5NpAIPBQFJSkgciEp5SwM+P+PgbLo9PiI/PsBaCKwICA3jq6adITU3NdpJfsmRJ6jdowPGjR/EyZ9w+615u9YQfPmKkK+HmqkcffZQ9e3YTn5iA8ea+6ZKlSlOi5P/6dv9z+RInjqfVJoi/cYMjhw/+7wQ6GDhwUKYtxoQQ4kEgM9NCiHzt/IXz1KxVy61z1KxViwsXLngoIvcFBRV0qe3O7TRNw263ExCQv5ZZ+vv7u/3aIW227H5eTvswevTR2hw4kPmS4czs37/
f7ff6vTg7Wz52zDjQ6Zy+6WNz2KlV69F8M1s7aNBgbHYbjpsF12w2W/r+aU3T8PG5u6Uf3OyrbrMxdNjwXItVCCHyiiTTQoh8LTExkQK+7iVNBQr4kZqaisPh3tLq7IiKiuLA/gMcOXwkw2Nq1aqFhnuFihyKgpeXV77aLw1QrmxZdHq9W69dURUURaFUyZIejEy4q3fvPhw7epSrV51ffn/p0kVOn/qZPn365kBkzmnarBn9+w/ArirZTqhtDju+BQrwyaef5XB0nhMUFMR7//c+dsWBQ3Hwy+mf2RoWyqYN69iwbjX79u66a4xDUbApDt544023+nkLIUR+Icm0ECJf8/H2JiXVvYqxKSnJmEwmjMac3/kSfjWcLWFb2LF9R4bHtG7dBl9fX+zuJPc66Nd/QL5bZtmpcxd0Ol36bJgrHKpK23btKBgc7MHIhLsKFixIhw4dWbRwodNjFy5YkGHveFdtCdvCt998y7at25we+8aUN+nXrz92VcHusGd480dVVax2Gz6+vqxY+SMl89kNnp49e/HBBx9hVxSsdhuapuG4x+tNW8Juw644mPr2OwwaPDiPIhZCiNwlybQQIl8rWqwYF91con3hwgWKFCnioYjcZzAYGDpsODq9a+2hFFUl1WplyJChHo4s53l5eTFw4CBc7IyFenN5+5gx4zwbmPCIic88y57du5g/b262x8yaOYMjhw/x9PgJHo0lPj6e6Kho4uNd6/v8xpQ3efW11/EPDMSuKKTarNgdduwOBzZ7Wq/xFJuVNm3bsurH1fkukb6lc5cufP/9DMpXqECKzYrNYcdm/9/rtDnspNptlCxdhq+++ua+WD0ghBC5RQqQCSHytR49ejJ16luMGDnK5Znln35cRY+evTwcmXv69x/AvLlzSE1OwWzKumXPLaqq4lAVunTpmm+/vA8dNpxFixZitdmwmM3ZHqdpGg7FQd26damVA3trhftKly7NosVLGTSwP7ab+2rNGfyOrVYrc2bPYnPoJhYtWkKJEiU8Gov3zT7uCfEJLp9j4MBBDBgwkL1797Bs2VKuXrmKzWYjIMCfZs1b0K9ffwoWLOipkPNM02bNaNqsGefOnWPxooWcPXuGxMREfH19qVSpMoMGD6FatWp5HaYQQuQ6nebOxjQhhMhjiqLwxBMtGT/+/9u778CmqreB49/stNAWKJQNgpa9ZMneKGXTsveULeB6BUTAhXv8ZMiGlk0BBWRvGQ6WCijLAmV30AFtk9zkvn8gCAIlSdMFz+c/kzOeaBPvc+85zxlBs+bNXe4fEfE3fXr1ZPeevRlyJvHvv/3Od2u+w2w28+Zbb6ba9ty5CLp07uR0Qu1wOLA57NSrV59vpk5Dp9N5KuwMd/z4MXr26I5dUTAZHp9Qq6qKzW6nVOnSLFgYSo4cDy+OJLKGiIgIxowZxeVLl2nTti1BLVv98/1TiboexQ8/rGf9urUUK1aML7/8mmLFi3s8hl9/+ZWNGzaSI0cOXnvjNY+P7y5VVdFo3FyaIYQQIkNJMi2EyPa+/XYGq8LD+XbWLPz8cjndz2az8ebrr5EvXz6++PKrdIzwX9euXuOvv/5Cr9dTt17dx7a/k1BbUlJQHSoGvf6BC22Hw4FVsaECDRo0zPaJ9B0nT56ke7cu2BUFVBW97iGfXXXcPv5Lq6VipUrMnTs/TWcZi4x19MgRQkMXsn379ruVor29vWnWvDm9evWhcuXK6Tb3lStXOHTwEAEBAdSoWSPTElhFUdi6dQtzZs/m1KmTWCwW9Ho9AQH56dmrF506dcbPzy9TYhNCCJE6SaaFENmezWZj5MjhXDh/nm+mTXcqobbZbIwf+xZRUdcJDVtM7ty5MyBS98TFxbFyxQrmz59LYmIiWo0Wu11Bo9Gg0+tJSUnh+apVGThwEI0bN/HI0VJZRVRUFMuXLWXRojBu3br172dHg06vI8VioWy5cvTr25+gli0xuLAkXmQtVosFNJpHLvt+Eq1d+z3vvfsuNpsVh93+7w0jVcXucKDRabFarXTp0pW3J7zzRH23hRDiSSDJtBDiiXAnoT5/7jyvv/EmlatUeWTbcxERfP3Vl8THx7EwdFGWTqTv5XA42LVzJ38c+4O4uDiMRiN5cueh+YsvZpuza91lt9vZvWvXv5/dYCBX7tw0bdqUUqVKZ3Z4Qrhs1apwJr4zAb1WhyGVeg92hwO76qBR48Z8+eXXklALIUQWIsm0EOKJYbPZ+OzTT1i+fBmFixQhJKQj5ctXIEfOnCQnJxEREcGa1av47ehRWrQIYsI7E7NNIp0erBYLN2/exMvb+24xJiGeNtHR0fx04CeaNmuaYd+D79as5u23x2PQ6dE7sSXDoaooDjuNGzfhq6//lwERCiGEcIYk00KIJ87NmzdZs3oVy5Yt4/z5c1itVvR6A/nzB9AhOIRu3bpnSLGxR/lxz4+cizhH+QrlqVqtaobOff36dZYuWczixYuIi4u7+7q3tzedOnWmV+8+FCtWLENjEiKzWC1WPv/sc2w2G40aN6JBwwbpPqfFYuGFmtXROFSXTiBwqCpWxca8+QupUaNGOkYohBDCWZJMCyGeeDabLUvtpV26eCmnT5+mcpXKtGvfLkPmTEpKYtzYt9i0aSNmkxlUBzqtDo1Gg/rP/kw0kGKxUKtWbT7/4gv8/fNmSGxCZKaNGzby6y+/4uXtxejRozEY0/e3YuXKFXz4wfvoNa4v17bYrNSpW49vZ85Kh8iEEEK4SjbeCCGeeFkpkQbIm+92khoVFZUh8yUlJdG7V0927tiOt9mMQafDoDeg1WrRaDRotVoMej0GnZ4cZi8OHzpE504diY6OzpD4hMhMtevURqPVkJyUzJEjR9J9vjmzZ+NQ7G71Nej07Nmzm2vXrnk4KiGEEO6QZFoIITLYnSXm0VHpn6zeSaRPnzqFQadHp019f6ZWq8Wo1xMdFUWXzpJQi4xnt9u5ceMGNxMTM2S+XLlyUaFCBYoULXL3Rld6sdlsRET8nWrBsdRotVq8zF4cP3bMw5EJIYRwh3u/5kIIIdwWEBAA3D5fNj4uHr9c6XeG7PRpUzl16iTGh5zR/CgajQaDTk/U9etMmvQOU6dOT7f4hIDb34XNmzcRFhrK4cOH7r7u5eVFq9at6d27L2XKlEm3+du0aYPekP6XRIkJCWkeQ6OBhMS0jyOEECLtJJkWQogMVrBgQdq2a0vp0qXx8k6/6sFWi4UlSxajReN0In2HRqNBr9OxbetWoqKiMrVgm3iy7dq5k/Hjx+Lj60tISEfe++BD/Pz8sNvtXLwYyfffraF7ty6UL1+Br//3DXny5PF4DP9NpGOiY/DP6+/xeYwmk0fGMXloHCGEEGkjy7yFECKDabQaqjxfJV0TaYD1P6zHrtidOnrnYXRaHWaTmSVLFns4MiFu27ljB2PGjGL0mNdYtHgpHYJDyJ079+19/AYDJUqUZPSY1/h+3Q/kzZeP3r16Ehsbm27xOOwOtmzewrRp0zhz5ozHx8+ZMydmsxmHw+FWf1VVsdpsFChQwMORCSGEcIck00II8Y
QKX7kC1WF3+an0vVSHg/CVKzwYlRC37du7l1dfHc17739A4yZNUm3r5eXFuPFvU6p0aXr36plu+6kdDgcRf0eACmtWr+FG7A2Pz9EhOAQH7h2kotgVcufOTZUqz3s4KiGEEO6Qo7GEEE88RVHYuXMHZ8+eJTExES+zmYD8+XnppRb4+aXffmVnRF6IJDExkXLly3l87KZNGhETFYVB7341c7vdjs1h549jJzwYmRDQtk0rOgSH0Kat88fDqarKyBHDad6sGQMGDkqXuGJiYpg9azZWi5UcOXIwYNAAcuXK5bHxIyIiaNWyBd4ms8s3uhTVwauvvkav3n08Fo8QQgj3yZNpIcQTKzo6mqlTv6FRw/pMmvgOR44c5sqVK/z515+EhYZSr25txo59ixMnMidR3L9vP/PnzWfDhg047O4t+0yNxWIB3H8qDbe7W61Wj8QjxB0HDx4kKiqKoJatXOqn0Wjo1r07ixcvcnup9OP4+/vTuUtnjEYjxYsX9/gNtxIlSlCrVm0UhwNXnmdYbVa0Wi0dgkM8Go8QQgj3SQEyIcQT6eiRI/Tr14fSpcswavSr1G/QAP1/jqP5888TrAoPp1PHYEaPfpVBL7+coTGWLl2abVu3kXQribNnzxJYKtCj4+fMmZPE+LRV/VXV20tshbhw4QKXL18iOTkFHx8fAgMD3U40F4WF0rZd+we+k86oXbsOX335BXv27KZRo8ZuzR8REcHVq1ewWCz4+PhQulRpcvr43H2/ZMmSDBg4gNx5ct99ehx3Iw69QU/OnDndmvNeU6dNp2eP7vx99iz6f857T43VZkVnMLBs+QqPzC+EEMIzJJkWQjxxjhw5TP9+fRkwcBBdu3V/ZLuyZcvx9oR3CA7pyOhXRqKi8vLLgzMsTv+8/hQuXJhLly6xZ/cejyfTZcuW4/Kly2kaw25XeK5UKQ9FJLIbu93Otq1bCQ1dyB9//EHhwoUwmc3cTEzkelQULYNa0rt3H8qVL+/SuMePH6dtu/ZuxaTVaqlRvQbHjx93KZm22Wxs2riR0LCFnD51ioKFCmE0GklMSCQmJpp27drTq3dvAgNv/73nC7i/gv26teu4dPkS9erVo3r16pi9zG7FD+Dt7c2ixUtuJ9R/n0VV7Bj0hvuSalVVUex2NFoteqORZctX3I1NCCFE1iDJtBDiiXL69CmnEul7lStXjq/+9w2jXxlJjhw56NGjZzpH+a+GjRuyZNESLl26RMTfEZQoWcJjY/fu04fNmzdh1Dt/xvS9VFUFrZZ+/Qd4LCaRfVy/fp0+vXtitzsI7tiRDz/6mBw5ctx9/+LFi6xeFU6vXj1o3vxFPpzyEVqtc7vHEm8mktPH/SesOX18SHShCFlkZCR9evfEy8uL4JCOfPnV/zCb/02Gz507x6qVK+jcqSMhIR15e8I79/W/EXuDS5cvYbVY2bF9B3v27KFNmzZUrFTR7c/g7e3N8hUr2bRpI7NmziQi4m9MRiOqQ0Wj1WBXHZi9vejTtx9dunRNlyPBhBBCpI0UIBNCPFHeeP01bIrywMWwM3bu2MEXX3zGnj17MRjcL9rlqo0bNlKqVCmefe5Zj4/dMuglLl6IxGQ0utzXptgwmEzsP/Bzhv77EJnv+vXr9OzRnWrVq/Pqa6+nejMmPj6OV0aOoFzZcnz8yadOJdSNGzVg8rvvu/xE+47PP/uUAgUKMGrU6Me2jYyMpGeP7polhyAAACAASURBVLz40ksMHjI01bZRUVGMHDGM+vUbMHHipPveu3nzJrt27uK3o79ht9sZOmzo3afX165dQ6/Tp+ls6uPHj3Hi+AkSb94ukli4SBHq1q2Hzs2j7YQQQqQ/3aRJkyY9vpkQQmR9sbGxjBv7FuPGv42/v+sXtcWLF2fVypXkz5+f0qVLp0OEDxcYGJhuT51y58nD5i2b0Wg0Tj81BLA7HCh2O0OGDadmzRfSJTaRNVksFjp1DKFa9eq89vobj13VYDabadqkKaGhC/n77FkaNmr02Dm2bd2Kr58vZcqUdSvGRaGh1Kldm3LlUk/GExMT6RgSzEstWjw2kQbIkSMHjRo34dsZ04mNiaFW7dp33zMajZQqXYrqNaqTP3/++1aRrF+7nk0bN3Hs2DESExLxMnvhc88ebGcEBARQvnx5nn++KhUrVqJ48Wdc+s4KIYTIePIrLYTINNevX+fkyZOcOHGCS5cuYbfb0zTeihXLKVe+vNv7CrVaLcEhIYSGLkxTHHC7kvipUyc5cfw4kZGRKIqS5jHd0bJlK0aPHoNVsaE4+e/X7nBgsyt0CA5h8OAh6RyhyGrWr1+HyWTitdffcLqPr58fH338KStXriA6Ovqx7Tt37sKa1avciu/smTOcPn2KFkEtH9s2PHwlhQoX4mUX/o7z5s3LBx9+xIIF8x96nrW3t/d9y7sdDgdXrlwBICY6hn1793Ho0CGn53taWK1Wzp8/x7Fjf3Dm9GkSEtJWHFEIIbIC2TMthMhQFouF9evWErYojBPHj2M0GtHpdCQnJ1OgYEG6d+tOZzf3Bx4+fIh69RukKb4GDRsyY/o0VFV1eZ/xvQWOfjt6FIPBgF6vJzk5mbx589G1a1e6dutOQEDAQ/s7HA5++fkXTpw4Qc9ePTG6sTT7YQb9U1Ttyy+/QLFrMeoND33ipaoqVpsNFegQHMy7773vkflF9hIWGkqnzp1d7lewYEFq167D0qVLGDnylVTbtggKYsqUD/jt6FEqV6ni0jwrV66gQ3DIffu3H0ZVVRYvWsSwESNcGh/g2WefpVy5cqxatYo+ffum2lar1TJqzCguXLjAyb9OcvLkSUqX+Xdly4XzF1i/bj0VK1WkYsWK5MrtuTOrs4OrV64QFhbKkiWLsVqt6PV6HA4HiqLQoEFDBgwcRM2aNTM7TCGEcIvsmRZCZJjvvlvDB++/h5+fHx2COxIUFITvP0frWCwW9v64h/DwlZw4fpw+ffryxpv/51JC27VLZ4JatqRV6zZuxxgfH0fQSy/y68HDLh37s3XrFt55ZwJGg4EOwSG0at2G3LlzA6AoCvv372PVypUcOXKYTp278M47Ex/YC3nh/AUWLFgA6u1jszp36YxGm8Zzou+xa9dOZs78liOHD2MymlAdDjSa28dfoQGbohAYGMigQYNp07atx+YV2cft714vvl/3g1s3cw4dOsh7kyexd9+Bx7adM3sWi5csZtr0GQQE5Hdq/C2bN/H5Z5+yes13FC/+TKptf/rpAG++8Trhq79za7n07l07mTNnNhs3bna5770349avW8/hQ4fvvleqVClCOoZgMD75dQg++/QT5s6di9lkBBX09/zmOf5ZAaMCJUs+y8LQMHLlerpuNAghsj95Mi2EyBCrwsOZPHki49+eQJOmzR5Ikk0mE02bNadps+ac/Osv3nrrTW7dusWkye86nVBrddo0LxW3K7f7u1L0Z/PmTbz+2qu89vqbtGzV6oELd71eT4MGDWnQoCEREX8z9v/e5I3XX+PTzz6/b55ixYvRrFkztm3dxsmTJ9m2bRvNX2yeps9zr0aNGtOoU
WPOnj3L8mVLOXv2LAmJCeTMkYOiRYvRuUsXKlRwvzqxyP7OXzjPs88+5/aqiLJly3H9+nUsFgsmkynVtgMHvUxUdDTDhw11KqG+k0jPmj3nsYk03K7QXap0Gbf3HZctV47ICxfc6nvvb1aVf568nzh+gpSUFKw261ORSH805UOWLFmMt8n00P8GWq0Wk9aIqqqcPxdB1y6dWLZ8pSTUQohsRZJpIUS6W71qFZMnT2TKx5/wwgu1Htu+dJkyTJ02gxHDh6KqqtPLjfPkzkNUVFSaYo2KikKvN5Azp3PH9mzZspnXX3uVdyZOpnGTJo9tX6JESaZO/5YRw4bw+muv8vkXX953oVmnbh1iYmI4cvgIB/Yf4LnnnvPocVlwewnruPFve3RM8WRISkrCy9vL7f5eXl53x3lcMg0wduw4AHr16E7rNm0J6diRQoUK331fVVV+/vknwleu5PffjjJ7zlyqVavuVCzJScl343GHl5c3VqsVRVHQ692/XCpStAhFihYhKCiIY8eOEZD/320eKSkpXL58mZIlS7o9flb09ddfsWTJYgxa3WNvZmg0GvRaHVevXKFr186sXbveY1tchBAivUkBMiFEukpISGDy5Im8+94HTiXSdxQuXJhp02ewbt1afv75J6f6NGnalM2bNpGW3SsbNvxA06ZNnWprsVgYP24sb/7fW04l0nfkzZuXqdO/5cCB/Wzfvu2B91u2askzJZ6hYaOGHk+khUhNzpw5SbqV5Hb/pKRbd8dx1tix41gYGkZKcjI9u3ejf98+vDpmFK+MGE7H4PZ8+vHH1K5dm+07djmdSN+JIenWLZc/wx23bt3CbDanKZG+l06vo3KVyhQsWPDua1s2b2HJ4iWcOHHCI3NkBYmJicyeNRO9Ruv0qoA7CfW1K1fYvGlTOkcohBCeI0+mhRDpauXKFTxTogT16td3uW+hQoVp1bo1YaGhTiXirVu15qMpH/LTgQPUrlPH5fmSkpLY8MN6Zs6c7VT79evW4ufn51RV4f/KmzcvHYJDCF24kObNX7zvPZ1OR89ePeVYHA9xOBzs3rWLLVs2Ex0dhU1RyJMnDzWq16Bd+w54e3tndohZRmBgKU6e/Itbt26SI4fzCfEdBw8epHjxZ1w+l7xChYp8/MmnvDV2HEePHiUhPh6D0UDevPmoXr26W9+FUqVL8+mnn2C1Wt3b/33wVwIDA13u5yxFUbhw4QIOu4NVK1fhCHZQoWKFdJsvo4SHr8RgMKDTuPbfTKPR4FDszJr1rdRsEEJkG3KlJoRIN6qqsmTxIjp27OT2GCEhndixYztXrlx+bFujyUTnzl0IC1vo1lFU4StXUKhQYWq+4Ny5yqFhoXQI7uhy1e872nfowKFDBzlz5swD7/03ebh+7fp9RYzE4926dYu5c2bToH5dRo4czrq1a/n5wAEO/3qQrZs289FHU6j1Qg0mT5pIZGRkZoebJZQsWZKKFSvxw/r1bvVfFb6SHj17uj1/7ty5ady4Me3at6dly1bUrFnT7ZtKlStXplChQmzfttWt/qvCw+nRs5dbfZ2h1+vp27cvAQEBqKrK2u/XOnWs2OP88cfvjB71CtWrPU/ZMqUoX64MdWq/wMcfTeHy5cf/jqbV/HnzUN2sXWHQG4iIiHiintQLIZ5skkwLIdLNnydOEBUVTdNm7hfRKlqsGBUrVmLrVucuiPsPGEhiQgIT33nbpYT6++/WsHDBfCZOnORU+4sXL/LniRO0at3a6Tn+y98/L7Xr1GHzpo2ptouPi2fRokWsX7ee3bt2uz3f0yQxMZGuXTrzv6+/IjE+AbPBiJfJhMlowmQ0YjaZMOr06NCwetUq2rRuKRfw/+jVuzerwsNdviF15vRpThw/TseQjukUmet69urFiuXLcTgcLvU7cuQwV69eoXUr97/fzsjpk5PefXrj4+tDzRdq3j0BwB0nT56kVcsgunXtws7t23HYFLxNZswGI8k3b7Fs6RKaNmnEoIED0u2MZ4fDwdWrV9Dp3Fv4qNFoMJvMnD374A1GIYTIiiSZFkKkm5jYGPz986S5mEz+AgWIjY11qm2ePHlYtHgJF86f550Jb5OUlPr+T1VVCV+5gv99/RUzZ812+ql0TEwMPj4+Lu0NfZgCBQoS85jPZjab8ff3B2D3rt2sX7c+zVXLn2SJiYl079aVC+fPYdDpMRoMj1w9oNfrMer1OOx2unfrIgk10LRpMwICAhg39i2nE+pLly7xxuuvMnTYcHL6+KRzhM5r06YtWp2W9yZPcjqh/vvvs7w9biyjx7yK0YkiamnlncObYcOH0ax5M5dOEbjXyZMn6dalM5Hnz2M2GDEaDOh0t4t/abVa9Ho9eq0Ob5OZX3/5ma5dOqVLQn1nj3paDvRTVQc3b970TEBCCJHOJJkWQqQbxaZgMKS9KqvRaMRmsznd3t8/L4sWLyE6Kop2bVrxxeefceHC+fvaJCYmsnTJYjp3CmH+vLnMnDWbWrVqOz2Hotg8UnHWYDBgtVpTbWMym+jRswdly5YF4PChw+zcsTPNcz+pBvTvdzeRdnYJvslgvJtQx8SkfaltdqbT6Zgzdx7JSUmMG/sWycnJqbY/d+4cI4YNoVOnzgwePCSDonSOyWQiNHQRFyIv8N7kSVgsllTbnzx5kpHDhzF4yFC6d++RQVFyX+VzVVVJTEh0uu/p06fo1qUzdkXBZDSm+jev1WrRa3VcvnSJrl06cTPR+Xmc4fVP/QH3S0Defjqd0439+kIIkRkkmRZCpBsfX18SEuLTPE5CQgJ+vr4u9fH3z8vqNd8xc9YcEhMS6Nm9G62CXqJLp460a9OaVkEvsXPHDl55ZRQ//rjPpUQawNfX1yNPdhISEvDz83tsO71eT8dOHXmh1gv4+vpSp67rBdaeBkePHuXYsT9cSqTvMBmMoKosXbo0naLLPnLkyMGcufPQAO3atOarL7/g4j37ylVV5ddffuH/3nyD/n17061bd14ZNTrzAk6Fn58foaGLiImNoUO7NkyfNpUrV67cfd/hcLBv315eHTOKYUNeZtiwEfTvPyBTYo2MjGTWzFksXrQY1eFcSvru5MnYbFaMThZ9u1M5+/KlS4QtCktLuA/Q6XQEBAS4vXJGVVVSLBY5xUAIkW1o1LScISOEEKmIi4ujXt3azJw9l9KlS7s1hiUlhXZtW/P1199Qt149t2OJjo7mwoXzJCYm4mX2Il9AACVKuH/BlpKSQt06tfhgykdUr17DrTHsdjsdQzowbtx4glyoCJ6SkoLZbL77z8lJyegNepcrKD+JRr0ykp07d2DSu/fvwqbYMJrN7D/ws8eORMru/vzzT8LCFrJu7Vrg9raDmzdvkjdfPrp3606XLl3J8882hKzut99+IyxsIRs3bECn02Eymbh58yYFCxakR4+edOzU2ambW+nlwvkLLJi/AID2HdpTqXKlx7Q/T4sWL+JlMqN18eaRTVEwe3uxb/9Pbi8vf5jZs2cxfepU9G4UjrPabBQqUpiNm7Z4LB4hhEhPkkwLIdLV/735BjabjXFvT3Cr/7p1a1m6
eDGbt2x1u2p2evnwg/eJiIhgysefuNV/966dfPH5Z+zes9ftxE1VVZYsWkJ8fDyt27SmWPFibo2TFrExMSxbvowVy5cTExONzWbDbDZTokQJ+vbtT1DLlh5ZEv/YOGJjqVunFl5Gk9vJgaqqWBQbn332BS+1aOHhCLM3i8VCbGwMyckp+Pj44O/vn22Pb0tJSSE2NpaUlBR8fX3x9/fPMr8vy5Ys49SpU/jl8mPEiBHo9I/+W37/vXcJX7kCgxsFv1RVRVEdfPLpZ7z44ktpCfk+8fHx1Kn9AiaDAZ3W+e+hqqrYVQcTJk4iODjEY/EIIUR6yp7/FxRCZBu9+/Rh69YtxMXFudz3TnGwnj17ZpkL3Xv17NmLffv23rdk1BUrV6yga7fuaXoCGhkZydm/zxIdHc2C+Qv4/rvvH1t0zVNiY2MZM3oUdevWZta3M7gRE4NBq8PbbEajwtnTZ5gwYTy1a9Xkm/99TXrfuz1//hx6nS5NT9k0Gg06rZZTp056MLIng8lkomDBQpQsWZJ8+fJl20Qabj9dL1To9mfJmzdvlvp9adK0CWhu13W4ePFiqm0PHNgPbn6vNBoNis3GoUOH3Or/KH5+fvTp0xe7Q3W66Juqqih2O3n8/dO9groQQnhS9v0/oRAiWyhfvgIvvFCL/3vjdW79U+nVGaqq8tmnn3AzMZHgLHTUzr2KFS9OUFBL3nrzdeLjXbtZMGP6NM6di6Bbt+5pi6FYMfr160e+gHwA/Hb0N8JXhKdpTGfExsbStUtntm/bhpfRdLdqtk6nQ6fVodfpMBmNGHV67DaFWbNm8uYbr7t8RJErEhMT0XlgabaiKOl2dJAQjxOQP4DWrVszYuQIij9TPNW2iYmJaboRoNFoiI2Ncbv/o7z5f2/Rrn17FIf9sd/5O4m0f768rFgZniEV1IUQwlMkmRZCpLtvpk7Dy9uLV0ePciqhvpNIHziwn7BFS9J8/FR6mvLRxxQtWpRXRgx3OqGeMX0aP6xfx6JFS8ibN2+aYyharCiDBw+mWfNmGIwGGjdtnOYxU3Mnkb529QpGvT7VJ8EajQaDXo9Rp2fzpk3pmlCbTCaPjK3V6u7bky5ERqtarSq5cuV6bDuDwejug+nbVPBKp7/1d997n+COnUi2WrDZlQeKkjlUFYvVgs1hp0ChgqxYGY6/f9p/D4UQIiNJMi2ESHdeXl7Mnj0XL28v+vTuyfJlyx56jqiiKGzbupUhLw/iwIH9LFq0hGLFMn4PsCuMRiP/+2YaRYsWpW/vXiwKCyUh/sEK5na7nd27dzFyxLC7ifRzgYEei0Or01Knbh3GjBlD0aJF/51XsbNv7z5sVuePFnuct956k6tXr7hUMVur1WLQ6di8eRPLly/zWCz38vfPi9VqTfNycp1O65GbHEJ4wrlz54i78fAbdYUKFUzTDSSdXk+hQoXd7v84EydOYsPGTXQIDsHmcGBRbNhVFZvDTlJKMpWrVuWbqdPYuGmLJNJCiGxJCpAJITKM1WJh3fp1hIYu5FxEBPXrNyBv3rzo9Hri4uI4sH8/eoP+doXgrt3InTt3ZofsNEVR2LhxA6GhC/nrzz+pV68+Afnzo9frSUhI4OeffkJRFLp27Uq37j3Ily9fhsS1Z/cedu3cha+fL02aNKFipYppWhZ65cplGjdqiLfZC50be2YtNisB+fOzbXv6nJP90ovNuHLpMiY3C57ZHQ6SUpLZvWcv+fPn93B0Qrjmh/U/cOjgIcqULUPnLp0feH/Dhh8YN/YtDC4U+rrD4XCQbLWwfcdOChYs5IlwU5WcnMz58+dJSEjAy8tMgQIFM+x3UAgh0osk00KITHHkyGF2bN9OXHwcdsWOr58fVatWpWnTZh49piUzHD9+jK1bthAbG4vNZsPXz49KlSrx4osvZfjxVTt37GTv3r13z6wtUKAA7dq3I38B9xLFTz/5mMWLwtyqHgy3l/AnWy3MnTff5bO9nbFixXI+eP89jG7GZ7FZqVe/PtNnzPRwZEL86/TpU+zZs4e4uDgMBgP58+cnKKglvr6+97U7cvgI69auA6BXn14PHOenKAq1a9VEsVhdLmRosVmp+UIt5sydl7YP849bt26xadNGrly+TIrFgq+PD5UqV06X77kQQmQVkkwLIcQTLup6FNu2beP0qdPo9XpGvDLigYt2Z9WoXhWbxYohDYW+UqxWmjVvzpdffe32GI8cOyWFWi/UQLU7MLp448LusJNitbIwdBE1arh3drgQqdm1ayfz583jt9+OUrdePfLk8Uex2Th/4TzHjx2jfYdg+vfvT/HizwCgOlRmz57N1StXyZU7F4MHD8Zkvr9A18xvZzB9+jT0Gq3TFdYVux2bXWFhaBjVqlVP02c6dy6CuXPn8N2aNRj0Bux2BYfdgV6vx6468Pf3Z8DAQYSEdMTLyytNcwkhRFYjybQQQjwlzp07R2xMLFWrVb37WmJCIg6HA79cfo/trygK5cqWxsc7R5qORbLarASWLkP4qtVuj5GanTt3MnzYkLsVxp1hd9ixKgo9e/XmrbfGpktc4un2+eefsSp8Jd2696BN23YPFFY8FxHByhXL2bp1C3PmzqNq1WoAXL50mXlz51G2XFlat2mN6SHVrsePH8v6deucSqjvJNLvv/8B7TsEp+kz/fTTAQYNHIBOq0WL5oFVRbcrdSug1VK0aFGWLl1OTh+fNM0phBBZiSTTQgjxFFuzeg0nTpygZs2a1K9fH7PXoyv7JiYmUq1qFXxy5ECrSUsybaNw0SJs2LjZ7TEe505CrdfqMBoMqe4TVxQFm8MuibRIN1OnfsPyZUuZPmMmBQulvj9586aNfPHF5yxYsJCKFSsBcP36dQICAlLtN378WNatXYvquL0q47/fUbvDgd1hR7Hb+eCDDz2XSGs0GPSp37RSVRXF4aBosaIsWbocH0mohRBPCEmmhRDiKZWSksLUb6aSdCsJALOXmfr161OzZk10+gf3rTscDsqUDiSndw63io/dYbFZKVuuHMvT+TzsPXt2M27sWOLibqABDHoD2n+SalVVsdkVNFotqgqDhwxh+PAR6RqP8IyoqCjWr1vHtevXUGw2cuXKRcNGje4mnlnNtm1bmfD2eGZ8O4si91TaT826td8zY/o0du3+EW9v7/ves9lsnD1zljJlyzzQb9/evcyaPZNff/kFL7MZh8OBBg1oNVitVlq3aUP//gMoVap0mj7TtWvXaNqkEXqt9rGJ9B13Eurnqz7P/AWhaZpfCCGyCkmmhRDiKWZJsbB3715+/ulnFEXBz8+P4SOHP7KYUb26tUmMT3B5P/K9rDYbrdq04cMpH7k9hrMcDge7d+9m7pzZ/PLLz/e9V6xYcQYMHEi7du0fSFhE1nPy5EnmzJ7F5s2bqFGjJsWKF0ev1xMbE8OuXTt57rlA+vcfwEstWmR2qPfp1bMHdevVo2OnB6txp2bIy4Po1KkTXbp2u/ua3W5nxbIVnD59mjp169C4ceOH3viKjIzkwIH9xMfFodPr8c/jT+MmTdyulfBfX3z+GWGhC9G7WEVcVVW
SLCls3LT57r5wIYTIziSZFkIIQUJCAjt37KRkyZJUrFTx7usOuwOt7t+n0DO/ncGM6dMxuFlx3aGqJKUks2rVGsqVL5/muF1htVqJj4/Hrij4+vlJAp2N7Nu3jxHDh9K2XXs6dupMwYIF73s/JSWFDT+sJyx0Ia1at8kyy/UjIiJo364Na9f/QI4cOR/f4R6bN21k+fJlrF27/u5rFouFJYuXEHkhEoD8+fPTPrh9hh7jZrVaqV2rJg5FQe9G1Xyb3U6HkBAmTpyUDtEJIUTGkmRaCCGeYDdv3mTzpo1cvnyZ5ORkfHx9KVO6DA0bNXpsoSJVVZk/bz758+encZPGeHt7c+PGDerWqYXZYHTrCDOL1ULJ557ju+/XufuRHurSpUts27aVG7Gx2BQFXx9fKlWuRO3adTw6j8h4v/zyC0MGD2L8hHdo2LBRqm2vXbvGsCEv075DB8aMeS2DIny0b775H2fOnGbCO64njlarldYtWxC+ag0lS5a8+7piU9i2bRu//PwLAN17dOe5wOc8FvPj7N69i9GvvILeza0eit0OWg1Hjv7u4ciEECLjuX+2iRBCiCwrMjKS0IULCA9fSYGCBXm25LOYvbyIiIggdOECvLy86NW7D506dnpkdd3jx45zMfIiFyMvcvzYcZo1b0bValVp27YdGzb8gFbVplrY67/sdjt2VWXo0OEe+YyqqrJ/3z7mzp3D/v377u4RVVUVrU6HYlPIF5CP/v0H0CE45IHqySLrS0xMZMjgQbz5f2Mfm0jD7Se130ybzpDBL1O+fAVefPGlDIjy0aKjoihcuIhbfY1GI/nyBRAdFXVfMq036GkR1ILSpUtz/vz5+xLpP37/g2LFi+Hn9/jq/O6Kuh6Vpmr+Oq2WhFs3URTF5bOxhRAiq5FfMSGEeMLs2rWTEcOHUbt2HT759HOer1r1vvdtNhvbtm1l+bKlLFgwn2XLllOw4IMVhsuUKUPjJo3Z++NeUlJSWL9uPdeuXWPyu+8ReTGSY7//jkGndyqhttvtWBQbQ4cN98ieVofDwWuvjmHLls3oNFpymL1uX+Dfs4dTb9QSGx3NZ599yowZ01mxchVFiriX2IjMsWb1KsqULUuz5s2d7lOoUGF69+5DWGhopifTNsWWpoTRYNBjs9ke+l6JkiUoUbLE3X++ceMG33/3PWigcuXK1Kpdi3z58rk996PYFBugAs7fSHsYSaaFEE8C928tCiGEyHL27dvHKyNHMHbceKZ8/MkDiTSAwWAgKKgl8xeEUrVqVXr17MH169cfaKc36KnfoD7DRw6ndOnSaLQaqlSpgslkYt68BVSoVAmrXcGm2HjUjiFVVbFYrXcT6REjRqb5M95JpLdv24bZYMRkND70SZlGo8FoMGLQ6riZeJPOnUK4ePFimucXGWfJ0iV06BDicr+gli35/fff+Pvvv9MhKuf5+vqREB/vdv/4+Hh8/ZwrGhYdHY3JZMJhd3Dk8BFmTJvBlStX3J77UW4/9XY/kVZVFZ1Oh9n86GP4hBAiu5BkWggh7pGUlMSJ48c5cGA/hw8fIjIyMrNDctqJEycYNnQwY157nZdaBD22vUajYey4tylTpiy9e/XAYrE8tJ2vry9dunXh5cEvU7DQ7cJPdxLqQS8Pxmg2Y1VspFgtWG02bIqC1WbDYrORZEmhSLGifPzJpx5JpAEmTBjP9m3bMOh0Ti031Wg0GPV6bt68nVDHxcV5JA6Rvk6fPkV0VBT1GzRwuW+OHDlp1LgxGzdsSIfInFejRg1+/PHHR95sSs1ff/1JcnKy08dYBQYGMmr0KJo1b4aPrw/58+e/r1DbxciLKIrichz/VbVqNZKTk3E4HG71tykKzz//fJrjEEKIrEDW1wghBHDx4sW7e4wVxY6fny8Wi5WEhHgqV65Cv379af7ii24V3coos2fN5KUWQbRp09bpPlqtlgkTJ9G9a2d++GE9wcGPfgr434rBRw4foXnzFxk6dBhb60xrFwAADapJREFUtmxm+fJlXLt6lZSUFHLmzEmpUqXp3bsPVTx44XzlymVWhYfjbTa7tG9To9Fg1OlJunWLFcuX8fLgIR6LSaSPqKgoAgLyu70UuGDBQkTHRHs4Ktc0atSYSZMmcvDXX6lRs6ZLfdesXkVwcAgmk8npPkaTkTp161CrVi0SEhLuvp6SnEJYWBhms5mglkGUKfPgGdXOKlCgAPXq1+en/fsxGY0u9VVVFY1Oy6BBg92eXwghshJJpoUQTzVVVXn33cksX7aU2nXqMOXjT6hWrfrd9+Pj4/huzRrefXcyH300hW9nzkrThWh6iYmJZvPmTSwIDXO5r06no32HYJYsXpRqMn2v48eOs2XzFjQaDe07tKdly1a0bNnK5bldtSgsDLPJjM7F823hdkKtOlTmz5/HgIGDsvSNEXF7n31a9tTq9Xpu3bzpwYhcp9Pp6NKlK4vCQqlarZrTf3NXLl9m65atfL/Wvar3Wp2WXLlz3f3niHMR2Gw2bFYbK5atoEzZMrRu3RrvHO4dDzdw4CAOHNiPw+Fw6aaWTVHI6etDw0aPLyYnhBDZgSzzFkI81SZPmsj2bdsIXbSEjz/57L5EGsDPLxd9+vZj9Xff07RZM/r26cWZM2cyKdpHCw8Pp3z5CpQs+axb/Vu1as1ff/3FiePHnWpfrFgx8gXkQ1VVvv/+e86fO+/WvK6wWiwsXboE0nCio+Gf5d47dmz3YGQiPfj6+hEX7/6S/Li4G+la1dpZffv05VbSLT784D2nlntHXb/OKyOH07dfP0qUKPHY9s4oW7YsQ4cNpVixYgBcvnQ5TTcqar7wAq1at0ZRHU4v97YpNuyqg08/+zxN1cCFECIrkV8zIcRTa8qUD9m6dQvfTJvOM888k2pbvV7P0GHDaRHUkj69e3LhwoUMitI5R48coW69em739/Xzo1KlSvz221Gn2vv4+tCnTx9y586Nw+5gxfIVxMe5X2jJGRcuXODWrVvo0/BEWaPRoFHh6FHnPqfIPGXKlCHpVhJ//fWny30dDgd7du+hVq3a6RCZa3L6+LBgQShnTp/h/Xcnc+PGjUe2/fPPE4wcMYwXX3yJV1/17DnZ+fLlo0+/PrRq3YpWbVphNLm2RPu/pkz5mKCWLVFUx+2zox9BVVUsNiuKw8HMWXOoU6dumuYVQoisRJJpIcRT6ezZs4SFLuTr/02lcOHCTvcbMfIVqlR5nq+/+jIdo3NdQkICvr7OVf19FF8/P+Lv2Wf5ON45vOnWvRsms4kKFSvg4/vw86o9JT4hHqPR6NLZ1g/jUB3ciI31UFQivZhMJkJCQlizepXLfffv34fBaEjTDSZPypUrF6FhYSiKQkiHdnzw/nscOnSQiIi/OXXqJFu3bGHIy4MYM2oUwcEhjB03Pl3i0Gg0VKtejcDAwLuv/fH7H2zf5vpKDY1Gw5QpH9OvX38cqCgOB1ab7fZ58g47it2OxWbDotgoWKgwc+fNp25dSaSFEE8W2TMthHgqLV2ymAYNG/KMG8soe/fpS/9+fRj/9gTy5MmTDtG5Tq/XYU/l6Z
AzFMX1Pap58+Vl6LChaU7knaHT6d2qivwweoPBI+OI9NW1W3fatW1Dr959nT4jXFEUFoeF0bVLtzTfePEkf/+8TJs+g8jISEIXLuCLzz4lPj4evV5P/vz5CenYiQ4dgvHy8sqwmM5FnOO7Nd+hqio+Pj7UfMG1ImkajYbRY15lyNBhrFu3lrDQUK5evYLVaiNHTm+qVHmeAQMHUrVqtXT6BEIIkbkkmRZCPHVSUlJYs2Y1H0z5yK3+zz73HGXKlCU8fCUvv5w1qtLmyp2bqKioNI0RHRVFLr9cj2/4H/9NpK0Wa5qXkD5M7ty5sFqtmA1pezqt0+mzzE0QkboSJUow6OWXGTliGDO+nUWBAgVSbW+323lnwtvodDp69OyZQVG6pmjRoox/ewLjmZDZoVC0WFGeC3yO06dOs23bNgJLBZI7d26XxzGbzXTq1JlOnTqnQ5RCCJF1yTJvIcRT5+Cvv2IymalevYbbY7QICmLH9qxTxKpJ46Zs3brF7Se3kRcucOrUSbfO9L1XTHQMs2bOYt/efWka52GKF3+GIkWKYFVsbo/hUFWsNivNmjXzYGQiPY0c+Qpt27Rl5IhhHD165JHtrl69yoS3xxMTHcWcufPw9navUvXTRKfT0a59O7xzeKPYFH7c82NmhySEENmKJNNCiKfOjbgb5AvIl6Yx8uULIC7u0YWEMlpQUBC3bt7k559/cqv/mjWradKk6QNnSbtq7969xMbGsn37dk6dPJWmsR5mwMBBaNJQCdhmsxEYGEiFChU9GJVIb2+8+X/06N6DCePH0b9vH9atW8vvv//On3+eYO+PP/L2+LF079oZP19f5s1fSM6cOTM75GzD29ubVq1a0ahxI0qXKZ3Z4QghRLYiy7yFEE8du92eporQcHuPsk1RPBRR2hlNJoJDOrJk0SJq1nzBpaNnYmKi+WH9Ov73zbQ0x9GyZUuuXr3KtavXWL16NQMGDiBfvrTduLhXhw7BfPzRFGyKDYPetX3PqqqCRsOgQVljab5wzcBBL9O7dx/WrVvH6tWruH79Oopiw88vF40aNeK99z7w6N/a06RsubKUpWxmhyGEENmOPJkWQjx1fH18SXChavXDxMfH45cBRbdcMWjQIK5HXeeD9507zxYgLi6OV0aOoG69etSpUyfNMRiMBrp27Yq3tzdWi5X1a9enecx7eXt7M3HiZKyKguLCzQxVVbHZFao8/zwtgoI8GpPIOEaTiZCOHVm8ZClbt21n5649fPf9WkaPeVUSaSGEEBlOkmkhxFOnXPnyXLp0iYuRkW6PsX//PipXruLBqNLO3z8vi8IW8/tvR/n4oylYrdZU20dHRzN61EhKlizJF1985bHKx365/OjUuROFChciOCTYI2PeK6RjRya/+x4WxebU6gCHw4HNrlC5yvPMmTMXg1TyFuI+B/Yf4MzpM9hs7tcjEEKIp5FG9dQ5I0IIkY0MHTqYgID8jHxllMt9b9y4Qbs2rfh+7ToCA0ulQ3RpExkZycAB/YmLu0H7DsGEhHQkj7//3fdPnTrJimXL2Lp1C02aNOXzL77EaPR89W1VVdP1aKLw8JVMmvgOep0e1eHAoNffN59iV3CoKharlYYNG/HNN1MxmkzpFo8Q2dFff/7FiuUrAOjcpTNlypbJ5IiEECL7kGRaCPFU2vvjj4wZM4o136/DbDa71Hfhgvn8+ssvLPvnAjQrUlWVnTt2MH/+PA4e/JVcuXJh9vLiZmIiSUlJtGnTlr79+lOmTMZdOEddj0pz4bf/SkxMZNWqcObPm8v169cxGAxoNBoURUGj0dChQzB9+vTlucBAj84rxJMgMTGRb6d/S3JyMkWKFqFvv74u1VsQQoinnSTTQoinkqqqdOncEbPZi48//czppb8H9u9n/Li3mDptOg0aNEznKD3j4sWLXLlyhZSUZHxy+lD8mWfcOkvWXYpNYd26dZw4foKu3bvy7LPPenwOVVU5ffoUN27cwGa14evnR8mSJaWqsxCpWL1qNcf+OIbRaGTw0MEZ+rsghBBPAkmmhRBPrYSEBHr17I5/3nx8OOUj9PrUDzg4ePBX3nz9NSa/+x4dOnh+L/CTKulWEnNmzyEuLg69Xk+37t0oUbJEZoclxFPv1q1brP1+LeXKlaNylcqZHY4QQmQ7kkwLIZ5qsbGx9OzZHbui0LlLV1oEtcT0n321Z06fZtmypWzdspl33plIl67dMina7OvGjRssXLCQhPgE9AY93Xt055lnnsnssIQQQggh3CbJtBDiqZecnMzq1atYuGABcXE3qFa9Bn5+vlgsFiIvRPLnnydo1ao1ffv1p1y5cpkdbrZ1I/YGCxYsIDExkXbt2smTMCEygdVq5a8//6JS5UqZHYoQQmR7kkwLIcQ/VFXlxx/38MfvvxMfH4/JZCJfQAAtW7Yib968mR3eEyE2JpbLly9ToWKFzA5FiKfSnX3SFStVpENwh8wORwghsjVJpoUQQmSqY38co0iRIuTKnSuzQxHiiXbw14Ns+GEDAI2bNKZ+g/qZHJEQQmRvqVfbEUIIIdLR5cuX+W7NdxgMBuo3qE+VKlXwzuGd2WEJ8US6cuUKAIGBgdSrXy+ToxFCiOxPnkwLIYTINGfOnGH1qtWkJKcAoNVpadCgAQ0aNsjkyIR4Mv3+2+8EBgbi5e2V2aEIIUS2p83sAIQQQjy9nnvuOYYMHUKV56tgMBhw2B3kynX/cu+Dvx7MpOiEePJUqlxJEmkhhPAQeTIthBAiS7CkWDh27BiVKlfCYDDcff2Xn3+h5gs1MzEyIYQQQogHSTIthBAiS4uNiSWPf57MDkMIIYQQ4j6STAshhBBCCCGEEC6SPdNCCCGEEEIIIYSLJJkWQgghhBBCCCFcJMm0EEIIIYQQQgjhIkmmhRBCCCGEEEIIF0kyLYQQQgghhBBCuEiSaSGEEEIIIYQQwkWSTAshhBBCCCGEEC6SZFoIIYQQQgghhHCRJNNCCCGEEEIIIYSLJJkWQgghhBBCCCFcJMm0EEIIIYQQQgjhIkmmhRBCCCGEEEIIF0kyLYQQQgghhBBCuEiSaSGEEEIIIYQQwkWSTAshhBBCCCGEEC6SZFoIIYQQQgghhHDR/wN5nRz/JaJVbwAAAABJRU5ErkJggg==)\nMakine öğrenmesi modelinin geliştirilmesi", "_____no_output_____" ] ], [ [ "# Başarılı bir model geliştirmek için eğitim verisinin yeterince büyük olması gereklidir.", "_____no_output_____" ] ], [ [ 
"![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAwsAAAHeCAYAAADUypcPAAAgAElEQVR4nOzdeXhM1x/H8ffMJJEgCRFb7PtSugiCoqgWte9UUdqiraIL3WytalFqKaW1V6uWai1Vtde+71uQIEhCJBGJ7DNzf39ElV+DWEeSz+t5+jzp3NxzP3fGnMl3zj3nmgzDMBAREREREfk/ZkcHEBERERGRx5OKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERERERSZWKBRERkTQyDIOrMTG33J6cnPwI04iIPHwqFkRERNIg6vJlXu38CnVr1+LTjz68aZvVaqXn669To2oVPvzgfQclFBF58FQsiIiIpMHBgwc5euQIAOvXrbtpFOFUYCB79+wGYN3atYSEBN+079Ilixn11VccO3r00QUWEXkAVCyIiIjcYMumTbz26qsMHTyIpKSk64/7VavGszVrUaBgQd59/wOcnZ2vbytVujTNmjcnR86ctGzVGh+fAte3nTlzhmFDh7JwwXze6N6NuLi4R3o+IiL3w2QYhuHoECIiIo8Dm81GzWp+WK1WAH6YPoNnKlW6rzajoqLo0LYNEeHheHh4sOSP5WR3d7++/XJkJBYnJzw8PO7rOCIiD4NGFkRERK6xWCwUK14cgJKlSlGiZMn7bjNHjhz89Ms8hg3/kmkzZ91UKOzetZOXGrzIC3XrsGTx4vs+lojIg6aRBRERyZRiY2PZv28f5cuXJ6eX102PhwQHU7RYsZsuNXoY5syezYRxYwGoULEiM3+c81CPJyJyt5wcHUBERORRi4+P59XOr3Dm9Gny5s3LH3+tvL4tW7ZslCpd+pHkaNOuHYcPH+LkiRN07PTKIzmmiMjdULEgIiKZTlBQEGdOnwZSCofExESyZMnyyHO4ubkx8uvR/3ncMAwWLphPTHQ0rVq3uWnkQ0TkUVKxICIimU7JkiXp8PLLBJ05Q9du3RxSKNzO7l27+HrECAB2bN/OD9NnODiRiGRWKhZERCRDCw8PZ+OGv6lcuQqFixQBwMnJiff7D3BwslvLnTs3zs7OJCcnExYW5ug4IpKJaYKziIhkWHFxcbRv05oLoaFkz+7O2g0bMJvTx0KAhw4eZPOmTTRo2JDiJUo4Oo6IZFIaWRARkQwr7OJFLoSGAinzA5KTkx+7S45upeKTT1LxyScdHUNEMrn08fWKiIjIPShStChvv9OHRi81Zsy4cemmULiVn378kbatWvLbol8dHUVEMgldhiQiIhmG1WrFMIyHfn8ER7Db7dSp+Szx8fGYTCb++Osv8uTJ6+hYIpLBaWRBRB4Ke/RJNsybxMjBA/jwoyGMmjCbVYcjSE7DvtbTa5g5eT67I+xpPJqVM2um8/38naR5l7S0GrCSaZMXsi8q9UbvtP1WEuLisT2IgP8vPo74h9Jw+nBg/z6aNGpI4wYvXl8WNSMxm83UqVsPAGdnZ5ydMl5BJCKPHxULIvKA2Qnf+i1vturIB2Pms+1EJAlXQzi04nsGdWlKp0GLCYi/fQu2M38zd8bv7E3zX/42gjb8xKzFe4h8kMXCqbX8NGMpB6JSH4C90/b/SuTInHd4ZeByoh9czJSWD8+h38sDWXHlATecjkz9/gciwsO5fPkye/fsdnSch2LosGF8M34CP89foHsviMgjoQnOIvJAJfrP5KMPZ3K6SBuGTu5L41LZUjbYIjk47wsGjvuC982ezBxSF69bfF2RpVo/Zv6ahLtPWruoLFTt+yPzkjxI8y4OEcvxbdsJofYDbznOfys7g3kILacfTz/zDDu2b6NY8eLUf7GBo+M8FGazmVq1M/OrLCKP2mP9sSoi6Yz9Equ+n8XhLM8zdOxHNMx9QzVg8eLJTsMZdqkzb/48llnNa/BeJSfCDvzNIaMslbIdZ832CPJUqUtVrxBO+sdQyCsfns7X2og/z67V6zkUZiJv+RrUqmhwbFswOavWpHQOg5iQExyPKYJXPk/M4QfZeMBGmTpFiNm4hq0BMWQrVIGqtapQ1P3GCiWO87s3sPVQEJcTzLj7lMOvTg1KeFru+SmIO7eLTVsPERSZgMXdh7LV61KthCcW4gnauRH/SAM7x9nw11YqPluDEu4AdqIDNrFm8zEiTN4Uq1idZysVwA0AG2EH/+agrSzPFYlh05otBMZko1AFP56tUgx3MyQE7WSjfyR2A/z//ottTz1L9ZSG/4+VSP/NbNp9gouxzniXrEytWhXJ7fLvcQ4kl6VWoTDWrdrF+SRPSvi9wHNPeN38YRF3jj0btnA4KJIEizv5y1bnuRolufFps0WdZMu6rZy4ZMOj0NPUer4S+W+YW2yPDmDz6k34h5vwLv4kfjUrUcDtnp92AF7v0YMmzZqRN29eTCbT/TWWjhw5fIhs2d0pWrSoo6OISAakYkFEHpyY7WzYGYtXg+bUzZ3asIEbFVo3pfzcCWxae4R3Kj3BoflDGRJUnnLndrI/BrLVcmJuiz18OTCAtj/7UdLdjC1kJV++PYTl4bkoU8ab2Lk/MLNUcRL3RvHC9OqUzmHn2MIvGBLQjh/9SlL4yAK+/DSA8lWjOHg8KwVzJXI+8FvG+bTkq5mDeM7LDEknWTDgHcZts1K4QklyEcGpI5OZMKk+g2aNoLHP3RYMSQTM70+/MduwFqlACW+IDDzC9+O/o97gWQxv7MrxNYvYGWzDznYWzUvCVMGPEu6xHJj9KYMm78ZWrAJFXcNZ+N04vBoM4KtBzSjmbOXYwi8YHPAEVaIOcDxrQbwTzxMwcQw+LUcxbeBz4L+axTtCsNtg56+/kGyukEqxYOXU/L68NfoAruUqUiRbDKd//JYJhbsy5oe++LpfO45/WXxj/Qn1qUBBayA/fzeFcm9N4JvuFckKJJ2cx8d9xrDdWoQnSnpDRCBHJ49nSv0hTP+qKfktdq7smsIHH07nmFMJnijhzuV53zFxZmu+mPwhtb3NxOyfxeBPJrPHVozyxVyJmP8dE3I15IORg2hS/P6uw8+XL9997Z/erF61kk8+/BAnJyfGT5xEVT8/R0cSkQxGxYKIPDDJZ09xNtFMoRIludWffJa8pSnuZXD0zCmu8gQASf6BePT/mZUv5SQuIQe5juz5dwd7JOu+HcXyhNoM/mU4LxV0xh6xg2/f6cdPxm1WgrEe56hpANOXdqS4m50rW76kW98/+O3vd3iuVQ4ur5rMlC0uNJ8wn/7VPTEDcfu/ofsbc1m6/iKNO/nc1bnbI1fx/aQtZGn5LT9/WB3PlAYZ3+V15i1ZR1jjV3jxk/FcPd+A0XRlwndtyQkk7PqOzybup0DvmYzpWpas2IncOZa
+fb9iROmn+P6VfNdO5wgMmMXiDiVwtV9h65edeW/5Ijb2fo4WDT7lm5jzNB4FXSZOplVql7InH2TxzzvI2nYyPw+oghsQt28M3Xv/xYo9PfCtk1Lc2QL9sfSfys8dS+JKNIcm96b31JHMqzWL7iWiWT15EltcWjJ+3kf4pZwkB8Z2pufcxay/2JiXvQ8wc/gMThbpwfcTevKEO9hCFvNh5y+YMKcx1XslMGXIRA4U7M3Uca9SJivYI3cyoXcfRn1Ziorfd6bIvQ/sZDoR4RFAyipQa9esVrEgIg+cJjiLyANjxMeSYJhwdct6687FlJVsriaMpEQSr01GNueqRatW5fHyyE/BPP93LUrsdtZsvkLpVj1pUND52u/70b1rLTxud6WJORe127WiuBuAGc9KVSjrZiUiLAIw4177fSbPHEcPP89rWa0kZ8+FlxPERsfc9bmbPWrTb+pMRvf0SykUAGtydrxyOWFcjSYm1YnXSexdsY4Qrxfp0rEsWVNawqtqN1r5Ghxcu+GG06lN25YlcAUwe1KpallcreGEpXUSuCkr2dwgbNtCFqw+QEisjazPvM+8LSsYWOff59xcqDHd2pRMOQ4eVOzUlsomfzZuPA9mD2q9P5XpY3tR5d+TJJtXLpy5SkyMneTDa9gY7MXz3TrzxLXBDYtPY/qN/oaPmhXEuvcv1od4Uf/VlymT9doxvarStY0v9gNr2RR6d8s5GYbBhHHj+PTjjwgLu3hX+2YEjZs2oaqfH7m8vWnY6CVHxxGRDEgjCyLywJi9cuFpNoiKjMCGB6l+QWyNIDLaIEspLzz+qShy5SHvLaoLW9h5QhNc8Cnsc1N7boUKk8d04tZhTF7kzn3DHmZnnC1gt6f8Merk4Y1H/GLmDPqGQ6fOERIcSkSCCYsNitzL7WecPPB2j2fp7EGMOxRIcHAwoREJmCw2KGqQaov2GEJCI7GFL6H/c8tv3mRNwJ4n5N8HvPLgfcPpmJydccKOPa2rPzmVpn3/HhwZPINJA1Yx0TknxZ6uQZ0mHejYpCI5rz3/loJFKXrjsJBbfvJ5gX9ICFAUD28P4hfPZujYg5w6F0xIaAQJJgs2imIAyaGhhJGfIkVvvPmZMwWfqU1B7ETsCeGyNZxl79XmzxuLPZuVRHseQi7YoGDahxa2b9vGnNmzAChUqBC93no7zftmBO7uHkya8r2jY4hIBqZiQUQeGKcifvgWmMqiXdu52K0YqV32n3BoBweinSjz9JP88+ekyWzBfKtRAhcXnEki5kocdtyuj1gYCfEk3PZvejPmWw5v2Ahd9gndP99KVr8mNGhSnxIlSlO+dBDjmw7ibBrO9T8thixj8Kufsc3Nj5caNeX5kiUoVb40Z8c1YUjQrSJaMJtNWIq1ZMjHjfj/aR4mZ+9/fzabUy++0sxMjso9mLCsA2f2bmHbjh1s37SBOUPXsStsFlNfT5kcayTEE2eHHNefaCs2G7i4ZgFbCMs/7cqwrW74NWlE4/qlKF6qPKWCxtJycMpJms0mzMQRe/XmF8dut2M2m7FYzJicitH8809o8N8TxrvE3X0smW94kV1d73OGtIiI/IeKBRF5cJwr0LTlkyyeOJPxy2owrEURXG7YbI/ex8yJy7jg9Ty9G+bHkoZbtFnyVaR8fli5eydRbRpdW241Ef9N2wm91/vP26PY+udGLhftyphxvSl/rSe0ntjA+aR/Rx/uokGitixn0+WivDJ2PG89cb1BNp1LApv92k3YzJhMYNhT/sOcjVIlC8DBUGILPcULea794Wu/yNpJE9ju2YKnKuRJUwKTOaVhAzupXWFqv7KLueOXktzwY7pVbUTRKo3o2OskP3TpyKw9B4m5VizYzxzhaAz4eF47hbMHORruSvEyxbBHreXPjZcp2mUcY3o/ce0DxMrJjedIxIbdCk4lS1GEHfj7R2Avn/dakiv81b8ZX8f1YkbPUvhwiAtXC/Lki/9stxO2ZiKTtuagWb/yFLyLZ75ylSr07tOHK1eiadOu3V3sKSIiaaE5CyLyADlR4uVP6VPTxMZhXXjj89n8tfkAx4/uYv1vExnQ+U1mnsxPi08+oF6uNHY/zk/TplMV7OvH8MnYRWzeuZWVUz9myLxT2LjH5THNLnh4uGGEHWLX8Wjs2LgSsJIJw+ZywmqQGJ9wtw2SxdMDN3sYR3YeJ9oOtisBrBr7OfOOWyHpn1GQLLi5OmEE7WbdzsOcj3WmfMv2+Jq2MmXQFDaeiiIhLpidMz9n9I8bCXFJW6EAkMXVFScjiL1rdnLkfOx/E2bLRuzRlcwcO57VJ6NIssZyftcq9pw3Ubh8WbJf+z175Fomj1zEkfAYLgesZMLQOZz0eYl2db0wu3jg6Wbn0uEdnEg5SQJWjmX43OPYjCTiEwycijelVTUndkz9kl92BnM1PpJjS0YxfZONsrVqULhiS9r6wrbvBjF1wymi4uMI2TGT4aN+ZGOIM97Z7u5jyWKx0LVbd/r060f27NnvvEMGduXKFd7o3o2O7doSEhLs6DgikkFoZEFEHiznErT6ehb5Zk9k+oLvGPR7YsrjFneKVGnKh8N60/JJz7v4psJCkbZfMcY6mglzx/Hhr07kK1+XDm/U44fJwbi63kvB4E6dXgNoem40k1+pwxQXCxb3MjR4Yzi93Qcw9VQA4HtXLWav+ybvNz/HN9914oXvXLBY3CndqAef93Xn4ymnCLxsp1KeLDxdvx75tixn5Nv7aDHhDz59tj2fj7nKV1/+yIDWP2ADzNkK8+xrI/mkXREgMU3Hz/LMC9TNv4U/v3qLfS2/5c/Bz978C07leXXYAM5/Mp5B7eanjHQ45aRMwwF89npFnK8dx1zIjyfCp9GzwTAS7c7kqtCUT755Fz93gHr0GNCcc6O/o2vd73CxWHAv04jXhvfFfcAUTgVEgm8Bmn/xLckjhzL5zcaMs4PJtSDVX/2Sj9sXwWKBtsPHEPvFl8x5vxXTUk6YwjVf58tB7bUS0n3Yt3cP+/ftA2DGtGkMHDzEwYlEJCMwGca9zOQTEUkDexLRYcFcjLGQ08cH72z38P2E/QqBBwIxCpSj5A0rJUUv6cNLXybS94/JtE31ng5pkUzMhRAi7Dkp4ONxy+Ve76rFmAuEhNvJWdAHj1s0aIuP5HKcM5653G84ZhJXQs5xKSk7eQvkxf1ewtjiuRwZh3OOXGS/5f7JRIec42KsCc98hcjj/s9rksiGQQ35KKAdP855kyLR5wlN9MAnfyrPS3I0F0IiMHIWJP+tThJIjDzH+ctmvAsVwNPlv9uTrgRz/lIy2fMWIM89nbDcKDg4mI5t2xAfH88rXbrQ9933HB1JRDIAjSyIyMNjdsEjXzE87uc+WUY0Wyf05AfjDX6Y1INy2cAWuYefft0NpbtTKef9XE3pjHu+IqR2r+N7btE9H0Xu0KDFzQvv/8zFdcHTpwSe93Nwixs5c99pkq8zHj7F8bjDb7l4FaTILZvwIF+RO7UAWbwKUSK1ez78cwzPAhS/rxNOERsby7gxY8iWPRt9+r1706TnzKRAgQJMnTmTgB
MnadCwoaPjiEgGoWJBRB5vlgI0fbMbWwZOo1uDheTP5czVsAvE565Dn1GduMvFcyQDWrp4MYt//w2AQoUK07ptWwcncpwyZcpSpkxZR8cQkQxEH7Mi8pgzk6PqW0xZ0pIj+45wNjIZ1zyleLpSSXKqB3uAnHmmxySmJeaiUDp7XnN5/7vEbGzsVQcmERHJeDRnQURE0r0fZ88iMiKCXm++haub7reQlJSEs7MzJtM9rhgmInKNigUREZEMJDAwgLd79gSTiTk/zyV3nrQvwSsi8v8y5ywwERGRDGrzxo1EREQQER7OXytWODqOiKRzKhZERCRDOXrkCJ98OIDly5Y5OopDPFPp33uElCmryc4icn90GZKIiGQovd54gz27dwHw/bTpVPK9uxvsZQQnjh/HZrNRrnx5R0cRkXROIwsiIpKhlH/iies/X7582YFJHKd0mTIqFETkgdDIgoiIZCjW5GSW//EHhmHQpFkznJzS2VqwIiKPERULIiIiIiKSKl2GJCIiGd6ZM2fo+05vxnw9ioT4eEfHeWhiYqJ5pUN7alavxu5dOx0dR0QyAI3NiohIhrdw/jy2bt7MVsDDw5M3evZ0dKSHYu3qNRw/fhyATRs3UrlKVQcnEpH0TiMLIiKS4VWvUcPRER6JvPnyAWBxcqJmrdoOTiMiGYHmLIiISKZw7OhRgs6cof4LL+Dk7OzoOA+Nv78/Tk5OlCxZ0tFRRCQDULEgIiIiIiKp0mVIIiKSKV24cIFx34xhw/r1jo4iIvLYUrEgIvIAJcTHczYoCJvNdtPjiYmJDP70Uxq+UJ/Jkyb9Z7/9+/YxfeoPBAQEPKqomd6kCRP4ec4cPnjvXY4cPuToOPdlx/btzJk9O0Ov9CQijqFiQUTkATl/7hxtWragdYvmfPBuv5u27d+7lxV/LiciPJyZ06cRFxd3fVv4pUu83asnU777jlc6tOfKlSuPOnqmlCdv3us/nz512oFJ7s/x4/707f02E8aNZdLEbx0dR0QyGC2dKiJyl2w2G6cCA8mXPx/u7h7XHz948AAXL14EUibT3qjiU09RvHgJgoLO0LRZc7JmzXp9m1vWrJhMJgCcnJxISky8ad/t27aSkJBAzVq1dTfiB+iNHj2w2azY7QYNGzVydJx7diow8PpIlsVicXAaEcloNMFZROQuJCcn83avnuzbu5ciRYowd8FCXFxcgJRLjYZ9NpQLoaG80bMXftWq/Wf/hIQEXF1d//P4+XPn2Ljhb556+mmeqFDx+uN7du+i1xtvANCseXMGDf3sIZ2ZpFdxcXF8NngQVpuNwUM/w9PT09GRRCQDUbEgInIXTgUG0r5NawDMZjNrN2wke/bsD+14e3bvptcbrwNQpEgRfl285KEdS0RE5P9Zhg4dOtTRIURE0gt3d3cuXbqEyWTi/f4DKFuu3EM9no+PD/ny58NiNvNGrzcpULDgQz2eiIjIjTSyICJyC9bkZPbt20ex4sXx9vZ2dBx5SOLi4njnrbcIvxTGmHHjKVmqlKMjpcnZoCCyu7vj5eXl6CgikoFpNSQRkVt4t28f3urZgzYtmpOQkODoODdZsngxHdq2Ye7PPzk6Srq3e9cuDh7YT0hICAsXzHd0nDRZ/NtvtGnZglbNmnIhNNTRcUQkA1OxICKSiqjLl9m+bRuQMqn56tWrDk50sx8mf0dgQABjR4/WvRnuU/ESJa6vIlSwYCEHp0mbpUsWYxgGsbGxhIeHOzqOiGRgWoNPRCQVOXLmpGu3bmzfto1XOnd57C5DqlOvHgvmzcNkMpHl2mpMcm8KFizI9FmzCAkJod7z9R0dJ01q1qrFoYMHqV2nDuXKl3d0HBHJwDRnQUQkHTIMg62bN+OdJzdlypR1dBxxgMTERFxcXK7fo0NE5GFQsSAiAiQlJbFl8yYqVnwS79y5HR1HRETksaBiQUQE6PpKJ44eOUKhwoVZ+Nvv6fJOuP7HjmGzWW+6qZuIiMj90ARnEcn0LoSGcvTIEQBir17FbE5/XeP+ffvo/HJHXu3cmaVLFjs6Trrz268L6dLpZdatXevoKLdkt9v5Ycpkxo4ZTWJioqPjiEgmkf4+EUVEHrB8+fPTvGVLChcuzKeDh6TLa8Cjo6Ov/7x29WoHJkl/rMnJjB0zhmNHj/Lt+HGOjnNLv/36K1O//565P/3EksW/OzqOiGQSWg1JRAQYOHiIoyPclxrPPkvjpk3ZsX07LVq1cnScdMVssZAjZ04uhIbi4eHp6Di3FBkZef1n3YhNRB4VzVkQEZFM79jRo6xft5bGTZtRpEgRR8dJ1dWYGCaMH0++fPno/vrrjo4jIpmEigURyXQSExMZP3YsFy9e4MOPPyZPnryOjiQiIvJY0mVIIpLpLFq4gIXz5wFQvHhx3n6nj4MTPRw7d+zAycmJSr6+jo4iIiLplCY4i0imExsbd/3np55+2oFJHp5NGzfydq+e9Hz9NVb8udzRcUREJJ1SsSAimU7nLl3o/9FHjPt2IjVr1XZ0nIciPj7++s+bN21yYJLH366dO3mzxxv8umCBo6Ok6sdZM6lbqxaTvp3g6CgikglpzoKISAaUnJzMF58N5fChQ3w6eIguRbqNRi++QPilS1icnNi+a7ej49wkOTmZWtWrYbPZyJEzJ6vXrXd0JBHJZDRnQUQkA3J2duazL4Y7Oka6YLNaAShYsKCDk/yXxWIhR44cREREUKVKFUfHEZFMSCMLIpIp/Ln8Dzb8/TevdutOufLlHR1HHiNHDh9mw99/07RZMwoVLuzoOP8RGhKCv/8xqtd4FldXV0fHEZFMRsWCiGR4QUFBtGnRHADfylWYMnWqgxM9ejabjT27d1OgYEEKFCjg6DgiIpJOaIKziGR4UZcvX/+58GN6w62Hbc7s2bzdqycvt2vLqcBAR8cREZF0QnMWRCTDe+rpp/nok08JD7/Eq91fc3Qch7h8ORKAuLg4tm7ZQvESJRycyHGCg4OZOH48efPlpe+772EymRwd6ZasVismkwmLxeLoKCKSSalYEJFMoXXbto6O4FAdX+7Ent27iY2Npe7zzzs6jkN9Oexzdu7YAUCt2s/hW7mygxOl7sD+/fR75x1y58nNjFmzye7u7uhIIpIJqVgQEckE8uXPz0+/zHN0jMdCTEwMAFmzZqVkqVIOTnNri3/7jatXY7h6NYYD+/fzbK1ajo4kIpmQigURybBsNhsXLlwgb548ODk7OzqOPCaGfzWCtWvWUPf55/H09HR0nFsqVTqlkMni6kqZsmUdnEZEMiuthiQiGdbATz5m5YoV1Ktfn5Ffj3Z0nMdKcnIySxcvJlu2bDRo1Oixvm4/M9u5Ywc+BQo8lveAEJHMQSMLIpIhxcXFsXLFCgAOHTjo4DSPn+XLljHiy5SbtkVFRdHh5ZcdnOjhSE5OZtmSxWAy0ax5C5yc0tfHXlU/P0dHEJFMLn31miIiaeTi4kKZMmU4fvw4LVu3dnScx47HDZffHDp0kA5kzGJh/NhvmP/LLwB4eubg+fr1HZxIRCR9UbEgIhmSk5MT02bOIioqinz58zs6z
mOnbr169H33PQ4e2E/31193dJyHxv/Yses/e3t7OzCJiEj6pDkLIiKSYfn7+zN/7lxq1HyWF15s4Og4d2Xnjh2M+HI4lXx9GTh4iKPjiEgmpWJBREQAsNvtrFm9mmzZsvFszZqOjnPXDMMg/NIlcufJ4+goD0S/d95hy+ZNAMz79VdKlCjp4EQikhmZHR1ARORBO3/uHGtWr8JqtTo6SrqyYvlyPv3oQ/q905sfZ810dJy7cjkykpZNm/BSgxeZ+v33jo7zQNx4l21PzxwOTCIimZmKBRHJUK7GxPB6t1f5eMAAvh45wtFx0pUbVwras3u3A5PcvT+WLSM4OBhIf9lv5Y2ePbyJKAkAACAASURBVPlk0CCmzZyp+RYi4jCa4CwiGcr58+eJiIgA4EJoqIPTpC8vNmxIWFgY+/bu4fUePR0d565Ur1GDeXN/xtnFhXf69nV0nAfCzc2Nlq20kpeIOJbmLIhIhpKQkEDf3m9z+tQpRo4ewzOVKjk6UroXExPNe337cuHCBXq99TaNmzRxaJ6jR45w7uxZnq9f/6Y7c9vtdsxmDZiLiDxIKhZEROS2dmzbRu+33gRSLlXatstxl/msX7eWAe+/D0Db9h0Y8NFHDssiIpIZ6CsYERG5rYpPPUUlX18AqlardtO2uLg4Ro8aydcjRxB1+fJDz3Lj3bgzwwT2+Ph4+rz9Fq2bNyMgIMDRcUQkE9LIgoiIpElMTDRZs2bDYrFcf2zq99/zw5TJANSsVYuxE769aZ/z588TG3uVMmXKptqmNTn5pkuJ/jnOwE8+Yc+uXTRr0fKm0YOIiAgmjh9Pjpw56NHrTdzc3B7U6T2WDuzfz+vdXgXguTp1GT12rIMTiUhmo5EFEclQ9u7Zw8EDBxwdI0Nyd/e4qVAAKFO27PXHPDw8btq2ds0aWjZtwisdOvDt+PE3bbt48SLtW7emZo3qTJxw87a9e/aydfNmEhMT+X3Rr9hstuvbcuXKxZDPP6fvu+9l+EIBoHDhwnjmSFk21dPT4w6/nb7s3bMHf39/R8cQkTvQakgikmFs2riR9/r2AWDqjJk8/cwzDk6U8dV+7jl++mUeFy9coFLlyjdtO3P61PWfDx7Yf9O2bVu2cOpUIACLf/+d3n3+XcHI19eXsuXKEXTmDN1ee/0/BUpmktPLi++nTePokSO82KCho+M8MCv/+ouBH3+ExcmJqdNnUPHJJx0dSURuQcWCiGQYhw7+ez37mdOnVSw8IiVLlaJkqVL/ebxt+w5ER0cTERFBp1c637StTt26/LViBWeDgni9Z4+btmV3d2fO3F8eaub0pESJkhnu7s3B588DYLNaCbt40cFpROR2NGdBRDKMY0eP8m7fPuTw9GTarNlkz57d0ZFEJBXW5GR++H4Knp456NS58513EBGHUbEgIhmKzWbL1JetSMa2c8cONm/aRNt27ShUuLCj44hIJqBiQUREJB1ITEykQf3nib16Fe/cufljxV8qjEXkodNqSCIiIulAlixZ8PLyAiAyIoKoqCgHJ7p7l8LCuKg5CiLpikYWRCTDCAkJJi4unpIlM9ZkUJF/nA0KYvasmTzzTCWaNGvm6Dh3Zf++ffR5+y0SEhKY/MNUfP9v9SwReTxpZEFEMoTAwABaNGlCx7Zt2Lpli6PjiDwUhYsUYdCQoemuUAD4848/iI+PxzAMgs6cdnQcEUkjFQsikiHs2bWLfwZKDx86eIffFsk4oqKiCA8Pd3SMO/KrVg2z2Uyp0qWp93x9R8cRkTTSZUgikiGcOXOGN7p3IzkpiVk//UzRokUdHUnkoQsKCqJT+3ZYrVY+HTyYps2aOzrSbSUlJWE2mXBydnZ0FBFJIxULIpJhxMfH4+TkhLP+EJFMYsP69Xzw3rtAys3xflmw0MGJ/nUpLAyr1Up+Hx9HRxGR+6DLkEQkw3Bzc1OhIJnKs7VqUf+FF3FxcaFR48aOjnPd8mXLeKnBi7Rs3ozDhw45Oo6I3AcnRwcQERGRe+Pk5MRXo0ZhTU5+rC7tmT/vFwBsVivnzp2lQsWKDk4kIvdKIwsiIiLp3P8XCufPn2fJ779zOTLSIXlat2lLjpw5afRSY154sYFDMojIg6E5CyKS7lmtVj4eMICtWzYzdNgw/XEimZrdbufF5+txJSqKAgUKMH/Rb2TJkuWhHe/ixYvEx8VRtFixh3YMEXEcjSyISLoXEhzM3+vXkZSUxML58x0dR8ShrFYrLs4uAAQHBxMaEvLQjvXn8j9o0rAB7du0ZsvmzQ/tOCLiOCoWRCTdK1CwIC81boxnjhx06tzF0XFEHMrFxYWvvxlDvfr1eaNnr4f6jf/i334DUkYzgs+fe2jHERHH0WVIIiIimcSO7dvZumUztZ97Dt/KVe67vfXr1jJx/HgqPPkkAwcP0WpkIhmQigUREZFMwGazUbtGdZKSkrBYLPyyYCHFihdP076zZ87g90WLePKppxk6bBhmsy5MEMks9G4XERHJBCwWCwUKFgTAMAxsNttN22dMm0rnlzvyw5QpNz0eFnaRiRMmEBwczIo/lxN05swjyywijqf7LIiIiGQS02bOYv26dRQo4EPJUqWuPx4UFMTkSZMA8D92jEYvvUShwoUB8PDwpEzZshz396dW7drXCw4RyRx0GZKIiEgml5yczMvt23Hm9GnKlCnD7J/nYrFYrm+32+1cvXoVDw8PB6YUEUdQsSAiIiIYhkFwcDAFChTAZDI5Oo6IPCZULIiIiIiISKo0wVlERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFERERERFKlYkFEREQyvP3793P69GlHxxBJd1QsiIiISIa2dOkS2rVtTZfOnbhy5Yqj44ikKyoWREREJEMyDIMxY0YzaOBARn09mmLFivHOO29jtVodHU0k3TAZhmE4OoSIiIjIgxQXF0f//u9z6OBBxo2bQOkyZYiJiaZrly5Ur16dzz4f5uiIIumCRhZEREQkQ7kQGkrHDu25EHqBOT/NpXSZMgC4u3swbvwEli//gzlzfnRwSpH0QcWCiIiIZBj79++nVasWFCtWjKnTppMrV66bthcuXJivR49h1MgRbNm82UEpRdIPFQsiIiKSISxbupQunTvRoUNHvhj+JS4uLqn+XpUqVXn/g/706dObU6dOPeKUIumL5iyIiIhIumYYBuPGfcOsmbP4Yvhw6tatl6b9Ro0cwbZtW/l10e94eno+5JQi6ZOKBREREUm34uPj6f/B+xw4sJ9x4ydQpkzZNO9rs9no805vDAxmzJiFk5PTQ0wqkj7pMiQReSim/vADvXr1YOvWLY6O
[base64-encoded PNG figure data omitted: embedded notebook image output]
OTWsYDmJmZ0dG6E45ODrmqM6cQfbdw1qxFCxkqKdjI5Jt11apVmb9oEbq6utSr3+A/X69suXI/rOcW+D2qVqvG3r17SE5OzvJk5mtXr5IYH0/tmrWw7txFCNj+I9u2bCE+Ph6/R4/48P69tOEW4PDBg6xZtRKRSISaujpz5y/Asm1bGar9M2bMno2RsRF169XLM9Okv6e1pSXbtmwmKiqKW7du0aRpU1lLEsgGKhsZEZjWKNm2XTsUFBRkrEi+0NLSwtnZlZOnTuLs5Mj2bVspW7YcaupqhIeHExIczF81/pIeHxsby+wZM9hrby+Uj/wBb9684e7duyxctCTDPq9LFwFo0qxZhr9jG5vu2PbrQ1hYmNybXJjW+Is3r19Ttlw5SmVSjiuQO8isZ6F1G8tsCRQEsoemTZuhrq7OpbQH0O/y8MEDvnz5TGJCAsuXLqV71y6c++cfwaP+P1AybWS9aY0aUi/4pKQkFi9cwIplSxGJRJQuUwYHJ2e5DBRAMqxp8NBhecYy9d+UKVNGWtIllCLlH+7cuiX1pm/XXmhs/hNU1dTo1s2GY8dPst/dg3Hj7OjffwBz58zj6rXrdLOxQem7cq8nj/2lNecCWeP0qZM0atwYfX39dNtfPH8udQdq2Srj1GZjY2MqVzbi3LmzuaIzJ3l4/x4A5i3/m6W+wH9DrpeAnz0LQFNDk7LlyslaityjqKiITfceuLm60qqVxW9nB5ydnWjdug2FNDQ47+nJh/fvmTNrJo4O9oweO5amzZrnsPL8x7oNGwl4+hQTU1MUFRX5+vUr0yZP5sljydDCRo0bs2T5CnR0dGSsNH/Tuo0lT588wdvLi6SkpB8OKRTI+/j7+VGsWDHOnD4FQJWqValYqZKMVck/JiammJiYptvWvkNHHvv78zYwkNJlynDm1CmcnRyp//ffNGrcWEZK5ZOvQV8pX658hu2eaUGAgYHBD8u4y5UvR/B3Q9vkkS9fvvDu3TsAIbsrY2Q+Z+FP+efMafr16kXvnj148/q1rOXkC/r1syU6Jpr58+aSmpr6y+M3rF/HI79HTJk6jWUrV+G8311qvfri+XMm2tkxbfIkIjIZ5iPwY9TU1KhZqxbKysr43r2Lbe9e0kBh0JChbNi8RQgUcoHWbSTTnGNjYrhx/bqM1Qj8KU4O9gzqb0vfXj25eFGSORXsUnMONTU15s5fwB57B+bMm0+VKlUAWDB3DqHCPJ4sIUoWZSgLTkpK4mLaoM5Wrdv8sJRORUVF7vutvrk9KSoqYmJq+oujBXISuQ0WvNIe+okJCYSEhMhYTf5AV1cXJ0dnHj56yPx5c3/4YI+OjmLd2jWcOHECR0cnypYtC0h6UTZu2cquvfv4q2YtALwuXaKnTTcue3vn2uuQNzzPnWPPrl1ERaVvFnRzdWHMyBGEh4ejqanJqrVrGT127B/Z2+YlQkNDmTltGsuXLsm0gTuvUKJkSWr8Jam/FkqR5JeLFySfFVFRUSQmJKCoqIhlWysZqyoYqKiosHTlKjQ0NAgLC2PenNlCiWoWyGxy9qkTJwgPC0NRURGrdu1+eG5EeDjacr6odPfObQC0tbVRU1OTsZqCjdx+62jewgyAOnXrUat23qx7lkdKlCyJk5MLISHBtLOyZMH8eXh7e+Pr68vVq1dYtXIFbS0tefDgAX379OXO7dsZVi9q16nDXgcHps+chbq6OmFhYUyZOIFF8+cTG/tnjkv5lbNnzjB7xnR2bt+Gk4M9ADHR0cycNo31a9aQkpJC+fLlcXB2yTc1m4cPHuDCeU+OHDrEzRs3ZC3np3xzRbri7U1CQoKM1Qj8CWbm5gAULSqx1W7YqJFgsZ2LlCpVSlqOevvmTelzTuDXNGzUmCtXLksXVRITE3FxdgKgVevWlElbqPs3UVFR3L59m0aNGuWa1pzg2yJjZGQkAWlZBgHZILfBQgdra27e9WXnnj1Zdu8R+DnlypXD2cWNg4eOoKCoyMaN65k5Yxpr16wmLCyMPXv3MX78BFycHFm/Zg1bt2zO9Do2PXrg5nFAujp78sRxenfvju/du7n5cvI0z579/wGob2DAo4cP6dOzJxfOewLQ3MwMBxfXdI5I8s73K0Samnl7IJZF69YoKCgQHx+Pz7VrspYj8AcMGjKEU2fPEhEhWaEVGptzl+NHj6bLzG3fupWHD/LHDICcpkWLFqipqUmNR44dPSLJKigpMWDQ4B+ed+rkSSpXroypaY3ckpojfJ9tF4Ynyha5bnBW+teAHYHspXr16qxevSbTff5+ftKfn/0k4i9brhy799nj5ODArh3b+fz5EyOHDaVPv36MGWdX4JtG+/azJS4ujtKlyxAVGcXwIYNJSUlBWVmZsXbj6dOvX76zd+xm053Y2Fj09IrleZ/74vr61K5Th3u+vpz3PEcri4zOIwJ5H6+Ll0hNTUVDQ4MWZmayllOg+N4auVChQsTGxjJr+nRc3T3kajaMLFBUVKRHz17s27uHunXrsd/VFYC2ba0o/QMb0bCwUFxdnRk1ekxuSs12RCKRdOhc7779KF8+Y6O3QO6hIBYKCAX+kDOnT3H18hVsBwyg+neTJX/E82fPmDdnNq9evgSgYsVKLFyyhKoFfMJncFAQc2fPxvfuHUCS2Vm6YmWB/73kFQ4dOMDK5ctQU1fn/CUvYYVLDhkycCCPHj6gjaUlS1eslLWcAoe7mxv+/n60sWzLrOnTSExMpGGjRmzaui3fLYZkNwkJCQweNJCQoCDCw8JQVlbGxd0dA4MSGY6Niopi+LAhlCtfns2bt8r1gmpIcDBWbVoD4OTqJrWyFpANSgsWLFggaxFZ4cuXLxz08KBQoULoFSsmazkFGiNjYyxat6b4vzygf4ResWJ06tKFpKRk/B49JDw8jBPHjwNQs1YtuW/c/RURERHs2r6d27dvU7NmTZRVVLhy+TJ2Y0bz5o3E0atDR2vWbtxIyZIlZaxW4BslSpbEzcUZUXIylSobUfm7aakCeZc3r19z9PAhFJWU2L1zBwAjx4zBsEIFGSsreJjWqEHLVhaUNzRET68YVy5f5sOHDygpKVGnbl1Zy8tzfPn8mW1btvDsWQB169XH0NCQQwcOANCocROsO3VOd3xqaire3l7MmzsHAwMDtmzdLvfl2R8/fuTwoYMADB42DC0tLRkrKtjIXWZhoG0/Hvv7U7hIES54CQ47eY2Y6Gi0tLV/edyD+/dZMHcOHz9+BMDE1JSFS5bm61TjrOnTOO8p6UWYv3ART5484aCHOyBJz8+YNZu2P3G3EJAdY0aO4PatW7QwM2fN+vWyliPwC0QiEV2sO/Ll82eKFS9OSHAwmpqanPfyLvClj3mBBfPmcvrkSRQVFdm6Y4cwoPVffMuEAWzZvoPVK1fwNjAQLS0tomJiKFGyJA0bNaKQpibR0dFcuXKZ2NhY+vTtx7Bhw/NF9tPZyYlN69cBMH7yZOrUqUt1IbsgM+RuKTfwzRtA8mEgkLdwdnSklVkLRgwdQkJ8/E+PrVW7Nm4HDtKlazcAHvv707dnDzz278+31nrfLPAUFBSw3
7dXGihUNzHFxd2jwAQKSUlJ7Nm1iz27dpGSkiJrOb/FN1ek6z7XiEmroxXIuyQlJfHl82cAotOaJJu3MBMChTyCpmYhQLIiPnvmTMH+/F9EhIcBkqbe48eO8jYwEEVFRTZu2cqVaz706t2bhPh43r1/R7JIxKhRY7hy5Rp2duPlOlBISkxk/3432rdry/p1kn5JRUVFjh87RnebrvTs2Z3Tp0/91hwogexF7sqQNDU1iYiIYPzESfnKISY/sG3LFj59+sjnz5+pWLESlY2Mfnq8iooKzVq0wMTUlDt3bhMdHc11Hx8e3L9P3fr1813asW7derx7/44vnz8TFib5MOg/cBCLly4tUI1+58+dY/XKFfjevUOJkqWoWrWqrCX9kpKlSuHq4oJIJKJCxYoYGxvLWpLAT1BRUSElJYXYmBiCgoKAtBIkQ0MZKxMAWLNqpdTpJj4+nvv37mFlZYWynJfOZBd/N2xI0aJ6lClTllMnTgCSgZwdOnZEVVWVmjVr0qqVBW3bWtGyZSuqm5igpCzXfjUkJiYyYvgwvC5dpFevPpibt+S6jw+ampocOHQYm242xMTEsnXrFl69eknLVq2EfpdcRO4yCz169cbRxZWWrfKH53x+oq+tLerq6qipqVE1C+nCJk2b4nHosHRi7p3bt+nV3Ub6kJRHIiMjuX/vnjRLEvjmDXNmzcLn6lWSkpLQK1aMLdt3MG78+AL3ASnm/5mjqMhIGSr5fQoXLszff/8NSIIdgbzPqDFjsEqzSS2kpUWjxo1lrEjgGwMHDUZZWRl1dXUAnjz2Z8a0aXKTacxOQkNDefTwYbpthhUqUFxfn0MHJX0KFq3bMHL0aFnIyxW+BQqRkRG4uu2nR8+e6BaVLKDFxcUBEletIUOH4uTsws1bN5k1c4aQYchF5K5nQSBvExkZibKyMoUKFfqj88+dPcuq5cukq04tzMyZPXduOvu93CAmOppznuf4+vUrYrGYokWL0rp1G4r9RlP9+3fvsO3Tm9jYWGx69KBYsWLs3b1bOljHonUbps2YkeuvKa+QkpLCmlUriY6OZtKUqRSVk9/DqRMnWDh/HkrKynhevISOnE9HLQgM6NePJ4/9ade+PQuXLJW1HIHvCA0NRUtLCw/3/WzesAGATl26MGfe/B+eExj4hitXrhAVGYmKiiply5XFwqJ1nisve/niBT7XfYiKjERNTQ3DChVo2bIVyv9a/fd79IgRQ4eQnJzMWDs76ewEZ0dHNm2Q9EbVrFWLbTt35bnXmJ1s3LAeT89z7Nm7D21tyXP1wf37TLQbB8CZc55oaGpKj//06RODBvZn0uQpdOtmIxPNBQ25K0MSyNuoq6tneKi5Ojtz+NBBihcvjr6BwU/Pr1y5Mu3at+f161d8eP+et4GBnDp5grLlyuWKi8nnz5/YvGkjU6ZM5vnzZyQnJRMaEsKtmzdYs2Y17969xdDQ8KcTYI8dOSwd4PU2MJAb16+TmppK8eL6LFq6lCHDhsl1Xel/RVFRkabNmtGylYVc/R5KlSqFW1opUrny5eWifKog8/nTJ+kXrlFjx+Vr8wR5RFNTE2VlZWrWqkV0dBT+fn48CwjA2+sSderWRfe70sy7d++yaOF8li1bSnRMNPHx8Xz4+IETx4+xd+8eEhISqVKlijRTISt8fHxYMH8uq1evJj4ujrj4ON6/f8+hgwdxcnJEJEqhSpUq0s9IR3t7/B49AiQziRo3acrqlSux37sHgGrVq7Nh85Y/XnyTB5KSkpgyeRITJ02mSpX/P1Ojo6M5eULiltilWzc0vwsWtNNMVI4eOUyvXr1zV3ABRW4yCx/ev8fVxZn6Df4WSpDkiI8fP9K5Q3tAUspx/PSZ337wHTl0kPVr15KQkABA+44dmTJteo71Mrx69Yo+vXtSvboJ/QcMoP6/HDqePQvAxdmZixcvsGv3Hho2bJTpdZ4+fcokOztCQoKl27p0s8FuwoR814dR0Jg8YTxXLl+mUePGbNq6TdZyBDLh2bMADh88BIg5evgwWlraeF66JPdWkvmZ1NRURo8Yju/du4DEHe7SlasoKipy8OABFi1cQNdu3ejTpy+lS5dJd96lSxdxdHQgPi4et/3uMstU2tvvY8P6dfTo0ZNevftg8N3CmEgk4rynJ46O9qioqODs7IqWtjYBAQEsWbiAkiVLMWjIEDZt2CCdt9OkaTOWr1olVwsqf8LJkydYs3oVJ06eTjcX4uvXL/SykWQNduzZky6QAIiOjsKyTWscnZypXbtOrmouiMhNsDBq+DDu3pG8ia7dvIWampqMFQn8DnFxcfTt1ZMP79+jrq6O15WrWarR//D+PfPnzpXayBkYGDBq7Dis2rXL1rkM79+/p3evHrRu3YbJU6b+9NiDBzzYuHEjDg6OlCtfnlcvX1K7dm1SUlPx2O+G/d59xMREA5LVotlz51G3Xr1s0yogO06fPMmCeXNRVlbm/CWv37IJFshdbLp05m1gIEpKSqSkpNChozXzFy2StSyBX/Dp40e6de4kdTpsYWZOk+bNmDd3DuvWb6BRox/3nIhEIqZOmURQUDAurm7SlefcwsPDnWVLl7Bl67affnFNTEzEbtxYUlJTsLd3RENDA7FYzMEDHmzesEG6MNalmw3TZ86U66Fqv8vCBfMRiURMnzEzw74OVm2JjYlh0tSpdLTulGG/3bgxNGnajCFDhuaG1AKN3AQLfXr24MXz56ipq+N58VK6lJRA3iY8LIzr169jYmKSrpQoOCiIo0cOU7ZcOazatf/h+ampqbg4ObJj2zZp3X+lSpUYNWYsLczNs0Vjl87WVK5sxPwFC3/reHv7fTg5OqCprkFcbCwmJiaEhITw9etXQFKO1a9/fwYOHiIEtj/gzOlTvH/3jgEDB6EuJ6tn0dFRtG7ZkhSRiIVLltAurYFWQPY8fPiQ69d92Lt7F0kJidL33YbNW2jStKmM1Qn8Dh8/fmTRgvncS8swpKSmMnXGDDp2tP7luUlJSYwbO4YSJUqwdl3uzUJ58eI5nTtZs3HjZho2yjzb/D3x8XGMGD6cmjVrYm7ekr27d/Hk8WMAiujqMn3mTCxat8lp2XmGKZMnoW9gwOjRYzLsmzxxAvfu3sWyrRUzZs/OsH/O7FkYVqjAhAkTc0NqgUZuehYMKxhKRpmPHEWVKlVkLUcgC2hoaGBsbJzBHnTurJkcPXIEr0uX0NfXp2q1apmer6CgQM1atTFv2YqvX77w7u1bwsPD8Tx3jhvXfShTpgylSpf+Y30PHz5kz57dbNu+87e/2NeqVYsDBzyIjIhASUmJ4OBgYmNjUVRUpFPnLqxet47mLcwyNLQJSHj54gVjR43inq8vMdExNGnWTNaSfgs1NTUePrjPhw8fEKeKadO2rawlFXg8Pc8xa+YMdu7cgbKyMtVMqoP4/05bxQz0qV69uhC0ywE6Ojp0tLZGWVmJu3fuoKigwGM/P3R1dX9pxa2kpETVqtVYuWoFvXv1TtcQm5Ns3bqF4sWKM2jw4N86XlFRkeioaA7sd8Pz3FmCgyXlqq1at2bjps2Y
mJrmpNw8x9VrVxCnpmbqVhYcFMT9e75ERkTQo1evDPtPnT5J+XLlhcx9LiA31ql169Vn7YaNtLKwkLUUgWzCoEQJ6c//HnQlSk7my+fP6az0KlaqxLqNm9hj70CdunUB8PfzY9Tw4Qzqb8vJE8eladys4OG+nzZtLLOUulZQUMDGpjsK35VCNW/RAveDh5g9bx7FihfPso6CxPfp9U+fPslQSdYxT+uZunnjOvG/GD4okLNs2rSRmTOm07BRY/4568m69RuYN28BpCXMa9epg7fXJfr26U2knNj0CkjmzyipqKCiqkpUVBQrly9j0ng7Prx//9PzjIyMqF6tGocPH84VnQkJCRw/dpSuv+HIE/D0Kdu3bqVHt6447tsrnRFQq3ZttmzfwYpVqwukQ17lSpW5d+9epvvq1a8PQEhICAFPn6bbl5ycjN+jR1SqXDnHNQrIURmSQP4jJSWFM6dPIRKJ6NS5i7QHISUlhRFDh/DwwQOMjauwbdcuChcunOH8G9evs3XzJp4FBEi3aWlp065De8xbtqRWrdq/1R/R8O/6LF26nL8bNsyS/i9fvtDOypImjZtgN34CtesITVZZwc3VBZ9r1xgzdhzVTUxkLee3CQsLo61FK8RiMctXrSpQJQN5ib1797B1y2Z27d5D1ar/z0p+b7m4dcdOjIyNmTDejtjYWBydnPO1s0x+4cGDBwwa2B+PA4fYvHEDN69fB0BFVZX27TvQuWtXyv9gwN4BDw/On/dkv7tHjuv0uXaNKVMm4Xn+YoYBYcFBQTy4f58HD+7je+eOtET1GwYlSqBTuDBuuaAzLxMZGUmzpo3ZuWs3NWr8lW6fWCzGtk9vPn74gFW79kyb+f++hrP//MPmzRu55HW5QPR2yBqhRkJAZigpKWXatASSjAHA8+fPeBYQQIO0gVgAr169xO/hI/5u2BBnt/1c9/HhyOFDXLtyhZiYaA64u3PA3R1NTU3qN/ibvxs2pLKRERUqVMh0UnJkZCTF9fWzrL94WvZg4eIllCtXLsvnF3T69O1Hn779ZC0jyxQtWpTadepwz9cXr4sXhWBBBvj7+7F+3Vp27tydLlAApMMcK1aqJA1C165bz6iRw1m1cgULFy3Odb0CWSMiIpyiRYtSqlQplq9cxWUvLzZt3EBYaCjHjh7h2NEjmJiaYt6yFc1btEj3/NbX1yciIhyAJ48f8/zZM5qbmWWbS1JKSgo+166hp1eU8IhwihfXJyIigreBb3jz5g0vn7/gwf17mWZMDUqUoJVFayzbtuXhw4ccPXYkWzTJM4ULF6ZjR2v27tnDuvUb0hmXKCgo0NHamh3btnHp4gVGjxuHlpYWiYmJODo60Lt3HyFQyCXkIli4dfMmVy57071nLwx/sJogkH9QUlJi4eIlODs5Ut7QkDrfrdhHR0cxbNAgoqOj0dDQ4Nip0zRp2pQmTZvy9etXHPft48J5T8LDw4mLi+OytxeXvb2k52toaFBcX58SJUqipqaKqpqaZMryHyTYviXlhJHzBY+WrSy45+vLtbSJ3Pl5YFJexM3VFUvLttSsVSvd9qioKK5c9gagQ8eO0u3q6uqMs5uA3bixTJs+Q8gu5HEUFBTSPZJbmJtTt359jh4+xInjxwkJDuaxvz+P/f3Zsmkjenp6VKlalSpVq5KQmIhYLObDhw8MHTSQ5ORktm3ZzBnP8+l6yJ4+ecKtmzdp0rQpRsbG6e4fGhrK2TNnqFS5Eg3TXJhSUlKIiIhg2pTJPHogcefTNzAgODiYrtYdyQwVFRWqVa9Ordq1adykKVW+m81y/8F9FBA+OwDGjbOjV++eLFq4gHnzF6QLGNq2a8/e3btJTEzE8+xZ2nfsyMQJdqioKNOvn60MVRcs8nwZUnx8PFZtWhMbE0ODv/9m646dspYkIEMSEhJo2bwZycnJKCgocODwEanDUlJSEtbtrAgNDQVg8NBhvHzxnIcPHxIZEfHjayYlsWbdOpo0yZpjyocPH7Du2J579x4IFpoFjKCgr7S3tARg7YaNNG/RIsfudevGDYKDg2nfsaMQmCKZrt6kSSO2bd9JzZo10+07dMCDrZs3o6qqyuHjJzLMNbHp1oVBg4fQs2fGZkmBvIOf3yP69e2D9+WrGeZjpKSkcPPGDS5dvMB1Hx8SftA3pKqqSlJSEiBpKm7brh0lS5ZERUWFxMREnBwdSRGJUFBQYMTIUSgpKxMVGUlUdBTn/vlH2v+mp6dHfHw8cXFxv9RdVE+PChUqYlqjBrVqCFohbQAAGaNJREFU16J6dRNUf9BYv3PHdl68fMGOHbuy8qvJt3z8+JG+fXtjbGTMyJGjMP7OyGbpooVcOH+eYsWKoVW4MCkpIhwcndHR0ZGh4ryH16WLpKaKc6S3N89nFkSiZOJiYwGJR7FAwUZdXR0nVzf+OXMG0xqm6axY/+08ZN6qFaPGSOzYIsLDGTJoIO/evgWgSdOm6OsbkJiUyJ07tzlx/HiWg4VTp07SsFEjIVD4j5w7exb7vXvoaN2JvrbysVKkr2+Aiakpj/39uXTxQo4FCw8fPGDcmNGIxWLCw8OxHTAgR+4jT9y+fZsiRYpkCBTEYjHHjh4FJO/9zAYgWlq2xdvbSwgW8jgmJqYUKVKEixcv0LatVbp9SkpK0mxyYmIizwIC/v/fswBpE/S3QAEk9ttnTp3K9F5isZgd2388YPHb4tP3qKmpUbJkSeo1aMChQ4cYOmw41p2s0db+vS+vYrGYM2dOM3ac3W8dXxAoXbo0bm7urF+3FlvbvpiYmFK/QQM0NTVJRbKmHRISQkRkJFOmTRMChX9x8cIFZkydAsCS5SuwzGanvjwfLGhr6zBh8mRu+PgwYtRoWcsRyANUNjJi3PjxGbYrKiri4OLK+XPnqFa9GlW/S/kW0dVl87ZtXLpwkQoVKqSz6nz69Ck23boQHh6ObiY9DZmRkpLCsWNHmTN77n9/QQUc+z27efXqFRvWraWVhQUlSpaUtaTfoqWFBY/9/bni7Y1IJMoRm1yRSCQtd/s2Y6SgExkVSfHiGXuM7t65w8cPHwDo0rVbpucWL16cO2nDPQXyLoqKinTv0ZMjRw5nCBa+R01Njb9q1uSvtMDxyZMnDB40gB07dhEfH0dYaBihoSGEhoYSFhpKeHg4ouRkUlJTSUxMJCYmBnU1NVTSnJd0dHTQ1tFBUUGR/7V353FR1tsDxz/AsG+yuYBrauktzeua4i6BqKgYYLhrmrtluYS5mxsqueWSgqbinpcr7iyKKWh161paoiliiorJOrIMM/D7g+LKz1ExwBnkvP9BnvU88/LFM+d5vt9z0tJSqV2nDo0aNS5abmtrQ81atYsV3MgH/vOf7xk4qOTzr7799lvS09Pp8ZT+QpWRs7Mzy5av4JOAAPbt28fVK1fIys7Cysqahq+9xtX4eNR5eSxbsoReXr2lHPIj1I/cH9TlcK/Q+2QBKu5ESPHiVa9e/YlPX52dXRg0ZMhjyxs3bkyTpk1ZsngRi5csLVFn6PXrv8AApJRvGXjzn//k2rVrKBQKNPn5ug6nxLp2c2P
NypVkZmbyn+++o00JGjI9rxYtW7IsKIjke/fw8etf5seviAwNDMnP1zy2POxAYbnMv8aua6PJz8fAUIZyVQS+vn5s3LCeQ+EH6VWCpmzZ2VksXbKY3r370P4F9m3x9/enV88eREdH0bVrt2dun5GRQdCKZfj4+smX3SdwcHBkzJixxZbdvn0bb69eFBQUYGhoSF5ennx+j/Dw9EStVqPJ19DTS/scmtKoMH0WhChPq1ev5fLlX1mwYD7PmsazJSSYr/fvZ3PwlsfG04rnN3X6J8ycPYe16zfgUormei9azZo1efXVwnG1UVFR5Xaezl264veuf4mS2MrA3sGeO3fukP9IYnnv3l3OxcUB0Ldfvyfum5SUhKODY7nHKEqvWrVqrFn7BQsXfkZkZMRTt83NzeXDDz7A2MSYWbPnvKAIC9WtW4+lgcv4dEYAsbFnn7ptVlYWE8aPo1r16nz00ccvKMKXg4uLC779Cx+YGBgYoMzM1HFE+qenlxe9+/Qtl2PL3UcICsvtbdseyrm4WEYMH0pUVGSxLyMA3333LR9MmkhwcDDBIVulk3gZUSgU9PH2rpBdOLu6FT5JjDkZ/dj/F1E+2rRug1qtIS4utmjZwbB/k5+fj42NzROf7hb2dTlMd88nD2sR+qVz5y4ELlvOpzMCmDVrJlfi44utz83N5V8HDjDAvz9ZWQ/ZvCkYc3PzFx5nz569mDN3HpM//ID58+dy/fr1Yuuzs7PZs2c37/b3xczMjHXrNkgFtb/h/TFjsbW1RaVSMW/O7Gc+2BNlR++rId1ISCA6KpK33T2oJbXsRTlLTU0lNHQHoTu2o1AYU7t2bYwURiQlJfHgjz/w8+vPkKHDKtQTcFF+rl+7Rn+fwvHxGzcHF3UWF+Vr8eJFJCRcJyhoJXl5efj18yYtLY3+/v6MGTde6z4xMadYMH8e35yJlTeCFczly5fZsiWYQ+Hh1KtXD0dHJ3Jzc7l27TdsbW0ZMnQYPj6+OkkUHvXzzz8REhLM8WPHaNCgIfb29uTk5PDbb1epWrUqw4aPoG+fvk+skCSe7fixY8wM+ASAKdOm09/fX8cRVQ56nyz49vPmRkICDRo2ZNfefboOR1QSKpWK06djSL6XTH5BPvb29nTs2ElrhRVRtraGhHDqZDTvjxlLO1dXXYfzTD7efUm8cYP+/v5MmTZd1+FUCjduJNCrZw/mzV+AoYEhixbMx8DAgNDde6jh7PzY9g8ePOC9EcPw8urNBx9O1kHEoizcv3+fuNhY0tLTMDExoVbNWrRt107vhujdvXOHc+fPkZ6ejpmZGXXr1qVNm7d0HdZLI2DaNCIjTmBkZESDhg2ZHjCDJk2bPntH8bfpdbKQn59P5/auZGdn4+DoyLGISF2HJIQoR1lZWXRu70pBQQGWlpZEnjyFQs+fAq9bu5YtwZupVq0a4UePlboXwq1bt1gwdy5OVZ2YO2++3l+/rpw4cZyPP5pMg1fqc+v333mrbVsWBy57bLv09HRGjXyP+vXrs3LVaun4KkQFl5aWhq93X9L+7J/kUrMmYeHaS+O+zC5fvszSRQtp1Lgx0z4JKNc+PPqVjv8/hoaGjB0/gfr16zP54ym6DkcIUc4sLCyKnhApFApUFaBcaNc/K2Ldu3ePXy5dLPXx1qz8nB/+8z3Hjx7lxx9/LPXxXlbu7h5MmDCxqK5+i1atiq3PyMggJHgzvj79qOFcg6DPV0qiIMRLoEqVKsyaO7fo97TU1Eo5f2HpooVc/Pln9u/dS+KfPaTKi96XTvUfOBD/gQN1HYYQ4gX5Yv0Gjh49QpMmTbGwsNB1OM/UqFEjnJ2dSUpKIioyitffaFKq4+XnF970LCwseKV+/bII8aV1PzkZAHNzc5YtC2Tnzp3Y2dmRk5NDQsJ1Xn31NWbOmo2HR3dJFIR4iXTs1Bm/d/3Zu3sXDx8+ZP/evUXVkiqLv+4Vjk5OVK9evVzPpdfDkIQQoiJYGbSC0O3bcXZ2JuzQ4VK9Dk5NSeHE8eO0aNmSBg0blmGUL5fMzAw83d3Jzclh0oeT6dGrJ99//z0Z6RmYmZtRr1493ihl4iaE0F/5+flMnjSR2LNnMVIo+GL9hgpZVe/vSkq6zelTMXTq3FnrXK2yJMmCEELvXfz5Z+zs7fW2CtXlX39l8IDCqhxr12+gzVsymbG87dm1i+WBSzExMeHIiYhiXXWFEJWDMjOToYMGcvPmTapUseOjqVNp1bo1jo7ST6Us6fWchdzcXE4cP861a7/pOhQhhI4cO3KE4UMG4+3Vi7PffKPrcLRq1LgxjRo3BuDA1/ufa19lZiaxZ8+SVwHmZ+iTI4cPA4WdtI2MjDh86BC3b9/WcVRCiBfJytqaFatWY2llRVpaKrM/nYGXZ3d+uXRJ16GVuZSUFM7Fxeqkp49ez1kI3rSJLcGbMVIoOBEVjY2Nja5DEkK8YA8ePACgoKCAo0eP4Nqhg44j0q6fjw+LFiwg5tQp7t69y/lz54iIOEFqagpGCgXVqlajr3c/XB8pB5uWmorvO/1IS02lq5sbS5ct1+EVVBy3fv+9aDJ5d09PVgQGcij8IDY2NhyPikah0OtbmxCiDNWtW5eglasYN/p9NBoNarWag2Fh/OP114u2yc7OJizsX8TEnCIjPQNjY2NcXFzw8fWleXP974+TmJjIoHf7k5OTo5My3Xr9ZuHqlcJujRq1miR5YiREpeT9zju4e3hQs1Yt+np76zqcJ/Lo7om5uTlZ2dn08PQgKGg5tevUwaO7J106d8Hc3JxJE8fT3cOdw4cLy/zFxp4lLTUVKJyrIErm2NGjANhWqUKbtm2L7hUZGRlF5RSFEJVH8xYtmPfZwqL5YufiYklLTSUvL4/ApUto79qWLSEhvPZaIzw9e9CpU2c0Gg3Dhw2lbx8vTp+O0fEVPN2p6ChycnKAwuaxL5peP37xfseHixcv0qJly6JX/EKIysXCwoKFS5bqOoxnMjc3p4qDPSqNmpkzZ9OxU6fHmkVNnPQBh8IPMiPgE5SZmXh096R1mzaoVCqmTv9ER5FXPO1cXbl/PxlbW1sUCgV+/v6sCgrCw7OHjFUWopLy6N4dExNjAqZN4/bt24wcMRxbeztu37pFYOByrXPJPpz8Efv372P8uLGsCPocd3cPHUT+bG7uHpyOicHS0pIJEye98PPLBGchRIV0MzERa2tr7OztdR0KAHNmzyI6OorNwSG4uNR86rbnz5/no8kfMHPWbHx9/V5QhEII8fI7duQIc2fPQqPRYGhoyIrPV9KsefOn7nPixHHmzJ7F6tVr6dK1a5nHlJuby+HDh4iPjycr6yGWllY0bdoUd3ePCjFsUpIFIUSFc/bMGSZPmoiBgQGBK4Lo1LmzTuOJj4/Hu28f9n99gNq1a5don8iICBYuXMA3Z2IxMzMr5wiFEKLy2LhhPZs2bsQAsLS05LPFi2n2z6cnDLt372LXzlAio06WWTfk9PR0Nm5Yz759+7CxsaZVq9ZYWFiQqVQSFxdLQX4B/d99l1Gj3sfc3ByA6KgoQjZvoknTpkwPmPHc57yfnI
xSqaTeK6+UyTWAJAtCiApoZ+gOPl9eOBm4bbt2rP5inU7j+fTTGSgzM1m4aHGJ9ykoKMDbuw+j3x+Dr5+8XRBCiLIy+v1RWFhY8POFC6SnpaFQKBg9bhw+T3mTm5ubi2d3dwKXLadz5y6ljiEtLY0hgwdhambKyPdG0c7VtVgSolarOXXqJJs3fUmVKlXYtDkEjVpNz+4eZGVlARB26HCJSobfvXOHqMhIoqMi+enCBdq1b8+qNWtLfQ1/0etk4dvz58nLy8O1fXtdhyKE0DOHw8O58N//MmjIEGrXqaOzOFJTU2nTuiWHjxyjRo0az7VvZEQEq1evJPqkfk+u03cxp05hbW1N8xb6X9VECFG+frt6ld69vTh5KgZTU1NWLAvk2JEjANjZ2zNz9pwn/q3YtWsnkRER7Nm7r1Qx/HLpEgMG+DNu3HgGDBz4zO3XrFlN2L8OsG//AdasXMn5c3EMHjqMUaNHa93+3r17REVGEBURyc8/XeDRr/Kmpqa0befKwiVLMDExKdV1/EVvB0qdPXOGDydOAGDR0qW8raeTToQQutHTy4ueXl66DoMHDx5gZGT03IkCgEvNmqRIFaRS+XdYGJ/Nm4uBgQEbNm2WhEGISu6PB39g72CPpaUlANMDZvBms2asCgoiNSWFKZM/xLf/uwwbMaJo6M9fatasSUrKg1LHsHbtGnr07FmiRAFg4sRJ3EhIICR4M8uCgtBoNBgZGRXb5k5SElGRkURGnODSxYvF1pmZmdG+Q0e6ve2Ga/sOj11XaeltspCZkVH07/S0dB1GIoSoKFYsC+TQwXB69Or54qoLFRT87fGtZTQstlJLTy8slVpQUEBmZqaOoxFC6Jq2ATPdPXvQuPE/mDdnNgnXr7N39y5ORkcxctT7uLm7F1WuM6D0f5Tv3Eni5Mlo9n994Ln2GzhoMJMmjmfKlKlYWVujzssjPv4y33/3HVGRkfz6yy/FtrewsKBDp050c3OjbTvXcp37prfJQvcePcjMzCQvT8U7vr66DkcIoecKCgo4sH8/KpWKvbt34+7RnTebNSv389o7OKBWq7l79y7Vq1d/rn1v3bqFnZ1+VHOqqAYOGgwFBdjaVtH5RHchhO45OjqSmpJCVlYWFhYWRcvr1K3L+i83sTUkmH179nA/OZnFCz8jdMd2unv2oJubG7du3yp1hb2wsDBatmxFnTp1n2u/Vxs2xMHBgYBPpqPKzeXSxYvk5uYW28bSyopOnTrR1e1t2rZrV2bDjJ5Fr+csCCHE8/jyzwoYDg4O7Ni954XV3B88eCBNm77J2LHjnmu/8ePG0qRJE6ZMnVZOkQkhROXTw9MDX18/fP36a11/IyGBjRvWcy42tthyYxNjWrVuw8BBg2nUuDE2NjbPfe7Zs2ZiZGTEx1OmFi3Ly8sjJeUBqSmpRT/v37/Pgwd/8Mf9+yQlJXEzMVHr8ezt7XFt34Fubm60fustjI2Nnzum0pJkQQjxUklLS0NhZISVtXXRMmVmJrGxsbzyyis0aNiwTM5z+ddfORR+kKZvNsPA0IDZs2Zy5OjxEv8hT0y8ga/PO0RGncTZ2blMYhJCCAE7Q0PZvv0r9u77+qnDRC9dvMjh8HBOx5zi4cOHj62vUcOZ2nXqYGdnh52dHVXs7DAzMyU3V4VKlYtKlYdKlUturoo8VeGyH3/8EQMDA2rUcCY1NYWUlBSUzzFE0sbWlm7d3HizWTPebNaMmrVq/a3PoCxJsiCEeOl9/OEHnI6JwUihYO26dbRs1bpUx0tLS6Nfb6+iMfJHTkQwbOhg6tSty6JFS57ZZCc1NZUxo0fx+htvEBi4vFSxCCGEKC4rKwsPdzc6dupEQMCnz9w+MTGR0aPeQ6NWo85Tl2tsBgYG2Nra4uDoiJOTE45OTjg5OfHqa43YsjWELl26MnLkqHKN4Xnp7ZwFIYQoKzl/jvvUqNVcvXK1WLKQnp7Or79c4rXXGj02VlWj0XAuLg4HR0caNWpUbF12djYALi4uWFhYsPWr7QwaOICAgOnMn78Ac3MLtElOvseE8eOoVas2n3226LmvRalUcuzIEX66cIFPPv202JhcIYQQhZN/d4TuZOAAfwoKYOrUaU9865uYeIPx48fRvkNHrl29yo2EBADeGzkKC0sL7t27R1pqKsnJySTfS8bQ0BAzczNMTEwxMTHGxNQUhZECpTITG1tbNGo1Z8+e5R0fHxwdHbG3d8De3h57h8KftlWqPFbpCArvDT/+8APz539Wrp/N3yFvFoQQL73UlBRCgjdjYGDI+IkTMTU1BQqb4gzo70fC9etYW1sTunsPNR4ZEjRl8mRiTp0EYMOmzbRo2bJo3U8XLhB/+TKdu3TBqWpVAO7fv8/I90Zw69bv9O7dh959+lK9ejXUag0JCQns3rWT6OgorCwtWb9hI61atynxNWRlZbE1JJhdoaHk5OQAcDo2rsxL5AkhxMsiMfEG740YjlL5kH79+tGjZy8cHBzIy1NxJf4Ke/fu4ZtvTuPr68fcefO5c+cO27/aio2NLaPHji36Uq9UKvHx7suDP/7AwdGRr8P+XVSaFWDooIH8cukSCoWC0N17mDBhHD4+fvj11z5nQpt1X6zl4qWLbNu2o8w/h9KSZEEIUWmpVCo6tmuLRqMBYOWatcWaQHp061rUB2HmnLn06dv3mce8mZjI9u3b+Prr/VhZWVGtWjU0Gg2///47hgaGZGdlYWxsTL26dQlatbpEDeVu377N2lWriIw4UbTMyMiIM3HnUOhgspsQQlQUV67Es23bNg7+OwxHRyccHR1QqfK4eTMRRycnBg0cRL93fLCysnriMe7euYNXD8+i33fu2UvDV18FCicvu7ZpXVSydfUX6zhz5hti42L58svNJXqgk56eztChgxk0aDDDhg0v5RWXPUkWhBCVWvCmL9m+bRsNGjRg7br1mD3yh/2rLSHs3LGDxv94nbnz51PFzq7Ex83KyuL8uXOkpKagMFJQrXo1Wrduw6HwgyxasKCo6U43NzcGDBrM62+8UWx/pVJJ7NmzxJw6ycmoKPLy8gCoX78+w98bSZdu3V5Y2TwhhKjoMjIy+Pbb86SnpWFsbIKziwstWrQocZ+cwCVLOHIonOYtWrAs6PNiQ4k+X7GcI4cP89ZbbZkxaxYatZphw4ZiamrKylWrn5owpKenM2b0+zhVdWLD+o2Y/PnmW59IsiCEEC/YubhYZgYEkJ7+v4aT9Rs0oGrVqlhaWpGensaPP/yAWv2/iXZ2dnaMGTeOPt79tI53FUIIoT+UmZkMGzYUlUrF8BEj6NKla7HiFyqViuPHjhESEkyt2rX0NlEASRaEEEIncrKzORQezq7QHdy8eVPrNoaGhjT7Z3M6d+lC7759i42RFUIIod+USiVfbd3Krl2hADRv0QJLC0uUSiXnz5/D2tqaAQMGMnjwEL1NFECSBSGE0KmCggLOnvmGny5cQJmp5OFDJQCt32pL+w4dsLW11XGEQgghSkOtVhMVGUn8lXiyHj7EysqKN5o0oWPHThgaGuo6vGeSZEEIIYQQQgihlf6nM
0IIIYQQQgidkGRBCCGEEEIIoZUkC0IIIYQQQgitJFkQQgghhBBCaCXJghBCCCGEEEIrSRaEEEIIIYQQWkmyIIQQQgghhNBKkgUhhBBCCCGEVpIsCCGEEEIIIbSSZEEIIYQQQgihlSQLQgghhBBCCK0kWRBCCCGEEEJoJcmCEEIIIYQQQitJFoQQQgghhBBaSbIghBBCCCGE0EqSBSGEEEIIIYRWkiwIIYQQQgghtJJkQQghhBBCCKGVJAtCCCGEEEIIrSRZEEIIIYQQQmglyYIQQgghhBBCK0kWhBBCCCGEEFpJsiCEEEIIIYTQSpIFIYQQQgghhFaSLAghhBBCCCG0kmRBCCGEEEIIodX/AWLd1vfKs2fEAAAAAElFTkSuQmCC)", "_____no_output_____" ] ], [ [ "# Geliştirilen model, model kullanmadan da elde edilebilecek başarılı tahmininin üzerinde bir performans göstermelidir.", "_____no_output_____" ], [ "# Bir modelin \"iyileştirilmesi\" (daha başarılı tahmin yapabilmesi) için neler yapılabilir?\n# Problem-1: Öğrenme 'başlamıyor' olabilir. (loss value azalmıyor)\n# Problem-2: Öğrenme başlamış olmasına rağmen, tahmin performansı düşük kalabilir. (Rasgele bir tahmini bile geçemeyebilir!)\n# Problem-3: Overfit aşamasına geçilemeyebilir (Underfit aşaması uzun sürebilir.)", "_____no_output_____" ], [ "from tensorflow import keras\nfrom tensorflow.keras import layers", "_____no_output_____" ], [ "from keras.datasets import mnist\n(train_images, train_labels), _ = mnist.load_data()\n", "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n11493376/11490434 [==============================] - 0s 0us/step\n11501568/11490434 [==============================] - 0s 0us/step\n" ], [ "train_images = train_images.reshape((60000, 28 * 28))\ntrain_images = train_images.astype(\"float32\")/255", "_____no_output_____" ], [ "model = keras.Sequential([\n layers.Dense(512, activation=\"relu\"),\n layers.Dense(10, activation=\"softmax\") \n])", "_____no_output_____" ], [ "model.compile(optimizer=keras.optimizers.RMSprop(1.), # Learning rate = 1. (yeterli olmayan bir öğrenme oranı)\n loss=\"sparse_categorical_crossentropy\",\n metrics=[\"accuracy\"])", "_____no_output_____" ], [ "model.fit(train_images, train_labels, epochs=10, batch_size=128, validation_split=0.20)", "Epoch 1/10\n375/375 [==============================] - 5s 11ms/step - loss: 1044.2358 - accuracy: 0.3697 - val_loss: 2.2206 - val_accuracy: 0.2186\nEpoch 2/10\n375/375 [==============================] - 4s 10ms/step - loss: 3.1049 - accuracy: 0.2192 - val_loss: 4.0936 - val_accuracy: 0.1776\nEpoch 3/10\n375/375 [==============================] - 4s 11ms/step - loss: 3.0352 - accuracy: 0.1980 - val_loss: 2.3684 - val_accuracy: 0.2104\nEpoch 4/10\n375/375 [==============================] - 4s 11ms/step - loss: 2.5307 - accuracy: 0.1900 - val_loss: 2.2047 - val_accuracy: 0.2016\nEpoch 5/10\n375/375 [==============================] - 4s 11ms/step - loss: 3.0833 - accuracy: 0.2071 - val_loss: 2.1490 - val_accuracy: 0.2368\nEpoch 6/10\n375/375 [==============================] - 4s 10ms/step - loss: 2.4417 - accuracy: 0.2181 - val_loss: 2.4385 - val_accuracy: 0.2333\nEpoch 7/10\n375/375 [==============================] - 4s 11ms/step - loss: 2.5927 - accuracy: 0.2367 - val_loss: 2.0063 - val_accuracy: 0.2506\nEpoch 8/10\n375/375 [==============================] - 4s 11ms/step - loss: 2.5220 - accuracy: 0.2581 - val_loss: 3.7151 - val_accuracy: 0.2786\nEpoch 9/10\n375/375 [==============================] - 4s 10ms/step - loss: 2.6994 - accuracy: 0.2180 - val_loss: 2.7880 - val_accuracy: 0.2843\nEpoch 10/10\n375/375 [==============================] - 4s 11ms/step - loss: 2.6781 - accuracy: 0.2452 - val_loss: 2.1869 - val_accuracy: 0.2524\n" ], [ "# Öğrenme başlamadı. 
[ "# Öğrenme başlamadı. Problem: optimizer'ın \"learning_rate\" parametresi doğru seçilmedi; kayıp düşmek yerine salınıyor, doğruluk rastgele tahmin düzeyinde kalıyor.", "_____no_output_____" ],
[ "# Problemin çözümü: Optimizer'ın \"öğrenme oranı\"nı (learning rate) değiştir => 0.01\nmodel = keras.Sequential([\n    layers.Dense(512, activation=\"relu\"),\n    layers.Dense(10, activation=\"softmax\")\n])\n\nmodel.compile(optimizer=keras.optimizers.RMSprop(0.01), # Learning rate = 0.01 (daha uygun bir öğrenme oranı)\n              loss=\"sparse_categorical_crossentropy\",\n              metrics=[\"accuracy\"])\n\nmodel.fit(train_images, train_labels, epochs=10, batch_size=128, validation_split=0.20)", "Epoch 1/10\n375/375 [==============================] - 4s 10ms/step - loss: 0.3590 - accuracy: 0.9103 - val_loss: 0.1680 - val_accuracy: 0.9537\nEpoch 2/10\n375/375 [==============================] - 4s 10ms/step - loss: 0.1390 - accuracy: 0.9635 - val_loss: 0.1560 - val_accuracy: 0.9682\nEpoch 3/10\n375/375 [==============================] - 4s 11ms/step - loss: 0.1161 - accuracy: 0.9715 - val_loss: 0.1631 - val_accuracy: 0.9687\nEpoch 4/10\n375/375 [==============================] - 4s 10ms/step - loss: 0.0961 - accuracy: 0.9789 - val_loss: 0.1734 - val_accuracy: 0.9707\nEpoch 5/10\n375/375 [==============================] - 4s 9ms/step - loss: 0.0833 - accuracy: 0.9811 - val_loss: 0.2345 - val_accuracy: 0.9685\nEpoch 6/10\n375/375 [==============================] - 4s 11ms/step - loss: 0.0734 - accuracy: 0.9843 - val_loss: 0.2279 - val_accuracy: 0.9700\nEpoch 7/10\n375/375 [==============================] - 4s 11ms/step - loss: 0.0701 - accuracy: 0.9862 - val_loss: 0.2713 - val_accuracy: 0.9647\nEpoch 8/10\n375/375 [==============================] - 4s 10ms/step - loss: 0.0681 - accuracy: 0.9872 - val_loss: 0.2449 - val_accuracy: 0.9749\nEpoch 9/10\n375/375 [==============================] - 4s 10ms/step - loss: 0.0610 - accuracy: 0.9885 - val_loss: 0.2949 - val_accuracy: 0.9704\nEpoch 10/10\n375/375 [==============================] - 4s 11ms/step - loss: 0.0591 - accuracy: 0.9895 - val_loss: 0.2719 - val_accuracy: 0.9754\n" ],
[ "# Learning rate 0.01 değeri ile öğrenme başladı. Doğruluk oranı (accuracy) arttı.", "_____no_output_____" ],
[ "# Doğrulama seti doğruluk oranı problemi:\nmodel = keras.Sequential([\n    layers.Dense(10, activation=\"softmax\") # Sadece output layer var (Bu hali ile bir \"Logistic Regression\" modelidir.) 
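# Not: softmax'lı tek bir Dense katmanı, 28*28=784 boyutlu girdiden 10 sınıfa doğrusal bir eşleme + softmax uygular; bu, çok sınıflı lojistik regresyona denktir.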
\n])\n\nmodel.compile(optimizer=\"rmsprop\", \n loss=\"sparse_categorical_crossentropy\",\n metrics=[\"accuracy\"])\n\nmodel.fit(train_images, train_labels, epochs=20, batch_size=128, validation_split=0.20)", "Epoch 1/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.6615 - accuracy: 0.8393 - val_loss: 0.3595 - val_accuracy: 0.9021\nEpoch 2/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.3512 - accuracy: 0.9036 - val_loss: 0.3075 - val_accuracy: 0.9141\nEpoch 3/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.3157 - accuracy: 0.9122 - val_loss: 0.2897 - val_accuracy: 0.9203\nEpoch 4/20\n375/375 [==============================] - 1s 2ms/step - loss: 0.2996 - accuracy: 0.9169 - val_loss: 0.2813 - val_accuracy: 0.9215\nEpoch 5/20\n375/375 [==============================] - 1s 2ms/step - loss: 0.2897 - accuracy: 0.9190 - val_loss: 0.2766 - val_accuracy: 0.9233\nEpoch 6/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.2834 - accuracy: 0.9207 - val_loss: 0.2720 - val_accuracy: 0.9267\nEpoch 7/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.2783 - accuracy: 0.9213 - val_loss: 0.2697 - val_accuracy: 0.9262\nEpoch 8/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.2745 - accuracy: 0.9235 - val_loss: 0.2670 - val_accuracy: 0.9271\nEpoch 9/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.2716 - accuracy: 0.9249 - val_loss: 0.2664 - val_accuracy: 0.9273\nEpoch 10/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.2687 - accuracy: 0.9259 - val_loss: 0.2644 - val_accuracy: 0.9285\nEpoch 11/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.2667 - accuracy: 0.9257 - val_loss: 0.2641 - val_accuracy: 0.9290\nEpoch 12/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.2649 - accuracy: 0.9268 - val_loss: 0.2619 - val_accuracy: 0.9303\nEpoch 13/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.2629 - accuracy: 0.9277 - val_loss: 0.2638 - val_accuracy: 0.9279\nEpoch 14/20\n375/375 [==============================] - 1s 3ms/step - loss: 0.2618 - accuracy: 0.9281 - val_loss: 0.2628 - val_accuracy: 0.9294\nEpoch 15/20\n375/375 [==============================] - 1s 2ms/step - loss: 0.2608 - accuracy: 0.9287 - val_loss: 0.2627 - val_accuracy: 0.9293\nEpoch 16/20\n375/375 [==============================] - 1s 2ms/step - loss: 0.2593 - accuracy: 0.9289 - val_loss: 0.2628 - val_accuracy: 0.9298\nEpoch 17/20\n375/375 [==============================] - 1s 2ms/step - loss: 0.2583 - accuracy: 0.9298 - val_loss: 0.2616 - val_accuracy: 0.9298\nEpoch 18/20\n375/375 [==============================] - 1s 2ms/step - loss: 0.2575 - accuracy: 0.9294 - val_loss: 0.2600 - val_accuracy: 0.9308\nEpoch 19/20\n375/375 [==============================] - 1s 2ms/step - loss: 0.2565 - accuracy: 0.9300 - val_loss: 0.2611 - val_accuracy: 0.9306\nEpoch 20/20\n375/375 [==============================] - 1s 2ms/step - loss: 0.2556 - accuracy: 0.9301 - val_loss: 0.2606 - val_accuracy: 0.9309\n" ], [ "# Doğrulama seti doğruluk oranı problem çözümü:\nmodel = keras.Sequential([\n layers.Dense(96, activation=\"relu\"),\n layers.Dense(96, activation=\"relu\"),\n layers.Dense(10, activation=\"softmax\") \n])\n\nmodel.compile(optimizer=\"rmsprop\", \n loss=\"sparse_categorical_crossentropy\",\n metrics=[\"accuracy\"])\n\nmodel.fit(train_images, train_labels, epochs=50, batch_size=128, validation_split=0.20)", "Epoch 1/50\n375/375 
[==============================] - 3s 6ms/step - loss: 0.3570 - accuracy: 0.8997 - val_loss: 0.1759 - val_accuracy: 0.9497\nEpoch 2/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.1545 - accuracy: 0.9545 - val_loss: 0.1364 - val_accuracy: 0.9608\nEpoch 3/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.1089 - accuracy: 0.9670 - val_loss: 0.1138 - val_accuracy: 0.9659\nEpoch 4/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0843 - accuracy: 0.9744 - val_loss: 0.1011 - val_accuracy: 0.9700\nEpoch 5/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0672 - accuracy: 0.9802 - val_loss: 0.1045 - val_accuracy: 0.9704\nEpoch 6/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0543 - accuracy: 0.9836 - val_loss: 0.0995 - val_accuracy: 0.9704\nEpoch 7/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0456 - accuracy: 0.9863 - val_loss: 0.1054 - val_accuracy: 0.9710\nEpoch 8/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0379 - accuracy: 0.9887 - val_loss: 0.0930 - val_accuracy: 0.9749\nEpoch 9/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0316 - accuracy: 0.9901 - val_loss: 0.0907 - val_accuracy: 0.9746\nEpoch 10/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0267 - accuracy: 0.9920 - val_loss: 0.0996 - val_accuracy: 0.9735\nEpoch 11/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0221 - accuracy: 0.9933 - val_loss: 0.1128 - val_accuracy: 0.9737\nEpoch 12/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0194 - accuracy: 0.9941 - val_loss: 0.1049 - val_accuracy: 0.9771\nEpoch 13/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0155 - accuracy: 0.9951 - val_loss: 0.1083 - val_accuracy: 0.9756\nEpoch 14/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0135 - accuracy: 0.9956 - val_loss: 0.1243 - val_accuracy: 0.9732\nEpoch 15/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0122 - accuracy: 0.9961 - val_loss: 0.1271 - val_accuracy: 0.9755\nEpoch 16/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0100 - accuracy: 0.9965 - val_loss: 0.1254 - val_accuracy: 0.9756\nEpoch 17/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0083 - accuracy: 0.9973 - val_loss: 0.1232 - val_accuracy: 0.9762\nEpoch 18/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0076 - accuracy: 0.9976 - val_loss: 0.1286 - val_accuracy: 0.9772\nEpoch 19/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0064 - accuracy: 0.9980 - val_loss: 0.1345 - val_accuracy: 0.9762\nEpoch 20/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0051 - accuracy: 0.9985 - val_loss: 0.1367 - val_accuracy: 0.9757\nEpoch 21/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0050 - accuracy: 0.9984 - val_loss: 0.1517 - val_accuracy: 0.9760\nEpoch 22/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0042 - accuracy: 0.9985 - val_loss: 0.1654 - val_accuracy: 0.9737\nEpoch 23/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0037 - accuracy: 0.9988 - val_loss: 0.1528 - val_accuracy: 0.9762\nEpoch 24/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0033 - accuracy: 0.9991 - val_loss: 0.1700 - val_accuracy: 0.9734\nEpoch 25/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0027 - accuracy: 0.9992 - 
val_loss: 0.1706 - val_accuracy: 0.9757\nEpoch 26/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0034 - accuracy: 0.9990 - val_loss: 0.1616 - val_accuracy: 0.9768\nEpoch 27/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0020 - accuracy: 0.9994 - val_loss: 0.1825 - val_accuracy: 0.9753\nEpoch 28/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0027 - accuracy: 0.9992 - val_loss: 0.1837 - val_accuracy: 0.9760\nEpoch 29/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0023 - accuracy: 0.9992 - val_loss: 0.1923 - val_accuracy: 0.9755\nEpoch 30/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0015 - accuracy: 0.9995 - val_loss: 0.1973 - val_accuracy: 0.9753\nEpoch 31/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0023 - accuracy: 0.9993 - val_loss: 0.1890 - val_accuracy: 0.9761\nEpoch 32/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0016 - accuracy: 0.9994 - val_loss: 0.1923 - val_accuracy: 0.9772\nEpoch 33/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0017 - accuracy: 0.9995 - val_loss: 0.1926 - val_accuracy: 0.9779\nEpoch 34/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0012 - accuracy: 0.9996 - val_loss: 0.2351 - val_accuracy: 0.9740\nEpoch 35/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0014 - accuracy: 0.9994 - val_loss: 0.2103 - val_accuracy: 0.9762\nEpoch 36/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0012 - accuracy: 0.9996 - val_loss: 0.2221 - val_accuracy: 0.9751\nEpoch 37/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0012 - accuracy: 0.9996 - val_loss: 0.2339 - val_accuracy: 0.9758\nEpoch 38/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0014 - accuracy: 0.9995 - val_loss: 0.2234 - val_accuracy: 0.9751\nEpoch 39/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0013 - accuracy: 0.9996 - val_loss: 0.2342 - val_accuracy: 0.9761\nEpoch 40/50\n375/375 [==============================] - 2s 4ms/step - loss: 9.9403e-04 - accuracy: 0.9997 - val_loss: 0.2329 - val_accuracy: 0.9768\nEpoch 41/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0012 - accuracy: 0.9996 - val_loss: 0.2327 - val_accuracy: 0.9760\nEpoch 42/50\n375/375 [==============================] - 2s 5ms/step - loss: 8.5756e-04 - accuracy: 0.9998 - val_loss: 0.2304 - val_accuracy: 0.9776\nEpoch 43/50\n375/375 [==============================] - 2s 5ms/step - loss: 5.9231e-04 - accuracy: 0.9999 - val_loss: 0.2564 - val_accuracy: 0.9760\nEpoch 44/50\n375/375 [==============================] - 2s 5ms/step - loss: 7.3821e-04 - accuracy: 0.9998 - val_loss: 0.2516 - val_accuracy: 0.9763\nEpoch 45/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0011 - accuracy: 0.9998 - val_loss: 0.2438 - val_accuracy: 0.9762\nEpoch 46/50\n375/375 [==============================] - 2s 5ms/step - loss: 8.1025e-04 - accuracy: 0.9997 - val_loss: 0.2461 - val_accuracy: 0.9771\nEpoch 47/50\n375/375 [==============================] - 2s 5ms/step - loss: 4.7317e-04 - accuracy: 0.9998 - val_loss: 0.2672 - val_accuracy: 0.9754\nEpoch 48/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0012 - accuracy: 0.9996 - val_loss: 0.2584 - val_accuracy: 0.9763\nEpoch 49/50\n375/375 [==============================] - 2s 5ms/step - loss: 9.4651e-04 - accuracy: 0.9997 - val_loss: 0.2520 - val_accuracy: 0.9767\nEpoch 
50/50\n375/375 [==============================] - 2s 5ms/step - loss: 7.1669e-04 - accuracy: 0.9998 - val_loss: 0.2649 - val_accuracy: 0.9768\n" ], [ "# Early stopping\ncallback = keras.callbacks.EarlyStopping(monitor='loss', patience=3)\n\nmodel = keras.Sequential([\n layers.Dense(96, activation=\"relu\"),\n layers.Dense(96, activation=\"relu\"),\n layers.Dense(10, activation=\"softmax\") \n])\n\nmodel.compile(optimizer=\"rmsprop\", \n loss=\"sparse_categorical_crossentropy\",\n metrics=[\"accuracy\"])\n\nhistory = model.fit(train_images, train_labels, epochs=50, batch_size=128, validation_split=0.20, callbacks=[callback])", "Epoch 1/50\n375/375 [==============================] - 4s 8ms/step - loss: 0.3639 - accuracy: 0.8984 - val_loss: 0.1894 - val_accuracy: 0.9454\nEpoch 2/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.1634 - accuracy: 0.9515 - val_loss: 0.1326 - val_accuracy: 0.9604\nEpoch 3/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.1159 - accuracy: 0.9656 - val_loss: 0.1066 - val_accuracy: 0.9689\nEpoch 4/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0888 - accuracy: 0.9730 - val_loss: 0.1024 - val_accuracy: 0.9674\nEpoch 5/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0730 - accuracy: 0.9779 - val_loss: 0.0976 - val_accuracy: 0.9708\nEpoch 6/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0600 - accuracy: 0.9818 - val_loss: 0.0975 - val_accuracy: 0.9715\nEpoch 7/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0501 - accuracy: 0.9844 - val_loss: 0.1004 - val_accuracy: 0.9723\nEpoch 8/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0424 - accuracy: 0.9874 - val_loss: 0.0980 - val_accuracy: 0.9728\nEpoch 9/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0356 - accuracy: 0.9890 - val_loss: 0.1048 - val_accuracy: 0.9728\nEpoch 10/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0298 - accuracy: 0.9905 - val_loss: 0.1028 - val_accuracy: 0.9746\nEpoch 11/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0258 - accuracy: 0.9920 - val_loss: 0.1071 - val_accuracy: 0.9726\nEpoch 12/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0218 - accuracy: 0.9934 - val_loss: 0.1076 - val_accuracy: 0.9763\nEpoch 13/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0188 - accuracy: 0.9943 - val_loss: 0.1089 - val_accuracy: 0.9769\nEpoch 14/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0156 - accuracy: 0.9954 - val_loss: 0.1147 - val_accuracy: 0.9762\nEpoch 15/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0136 - accuracy: 0.9958 - val_loss: 0.1229 - val_accuracy: 0.9743\nEpoch 16/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0112 - accuracy: 0.9965 - val_loss: 0.1203 - val_accuracy: 0.9762\nEpoch 17/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0099 - accuracy: 0.9971 - val_loss: 0.1176 - val_accuracy: 0.9762\nEpoch 18/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0088 - accuracy: 0.9972 - val_loss: 0.1251 - val_accuracy: 0.9776\nEpoch 19/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0069 - accuracy: 0.9981 - val_loss: 0.1327 - val_accuracy: 0.9757\nEpoch 20/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0065 - accuracy: 0.9979 - val_loss: 0.1501 - val_accuracy: 0.9743\nEpoch 21/50\n375/375 
[==============================] - 2s 5ms/step - loss: 0.0059 - accuracy: 0.9981 - val_loss: 0.1531 - val_accuracy: 0.9747\nEpoch 22/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0051 - accuracy: 0.9983 - val_loss: 0.1449 - val_accuracy: 0.9773\nEpoch 23/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0044 - accuracy: 0.9985 - val_loss: 0.1388 - val_accuracy: 0.9769\nEpoch 24/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0045 - accuracy: 0.9985 - val_loss: 0.1628 - val_accuracy: 0.9754\nEpoch 25/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0035 - accuracy: 0.9988 - val_loss: 0.1563 - val_accuracy: 0.9774\nEpoch 26/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0031 - accuracy: 0.9989 - val_loss: 0.1536 - val_accuracy: 0.9772\nEpoch 27/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0027 - accuracy: 0.9991 - val_loss: 0.1672 - val_accuracy: 0.9756\nEpoch 28/50\n375/375 [==============================] - 2s 4ms/step - loss: 0.0026 - accuracy: 0.9991 - val_loss: 0.1717 - val_accuracy: 0.9760\nEpoch 29/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0031 - accuracy: 0.9989 - val_loss: 0.1685 - val_accuracy: 0.9770\nEpoch 30/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0027 - accuracy: 0.9992 - val_loss: 0.1769 - val_accuracy: 0.9780\nEpoch 31/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0014 - accuracy: 0.9996 - val_loss: 0.1780 - val_accuracy: 0.9782\nEpoch 32/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0025 - accuracy: 0.9990 - val_loss: 0.1880 - val_accuracy: 0.9769\nEpoch 33/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0018 - accuracy: 0.9995 - val_loss: 0.1947 - val_accuracy: 0.9779\nEpoch 34/50\n375/375 [==============================] - 2s 5ms/step - loss: 0.0016 - accuracy: 0.9995 - val_loss: 0.2148 - val_accuracy: 0.9754\n" ], [ "len(history.history[\"loss\"]) # optimum model için epochs = 34", "_____no_output_____" ], [ "# IMDB veriseti\nfrom keras.datasets import imdb", "_____no_output_____" ], [ "(train_data, train_labels), _ = imdb.load_data(num_words=10000)", "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz\n17465344/17464789 [==============================] - 0s 0us/step\n17473536/17464789 [==============================] - 0s 0us/step\n" ], [ "import numpy as np\n\ndef vectorize_sequences(sequences, dimension=10000):\n results = np.zeros((len(sequences), dimension))\n for i, sequence in enumerate(sequences):\n results[i, sequence] = 1.\n return results\n\ntrain_data = vectorize_sequences(train_data)", "_____no_output_____" ], [ "model = keras.Sequential([\n layers.Dense(16, activation=\"relu\"),\n layers.Dense(16, activation=\"relu\"),\n layers.Dense(1, activation=\"sigmoid\")\n])\n\nmodel.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\nhistory_1 = model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_split=0.4)", "Epoch 1/20\n30/30 [==============================] - 2s 50ms/step - loss: 0.9787 - accuracy: 0.7869 - val_loss: 0.7801 - val_accuracy: 0.8535\nEpoch 2/20\n30/30 [==============================] - 2s 61ms/step - loss: 0.7275 - accuracy: 0.8689 - val_loss: 0.6918 - val_accuracy: 0.8706\nEpoch 3/20\n30/30 [==============================] - 2s 73ms/step - loss: 0.6619 - accuracy: 0.8765 - val_loss: 0.6512 - val_accuracy: 
0.8635\nEpoch 4/20\n30/30 [==============================] - 2s 59ms/step - loss: 0.6175 - accuracy: 0.8755 - val_loss: 0.6269 - val_accuracy: 0.8541\nEpoch 5/20\n30/30 [==============================] - 1s 37ms/step - loss: 0.5834 - accuracy: 0.8801 - val_loss: 0.5953 - val_accuracy: 0.8584\nEpoch 6/20\n30/30 [==============================] - 1s 43ms/step - loss: 0.5544 - accuracy: 0.8788 - val_loss: 0.5567 - val_accuracy: 0.8723\nEpoch 7/20\n30/30 [==============================] - 1s 44ms/step - loss: 0.5278 - accuracy: 0.8862 - val_loss: 0.5756 - val_accuracy: 0.8433\nEpoch 8/20\n30/30 [==============================] - 1s 36ms/step - loss: 0.5119 - accuracy: 0.8845 - val_loss: 0.5457 - val_accuracy: 0.8552\nEpoch 9/20\n30/30 [==============================] - 1s 40ms/step - loss: 0.4978 - accuracy: 0.8881 - val_loss: 0.5283 - val_accuracy: 0.8586\nEpoch 10/20\n30/30 [==============================] - 1s 44ms/step - loss: 0.4851 - accuracy: 0.8871 - val_loss: 0.5066 - val_accuracy: 0.8664\nEpoch 11/20\n30/30 [==============================] - 1s 44ms/step - loss: 0.4696 - accuracy: 0.8897 - val_loss: 0.6070 - val_accuracy: 0.7973\nEpoch 12/20\n30/30 [==============================] - 1s 43ms/step - loss: 0.4661 - accuracy: 0.8855 - val_loss: 0.5256 - val_accuracy: 0.8449\nEpoch 13/20\n30/30 [==============================] - 1s 43ms/step - loss: 0.4538 - accuracy: 0.8954 - val_loss: 0.4754 - val_accuracy: 0.8764\nEpoch 14/20\n30/30 [==============================] - 1s 43ms/step - loss: 0.4513 - accuracy: 0.8919 - val_loss: 0.4800 - val_accuracy: 0.8696\nEpoch 15/20\n30/30 [==============================] - 1s 44ms/step - loss: 0.4461 - accuracy: 0.8947 - val_loss: 0.4776 - val_accuracy: 0.8694\nEpoch 16/20\n30/30 [==============================] - 1s 44ms/step - loss: 0.4419 - accuracy: 0.8936 - val_loss: 0.4692 - val_accuracy: 0.8737\nEpoch 17/20\n30/30 [==============================] - 1s 44ms/step - loss: 0.4373 - accuracy: 0.8961 - val_loss: 0.4776 - val_accuracy: 0.8657\nEpoch 18/20\n30/30 [==============================] - 1s 36ms/step - loss: 0.4363 - accuracy: 0.8931 - val_loss: 0.4591 - val_accuracy: 0.8787\nEpoch 19/20\n30/30 [==============================] - 1s 34ms/step - loss: 0.4325 - accuracy: 0.8960 - val_loss: 0.4560 - val_accuracy: 0.8783\nEpoch 20/20\n30/30 [==============================] - 1s 43ms/step - loss: 0.4260 - accuracy: 0.8991 - val_loss: 0.5567 - val_accuracy: 0.8207\n" ], [ "# DAHA KÜÇÜK MODEL\nmodel = keras.Sequential([\n layers.Dense(4, activation=\"relu\"),\n layers.Dense(4, activation=\"relu\"),\n layers.Dense(1, activation=\"sigmoid\")\n])\n\nmodel.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\nhistory_2 = model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_split=0.4)", "Epoch 1/20\n30/30 [==============================] - 3s 68ms/step - loss: 0.6007 - accuracy: 0.7338 - val_loss: 0.5037 - val_accuracy: 0.8325\nEpoch 2/20\n30/30 [==============================] - 1s 38ms/step - loss: 0.4369 - accuracy: 0.8855 - val_loss: 0.4154 - val_accuracy: 0.8799\nEpoch 3/20\n30/30 [==============================] - 1s 32ms/step - loss: 0.3438 - accuracy: 0.9084 - val_loss: 0.3463 - val_accuracy: 0.8807\nEpoch 4/20\n30/30 [==============================] - 1s 31ms/step - loss: 0.2731 - accuracy: 0.9229 - val_loss: 0.3078 - val_accuracy: 0.8902\nEpoch 5/20\n30/30 [==============================] - 1s 31ms/step - loss: 0.2255 - accuracy: 0.9349 - val_loss: 0.2870 - val_accuracy: 0.8903\nEpoch 
6/20\n30/30 [==============================] - 1s 30ms/step - loss: 0.1923 - accuracy: 0.9456 - val_loss: 0.2783 - val_accuracy: 0.8921\nEpoch 7/20\n30/30 [==============================] - 1s 31ms/step - loss: 0.1684 - accuracy: 0.9541 - val_loss: 0.2838 - val_accuracy: 0.8861\nEpoch 8/20\n30/30 [==============================] - 1s 40ms/step - loss: 0.1479 - accuracy: 0.9602 - val_loss: 0.2758 - val_accuracy: 0.8901\nEpoch 9/20\n30/30 [==============================] - 1s 41ms/step - loss: 0.1316 - accuracy: 0.9633 - val_loss: 0.2868 - val_accuracy: 0.8860\nEpoch 10/20\n30/30 [==============================] - 1s 38ms/step - loss: 0.1167 - accuracy: 0.9691 - val_loss: 0.2848 - val_accuracy: 0.8875\nEpoch 11/20\n30/30 [==============================] - 1s 37ms/step - loss: 0.1047 - accuracy: 0.9718 - val_loss: 0.2933 - val_accuracy: 0.8862\nEpoch 12/20\n30/30 [==============================] - 1s 41ms/step - loss: 0.0934 - accuracy: 0.9760 - val_loss: 0.3035 - val_accuracy: 0.8847\nEpoch 13/20\n30/30 [==============================] - 1s 42ms/step - loss: 0.0841 - accuracy: 0.9783 - val_loss: 0.3157 - val_accuracy: 0.8825\nEpoch 14/20\n30/30 [==============================] - 1s 41ms/step - loss: 0.0751 - accuracy: 0.9821 - val_loss: 0.3283 - val_accuracy: 0.8805\nEpoch 15/20\n30/30 [==============================] - 1s 41ms/step - loss: 0.0670 - accuracy: 0.9848 - val_loss: 0.3608 - val_accuracy: 0.8764\nEpoch 16/20\n30/30 [==============================] - 1s 37ms/step - loss: 0.0601 - accuracy: 0.9874 - val_loss: 0.3547 - val_accuracy: 0.8810\nEpoch 17/20\n30/30 [==============================] - 1s 35ms/step - loss: 0.0539 - accuracy: 0.9884 - val_loss: 0.3714 - val_accuracy: 0.8783\nEpoch 18/20\n30/30 [==============================] - 1s 42ms/step - loss: 0.0483 - accuracy: 0.9905 - val_loss: 0.3853 - val_accuracy: 0.8776\nEpoch 19/20\n30/30 [==============================] - 1s 41ms/step - loss: 0.0427 - accuracy: 0.9919 - val_loss: 0.4009 - val_accuracy: 0.8764\nEpoch 20/20\n30/30 [==============================] - 1s 40ms/step - loss: 0.0386 - accuracy: 0.9925 - val_loss: 0.4237 - val_accuracy: 0.8732\n" ], [ "# DAHA BÜYÜK MODEL\nmodel = keras.Sequential([\n layers.Dense(256, activation=\"relu\"),\n layers.Dense(256, activation=\"relu\"),\n layers.Dense(1, activation=\"sigmoid\")\n])\n\nmodel.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\nhistory_3 = model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_split=0.4)", "Epoch 1/20\n30/30 [==============================] - 6s 167ms/step - loss: 0.5125 - accuracy: 0.7546 - val_loss: 0.3363 - val_accuracy: 0.8650\nEpoch 2/20\n30/30 [==============================] - 5s 158ms/step - loss: 0.2648 - accuracy: 0.8985 - val_loss: 0.2683 - val_accuracy: 0.8927\nEpoch 3/20\n30/30 [==============================] - 4s 149ms/step - loss: 0.1640 - accuracy: 0.9397 - val_loss: 0.2913 - val_accuracy: 0.8901\nEpoch 4/20\n30/30 [==============================] - 4s 147ms/step - loss: 0.1089 - accuracy: 0.9631 - val_loss: 0.3541 - val_accuracy: 0.8832\nEpoch 5/20\n30/30 [==============================] - 5s 152ms/step - loss: 0.1106 - accuracy: 0.9734 - val_loss: 0.3386 - val_accuracy: 0.8874\nEpoch 6/20\n30/30 [==============================] - 5s 154ms/step - loss: 0.0121 - accuracy: 0.9978 - val_loss: 0.4641 - val_accuracy: 0.8852\nEpoch 7/20\n30/30 [==============================] - 6s 190ms/step - loss: 0.1057 - accuracy: 0.9792 - val_loss: 0.4905 - val_accuracy: 0.8684\nEpoch 
8/20\n30/30 [==============================] - 5s 166ms/step - loss: 0.0042 - accuracy: 0.9997 - val_loss: 0.5192 - val_accuracy: 0.8844\nEpoch 9/20\n30/30 [==============================] - 4s 145ms/step - loss: 0.0011 - accuracy: 0.9999 - val_loss: 0.6224 - val_accuracy: 0.8858\nEpoch 10/20\n30/30 [==============================] - 4s 149ms/step - loss: 3.0210e-04 - accuracy: 1.0000 - val_loss: 0.7349 - val_accuracy: 0.8842\nEpoch 11/20\n30/30 [==============================] - 4s 147ms/step - loss: 7.0991e-05 - accuracy: 1.0000 - val_loss: 0.8567 - val_accuracy: 0.8813\nEpoch 12/20\n30/30 [==============================] - 4s 148ms/step - loss: 1.6020e-05 - accuracy: 1.0000 - val_loss: 0.9707 - val_accuracy: 0.8813\nEpoch 13/20\n30/30 [==============================] - 4s 146ms/step - loss: 3.4263e-06 - accuracy: 1.0000 - val_loss: 1.2073 - val_accuracy: 0.8692\nEpoch 14/20\n30/30 [==============================] - 4s 149ms/step - loss: 0.2336 - accuracy: 0.9795 - val_loss: 0.8614 - val_accuracy: 0.8770\nEpoch 15/20\n30/30 [==============================] - 5s 155ms/step - loss: 5.5212e-05 - accuracy: 1.0000 - val_loss: 0.8664 - val_accuracy: 0.8787\nEpoch 16/20\n30/30 [==============================] - 4s 145ms/step - loss: 2.1771e-05 - accuracy: 1.0000 - val_loss: 0.8868 - val_accuracy: 0.8793\nEpoch 17/20\n30/30 [==============================] - 5s 156ms/step - loss: 9.2390e-06 - accuracy: 1.0000 - val_loss: 0.9198 - val_accuracy: 0.8798\nEpoch 18/20\n30/30 [==============================] - 4s 145ms/step - loss: 4.3320e-06 - accuracy: 1.0000 - val_loss: 0.9720 - val_accuracy: 0.8811\nEpoch 19/20\n30/30 [==============================] - 4s 146ms/step - loss: 1.9279e-06 - accuracy: 1.0000 - val_loss: 1.0331 - val_accuracy: 0.8816\nEpoch 20/20\n30/30 [==============================] - 4s 148ms/step - loss: 7.9249e-07 - accuracy: 1.0000 - val_loss: 1.1243 - val_accuracy: 0.8821\n" ],
[ "val_loss_1 = history_1.history[\"val_loss\"]\nval_loss_2 = history_2.history[\"val_loss\"]\nval_loss_3 = history_3.history[\"val_loss\"]", "_____no_output_____" ],
[ "import matplotlib.pyplot as plt", "_____no_output_____" ],
[ "epochs = range(1, len(val_loss_1) + 1)\n\nplt.plot(epochs,val_loss_1, \"b\", label=\"Normal Model val_loss\")\nplt.plot(epochs,val_loss_2, \"b-.\", label=\"Küçük Model val_loss\")\nplt.plot(epochs,val_loss_3, \"b.\", label=\"Büyük Model val_loss\")\nplt.title(\"Model büyüklüğü ve Doğrulama Kaybı\")\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Kayıp (loss)\")\nplt.legend()\nplt.show()", "_____no_output_____" ],
[ "# REGÜLASYON\nfrom tensorflow.keras import regularizers", "_____no_output_____" ],
[ "model = keras.Sequential([\n    layers.Dense(16, kernel_regularizer=regularizers.l2(0.02), activation=\"relu\"),\n    layers.Dense(16, kernel_regularizer=regularizers.l2(0.02), activation=\"relu\"),\n    layers.Dense(1, activation=\"sigmoid\")\n])\n\nmodel.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\nreg_L2 = model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_split=0.4)", "_____no_output_____" ],
[ "val_loss_1 = history_1.history[\"val_loss\"]\nval_loss_regL2 = reg_L2.history[\"val_loss\"]", "_____no_output_____" ],
[ "epochs = range(1, len(val_loss_1) + 1)\n\nplt.plot(epochs,val_loss_1, \"b.\", label=\"Normal Model val_loss\")\nplt.plot(epochs,val_loss_regL2, \"b-\", label=\"Reg_L2 val_loss\")\nplt.title(\"L2 Regülasyonu ve Doğrulama Kaybı\")\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Kayıp (loss)\")\nplt.legend()\nplt.show()", 
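"_____no_output_____" ], [ "# Ek bir taslak (orijinal defterde yoktur): L2 yerine L1 veya birleşik L1+L2 regülasyonu da aynı kalıpla denenebilir.\n# regularizers.l1_l2 gerçek Keras API'sidir; 0.001 ve 0.02 katsayıları yalnızca örnek amaçlı varsayımlardır.\nmodel_l1l2 = keras.Sequential([\n    layers.Dense(16, kernel_regularizer=regularizers.l1_l2(l1=0.001, l2=0.02), activation=\"relu\"),\n    layers.Dense(16, kernel_regularizer=regularizers.l1_l2(l1=0.001, l2=0.02), activation=\"relu\"),\n    layers.Dense(1, activation=\"sigmoid\")\n])\nmodel_l1l2.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])",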
"_____no_output_____" ], [ "# DROPOUT\nmodel = keras.Sequential([\n layers.Dense(16, activation=\"relu\"),\n layers.Dropout(0.5),\n layers.Dense(16, activation=\"relu\"),\n layers.Dropout(0.5),\n layers.Dense(1, activation=\"sigmoid\")\n])\n\nmodel.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\nhist_dropout = model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_split=0.4)", "Epoch 1/20\n30/30 [==============================] - 3s 80ms/step - loss: 0.6181 - accuracy: 0.6556 - val_loss: 0.5002 - val_accuracy: 0.8438\nEpoch 2/20\n30/30 [==============================] - 2s 64ms/step - loss: 0.4978 - accuracy: 0.7817 - val_loss: 0.3970 - val_accuracy: 0.8623\nEpoch 3/20\n30/30 [==============================] - 1s 44ms/step - loss: 0.4124 - accuracy: 0.8381 - val_loss: 0.3388 - val_accuracy: 0.8731\nEpoch 4/20\n30/30 [==============================] - 1s 45ms/step - loss: 0.3507 - accuracy: 0.8735 - val_loss: 0.3128 - val_accuracy: 0.8777\nEpoch 5/20\n30/30 [==============================] - 1s 45ms/step - loss: 0.3021 - accuracy: 0.8961 - val_loss: 0.2917 - val_accuracy: 0.8847\nEpoch 6/20\n30/30 [==============================] - 1s 40ms/step - loss: 0.2608 - accuracy: 0.9118 - val_loss: 0.2718 - val_accuracy: 0.8922\nEpoch 7/20\n30/30 [==============================] - 1s 41ms/step - loss: 0.2295 - accuracy: 0.9257 - val_loss: 0.2740 - val_accuracy: 0.8936\nEpoch 8/20\n30/30 [==============================] - 1s 46ms/step - loss: 0.1997 - accuracy: 0.9384 - val_loss: 0.2960 - val_accuracy: 0.8886\nEpoch 9/20\n30/30 [==============================] - 1s 45ms/step - loss: 0.1799 - accuracy: 0.9441 - val_loss: 0.2990 - val_accuracy: 0.8920\nEpoch 10/20\n30/30 [==============================] - 1s 44ms/step - loss: 0.1602 - accuracy: 0.9495 - val_loss: 0.3137 - val_accuracy: 0.8921\nEpoch 11/20\n30/30 [==============================] - 1s 44ms/step - loss: 0.1420 - accuracy: 0.9556 - val_loss: 0.3378 - val_accuracy: 0.8926\nEpoch 12/20\n30/30 [==============================] - 1s 35ms/step - loss: 0.1360 - accuracy: 0.9593 - val_loss: 0.3559 - val_accuracy: 0.8918\nEpoch 13/20\n30/30 [==============================] - 1s 38ms/step - loss: 0.1160 - accuracy: 0.9634 - val_loss: 0.3939 - val_accuracy: 0.8906\nEpoch 14/20\n30/30 [==============================] - 1s 45ms/step - loss: 0.1073 - accuracy: 0.9676 - val_loss: 0.4079 - val_accuracy: 0.8896\nEpoch 15/20\n30/30 [==============================] - 1s 42ms/step - loss: 0.1009 - accuracy: 0.9696 - val_loss: 0.4544 - val_accuracy: 0.8880\nEpoch 16/20\n30/30 [==============================] - 1s 35ms/step - loss: 0.0948 - accuracy: 0.9717 - val_loss: 0.4746 - val_accuracy: 0.8880\nEpoch 17/20\n30/30 [==============================] - 1s 35ms/step - loss: 0.0903 - accuracy: 0.9731 - val_loss: 0.5212 - val_accuracy: 0.8812\nEpoch 18/20\n30/30 [==============================] - 1s 43ms/step - loss: 0.0787 - accuracy: 0.9756 - val_loss: 0.5118 - val_accuracy: 0.8887\nEpoch 19/20\n30/30 [==============================] - 1s 35ms/step - loss: 0.0781 - accuracy: 0.9760 - val_loss: 0.5225 - val_accuracy: 0.8870\nEpoch 20/20\n30/30 [==============================] - 1s 34ms/step - loss: 0.0761 - accuracy: 0.9779 - val_loss: 0.5507 - val_accuracy: 0.8853\n" ], [ "val_loss_1 = history_1.history[\"val_loss\"]\nval_loss_droput = hist_dropout.history[\"val_loss\"]\n\nepochs = range(1, len(val_loss_1) + 1)\nplt.plot(epochs,val_loss_1, \"b-.\", label=\"Normal Model 
val_loss\")\nplt.plot(epochs,val_loss_droput, \"b-\", label=\"Dropout ile val_loss\")\nplt.title(\"Model büyüklüğü ve Doğrulama Kaybı\")\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Kayıp (loss)\")\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "# MAKİNE ÖĞRENMESİNİN GENEL İŞ AKIŞI\n# ! veri seti ile değil, problem tanımı ile başlanmalıdır.\n# 1. Yapılacak işi tanımla (model ne tür bir iş yapacak?)\n# 2. Verisetinin toplanması (Modelin eğitim ve testi için)\n# 3. Verinin anlaşılması (kullanılacak verinin özellikleri, yeterince temsil gücünün olup olmadığı, vb)\n# 4. Başarı ölçünütün belirlenmesi (Model başarısını nasıl ölçeceğiz?)\n# 5. Verinin hazırlanması (Normalizasyon, vektörizasyon, eksik verilerin nasıl işleneceği (atılması veya yerlerine uygun değer konulması gibi))\n# 6. Bir değerlendirme protokolünün seçilmesi (doğrulama seti veya K-fold cross-validation?)\n# 7. Model başarısı için minimum bir kriter belirlenmeli (Model hangi başarılı tahmin değerini geçerse başarılı kabul edilecek)\n# 8. Overfit'e ulaşan bir model geliştirilmeli (Overfit'e ulaşmayan bir model, hala underfit aşamasında olabilir. Optimum model, overfit'in başladığı noktadır.)\n# ! Model overfit için (a) ek katman eklenebilir, (b) katmanlar büyütülebilir (node sayıları arttırılabilir), (c) Daha büyük epochs değerleri ile model fit edilebilir.\n# 9. Regulasyon ile modelin tune edilmesi\n# 10. Modelin test edilmesi\n# 11. Modelin deploy edilmesi (kullanıma açılması)\n# 12. Deploy edilen modelin takip edilmesi\n# 13. Modelin bakımı (modelin geliştirildiği eğitim veri ve test setleri güncelliğini yitirmiş olabilir.)", "_____no_output_____" ], [ "# KERAS MODELİ OLUŞTURMAK İÇİN FARKLI BİR YÖNTEMLER", "_____no_output_____" ], [ "# Seçenek-1:\nmodel1 = keras.Sequential([\n layers.Dense(64, activation=\"relu\"),\n layers.Dense(32, activation=\"relu\"),\n layers.Dense(10, activation=\"softmax\")\n])", "_____no_output_____" ], [ "# Seçenek-2 (katman eklemeli - incremental)\nmodel2 = keras.Sequential()\nmodel2.add(layers.Dense(64,activation=\"relu\"))\nmodel2.add(layers.Dense(32,activation=\"relu\"))\nmodel2.add(layers.Dense(10,activation=\"softmax\"))", "_____no_output_____" ], [ "# Yukarıdaki model1 ve model2 tamamen aynıdır.\nmodel1.build(input_shape=(None,3))\nmodel2.build(input_shape=(None,3))", "_____no_output_____" ], [ "model1.summary() # bir modelin özel bilgisi", "Model: \"sequential_12\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n dense_35 (Dense) (None, 64) 256 \n \n dense_36 (Dense) (None, 32) 2080 \n \n dense_37 (Dense) (None, 10) 330 \n \n=================================================================\nTotal params: 2,666\nTrainable params: 2,666\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model2.summary()", "Model: \"sequential_14\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n dense_41 (Dense) (None, 64) 256 \n \n dense_42 (Dense) (None, 32) 2080 \n \n dense_43 (Dense) (None, 10) 330 \n \n=================================================================\nTotal params: 2,666\nTrainable params: 2,666\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model1.weights # bir modelin ağırlık parametrelerini listelemek için", "_____no_output_____" ], [ "# 
TENSORBOARD\n# ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb637058b1f74b5b4fe01427e31103b49b3e5398
4,440
ipynb
Jupyter Notebook
Day 3 Assignment 1 & 2 .ipynb
CresiDaniel/LetsUpgrade-Python-B7
6f17a7894613359123387d7d80d19e42425a4e15
[ "Apache-2.0" ]
null
null
null
Day 3 Assignment 1 & 2 .ipynb
CresiDaniel/LetsUpgrade-Python-B7
6f17a7894613359123387d7d80d19e42425a4e15
[ "Apache-2.0" ]
null
null
null
Day 3 Assignment 1 & 2 .ipynb
CresiDaniel/LetsUpgrade-Python-B7
6f17a7894613359123387d7d80d19e42425a4e15
[ "Apache-2.0" ]
null
null
null
17.276265
42
0.385586
[ [ [ "## Assignment 1", "_____no_output_____" ] ], [ [ "#Checking Altittude Preogram", "_____no_output_____" ] ], [ [ "num = input(\"Enter Altittude-\")\nnum = int(num)\nif num <= 1000:\n print(\"Safe to Land Plane\")\nelif num > 1001 and num <= 5000:\n print(\"Bring down to 1000\")\nelif num > 5000:\n print(\"Turn Around\")", "Enter Altittude-1000\nSafe to Land Plane\n" ], [ "num = input(\"Enter Altittude-\")\nnum = int(num)\nif num <= 1000:\n print(\"Safe to Land Plane\")\nelif num > 1001 and num <= 5000:\n print(\"Bring down to 1000\")\nelif num > 5000:\n print(\"Turn Around\")", "Enter Altittude-2000\nBring down to 1000\n" ], [ "num = input(\"Enter Altittude-\")\nnum = int(num)\nif num <= 1000:\n print(\"Safe to Land Plane\")\nelif num > 1001 and num <= 5000:\n print(\"Bring down to 1000\")\nelif num > 5000:\n print(\"Turn Around\")", "Enter Altittude-6000\nTurn Around\n" ] ], [ [ "# Assignment 2", "_____no_output_____" ] ], [ [ "#Prime Numbers Program", "_____no_output_____" ] ], [ [ "for num in range(1, 201):\n for i in range(2, num):\n if num % i == 0:\n break\n else:\n print(num)\n break", "3\n5\n7\n9\n11\n13\n15\n17\n19\n21\n23\n25\n27\n29\n31\n33\n35\n37\n39\n41\n43\n45\n47\n49\n51\n53\n55\n57\n59\n61\n63\n65\n67\n69\n71\n73\n75\n77\n79\n81\n83\n85\n87\n89\n91\n93\n95\n97\n99\n101\n103\n105\n107\n109\n111\n113\n115\n117\n119\n121\n123\n125\n127\n129\n131\n133\n135\n137\n139\n141\n143\n145\n147\n149\n151\n153\n155\n157\n159\n161\n163\n165\n167\n169\n171\n173\n175\n177\n179\n181\n183\n185\n187\n189\n191\n193\n195\n197\n199\n" ] ] ]
[ "markdown", "raw", "code", "markdown", "raw", "code" ]
[ [ "markdown" ], [ "raw" ], [ "code", "code", "code" ], [ "markdown" ], [ "raw" ], [ "code" ] ]
cb6376201fe7f936cb7db8693d21cae5402fbeed
24,308
ipynb
Jupyter Notebook
src/1_create_scenarios/create-scenarios.ipynb
akxen/wholesale-price-targeting
dd47ddbbc69fd09f17ebd09396299faeb5c90c31
[ "MIT" ]
null
null
null
src/1_create_scenarios/create-scenarios.ipynb
akxen/wholesale-price-targeting
dd47ddbbc69fd09f17ebd09396299faeb5c90c31
[ "MIT" ]
null
null
null
src/1_create_scenarios/create-scenarios.ipynb
akxen/wholesale-price-targeting
dd47ddbbc69fd09f17ebd09396299faeb5c90c31
[ "MIT" ]
null
null
null
42.720562
444
0.59935
[ [ [ "# Scenario Construction\nDemand and dispatch data are obtained from the Australian Energy Market Operator's (AEMO's) Market Management System Database Model (MMSDM) [1], and a k-means clustering algorithm is implemented using the method outlined in [2] to create a reduced set of representative operating scenarios. The dataset described in [3,4] is used to identify specific generators, and assign historic power injections or withdrawals to individual nodes.\n\nIn this analysis data for 2017 is considered, with demand and dispatch time series re-sampled to 30min intervals, corresponding to the length of a trading interval within Australia's National Electricity Market (NEM). Using these data a reduced set of 48 operating conditions are constructed. These operating scenarios are comprised on demand, intermittent renewable injections, and fixed power injections from hydro generators.\n\n## Import packages", "_____no_output_____" ] ], [ [ "import os\nimport math\nimport pickle\nimport random\nimport zipfile\nfrom io import BytesIO\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Initialise random number generator\nrandom.seed(1)\n\n# Used to slice pandas DataFrames\nidx = pd.IndexSlice", "_____no_output_____" ] ], [ [ "## Paths to files", "_____no_output_____" ] ], [ [ "# Contains network information and generator parameters\ndata_dir = os.path.join(os.path.curdir, os.path.pardir, os.path.pardir, 'data')\n\n# MMSDM historic demand and dispatch signals\narchive_dir = r'D:\\nemweb\\Reports\\Data_Archive\\MMSDM\\zipped'\n\n# Location for output files\noutput_dir = os.path.join(os.path.curdir, 'output')", "_____no_output_____" ] ], [ [ "## Import generator and network information", "_____no_output_____" ] ], [ [ "# Generator information\ndf_g = pd.read_csv(os.path.join(data_dir, 'generators.csv'), index_col='DUID', dtype={'NODE': int})\n\n# Network node information\ndf_n = pd.read_csv(os.path.join(data_dir, 'network_nodes.csv'), index_col='NODE_ID')", "_____no_output_____" ] ], [ [ "## Extract data\nFunctions used to extract data from MMSDM tables.", "_____no_output_____" ] ], [ [ "def dispatch_unit_scada(file):\n \"\"\"Extract generator dispatch data\n \n Params\n ------\n file : bytes IO object\n Zipped CSV file of given MMSDM table\n \n Returns\n -------\n df : pandas DataFrame\n MMSDM table in formatted pandas DataFrame\n \"\"\"\n \n # Columns to extract \n cols = ['DUID', 'SCADAVALUE', 'SETTLEMENTDATE']\n\n # Read in data\n df = pd.read_csv(file, usecols=cols, parse_dates=['SETTLEMENTDATE'], skiprows=1)\n\n # Drop rows without DUIDs, apply pivot\n df = df.dropna(subset=['DUID']).pivot(index='SETTLEMENTDATE', columns='DUID', values='SCADAVALUE')\n \n return df\n\n\ndef tradingregionsum(file):\n \"\"\"Extract half-hourly load data for each NEM region\n \n Params\n ------\n file : bytes IO object\n Zipped CSV file of given MMSDM table\n \n Returns\n -------\n df : pandas DataFrame\n MMSDM table in formatted pandas DataFrame\n \"\"\"\n\n # Columns to extract \n cols = ['REGIONID', 'TOTALDEMAND', 'SETTLEMENTDATE']\n\n # Read in data\n df = pd.read_csv(file, usecols=cols, parse_dates=['SETTLEMENTDATE'], skiprows=1)\n\n # Drop rows without DUIDs, apply pivot\n df = df.dropna(subset=['REGIONID']).pivot(index='SETTLEMENTDATE', columns='REGIONID', values='TOTALDEMAND')\n\n return df \n\n\ndef get_data(archive_path, table_name, extractor_function):\n \"\"\"Open CSV archive and extract data from zipped file\n \n Parameters\n ----------\n archive_path : str\n Path to 
MMSDM archive containing data for a given year\n \n table_name : str\n Name of table in MMSDM archive from which data is to be extracted\n \n extractor_function : func\n Function that takes a bytes object of the unzipped table and returns a formatted DataFrame\n \n Returns\n -------\n df : pandas DataFrame\n Formatted DataFrame of the desired MMSDM table \n \"\"\"\n\n # Open MMSDM archive for a given year\n with zipfile.ZipFile(archive_path) as myzip:\n \n # All files of a particular type in archive (e.g. dispatch, quantity bids, price bands, load)\n zip_names = [f for f in myzip.filelist if (table_name in f.filename) and ('.zip' in f.filename)]\n\n # Check that only one zip file is returned, else raise exception\n if len(zip_names) != 1:\n raise Exception('Encountered {0} files in archive, should only encounter 1'.format(len(zip_names)))\n\n # Get name of csv in zipped folder\n csv_name = zip_names[0].filename.replace('.zip', '.CSV').split('/')[-1]\n\n # Convert zip files to BytesIO object\n zip_data = BytesIO(myzip.read(zip_names[0]))\n\n # Open inner zipfile and extract data using supplied function\n with zipfile.ZipFile(zip_data) as z:\n with z.open(csv_name) as f:\n df = extractor_function(f) \n return df\n\n# Historic demand and dispatch data\ndemand = []\ndispatch = []\n\nfor i in range(1, 13):\n # Archive name and path from which data will be extracted\n archive_name = 'MMSDM_2017_{0:02}.zip'.format(i)\n archive_path = os.path.join(archive_dir, archive_name)\n\n # Extract data\n dispatch.append(get_data(archive_path, 'DISPATCH_UNIT_SCADA', dispatch_unit_scada))\n demand.append(get_data(archive_path, 'TRADINGREGIONSUM', tradingregionsum))\n\n# Concatenate data from individual months into single DataFrames for load and dispatch\ndf_demand = pd.concat(demand, sort=True) # Demand\ndf_dispatch = pd.concat(dispatch, sort=True) # Dispatch\n\n# Fill missing values\ndf_demand = df_demand.fillna(0)\n \n# Resample to get average power output over 30min trading intervals (instead of 5min dispatch intervals)\ndf_dispatch = df_dispatch.resample('30min', label='right', closed='right').mean()", "_____no_output_____" ] ], [ [ "Re-index and format data:\n\n1. identify intermittent generators (wind and solar);\n2. identify hydro generators;\n3. compute nodal demand;\n4. 
concatenate demand, hydro dispatch, and intermittent renewables dispatch into a single DataFrame.", "_____no_output_____" ] ], [ [ "# Intermittent generators\nmask_intermittent = df_g['FUEL_CAT'].isin(['Wind', 'Solar'])\ndf_g[mask_intermittent]\n\n# Intermittent dispatch at each node\ndf_intermittent = (df_dispatch\n .T\n .join(df_g.loc[mask_intermittent, 'NODE'], how='left')\n .groupby('NODE').sum()\n .reindex(df_n.index, fill_value=0))\ndf_intermittent['level'] = 'intermittent'\n\n# Hydro generators\nmask_hydro = df_g['FUEL_CAT'].isin(['Hydro'])\ndf_g[mask_hydro]\n\n# Hydro dispatch at each node\ndf_hydro = (df_dispatch\n .T\n .join(df_g.loc[mask_hydro, 'NODE'], how='left')\n .groupby('NODE').sum()\n .reindex(df_n.index, fill_value=0))\ndf_hydro['level'] = 'hydro'\n\n# Demand at each node\ndef node_demand(row):\n return df_demand[row['NEM_REGION']] * row['PROP_REG_D']\ndf_node_demand = df_n.apply(node_demand, axis=1)\ndf_node_demand['level'] = 'demand'\n\n# Concatenate intermittent, hydro, and demand series, add level to index\ndf_o = (pd.concat([df_node_demand, df_intermittent, df_hydro])\n .set_index('level', append=True)\n .reorder_levels(['level', 'NODE_ID']))", "_____no_output_____" ] ], [ [ "## K-nearest neighbours\nConstruct clustering algorithm to transform the set of trading intervals into a reduced set of representative operating scenarios.", "_____no_output_____" ] ], [ [ "def create_scenarios(df, k=1, max_iterations=100, stopping_tolerance=0):\n \"\"\"Create representative demand and fixed power injection operating scenarios\n\n Parameters\n ----------\n df : pandas DataFrame\n Input DataFrame from which representative operating conditions \n should be constructed\n\n k : int\n Number of clusters\n\n max_iterations : int\n Max number of iterations used to find centroid\n\n stopping_tolerance : float\n Max total difference between successive centroid iteration DataFrames\n\n Returns\n -------\n df_clustered : pandas DataFrame\n Operating scenario centroids and their associated duration.\n\n centroid_history : dict\n Dictionary where keys are the iteration number and values are a DataFrame describing\n the allocation of operating conditions to centroids, and the distance between these\n values. \n \"\"\"\n\n # Random time periods used to initialise centroids\n random_periods = random.sample(list(df.columns), k)\n df_centroids = df[random_periods]\n\n # Rename centroid DataFrame columns and keep track of initial labels\n timestamp_map = {timestamp: timestamp_key + 1 for timestamp_key, timestamp in enumerate(df_centroids.columns)}\n df_centroids = df_centroids.rename(columns=timestamp_map)\n\n def compute_distance(col):\n \"\"\"Compute distance between each data associated with each trading interval, col, and all centroids.\n Return closest centroid.\n\n Params\n ------\n col : pandas Series\n Operating condition for trading interval\n\n Returns\n -------\n closest_centroid_ID : pandas Series\n Series with ID of closest centroid and the distance to that centroid\n \"\"\"\n\n # Initialise minimum distance between data constituting a trading interval and all centroids to an \n # arbitrarily large number\n min_distance = 9e9\n \n # Initially no centroid is defined as being closest to the given trading interval\n closest_centroid = None\n\n # Compute Euclidean (2-norm) distance between the data describing a trading interval, col, and\n # all centroids. 
Identify the closest centroid for the given trading interval.\n for centroid in df_centroids.columns:\n distance = math.sqrt(sum((df_centroids[centroid] - col) ** 2))\n\n # If present value less than minimum distance, update minimum distance and record centroid\n if distance <= min_distance:\n min_distance = distance\n closest_centroid = centroid\n\n # Return ID of closest centroid\n closest_centroid_ID = pd.Series(data={'closest_centroid': closest_centroid, 'distance': min_distance})\n\n return closest_centroid_ID\n\n\n def update_centroids(row):\n \"Update centroids by taking element-wise mean value of all vectors in cluster\"\n \n return df[row['SETTLEMENTDATE']].mean(axis=1)\n\n # History of computed centroids\n centroid_history = dict()\n\n for i in range(max_iterations):\n # Get closest centroids for each trading interval and save result to dictionary\n df_closest_centroids = df.apply(compute_distance)\n centroid_history[i] = df_closest_centroids\n\n # Timestamps belonging to each cluster\n clustered_timestamps = (df_closest_centroids.loc['closest_centroid']\n .to_frame()\n .reset_index()\n .groupby('closest_centroid').agg(lambda x: list(x)))\n\n # Update centroids by computing average nodal values across series in each cluster\n df_centroids = clustered_timestamps.apply(update_centroids, axis=1).T\n\n # If first iteration, set total absolute distance to arbitrarily large number\n if i == 0:\n total_absolute_distance = 1e7\n\n # Lagged DataFrame in next iteration = DataFrame in current iteration\n df_centroids_lag = df_centroids\n \n else:\n # Element-wise absolute difference between current and previous centroid DataFrames\n df_centroids_update_distance = abs(df_centroids - df_centroids_lag)\n\n # Max total element-wise distance\n total_absolute_distance = df_centroids_update_distance.sum().sum()\n\n # Stopping condition\n if total_absolute_distance <= stopping_tolerance:\n print('Iteration number: {0} - Total absolute distance: {1}. Stopping criterion satisfied. 
Exiting loop.'.format(i+1, total_absolute_distance))\n break\n else:\n # Continue loop \n df_centroids_lag = df_centroids \n\n print('Iteration number: {0} - Difference between iterations: {1}'.format(i+1, total_absolute_distance))\n \n # Raise warning if loop terminates before stopping condition met\n if i == (max_iterations - 1):\n print('Max iteration limit exceeded before stopping tolerance satisfied.')\n\n\n # Get duration for each scenario\n # ------------------------------\n # Length of each trading interval (hours)\n interval_length = 0.5\n\n # Total number of hours for each scenario\n scenario_hours = (clustered_timestamps.apply(lambda x: len(x['SETTLEMENTDATE']), axis=1)\n .to_frame().T\n .mul(interval_length))\n\n # Renaming and setting duration index values\n scenario_hours = scenario_hours.rename(index={0: 'hours'})\n scenario_hours['level'] = 'duration'\n scenario_hours.set_index('level', append=True, inplace=True)\n\n # Final DataFrame with clustered values\n df_clustered = pd.concat([df_centroids, scenario_hours])\n\n # Convert column labels to type int\n df_clustered.columns = df_clustered.columns.astype(int)\n\n return df_clustered, centroid_history", "_____no_output_____" ] ], [ [ "## Create operating scenarios\nCreate operating scenarios for different numbers of clusters and save to file.", "_____no_output_____" ] ], [ [ "# Create operating scenarios for different numbers of clusters\nfor k in [48, 100]:\n # Create operating scenarios\n df_clustered, _ = create_scenarios(df=df_o, k=k, max_iterations=int(9e9), stopping_tolerance=0)\n \n # Save scenarios\n with open(os.path.join(output_dir, '{0}_scenarios.pickle'.format(k)), 'wb') as f:\n pickle.dump(df_clustered, f)", "Iteration number: 1 - Difference between iterations: 10000000.0\nIteration number: 2 - Difference between iterations: 44872.23773754929\nIteration number: 3 - Difference between iterations: 28923.76265257388\nIteration number: 4 - Difference between iterations: 20897.04798111056\nIteration number: 5 - Difference between iterations: 15480.767642873347\nIteration number: 6 - Difference between iterations: 12484.768446179873\nIteration number: 7 - Difference between iterations: 9998.374403476835\nIteration number: 8 - Difference between iterations: 9016.496824554233\nIteration number: 9 - Difference between iterations: 8692.182458056617\nIteration number: 10 - Difference between iterations: 7989.27306254635\nIteration number: 11 - Difference between iterations: 7771.20160855181\nIteration number: 12 - Difference between iterations: 7535.567204322281\nIteration number: 13 - Difference between iterations: 6681.957822619\nIteration number: 14 - Difference between iterations: 6063.21184936421\nIteration number: 15 - Difference between iterations: 5215.424862353642\nIteration number: 16 - Difference between iterations: 5562.561987406691\nIteration number: 17 - Difference between iterations: 4636.872155246882\nIteration number: 18 - Difference between iterations: 4151.283815569257\nIteration number: 19 - Difference between iterations: 3367.486504039685\nIteration number: 20 - Difference between iterations: 3459.5770540746457\nIteration number: 21 - Difference between iterations: 3245.760539723998\nIteration number: 22 - Difference between iterations: 3143.0057158004997\nIteration number: 23 - Difference between iterations: 2853.8722473869843\nIteration number: 24 - Difference between iterations: 2948.263029603487\nIteration number: 25 - Difference between iterations: 3178.291647564428\nIteration number: 26 - 
Difference between iterations: 3090.058478165758\nIteration number: 27 - Difference between iterations: 3132.6272932979864\nIteration number: 28 - Difference between iterations: 2883.076964496857\nIteration number: 29 - Difference between iterations: 2795.3964481259122\nIteration number: 30 - Difference between iterations: 2390.457020044656\nIteration number: 31 - Difference between iterations: 2221.2255906327273\nIteration number: 32 - Difference between iterations: 1759.969622790314\nIteration number: 33 - Difference between iterations: 1770.7633197404978\nIteration number: 34 - Difference between iterations: 1680.3575206390276\nIteration number: 35 - Difference between iterations: 1783.2077282971384\nIteration number: 36 - Difference between iterations: 1491.049419327789\nIteration number: 37 - Difference between iterations: 1447.7904656385074\nIteration number: 38 - Difference between iterations: 1256.9267791535688\nIteration number: 39 - Difference between iterations: 940.6514017071211\nIteration number: 40 - Difference between iterations: 884.3361346740842\nIteration number: 41 - Difference between iterations: 852.7413183370817\nIteration number: 42 - Difference between iterations: 1067.5105096582222\nIteration number: 43 - Difference between iterations: 1102.0147548261473\nIteration number: 44 - Difference between iterations: 945.0963234945507\nIteration number: 45 - Difference between iterations: 970.5755968229757\nIteration number: 46 - Difference between iterations: 609.280009600923\nIteration number: 47 - Difference between iterations: 387.9318250670119\nIteration number: 48 - Difference between iterations: 271.39762944419186\nIteration number: 49 - Difference between iterations: 216.32451261327768\nIteration number: 50 - Difference between iterations: 190.70642500194035\nIteration number: 51 - Difference between iterations: 168.16605745847238\nIteration number: 52 - Difference between iterations: 78.9477029701386\nIteration number: 53 - Difference between iterations: 66.12790849852881\nIteration number: 54 - Difference between iterations: 32.896363159693706\nIteration number: 55 - Difference between iterations: 38.53012359293304\nIteration number: 56 - Total absolute distance: 0.0. Stopping criterion satisfied. Exiting loop.\n" ] ], [ [ "## References\n[1] - Australian Energy Markets Operator. Data Archive (2018). at [http://www.nemweb.com.au/#mms-data-model:download](http://www.nemweb.com.au/#mms-data-model:download)\n\n[2] - Baringo L., Conejo, A. J., Correlated wind-power production and electric load scenarios for investment decisions. Applied Energy (2013).\n\n[3] - Xenophon A. K., Hill D. J., Geospatial modelling of Australia's National Electricity Market allowing backtesting against historic data. Scientific Data (2018).\n\n[4] - Xenophon A. K., Hill D. J., Geospatial Modelling of Australia's National Electricity Market - Dataset (Version v1.3) [Data set]. Zenodo. [http://doi.org/10.5281/zenodo.1326942](http://doi.org/10.5281/zenodo.1326942)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb63823d37a5a451c69f3cf750ff315b47f2339b
4,732
ipynb
Jupyter Notebook
.ipynb_checkpoints/test-checkpoint.ipynb
wcmckee/wcmckee
19315a37b592b7bcebb5f2720c965aea58f928ce
[ "MIT" ]
null
null
null
.ipynb_checkpoints/test-checkpoint.ipynb
wcmckee/wcmckee
19315a37b592b7bcebb5f2720c965aea58f928ce
[ "MIT" ]
null
null
null
.ipynb_checkpoints/test-checkpoint.ipynb
wcmckee/wcmckee
19315a37b592b7bcebb5f2720c965aea58f928ce
[ "MIT" ]
null
null
null
20.845815
122
0.448225
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb6389b28745817f370b49d507dea27617ea4762
881,493
ipynb
Jupyter Notebook
notebooks/15_Transformers.ipynb
Study-Repos-Forks/MadeWithML
7d850414945bb6ff23c9e4724fa18b61a924a937
[ "MIT" ]
3,599
2019-09-16T21:49:56.000Z
2020-02-24T13:20:24.000Z
notebooks/15_Transformers.ipynb
Study-Repos-Forks/MadeWithML
7d850414945bb6ff23c9e4724fa18b61a924a937
[ "MIT" ]
15
2019-09-24T23:42:02.000Z
2020-02-17T17:44:21.000Z
notebooks/15_Transformers.ipynb
Study-Repos-Forks/MadeWithML
7d850414945bb6ff23c9e4724fa18b61a924a937
[ "MIT" ]
594
2019-09-17T01:16:39.000Z
2020-02-24T12:48:56.000Z
245.063386
633,235
0.805652
[ [ [ "<div align=\"center\">\n<h1><img width=\"30\" src=\"https://madewithml.com/static/images/rounded_logo.png\">&nbsp;<a href=\"https://madewithml.com/\">Made With ML</a></h1>\nApplied ML · MLOps · Production\n<br>\nJoin 30K+ developers in learning how to responsibly <a href=\"https://madewithml.com/about/\">deliver value</a> with ML.\n <br>\n</div>\n\n<br>\n\n<div align=\"center\">\n <a target=\"_blank\" href=\"https://newsletter.madewithml.com\"><img src=\"https://img.shields.io/badge/Subscribe-30K-brightgreen\"></a>&nbsp;\n <a target=\"_blank\" href=\"https://github.com/GokuMohandas/MadeWithML\"><img src=\"https://img.shields.io/github/stars/GokuMohandas/MadeWithML.svg?style=social&label=Star\"></a>&nbsp;\n <a target=\"_blank\" href=\"https://www.linkedin.com/in/goku\"><img src=\"https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social\"></a>&nbsp;\n <a target=\"_blank\" href=\"https://twitter.com/GokuMohandas\"><img src=\"https://img.shields.io/twitter/follow/GokuMohandas.svg?label=Follow&style=social\"></a>\n <br>\n 🔥&nbsp; Among the <a href=\"https://github.com/topics/deep-learning\" target=\"_blank\">top ML</a> repositories on GitHub\n</div>\n\n<br>\n<hr>", "_____no_output_____" ], [ "# Transformers\n\nIn this lesson we will learn how to implement the Transformer architecture to extract contextual embeddings for our text classification task.", "_____no_output_____" ], [ "<div align=\"left\">\n<a target=\"_blank\" href=\"https://madewithml.com/courses/foundations/transformers/\"><img src=\"https://img.shields.io/badge/📖 Read-blog post-9cf\"></a>&nbsp;\n<a href=\"https://github.com/GokuMohandas/MadeWithML/blob/main/notebooks/15_Transformers.ipynb\" role=\"button\"><img src=\"https://img.shields.io/static/v1?label=&amp;message=View%20On%20GitHub&amp;color=586069&amp;logo=github&amp;labelColor=2f363d\"></a>&nbsp;\n<a href=\"https://colab.research.google.com/github/GokuMohandas/MadeWithML/blob/main/notebooks/15_Transformers.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n</div>", "_____no_output_____" ], [ "# Overview", "_____no_output_____" ], [ "Transformers are a very popular architecture that leverage and extend the concept of self-attention to create very useful representations of our input data for a downstream task.\n\n- **advantages**: \n - better representation for our input tokens via contextual embeddings where the token representation is based on the specific neighboring tokens using self-attention. 
\n - sub-word tokens, as opposed to character tokens, since they can hold more meaningful representation for many of our keywords, prefixes, suffixes, etc.\n - attend (in parallel) to all the tokens in our input, as opposed to being limited by filter spans (CNNs) or memory issues from sequential processing (RNNs).\n\n- **disadvantages**:\n - computationally intensive\n - require large amounts of data (mitigated using pretrained models)", "_____no_output_____" ], [ "<div align=\"left\">\n<img src=\"https://madewithml.com/static/images/foundations/transformers/architecture.png\" width=\"800\">\n</div>\n<div align=\"left\">\n<small><a href=\"https://arxiv.org/abs/1706.03762\" target=\"_blank\">Attention Is All You Need</a></small>\n</div>", "_____no_output_____" ], [ "# Set up", "_____no_output_____" ] ], [ [ "!pip install transformers==3.0.2 -q", "\u001b[K |████████████████████████████████| 769 kB 5.3 MB/s \n\u001b[K |████████████████████████████████| 3.0 MB 30.8 MB/s \n\u001b[K |████████████████████████████████| 895 kB 44.0 MB/s \n\u001b[K |████████████████████████████████| 1.2 MB 33.9 MB/s \n\u001b[?25h" ], [ "import numpy as np\nimport pandas as pd\nimport random\nimport torch\nimport torch.nn as nn", "_____no_output_____" ], [ "SEED = 1234", "_____no_output_____" ], [ "def set_seeds(seed=1234):\n \"\"\"Set seeds for reproducibility.\"\"\"\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # multi-GPU", "_____no_output_____" ], [ "# Set seeds for reproducibility\nset_seeds(seed=SEED)", "_____no_output_____" ], [ "# Set device\ncuda = True\ndevice = torch.device(\"cuda\" if (\n torch.cuda.is_available() and cuda) else \"cpu\")\ntorch.set_default_tensor_type(\"torch.FloatTensor\")\nif device.type == \"cuda\":\n torch.set_default_tensor_type(\"torch.cuda.FloatTensor\")\nprint (device)", "cuda\n" ] ], [ [ "## Load data", "_____no_output_____" ], [ "We will download the [AG News dataset](http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html), which consists of 120K text samples from 4 unique classes (`Business`, `Sci/Tech`, `Sports`, `World`)", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport re\nimport urllib", "_____no_output_____" ], [ "# Load data\nurl = \"https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/news.csv\"\ndf = pd.read_csv(url, header=0) # load\ndf = df.sample(frac=1).reset_index(drop=True) # shuffle\ndf.head()", "_____no_output_____" ], [ "# Reduce data size (too large to fit in Colab's limited memory)\ndf = df[:10000]\nprint (len(df))", "10000\n" ] ], [ [ "## Preprocessing", "_____no_output_____" ], [ "We're going to clean up our input data first by doing operations such as lower-casing text, removing stop (filler) words, filtering using regular expressions, etc.", "_____no_output_____" ] ], [ [ "import nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nimport re", "_____no_output_____" ], [ "nltk.download(\"stopwords\")\nSTOPWORDS = stopwords.words(\"english\")\nprint (STOPWORDS[:5])\nporter = PorterStemmer()", "[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n['i', 'me', 'my', 'myself', 'we']\n" ], [ "def preprocess(text, stopwords=STOPWORDS):\n \"\"\"Conditional preprocessing on our text unique to our task.\"\"\"\n # Lower\n text = text.lower()\n\n # Remove stopwords\n pattern = re.compile(r'\\b(' + 
r'|'.join(stopwords) + r')\b\s*')\n text = pattern.sub('', text)\n\n # Remove text in parentheses\n text = re.sub(r'\\([^)]*\\)', '', text)\n\n # Spacing and filters\n text = re.sub(r\"([-;;.,!?<=>])\", r\" \\1 \", text)\n text = re.sub('[^A-Za-z0-9]+', ' ', text) # remove non alphanumeric chars\n text = re.sub(' +', ' ', text) # remove multiple spaces\n text = text.strip()\n\n return text", "_____no_output_____" ], [ "# Sample\ntext = \"Great week for the NYSE!\"\npreprocess(text=text)", "_____no_output_____" ], [ "# Apply to dataframe\npreprocessed_df = df.copy()\npreprocessed_df.title = preprocessed_df.title.apply(preprocess)\nprint (f\"{df.title.values[0]}\\n\\n{preprocessed_df.title.values[0]}\")", "Sharon Accepts Plan to Reduce Gaza Army Operation, Haaretz Says\n\nsharon accepts plan reduce gaza army operation haaretz says\n" ] ], [ [ "## Split data", "_____no_output_____" ] ], [ [ "import collections\nfrom sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "TRAIN_SIZE = 0.7\nVAL_SIZE = 0.15\nTEST_SIZE = 0.15", "_____no_output_____" ], [ "def train_val_test_split(X, y, train_size):\n \"\"\"Split dataset into data splits.\"\"\"\n X_train, X_, y_train, y_ = train_test_split(X, y, train_size=TRAIN_SIZE, stratify=y)\n X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)\n return X_train, X_val, X_test, y_train, y_val, y_test", "_____no_output_____" ], [ "# Data\nX = preprocessed_df[\"title\"].values\ny = preprocessed_df[\"category\"].values", "_____no_output_____" ], [ "# Create data splits\nX_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(\n X=X, y=y, train_size=TRAIN_SIZE)\nprint (f\"X_train: {X_train.shape}, y_train: {y_train.shape}\")\nprint (f\"X_val: {X_val.shape}, y_val: {y_val.shape}\")\nprint (f\"X_test: {X_test.shape}, y_test: {y_test.shape}\")\nprint (f\"Sample point: {X_train[0]} → {y_train[0]}\")", "X_train: (7000,), y_train: (7000,)\nX_val: (1500,), y_val: (1500,)\nX_test: (1500,), y_test: (1500,)\nSample point: lost flu paydays → Business\n" ] ], [ [ "## Label encoder", "_____no_output_____" ] ], [ [ "class LabelEncoder(object):\n \"\"\"Label encoder for tag labels.\"\"\"\n def __init__(self, class_to_index={}):\n self.class_to_index = class_to_index\n self.index_to_class = {v: k for k, v in self.class_to_index.items()}\n self.classes = list(self.class_to_index.keys())\n\n def __len__(self):\n return len(self.class_to_index)\n\n def __str__(self):\n return f\"<LabelEncoder(num_classes={len(self)})>\"\n\n def fit(self, y):\n classes = np.unique(y)\n for i, class_ in enumerate(classes):\n self.class_to_index[class_] = i\n self.index_to_class = {v: k for k, v in self.class_to_index.items()}\n self.classes = list(self.class_to_index.keys())\n return self\n\n def encode(self, y):\n y_one_hot = np.zeros((len(y), len(self.class_to_index)), dtype=int)\n for i, item in enumerate(y):\n y_one_hot[i][self.class_to_index[item]] = 1\n return y_one_hot\n\n def decode(self, y):\n classes = []\n for i, item in enumerate(y):\n index = np.where(item == 1)[0][0]\n classes.append(self.index_to_class[index])\n return classes\n\n def save(self, fp):\n with open(fp, \"w\") as fp:\n contents = {'class_to_index': self.class_to_index}\n json.dump(contents, fp, indent=4, sort_keys=False)\n\n @classmethod\n def load(cls, fp):\n with open(fp, \"r\") as fp:\n kwargs = json.load(fp=fp)\n return cls(**kwargs)", "_____no_output_____" ], [ "# Encode\nlabel_encoder = LabelEncoder()\nlabel_encoder.fit(y_train)\nnum_classes = 
len(label_encoder)\nlabel_encoder.class_to_index", "_____no_output_____" ], [ "# Class weights\ncounts = np.bincount([label_encoder.class_to_index[class_] for class_ in y_train])\nclass_weights = {i: 1.0/count for i, count in enumerate(counts)}\nprint (f\"counts: {counts}\\nweights: {class_weights}\")", "counts: [1746 1723 1725 1806]\nweights: {0: 0.000572737686139748, 1: 0.0005803830528148578, 2: 0.0005797101449275362, 3: 0.0005537098560354374}\n" ], [ "# Convert labels to tokens\nprint (f\"y_train[0]: {y_train[0]}\")\ny_train = label_encoder.encode(y_train)\ny_val = label_encoder.encode(y_val)\ny_test = label_encoder.encode(y_test)\nprint (f\"y_train[0]: {y_train[0]}\")\nprint (f\"decode([y_train[0]]): {label_encoder.decode([y_train[0]])}\")", "y_train[0]: Business\ny_train[0]: [1 0 0 0]\ndecode([y_train[0]]): ['Business']\n" ] ], [ [ "## Tokenizer", "_____no_output_____" ], [ "We'll be using the [BertTokenizer](https://huggingface.co/transformers/model_doc/bert.html#berttokenizer) to tokenize our input text in to sub-word tokens.", "_____no_output_____" ] ], [ [ "from transformers import DistilBertTokenizer\nfrom transformers import BertTokenizer", "_____no_output_____" ], [ "# Load tokenizer and model\n# tokenizer = DistilBertTokenizer.from_pretrained(\"distilbert-base-uncased\")\ntokenizer = BertTokenizer.from_pretrained(\"allenai/scibert_scivocab_uncased\")\nvocab_size = len(tokenizer)\nprint (vocab_size)", "_____no_output_____" ], [ "# Tokenize inputs\nencoded_input = tokenizer(X_train.tolist(), return_tensors=\"pt\", padding=True)\nX_train_ids = encoded_input[\"input_ids\"]\nX_train_masks = encoded_input[\"attention_mask\"]\nprint (X_train_ids.shape, X_train_masks.shape)\nencoded_input = tokenizer(X_val.tolist(), return_tensors=\"pt\", padding=True)\nX_val_ids = encoded_input[\"input_ids\"]\nX_val_masks = encoded_input[\"attention_mask\"]\nprint (X_val_ids.shape, X_val_masks.shape)\nencoded_input = tokenizer(X_test.tolist(), return_tensors=\"pt\", padding=True)\nX_test_ids = encoded_input[\"input_ids\"]\nX_test_masks = encoded_input[\"attention_mask\"]\nprint (X_test_ids.shape, X_test_masks.shape)", "torch.Size([7000, 27]) torch.Size([7000, 27])\ntorch.Size([1500, 21]) torch.Size([1500, 21])\ntorch.Size([1500, 26]) torch.Size([1500, 26])\n" ], [ "# Decode\nprint (f\"{X_train_ids[0]}\\n{tokenizer.decode(X_train_ids[0])}\")", "tensor([ 102, 6677, 1441, 3982, 17973, 103, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0])\n[CLS] lost flu paydays [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]\n" ], [ "# Sub-word tokens\nprint (tokenizer.convert_ids_to_tokens(ids=X_train_ids[0]))", "['[CLS]', 'lost', 'flu', 'pay', '##days', '[SEP]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]', '[PAD]']\n" ] ], [ [ "## Datasets", "_____no_output_____" ], [ "We're going to create Datasets and DataLoaders to be able to efficiently create batches with our data splits.", "_____no_output_____" ] ], [ [ "class TransformerTextDataset(torch.utils.data.Dataset):\n def __init__(self, ids, masks, targets):\n self.ids = ids\n self.masks = masks\n self.targets = targets\n\n def __len__(self):\n return len(self.targets)\n\n def __str__(self):\n return f\"<Dataset(N={len(self)})>\"\n\n def __getitem__(self, index):\n ids = torch.tensor(self.ids[index], dtype=torch.long)\n masks = 
torch.tensor(self.masks[index], dtype=torch.long)\n targets = torch.FloatTensor(self.targets[index])\n return ids, masks, targets\n\n def create_dataloader(self, batch_size, shuffle=False, drop_last=False):\n return torch.utils.data.DataLoader(\n dataset=self,\n batch_size=batch_size,\n shuffle=shuffle,\n drop_last=drop_last,\n pin_memory=False)", "_____no_output_____" ], [ "# Create datasets\ntrain_dataset = TransformerTextDataset(ids=X_train_ids, masks=X_train_masks, targets=y_train)\nval_dataset = TransformerTextDataset(ids=X_val_ids, masks=X_val_masks, targets=y_val)\ntest_dataset = TransformerTextDataset(ids=X_test_ids, masks=X_test_masks, targets=y_test)\nprint (\"Data splits:\\n\"\n f\" Train dataset:{train_dataset.__str__()}\\n\"\n f\" Val dataset: {val_dataset.__str__()}\\n\"\n f\" Test dataset: {test_dataset.__str__()}\\n\"\n \"Sample point:\\n\"\n f\" ids: {train_dataset[0][0]}\\n\"\n f\" masks: {train_dataset[0][1]}\\n\"\n f\" targets: {train_dataset[0][2]}\")", "Data splits:\n Train dataset:<Dataset(N=7000)>\n Val dataset: <Dataset(N=1500)>\n Test dataset: <Dataset(N=1500)>\nSample point:\n ids: tensor([ 102, 6677, 1441, 3982, 17973, 103, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0])\n masks: tensor([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0])\n targets: tensor([1., 0., 0., 0.], device=\"cpu\")\n" ], [ "# Create dataloaders\nbatch_size = 128\ntrain_dataloader = train_dataset.create_dataloader(\n batch_size=batch_size)\nval_dataloader = val_dataset.create_dataloader(\n batch_size=batch_size)\ntest_dataloader = test_dataset.create_dataloader(\n batch_size=batch_size)\nbatch = next(iter(train_dataloader))\nprint (\"Sample batch:\\n\"\n f\" ids: {batch[0].size()}\\n\"\n f\" masks: {batch[1].size()}\\n\"\n f\" targets: {batch[2].size()}\")", "Sample batch:\n ids: torch.Size([128, 27])\n masks: torch.Size([128, 27])\n targets: torch.Size([128, 4])\n" ] ], [ [ "## Trainer", "_____no_output_____" ], [ "Let's create the `Trainer` class that we'll use to facilitate training for our experiments.", "_____no_output_____" ] ], [ [ "import torch.nn.functional as F", "_____no_output_____" ], [ "class Trainer(object):\n def __init__(self, model, device, loss_fn=None, optimizer=None, scheduler=None):\n\n # Set params\n self.model = model\n self.device = device\n self.loss_fn = loss_fn\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n def train_step(self, dataloader):\n \"\"\"Train step.\"\"\"\n # Set model to train mode\n self.model.train()\n loss = 0.0\n\n # Iterate over train batches\n for i, batch in enumerate(dataloader):\n\n # Step\n batch = [item.to(self.device) for item in batch] # Set device\n inputs, targets = batch[:-1], batch[-1]\n self.optimizer.zero_grad() # Reset gradients\n z = self.model(inputs) # Forward pass\n J = self.loss_fn(z, targets) # Define loss\n J.backward() # Backward pass\n self.optimizer.step() # Update weights\n\n # Cumulative Metrics\n loss += (J.detach().item() - loss) / (i + 1)\n\n return loss\n\n def eval_step(self, dataloader):\n \"\"\"Validation or test step.\"\"\"\n # Set model to eval mode\n self.model.eval()\n loss = 0.0\n y_trues, y_probs = [], []\n\n # Iterate over val batches\n with torch.inference_mode():\n for i, batch in enumerate(dataloader):\n\n # Step\n batch = [item.to(self.device) for item in batch] # Set device\n inputs, y_true = batch[:-1], batch[-1]\n z = self.model(inputs) # Forward pass\n J = self.loss_fn(z, y_true).item()\n\n # Cumulative Metrics\n loss += (J - 
loss) / (i + 1)\n\n # Store outputs\n y_prob = F.softmax(z).cpu().numpy()\n y_probs.extend(y_prob)\n y_trues.extend(y_true.cpu().numpy())\n\n return loss, np.vstack(y_trues), np.vstack(y_probs)\n\n def predict_step(self, dataloader):\n \"\"\"Prediction step.\"\"\"\n # Set model to eval mode\n self.model.eval()\n y_probs = []\n\n # Iterate over val batches\n with torch.inference_mode():\n for i, batch in enumerate(dataloader):\n\n # Forward pass w/ inputs\n inputs, targets = batch[:-1], batch[-1]\n z = self.model(inputs)\n\n # Store outputs\n y_prob = F.softmax(z).cpu().numpy()\n y_probs.extend(y_prob)\n\n return np.vstack(y_probs)\n \n def train(self, num_epochs, patience, train_dataloader, val_dataloader):\n best_val_loss = np.inf\n for epoch in range(num_epochs):\n # Steps\n train_loss = self.train_step(dataloader=train_dataloader)\n val_loss, _, _ = self.eval_step(dataloader=val_dataloader)\n self.scheduler.step(val_loss)\n\n # Early stopping\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n best_model = self.model\n _patience = patience # reset _patience\n else:\n _patience -= 1\n if not _patience: # 0\n print(\"Stopping early!\")\n break\n\n # Logging\n print(\n f\"Epoch: {epoch+1} | \"\n f\"train_loss: {train_loss:.5f}, \"\n f\"val_loss: {val_loss:.5f}, \"\n f\"lr: {self.optimizer.param_groups[0]['lr']:.2E}, \"\n f\"_patience: {_patience}\"\n )\n return best_model", "_____no_output_____" ] ], [ [ "# Transformer", "_____no_output_____" ], [ "## Scaled dot-product attention", "_____no_output_____" ], [ "The most popular type of self-attention is scaled dot-product attention from the widely-cited [Attention is all you need](https://arxiv.org/abs/1706.03762) paper. This type of attention involves projecting our encoded input sequences onto three matrices, queries (Q), keys (K) and values (V), whose weights we learn.", "_____no_output_____" ], [ "$ inputs \\in \\mathbb{R}^{NXMXH} $ ($N$ = batch size, $M$ = sequence length, $H$ = hidden dim)\n\n$ Q = XW_q $ where $ W_q \\in \\mathbb{R}^{HXd_q} $\n\n$ K = XW_k $ where $ W_k \\in \\mathbb{R}^{HXd_k} $\n\n$ V = XW_v $ where $ W_v \\in \\mathbb{R}^{HXd_v} $\n\n$ attention (Q, K, V) = softmax( \\frac{Q K^{T}}{\\sqrt{d_k}} )V \\in \\mathbb{R}^{MXd_v} $", "_____no_output_____" ], [ "## Multi-head attention", "_____no_output_____" ], [ "Instead of applying self-attention only once across the entire encoded input, we can also separate the input and apply self-attention in parallel (heads) to each input section and concatenate them. This allows the different head to learn unique representations while maintaining the complexity since we split the input into smaller subspaces.", "_____no_output_____" ], [ "$ MultiHead(Q, K, V) = concat({head}_1, ..., {head}_{h})W_O $ \n\n* ${head}_i = attention(Q_i, K_i, V_i) $\n* $h$ = # of self-attention heads\n* $W_O \\in \\mathbb{R}^{hd_vXH} $\n* $H$ = hidden dim. (or dimension of the model $d_{model}$)\n", "_____no_output_____" ], [ "## Positional encoding", "_____no_output_____" ], [ "With self-attention, we aren't able to account for the sequential position of our input tokens. To address this, we can use positional encoding to create a representation of the location of each token with respect to the entire sequence. 
This can either be learned (with weights) or we can use a fixed function that can better extend to create positional encoding for lengths during inference that were not observed during training.", "_____no_output_____" ], [ "$ PE_{(pos,2i)} = sin({pos}/{10000^{2i/H}}) $\n\n$ PE_{(pos,2i+1)} = cos({pos}/{10000^{2i/H}}) $\n\nwhere:\n\n* $pos$ = position of the token $(1...M)$\n* $i$ = hidden dim $(1..H)$\n", "_____no_output_____" ], [ "This effectively allows us to represent each token's relative position using a fixed function for very large sequences. And because we've constrained the positional encodings to have the same dimensions as our encoded inputs, we can simply concatenate them before feeding them into the multi-head attention heads.", "_____no_output_____" ], [ "## Architecture", "_____no_output_____" ], [ "And here's how it all fits together! It's an end-to-end architecture that creates these contextual representations and uses an encoder-decoder architecture to predict the outcomes (one-to-one, many-to-one, many-to-many, etc.). Due to their complexity, Transformers require massive amounts of data to train without overfitting; however, they can be leveraged as pretrained models and finetuned with smaller datasets that are similar to the larger set they were initially trained on.", "_____no_output_____" ], [ "<div align=\"left\">\n<img src=\"https://madewithml.com/static/images/foundations/transformers/architecture.png\" width=\"800\">\n</div>\n<div align=\"left\">\n<small><a href=\"https://arxiv.org/abs/1706.03762\" target=\"_blank\">Attention Is All You Need</a></small>\n</div>", "_____no_output_____" ], [ "> We're not going to implement the Transformer [from scratch](https://nlp.seas.harvard.edu/2018/04/03/attention.html) but we will use the [Hugging Face library](https://github.com/huggingface/transformers) to load a pretrained [BertModel](https://huggingface.co/transformers/model_doc/bert.html#bertmodel), which we'll use as a feature extractor and fine-tune on our own dataset.", "_____no_output_____" ], [ "## Model", "_____no_output_____" ], [ "We're going to use a pretrained [BertModel](https://huggingface.co/transformers/model_doc/bert.html#bertmodel) to act as a feature extractor. 
We'll only use the encoder to receive sequential and pooled outputs (`is_decoder=False` is default).", "_____no_output_____" ] ], [ [ "from transformers import BertModel", "_____no_output_____" ], [ "# transformer = BertModel.from_pretrained(\"distilbert-base-uncased\")\n# embedding_dim = transformer.config.dim\ntransformer = BertModel.from_pretrained(\"allenai/scibert_scivocab_uncased\")\nembedding_dim = transformer.config.hidden_size", "_____no_output_____" ], [ "class Transformer(nn.Module):\n def __init__(self, transformer, dropout_p, embedding_dim, num_classes):\n super(Transformer, self).__init__()\n self.transformer = transformer\n self.dropout = torch.nn.Dropout(dropout_p)\n self.fc1 = torch.nn.Linear(embedding_dim, num_classes)\n \n def forward(self, inputs):\n ids, masks = inputs\n seq, pool = self.transformer(input_ids=ids, attention_mask=masks)\n z = self.dropout(pool)\n z = self.fc1(z)\n return z", "_____no_output_____" ] ], [ [ "> We decided to work with the pooled output, but we could have just as easily worked with the sequential output (encoder representation for each sub-token) and applied a CNN (or other decoder options) on top of it.", "_____no_output_____" ] ], [ [ "# Initialize model\ndropout_p = 0.5\nmodel = Transformer(\n transformer=transformer, dropout_p=dropout_p,\n embedding_dim=embedding_dim, num_classes=num_classes)\nmodel = model.to(device)\nprint (model.named_parameters)", "<bound method Module.named_parameters of Transformer(\n (transformer): BertModel(\n (embeddings): BertEmbeddings(\n (word_embeddings): Embedding(31090, 768, padding_idx=0)\n (position_embeddings): Embedding(512, 768)\n (token_type_embeddings): Embedding(2, 768)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (encoder): BertEncoder(\n (layer): ModuleList(\n (0): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (1): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (2): 
BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (3): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (4): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (5): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (6): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, 
bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (7): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (8): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (9): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (10): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, 
out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (11): BertLayer(\n (attention): BertAttention(\n (self): BertSelfAttention(\n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n (output): BertSelfOutput(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n (intermediate): BertIntermediate(\n (dense): Linear(in_features=768, out_features=3072, bias=True)\n )\n (output): BertOutput(\n (dense): Linear(in_features=3072, out_features=768, bias=True)\n (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n (dropout): Dropout(p=0.1, inplace=False)\n )\n )\n )\n )\n (pooler): BertPooler(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (activation): Tanh()\n )\n )\n (dropout): Dropout(p=0.5, inplace=False)\n (fc1): Linear(in_features=768, out_features=4, bias=True)\n)>\n" ] ], [ [ "## Training", "_____no_output_____" ] ], [ [ "# Arguments\nlr = 1e-4\nnum_epochs = 100\npatience = 10", "_____no_output_____" ], [ "# Define loss\nclass_weights_tensor = torch.Tensor(np.array(list(class_weights.values())))\nloss_fn = nn.BCEWithLogitsLoss(weight=class_weights_tensor)", "_____no_output_____" ], [ "# Define optimizer & scheduler\noptimizer = torch.optim.Adam(model.parameters(), lr=lr)\nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, mode=\"min\", factor=0.1, patience=5)", "_____no_output_____" ], [ "# Trainer module\ntrainer = Trainer(\n model=model, device=device, loss_fn=loss_fn, \n optimizer=optimizer, scheduler=scheduler)", "_____no_output_____" ], [ "# Train\nbest_model = trainer.train(num_epochs, patience, train_dataloader, val_dataloader)", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:14: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n \n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:15: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n from ipykernel import kernelapp as app\n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:55: UserWarning: Implicit dimension choice for softmax has been deprecated. 
Change the call to include dim=X as an argument.\n" ] ], [ [ "## Evaluation", "_____no_output_____" ] ], [ [ "import json\nfrom sklearn.metrics import precision_recall_fscore_support", "_____no_output_____" ], [ "def get_performance(y_true, y_pred, classes):\n \"\"\"Per-class performance metrics.\"\"\"\n # Performance\n performance = {\"overall\": {}, \"class\": {}}\n\n # Overall performance\n metrics = precision_recall_fscore_support(y_true, y_pred, average=\"weighted\")\n performance[\"overall\"][\"precision\"] = metrics[0]\n performance[\"overall\"][\"recall\"] = metrics[1]\n performance[\"overall\"][\"f1\"] = metrics[2]\n performance[\"overall\"][\"num_samples\"] = np.float64(len(y_true))\n\n # Per-class performance\n metrics = precision_recall_fscore_support(y_true, y_pred, average=None)\n for i in range(len(classes)):\n performance[\"class\"][classes[i]] = {\n \"precision\": metrics[0][i],\n \"recall\": metrics[1][i],\n \"f1\": metrics[2][i],\n \"num_samples\": np.float64(metrics[3][i]),\n }\n\n return performance", "_____no_output_____" ], [ "# Get predictions\ntest_loss, y_true, y_prob = trainer.eval_step(dataloader=test_dataloader)\ny_pred = np.argmax(y_prob, axis=1)", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:14: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n \n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:15: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n from ipykernel import kernelapp as app\n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:55: UserWarning: Implicit dimension choice for softmax has been deprecated. 
Change the call to include dim=X as an argument.\n" ], [ "# Determine performance\nperformance = get_performance(\n y_true=np.argmax(y_true, axis=1), y_pred=y_pred, classes=label_encoder.classes)\nprint (json.dumps(performance[\"overall\"], indent=2))", "{\n \"precision\": 0.8085194951783808,\n \"recall\": 0.8086666666666666,\n \"f1\": 0.8083051845125695,\n \"num_samples\": 1500.0\n}\n" ], [ "# Save artifacts\nfrom pathlib import Path\ndir = Path(\"transformers\")\ndir.mkdir(parents=True, exist_ok=True)\nlabel_encoder.save(fp=Path(dir, \"label_encoder.json\"))\ntorch.save(best_model.state_dict(), Path(dir, \"model.pt\"))\nwith open(Path(dir, \"performance.json\"), \"w\") as fp:\n json.dump(performance, indent=2, sort_keys=False, fp=fp)", "_____no_output_____" ] ], [ [ "## Inference", "_____no_output_____" ] ], [ [ "def get_probability_distribution(y_prob, classes):\n \"\"\"Create a dict of class probabilities from an array.\"\"\"\n results = {}\n for i, class_ in enumerate(classes):\n results[class_] = np.float64(y_prob[i])\n sorted_results = {k: v for k, v in sorted(\n results.items(), key=lambda item: item[1], reverse=True)}\n return sorted_results", "_____no_output_____" ], [ "# Load artifacts\ndevice = torch.device(\"cpu\")\ntokenizer = BertTokenizer.from_pretrained(\"allenai/scibert_scivocab_uncased\")\nlabel_encoder = LabelEncoder.load(fp=Path(dir, \"label_encoder.json\"))\ntransformer = BertModel.from_pretrained(\"allenai/scibert_scivocab_uncased\")\nembedding_dim = transformer.config.hidden_size\nmodel = Transformer(\n transformer=transformer, dropout_p=dropout_p,\n embedding_dim=embedding_dim, num_classes=num_classes)\nmodel.load_state_dict(torch.load(Path(dir, \"model.pt\"), map_location=device))\nmodel.to(device);", "_____no_output_____" ], [ "# Initialize trainer\ntrainer = Trainer(model=model, device=device)", "_____no_output_____" ], [ "# Create datasets\ntrain_dataset = TransformerTextDataset(ids=X_train_ids, masks=X_train_masks, targets=y_train)\nval_dataset = TransformerTextDataset(ids=X_val_ids, masks=X_val_masks, targets=y_val)\ntest_dataset = TransformerTextDataset(ids=X_test_ids, masks=X_test_masks, targets=y_test)\nprint (\"Data splits:\\n\"\n f\" Train dataset:{train_dataset.__str__()}\\n\"\n f\" Val dataset: {val_dataset.__str__()}\\n\"\n f\" Test dataset: {test_dataset.__str__()}\\n\"\n \"Sample point:\\n\"\n f\" ids: {train_dataset[0][0]}\\n\"\n f\" masks: {train_dataset[0][1]}\\n\"\n f\" targets: {train_dataset[0][2]}\")", "Data splits:\n Train dataset:<Dataset(N=7000)>\n Val dataset: <Dataset(N=1500)>\n Test dataset: <Dataset(N=1500)>\nSample point:\n ids: tensor([ 102, 6677, 1441, 3982, 17973, 103, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0])\n masks: tensor([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0])\n targets: tensor([1., 0., 0., 0.], device=\"cpu\")\n" ], [ "# Dataloader\ntext = \"The final tennis tournament starts next week.\"\nX = preprocess(text)\nencoded_input = tokenizer(X, return_tensors=\"pt\", padding=True).to(torch.device(\"cpu\"))\nids = encoded_input[\"input_ids\"]\nmasks = encoded_input[\"attention_mask\"]\ny_filler = label_encoder.encode([label_encoder.classes[0]]*len(ids))\ndataset = TransformerTextDataset(ids=ids, masks=masks, targets=y_filler)\ndataloader = dataset.create_dataloader(batch_size=int(batch_size))", "_____no_output_____" ], [ "# Inference\ny_prob = trainer.predict_step(dataloader)\ny_pred = np.argmax(y_prob, axis=1)\nlabel_encoder.index_to_class[y_pred[0]]", 
"/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:14: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n \n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:15: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n from ipykernel import kernelapp as app\n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:76: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n" ], [ "# Class distributions\nprob_dist = get_probability_distribution(y_prob=y_prob[0], classes=label_encoder.classes)\nprint (json.dumps(prob_dist, indent=2))", "{\n \"Sports\": 0.9999359846115112,\n \"World\": 4.0660612285137177e-05,\n \"Sci/Tech\": 1.1774928680097219e-05,\n \"Business\": 1.1545793313416652e-05\n}\n" ] ], [ [ "## Interpretability", "_____no_output_____" ], [ "Let's visualize the self-attention weights from each of the attention heads in the encoder.", "_____no_output_____" ] ], [ [ "import sys\n!rm -r bertviz_repo\n!test -d bertviz_repo || git clone https://github.com/jessevig/bertviz bertviz_repo\nif not \"bertviz_repo\" in sys.path:\n sys.path += [\"bertviz_repo\"]", "rm: cannot remove 'bertviz_repo': No such file or directory\nCloning into 'bertviz_repo'...\nremote: Enumerating objects: 1416, done.\u001b[K\nremote: Counting objects: 100% (213/213), done.\u001b[K\nremote: Compressing objects: 100% (142/142), done.\u001b[K\nremote: Total 1416 (delta 137), reused 133 (delta 71), pack-reused 1203\u001b[K\nReceiving objects: 100% (1416/1416), 213.85 MiB | 23.27 MiB/s, done.\nResolving deltas: 100% (900/900), done.\n" ], [ "from bertviz import head_view", "_____no_output_____" ], [ "# Print input ids\nprint (ids)\nprint (tokenizer.batch_decode(ids))", "tensor([[ 102, 2531, 3617, 8869, 23589, 4972, 8553, 2205, 4082, 103]],\n device=\"cpu\")\n['[CLS] final tennis tournament starts next week [SEP]']\n" ], [ "# Get encoder attentions\nseq, pool, attn = model.transformer(input_ids=ids, attention_mask=masks, output_attentions=True)\nprint (len(attn)) # 12 attention layers (heads)\nprint (attn[0].shape)", "12\ntorch.Size([1, 12, 10, 10])\n" ], [ "# HTML set up\ndef call_html():\n import IPython\n display(IPython.core.display.HTML('''\n <script src=\"/static/components/requirejs/require.js\"></script>\n <script>\n requirejs.config({\n paths: {\n base: '/static/base',\n \"d3\": \"https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.8/d3.min\",\n jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min',\n },\n });\n </script>\n '''))", "_____no_output_____" ], [ "# Visualize self-attention weights\ncall_html()\ntokens = tokenizer.convert_ids_to_tokens(ids[0])\nhead_view(attention=attn, tokens=tokens)", "_____no_output_____" ] ], [ [ "> Now you're ready to start the [MLOps lessons](https://madewithml.com/#mlops) to learn how to apply all this foundational modeling knowledge to responsibly deliver value.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb6394c4416e1730f6dc36a9c2564b063fbcb313
176,730
ipynb
Jupyter Notebook
sagemaker-deployment/Project/solution/SageMaker Project.ipynb
naderabdalghani/udacity-deep-learning-nanodegree
d049bfe07a89afa64c84dc99a044fb7b71a31f4f
[ "MIT" ]
null
null
null
sagemaker-deployment/Project/solution/SageMaker Project.ipynb
naderabdalghani/udacity-deep-learning-nanodegree
d049bfe07a89afa64c84dc99a044fb7b71a31f4f
[ "MIT" ]
null
null
null
sagemaker-deployment/Project/solution/SageMaker Project.ipynb
naderabdalghani/udacity-deep-learning-nanodegree
d049bfe07a89afa64c84dc99a044fb7b71a31f4f
[ "MIT" ]
2
2020-09-16T16:55:36.000Z
2020-09-29T12:56:35.000Z
87.969139
76,621
0.61358
[ [ [ "# Creating a Sentiment Analysis Web App\n## Using PyTorch and SageMaker\n\n_Deep Learning Nanodegree Program | Deployment_\n\n---\n\nNow that we have a basic understanding of how SageMaker works we will try to use it to construct a complete project from end to end. Our goal will be to have a simple web page which a user can use to enter a movie review. The web page will then send the review off to our deployed model which will predict the sentiment of the entered review.\n\n## Instructions\n\nSome template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully!\n\nIn addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell.\n\n> **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted.\n\n## General Outline\n\nRecall the general outline for SageMaker projects using a notebook instance.\n\n1. Download or otherwise retrieve the data.\n2. Process / Prepare the data.\n3. Upload the processed data to S3.\n4. Train a chosen model.\n5. Test the trained model (typically using a batch transform job).\n6. Deploy the trained model.\n7. Use the deployed model.\n\nFor this project, you will be following the steps in the general outline with some modifications. \n\nFirst, you will not be testing the model in its own step. You will still be testing the model, however, you will do it by deploying your model and then using the deployed model by sending the test data to it. One of the reasons for doing this is so that you can make sure that your deployed model is working correctly before moving forward.\n\nIn addition, you will deploy and use your trained model a second time. In the second iteration you will customize the way that your trained model is deployed by including some of your own code. In addition, your newly deployed model will be used in the sentiment analysis web app.", "_____no_output_____" ], [ "## Step 1: Downloading the data\n\nAs in the XGBoost in SageMaker notebook, we will be using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/)\n\n> Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. 
Association for Computational Linguistics, 2011.", "_____no_output_____" ] ], [ [ "%mkdir ../data\n!wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\n!tar -zxf ../data/aclImdb_v1.tar.gz -C ../data", "mkdir: cannot create directory ‘../data’: File exists\n--2020-09-10 12:02:29-- http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\nResolving ai.stanford.edu (ai.stanford.edu)... 171.64.68.10\nConnecting to ai.stanford.edu (ai.stanford.edu)|171.64.68.10|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 84125825 (80M) [application/x-gzip]\nSaving to: ‘../data/aclImdb_v1.tar.gz’\n\n../data/aclImdb_v1. 100%[===================>] 80.23M 21.3MB/s in 6.3s \n\n2020-09-10 12:02:36 (12.7 MB/s) - ‘../data/aclImdb_v1.tar.gz’ saved [84125825/84125825]\n\n" ] ], [ [ "## Step 2: Preparing and Processing the data\n\nAlso, as in the XGBoost notebook, we will be doing some initial data processing. The first few steps are the same as in the XGBoost example. To begin with, we will read in each of the reviews and combine them into a single input structure. Then, we will split the dataset into a training set and a testing set.", "_____no_output_____" ] ], [ [ "import os\nimport glob\n\ndef read_imdb_data(data_dir='../data/aclImdb'):\n data = {}\n labels = {}\n \n for data_type in ['train', 'test']:\n data[data_type] = {}\n labels[data_type] = {}\n \n for sentiment in ['pos', 'neg']:\n data[data_type][sentiment] = []\n labels[data_type][sentiment] = []\n \n path = os.path.join(data_dir, data_type, sentiment, '*.txt')\n files = glob.glob(path)\n \n for f in files:\n with open(f) as review:\n data[data_type][sentiment].append(review.read())\n # Here we represent a positive review by '1' and a negative review by '0'\n labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0)\n \n assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \\\n \"{}/{} data size does not match labels size\".format(data_type, sentiment)\n \n return data, labels", "_____no_output_____" ], [ "data, labels = read_imdb_data()\nprint(\"IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg\".format(\n len(data['train']['pos']), len(data['train']['neg']),\n len(data['test']['pos']), len(data['test']['neg'])))", "IMDB reviews: train = 12500 pos / 12500 neg, test = 12500 pos / 12500 neg\n" ] ], [ [ "Now that we've read the raw training and testing data from the downloaded dataset, we will combine the positive and negative reviews and shuffle the resulting records.", "_____no_output_____" ] ], [ [ "from sklearn.utils import shuffle\n\ndef prepare_imdb_data(data, labels):\n \"\"\"Prepare training and test sets from IMDb movie reviews.\"\"\"\n \n #Combine positive and negative reviews and labels\n data_train = data['train']['pos'] + data['train']['neg']\n data_test = data['test']['pos'] + data['test']['neg']\n labels_train = labels['train']['pos'] + labels['train']['neg']\n labels_test = labels['test']['pos'] + labels['test']['neg']\n \n #Shuffle reviews and corresponding labels within training and test sets\n data_train, labels_train = shuffle(data_train, labels_train)\n data_test, labels_test = shuffle(data_test, labels_test)\n \n # Return a unified training data, test data, training labels, test labets\n return data_train, data_test, labels_train, labels_test", "_____no_output_____" ], [ "train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)\nprint(\"IMDb reviews (combined): train = {}, test = 
{}\".format(len(train_X), len(test_X)))", "IMDb reviews (combined): train = 25000, test = 25000\n" ] ], [ [ "Now that we have our training and testing sets unified and prepared, we should do a quick check and see an example of the data our model will be trained on. This is generally a good idea as it allows you to see how each of the further processing steps affects the reviews and it also ensures that the data has been loaded correctly.", "_____no_output_____" ] ], [ [ "print(train_X[100])\nprint(len(train_X[100]))\nprint(train_y[100])", "We purchased this series on DVD because of all of the glowing reviews we had seen here. I gave it three stars because there can be little doubt that sometimes the acting, directing and writing are brilliant. In fact they are so brilliant we did not see the propaganda that was being transmitted so smoothly on the series. If one watches it with discernment, one will see the entire litany of the radical right wing beliefs being promulgated by the Fox (Faux) News Network. To avoid giving away any spoilers I will refrain from pointing out all of the dozens of specific instances. A brief look at the plots found here on IMDb will disclose that everything from torture to gun control to the right of a network to provide \"Infomercials\" and call them news is justified with cute plot twists and impassioned speeches given by some of the best actors in the world. We watched many shows and finally gave up in disgust when they justified torture using Attorney General Gonzales as a shining example of why all kinds of torture should be used in the name of protecting all of us. The series also manages to demean male and female gays in subtle ways by using them as plot devices depicting evil people. All in all the complete litany of the radical religious right wing.<br /><br />No doubt the popularity of this program will be used by future historians as proof that America lost its way in the early part of the this century. As a student of history myself I would characterize this program as being in a league with the propaganda produced by Goebbels for Hitler and some of the propaganda produced by Hollywood for the American audience during WWII.<br /><br />So if you want to use this as a teaching tool to help your students understand how subtle propaganda can be then by all means do so. Just be sure to purchase an inexpensive used copy so you can avoid enriching the ultra right wingers at Faux Network who produced this travesty.\n1940\n0\n" ] ], [ [ "The first step in processing the reviews is to make sure that any html tags that appear should be removed. 
In addition, we wish to tokenize our input so that words such as *entertained* and *entertaining* are considered the same with regard to sentiment analysis.", "_____no_output_____" ] ], [ [ "import nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\nimport re\nfrom bs4 import BeautifulSoup\n\ndef review_to_words(review):\n nltk.download(\"stopwords\", quiet=True)\n stemmer = PorterStemmer()\n \n text = BeautifulSoup(review, \"html.parser\").get_text() # Remove HTML tags\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower()) # Convert to lower case\n words = text.split() # Split string into words\n words = [w for w in words if w not in stopwords.words(\"english\")] # Remove stopwords\n words = [stemmer.stem(w) for w in words] # Stem each word, reusing the stemmer created above\n \n return words", "_____no_output_____" ] ], [ [ "The `review_to_words` method defined above uses `BeautifulSoup` to remove any HTML tags that appear and uses the `nltk` package to tokenize the reviews. As a check to ensure we know how everything is working, try applying `review_to_words` to one of the reviews in the training set.", "_____no_output_____" ] ], [ [ "# TODO: Apply review_to_words to a review (train_X[100] or any other review)\nreview_to_words(train_X[100])", "_____no_output_____" ] ], [ [ "**Question:** Above we mentioned that the `review_to_words` method removes HTML formatting and allows us to tokenize the words found in a review, for example, converting *entertained* and *entertaining* into *entertain* so that they are treated as though they are the same word. What else, if anything, does this method do to the input?", "_____no_output_____" ], [ "**Answer:** Besides removing HTML formatting and stemming the words, it also replaces punctuation and other non-alphanumeric characters with spaces, converts all letters to lowercase, and removes English stopwords (e.g. 'and', 'the', 'a').", "_____no_output_____" ], [ "The method below applies the `review_to_words` method to each of the reviews in the training and testing datasets. In addition, it caches the results. This is because performing this processing step can take a long time. 
This way, if you are unable to complete the notebook in the current session, you can come back without needing to process the data a second time.", "_____no_output_____" ] ], [ [ "import pickle\n\ncache_dir = os.path.join(\"../cache\", \"sentiment_analysis\") # where to store cache files\nos.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists\n\ndef preprocess_data(data_train, data_test, labels_train, labels_test,\n cache_dir=cache_dir, cache_file=\"preprocessed_data.pkl\"):\n \"\"\"Convert each review to words; read from cache if available.\"\"\"\n\n # If cache_file is not None, try to read from it first\n cache_data = None\n if cache_file is not None:\n try:\n with open(os.path.join(cache_dir, cache_file), \"rb\") as f:\n cache_data = pickle.load(f)\n print(\"Read preprocessed data from cache file:\", cache_file)\n except Exception:\n pass # unable to read from cache, but that's okay\n \n # If cache is missing, then do the heavy lifting\n if cache_data is None:\n # Preprocess training and test data to obtain words for each review\n #words_train = list(map(review_to_words, data_train))\n #words_test = list(map(review_to_words, data_test))\n words_train = [review_to_words(review) for review in data_train]\n words_test = [review_to_words(review) for review in data_test]\n \n # Write to cache file for future runs\n if cache_file is not None:\n cache_data = dict(words_train=words_train, words_test=words_test,\n labels_train=labels_train, labels_test=labels_test)\n with open(os.path.join(cache_dir, cache_file), \"wb\") as f:\n pickle.dump(cache_data, f)\n print(\"Wrote preprocessed data to cache file:\", cache_file)\n else:\n # Unpack data loaded from cache file\n words_train, words_test, labels_train, labels_test = (cache_data['words_train'],\n cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])\n \n return words_train, words_test, labels_train, labels_test", "_____no_output_____" ], [ "# Preprocess data\ntrain_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)", "Read preprocessed data from cache file: preprocessed_data.pkl\n" ] ], [ [ "## Transform the data\n\nIn the XGBoost notebook we transformed the data from its word representation to a bag-of-words feature representation. For the model we are going to construct in this notebook we will construct a feature representation which is very similar. To start, we will represent each word as an integer. Of course, some of the words that appear in the reviews occur very infrequently and so likely don't contain much information for the purposes of sentiment analysis. The way we will deal with this problem is that we will fix the size of our working vocabulary and we will only include the words that appear most frequently. We will then combine all of the infrequent words into a single category and, in our case, we will label it as `1`.\n\nSince we will be using a recurrent neural network, it will be convenient if the length of each review is the same. To do this, we will fix a size for our reviews and then pad short reviews with the category 'no word' (which we will label `0`) and truncate long reviews.", "_____no_output_____" ], [ "### (TODO) Create a word dictionary\n\nTo begin with, we need to construct a way to map words that appear in the reviews to integers. 
Here we fix the size of our vocabulary (including the 'no word' and 'infrequent' categories) to be `5000`, but you may wish to change this to see how it affects the model.\n\n> **TODO:** Complete the implementation for the `build_dict()` method below. Note that even though the vocab_size is set to `5000`, we only want to construct a mapping for the most frequently appearing `4998` words. This is because we want to reserve the special labels `0` for 'no word' and `1` for 'infrequent word'.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom collections import Counter\n\ndef build_dict(data, vocab_size=5000):\n \"\"\"Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer.\"\"\"\n \n # Determine how often each word appears in `data`. Note that `data` is a list of sentences and that a\n # sentence is a list of words.\n words = [word for sentence in data for word in sentence]\n word_count = Counter(words) # Maps each word that appears in the reviews to how often it occurs\n \n # Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and\n # sorted_words[-1] is the least frequently appearing word.\n sorted_words = sorted(word_count, key=word_count.get, reverse=True)\n \n word_dict = {} # This is what we are building, a dictionary that translates words into integers\n for idx, word in enumerate(sorted_words[:vocab_size - 2]): # The -2 is so that we save room for the 'no word' and 'infrequent' labels\n word_dict[word] = idx + 2\n \n return word_dict", "_____no_output_____" ], [ "word_dict = build_dict(train_X)", "_____no_output_____" ] ], [ [ "**Question:** What are the five most frequently appearing (tokenized) words in the training set? Does it make sense that these words appear frequently in the training set?", "_____no_output_____" ], [ "**Answer:** The five most frequently appearing words in the training set are: 'movi', 'film', 'one', 'like' and 'time'. 
Since the reviews are all about movies, this makes total sense", "_____no_output_____" ] ], [ [ "# TODO: Use this space to determine the five most frequently appearing words in the training set.\nprint(word_dict)", "{'movi': 2, 'film': 3, 'one': 4, 'like': 5, 'time': 6, 'good': 7, 'make': 8, 'charact': 9, 'get': 10, 'see': 11, 'watch': 12, 'stori': 13, 'even': 14, 'would': 15, 'realli': 16, 'well': 17, 'scene': 18, 'look': 19, 'show': 20, 'much': 21, 'end': 22, 'peopl': 23, 'bad': 24, 'go': 25, 'great': 26, 'also': 27, 'first': 28, 'love': 29, 'think': 30, 'way': 31, 'act': 32, 'play': 33, 'made': 34, 'thing': 35, 'could': 36, 'know': 37, 'say': 38, 'seem': 39, 'work': 40, 'plot': 41, 'two': 42, 'actor': 43, 'year': 44, 'come': 45, 'mani': 46, 'seen': 47, 'take': 48, 'life': 49, 'want': 50, 'never': 51, 'littl': 52, 'best': 53, 'tri': 54, 'man': 55, 'ever': 56, 'give': 57, 'better': 58, 'still': 59, 'perform': 60, 'find': 61, 'feel': 62, 'part': 63, 'back': 64, 'use': 65, 'someth': 66, 'director': 67, 'actual': 68, 'interest': 69, 'lot': 70, 'real': 71, 'old': 72, 'cast': 73, 'though': 74, 'live': 75, 'star': 76, 'enjoy': 77, 'guy': 78, 'anoth': 79, 'new': 80, 'role': 81, 'noth': 82, '10': 83, 'funni': 84, 'music': 85, 'point': 86, 'start': 87, 'set': 88, 'girl': 89, 'origin': 90, 'day': 91, 'world': 92, 'everi': 93, 'believ': 94, 'turn': 95, 'quit': 96, 'us': 97, 'direct': 98, 'thought': 99, 'fact': 100, 'minut': 101, 'horror': 102, 'kill': 103, 'action': 104, 'comedi': 105, 'pretti': 106, 'young': 107, 'wonder': 108, 'happen': 109, 'around': 110, 'got': 111, 'effect': 112, 'right': 113, 'long': 114, 'howev': 115, 'big': 116, 'line': 117, 'famili': 118, 'enough': 119, 'seri': 120, 'may': 121, 'need': 122, 'fan': 123, 'bit': 124, 'script': 125, 'beauti': 126, 'person': 127, 'becom': 128, 'without': 129, 'must': 130, 'alway': 131, 'friend': 132, 'tell': 133, 'reason': 134, 'saw': 135, 'last': 136, 'final': 137, 'kid': 138, 'almost': 139, 'put': 140, 'least': 141, 'sure': 142, 'done': 143, 'whole': 144, 'place': 145, 'complet': 146, 'kind': 147, 'differ': 148, 'expect': 149, 'shot': 150, 'far': 151, 'mean': 152, 'anyth': 153, 'book': 154, 'laugh': 155, 'might': 156, 'name': 157, 'sinc': 158, 'begin': 159, '2': 160, 'probabl': 161, 'woman': 162, 'help': 163, 'entertain': 164, 'let': 165, 'screen': 166, 'call': 167, 'tv': 168, 'moment': 169, 'away': 170, 'read': 171, 'yet': 172, 'rather': 173, 'worst': 174, 'run': 175, 'fun': 176, 'lead': 177, 'hard': 178, 'audienc': 179, 'idea': 180, 'anyon': 181, 'episod': 182, 'american': 183, 'found': 184, 'appear': 185, 'bore': 186, 'especi': 187, 'although': 188, 'hope': 189, 'keep': 190, 'cours': 191, 'anim': 192, 'job': 193, 'goe': 194, 'move': 195, 'sens': 196, 'version': 197, 'dvd': 198, 'war': 199, 'money': 200, 'someon': 201, 'mind': 202, 'mayb': 203, 'problem': 204, 'true': 205, 'hous': 206, 'everyth': 207, 'nice': 208, 'second': 209, 'rate': 210, 'three': 211, 'night': 212, 'follow': 213, 'face': 214, 'recommend': 215, 'product': 216, 'main': 217, 'worth': 218, 'leav': 219, 'human': 220, 'special': 221, 'excel': 222, 'togeth': 223, 'wast': 224, 'everyon': 225, 'sound': 226, 'john': 227, 'hand': 228, '1': 229, 'father': 230, 'later': 231, 'eye': 232, 'said': 233, 'view': 234, 'instead': 235, 'review': 236, 'boy': 237, 'high': 238, 'hour': 239, 'miss': 240, 'talk': 241, 'classic': 242, 'wife': 243, 'understand': 244, 'left': 245, 'care': 246, 'black': 247, 'death': 248, 'open': 249, 'murder': 250, 'write': 251, 'half': 252, 'head': 253, 'rememb': 
254, 'chang': 255, 'viewer': 256, 'fight': 257, 'gener': 258, 'surpris': 259, 'includ': 260, 'short': 261, 'die': 262, 'fall': 263, 'less': 264, 'els': 265, 'entir': 266, 'piec': 267, 'involv': 268, 'pictur': 269, 'simpli': 270, 'top': 271, 'power': 272, 'home': 273, 'total': 274, 'usual': 275, 'budget': 276, 'attempt': 277, 'suppos': 278, 'releas': 279, 'hollywood': 280, 'terribl': 281, 'song': 282, 'men': 283, 'possibl': 284, 'featur': 285, 'portray': 286, 'disappoint': 287, 'poor': 288, '3': 289, 'coupl': 290, 'stupid': 291, 'camera': 292, 'dead': 293, 'wrong': 294, 'produc': 295, 'low': 296, 'either': 297, 'video': 298, 'aw': 299, 'definit': 300, 'except': 301, 'rest': 302, 'given': 303, 'absolut': 304, 'women': 305, 'lack': 306, 'word': 307, 'writer': 308, 'titl': 309, 'talent': 310, 'decid': 311, 'full': 312, 'perfect': 313, 'along': 314, 'style': 315, 'close': 316, 'truli': 317, 'school': 318, 'emot': 319, 'save': 320, 'sex': 321, 'age': 322, 'next': 323, 'bring': 324, 'mr': 325, 'case': 326, 'killer': 327, 'heart': 328, 'comment': 329, 'sort': 330, 'creat': 331, 'perhap': 332, 'came': 333, 'brother': 334, 'sever': 335, 'joke': 336, 'art': 337, 'dialogu': 338, 'game': 339, 'small': 340, 'base': 341, 'flick': 342, 'written': 343, 'sequenc': 344, 'meet': 345, 'earli': 346, 'often': 347, 'other': 348, 'mother': 349, 'develop': 350, 'humor': 351, 'actress': 352, 'consid': 353, 'dark': 354, 'guess': 355, 'amaz': 356, 'unfortun': 357, 'lost': 358, 'light': 359, 'exampl': 360, 'cinema': 361, 'drama': 362, 'ye': 363, 'white': 364, 'experi': 365, 'imagin': 366, 'mention': 367, 'stop': 368, 'natur': 369, 'forc': 370, 'manag': 371, 'felt': 372, 'cut': 373, 'present': 374, 'children': 375, 'fail': 376, 'son': 377, 'qualiti': 378, 'support': 379, 'car': 380, 'ask': 381, 'hit': 382, 'side': 383, 'voic': 384, 'extrem': 385, 'impress': 386, 'wors': 387, 'evil': 388, 'went': 389, 'stand': 390, 'certainli': 391, 'basic': 392, 'oh': 393, 'overal': 394, 'favorit': 395, 'horribl': 396, 'mysteri': 397, 'number': 398, 'type': 399, 'danc': 400, 'wait': 401, 'hero': 402, 'alreadi': 403, '5': 404, 'learn': 405, 'matter': 406, '4': 407, 'michael': 408, 'genr': 409, 'fine': 410, 'despit': 411, 'throughout': 412, 'walk': 413, 'success': 414, 'histori': 415, 'question': 416, 'zombi': 417, 'town': 418, 'realiz': 419, 'relationship': 420, 'child': 421, 'past': 422, 'daughter': 423, 'late': 424, 'b': 425, 'wish': 426, 'credit': 427, 'hate': 428, 'event': 429, 'theme': 430, 'touch': 431, 'citi': 432, 'today': 433, 'sometim': 434, 'behind': 435, 'god': 436, 'twist': 437, 'sit': 438, 'deal': 439, 'annoy': 440, 'stay': 441, 'abl': 442, 'rent': 443, 'pleas': 444, 'edit': 445, 'blood': 446, 'deserv': 447, 'comic': 448, 'anyway': 449, 'appar': 450, 'soon': 451, 'gave': 452, 'etc': 453, 'level': 454, 'slow': 455, 'chanc': 456, 'score': 457, 'bodi': 458, 'brilliant': 459, 'incred': 460, 'figur': 461, 'situat': 462, 'self': 463, 'major': 464, 'stuff': 465, 'decent': 466, 'element': 467, 'dream': 468, 'return': 469, 'obvious': 470, 'continu': 471, 'order': 472, 'pace': 473, 'ridicul': 474, 'happi': 475, 'group': 476, 'add': 477, 'highli': 478, 'thank': 479, 'ladi': 480, 'novel': 481, 'pain': 482, 'speak': 483, 'career': 484, 'shoot': 485, 'strang': 486, 'heard': 487, 'sad': 488, 'polic': 489, 'husband': 490, 'import': 491, 'break': 492, 'took': 493, 'cannot': 494, 'strong': 495, 'robert': 496, 'predict': 497, 'violenc': 498, 'hilari': 499, 'recent': 500, 'countri': 501, 'known': 502, 'particularli': 503, 'pick': 504, 
'documentari': 505, 'season': 506, 'critic': 507, 'jame': 508, 'compar': 509, 'alon': 510, 'obviou': 511, 'told': 512, 'state': 513, 'visual': 514, 'rock': 515, 'offer': 516, 'exist': 517, 'theater': 518, 'opinion': 519, 'gore': 520, 'crap': 521, 'hold': 522, 'result': 523, 'room': 524, 'realiti': 525, 'hear': 526, 'effort': 527, 'clich': 528, 'thriller': 529, 'caus': 530, 'sequel': 531, 'explain': 532, 'serious': 533, 'king': 534, 'local': 535, 'ago': 536, 'hell': 537, 'none': 538, 'note': 539, 'allow': 540, 'david': 541, 'sister': 542, 'simpl': 543, 'femal': 544, 'deliv': 545, 'ok': 546, 'class': 547, 'convinc': 548, 'check': 549, 'suspens': 550, 'win': 551, 'buy': 552, 'oscar': 553, 'huge': 554, 'valu': 555, 'sexual': 556, 'scari': 557, 'cool': 558, 'similar': 559, 'excit': 560, 'provid': 561, 'apart': 562, 'exactli': 563, 'shown': 564, 'avoid': 565, 'seriou': 566, 'english': 567, 'taken': 568, 'whose': 569, 'cinematographi': 570, 'shock': 571, 'polit': 572, 'spoiler': 573, 'offic': 574, 'across': 575, 'middl': 576, 'street': 577, 'pass': 578, 'messag': 579, 'somewhat': 580, 'silli': 581, 'charm': 582, 'modern': 583, 'filmmak': 584, 'confus': 585, 'form': 586, 'tale': 587, 'singl': 588, 'jack': 589, 'mostli': 590, 'attent': 591, 'william': 592, 'carri': 593, 'sing': 594, 'subject': 595, 'five': 596, 'richard': 597, 'prove': 598, 'stage': 599, 'team': 600, 'unlik': 601, 'cop': 602, 'georg': 603, 'monster': 604, 'televis': 605, 'earth': 606, 'cover': 607, 'villain': 608, 'pay': 609, 'marri': 610, 'toward': 611, 'build': 612, 'pull': 613, 'parent': 614, 'due': 615, 'fill': 616, 'respect': 617, 'four': 618, 'dialog': 619, 'remind': 620, 'futur': 621, 'weak': 622, 'typic': 623, '7': 624, 'cheap': 625, 'intellig': 626, 'atmospher': 627, 'british': 628, 'clearli': 629, '80': 630, 'non': 631, 'dog': 632, 'paul': 633, 'fast': 634, '8': 635, 'artist': 636, 'knew': 637, 'crime': 638, 'easili': 639, 'escap': 640, 'doubt': 641, 'adult': 642, 'detail': 643, 'date': 644, 'member': 645, 'fire': 646, 'romant': 647, 'drive': 648, 'gun': 649, 'straight': 650, 'fit': 651, 'beyond': 652, 'attack': 653, 'imag': 654, 'upon': 655, 'posit': 656, 'whether': 657, 'peter': 658, 'fantast': 659, 'aspect': 660, 'captur': 661, 'appreci': 662, 'ten': 663, 'plan': 664, 'discov': 665, 'remain': 666, 'near': 667, 'period': 668, 'realist': 669, 'air': 670, 'mark': 671, 'red': 672, 'dull': 673, 'adapt': 674, 'within': 675, 'lose': 676, 'spend': 677, 'color': 678, 'materi': 679, 'chase': 680, 'mari': 681, 'storylin': 682, 'forget': 683, 'bunch': 684, 'clear': 685, 'lee': 686, 'victim': 687, 'nearli': 688, 'box': 689, 'york': 690, 'inspir': 691, 'match': 692, 'mess': 693, 'finish': 694, 'standard': 695, 'easi': 696, 'truth': 697, 'suffer': 698, 'busi': 699, 'dramat': 700, 'bill': 701, 'space': 702, 'western': 703, 'e': 704, 'list': 705, 'battl': 706, 'notic': 707, 'de': 708, 'french': 709, 'ad': 710, '9': 711, 'tom': 712, 'larg': 713, 'among': 714, 'eventu': 715, 'accept': 716, 'train': 717, 'agre': 718, 'soundtrack': 719, 'spirit': 720, 'third': 721, 'teenag': 722, 'soldier': 723, 'adventur': 724, 'drug': 725, 'suggest': 726, 'sorri': 727, 'famou': 728, 'normal': 729, 'cri': 730, 'babi': 731, 'ultim': 732, 'troubl': 733, 'contain': 734, 'certain': 735, 'cultur': 736, 'romanc': 737, 'rare': 738, 'lame': 739, 'somehow': 740, 'mix': 741, 'disney': 742, 'gone': 743, 'cartoon': 744, 'student': 745, 'reveal': 746, 'fear': 747, 'kept': 748, 'suck': 749, 'attract': 750, 'appeal': 751, 'premis': 752, 'greatest': 753, 'secret': 754, 
'design': 755, 'shame': 756, 'throw': 757, 'copi': 758, 'scare': 759, 'wit': 760, 'admit': 761, 'america': 762, 'relat': 763, 'brought': 764, 'particular': 765, 'screenplay': 766, 'whatev': 767, 'pure': 768, '70': 769, 'averag': 770, 'harri': 771, 'master': 772, 'describ': 773, 'treat': 774, 'male': 775, '20': 776, 'fantasi': 777, 'issu': 778, 'warn': 779, 'inde': 780, 'forward': 781, 'background': 782, 'project': 783, 'free': 784, 'memor': 785, 'japanes': 786, 'poorli': 787, 'award': 788, 'locat': 789, 'amus': 790, 'potenti': 791, 'struggl': 792, 'magic': 793, 'weird': 794, 'societi': 795, 'okay': 796, 'doctor': 797, 'accent': 798, 'imdb': 799, 'hot': 800, 'water': 801, 'dr': 802, 'alien': 803, 'express': 804, '30': 805, 'odd': 806, 'crazi': 807, 'choic': 808, 'fiction': 809, 'studio': 810, 'becam': 811, 'control': 812, 'masterpiec': 813, 'difficult': 814, 'fli': 815, 'joe': 816, 'scream': 817, 'costum': 818, 'lover': 819, 'uniqu': 820, 'refer': 821, 'remak': 822, 'girlfriend': 823, 'vampir': 824, 'prison': 825, 'execut': 826, 'wear': 827, 'jump': 828, 'wood': 829, 'unless': 830, 'creepi': 831, 'cheesi': 832, 'superb': 833, 'otherwis': 834, 'parti': 835, 'ghost': 836, 'roll': 837, 'public': 838, 'mad': 839, 'depict': 840, 'earlier': 841, 'badli': 842, 'moral': 843, 'week': 844, 'jane': 845, 'fi': 846, 'dumb': 847, 'grow': 848, 'flaw': 849, 'sci': 850, 'deep': 851, 'maker': 852, 'cat': 853, 'footag': 854, 'connect': 855, 'older': 856, 'plenti': 857, 'bother': 858, 'outsid': 859, 'stick': 860, 'gay': 861, 'catch': 862, 'co': 863, 'plu': 864, 'popular': 865, 'equal': 866, 'social': 867, 'disturb': 868, 'quickli': 869, 'perfectli': 870, 'dress': 871, '90': 872, 'era': 873, 'mistak': 874, 'lie': 875, 'previou': 876, 'ride': 877, 'combin': 878, 'concept': 879, 'band': 880, 'surviv': 881, 'answer': 882, 'rich': 883, 'front': 884, 'christma': 885, 'sweet': 886, 'insid': 887, 'bare': 888, 'eat': 889, 'concern': 890, 'ben': 891, 'beat': 892, 'listen': 893, 'c': 894, 'serv': 895, 'term': 896, 'la': 897, 'german': 898, 'meant': 899, 'hardli': 900, 'stereotyp': 901, 'law': 902, 'innoc': 903, 'desper': 904, 'promis': 905, 'memori': 906, 'intent': 907, 'cute': 908, 'variou': 909, 'inform': 910, 'steal': 911, 'brain': 912, 'post': 913, 'tone': 914, 'island': 915, 'amount': 916, 'nuditi': 917, 'compani': 918, 'track': 919, 'claim': 920, 'store': 921, 'flat': 922, 'hair': 923, '50': 924, 'univers': 925, 'land': 926, 'kick': 927, 'fairli': 928, 'danger': 929, 'scott': 930, 'player': 931, 'plain': 932, 'step': 933, 'crew': 934, 'toni': 935, 'share': 936, 'tast': 937, 'centuri': 938, 'engag': 939, 'achiev': 940, 'cold': 941, 'travel': 942, 'record': 943, 'suit': 944, 'rip': 945, 'manner': 946, 'sadli': 947, 'wrote': 948, 'tension': 949, 'spot': 950, 'fascin': 951, 'intens': 952, 'familiar': 953, 'remark': 954, 'depth': 955, 'burn': 956, 'histor': 957, 'destroy': 958, 'sleep': 959, 'purpos': 960, 'languag': 961, 'ignor': 962, 'ruin': 963, 'delight': 964, 'italian': 965, 'unbeliev': 966, 'collect': 967, 'soul': 968, 'abil': 969, 'clever': 970, 'detect': 971, 'violent': 972, 'rape': 973, 'reach': 974, 'door': 975, 'scienc': 976, 'trash': 977, 'liter': 978, 'caught': 979, 'commun': 980, 'reveng': 981, 'creatur': 982, 'trip': 983, 'approach': 984, 'fashion': 985, 'intrigu': 986, 'skill': 987, 'paint': 988, 'introduc': 989, 'complex': 990, 'channel': 991, 'camp': 992, 'christian': 993, 'hole': 994, 'extra': 995, 'mental': 996, 'ann': 997, 'limit': 998, 'immedi': 999, '6': 1000, 'comput': 1001, 'million': 1002, 
'slightli': 1003, 'mere': 1004, 'conclus': 1005, 'slasher': 1006, 'imposs': 1007, 'suddenli': 1008, 'neither': 1009, 'teen': 1010, 'crimin': 1011, 'nation': 1012, 'physic': 1013, 'spent': 1014, 'respons': 1015, 'planet': 1016, 'fake': 1017, 'receiv': 1018, 'blue': 1019, 'sick': 1020, 'bizarr': 1021, 'embarrass': 1022, 'indian': 1023, 'ring': 1024, '15': 1025, 'pop': 1026, 'drop': 1027, 'drag': 1028, 'haunt': 1029, 'suspect': 1030, 'pointless': 1031, 'edg': 1032, 'search': 1033, 'handl': 1034, 'common': 1035, 'biggest': 1036, 'arriv': 1037, 'faith': 1038, 'hurt': 1039, 'technic': 1040, 'angel': 1041, 'genuin': 1042, 'dad': 1043, 'solid': 1044, 'f': 1045, 'awesom': 1046, 'focu': 1047, 'colleg': 1048, 'van': 1049, 'former': 1050, 'count': 1051, 'tear': 1052, 'heavi': 1053, 'wall': 1054, 'rais': 1055, 'visit': 1056, 'younger': 1057, 'laughabl': 1058, 'sign': 1059, 'excus': 1060, 'fair': 1061, 'cult': 1062, 'key': 1063, 'tough': 1064, 'motion': 1065, 'super': 1066, 'desir': 1067, 'addit': 1068, 'stun': 1069, 'exploit': 1070, 'cloth': 1071, 'smith': 1072, 'tortur': 1073, 'race': 1074, 'davi': 1075, 'cross': 1076, 'author': 1077, 'jim': 1078, 'minor': 1079, 'consist': 1080, 'compel': 1081, 'focus': 1082, 'chemistri': 1083, 'commit': 1084, 'pathet': 1085, 'park': 1086, 'obsess': 1087, 'tradit': 1088, 'frank': 1089, 'grade': 1090, 'asid': 1091, '60': 1092, 'brutal': 1093, 'steve': 1094, 'somewher': 1095, 'depress': 1096, 'rule': 1097, 'opportun': 1098, 'grant': 1099, 'u': 1100, 'explor': 1101, 'honest': 1102, 'besid': 1103, 'anti': 1104, 'dub': 1105, 'intend': 1106, 'trailer': 1107, 'bar': 1108, 'regard': 1109, 'west': 1110, 'longer': 1111, 'scientist': 1112, 'decad': 1113, 'judg': 1114, 'silent': 1115, 'armi': 1116, 'creativ': 1117, 'wild': 1118, 'g': 1119, 'south': 1120, 'stewart': 1121, 'draw': 1122, 'road': 1123, 'govern': 1124, 'ex': 1125, 'boss': 1126, 'practic': 1127, 'club': 1128, 'festiv': 1129, 'motiv': 1130, 'gang': 1131, 'surprisingli': 1132, 'redeem': 1133, 'green': 1134, 'page': 1135, 'london': 1136, 'machin': 1137, 'display': 1138, 'idiot': 1139, 'aliv': 1140, 'militari': 1141, 'thrill': 1142, 'repeat': 1143, 'nobodi': 1144, 'yeah': 1145, '100': 1146, 'folk': 1147, '40': 1148, 'garbag': 1149, 'journey': 1150, 'smile': 1151, 'ground': 1152, 'tire': 1153, 'mood': 1154, 'bought': 1155, 'cost': 1156, 'sam': 1157, 'stone': 1158, 'mouth': 1159, 'noir': 1160, 'terrif': 1161, 'agent': 1162, 'requir': 1163, 'utterli': 1164, 'sexi': 1165, 'honestli': 1166, 'area': 1167, 'report': 1168, 'geniu': 1169, 'enter': 1170, 'glad': 1171, 'humour': 1172, 'investig': 1173, 'serial': 1174, 'occasion': 1175, 'passion': 1176, 'narr': 1177, 'marriag': 1178, 'climax': 1179, 'studi': 1180, 'industri': 1181, 'ship': 1182, 'center': 1183, 'demon': 1184, 'charli': 1185, 'nowher': 1186, 'hors': 1187, 'bear': 1188, 'loos': 1189, 'wow': 1190, 'hang': 1191, 'graphic': 1192, 'admir': 1193, 'giant': 1194, 'send': 1195, 'damn': 1196, 'loud': 1197, 'profession': 1198, 'subtl': 1199, 'rel': 1200, 'nake': 1201, 'blow': 1202, 'bottom': 1203, 'insult': 1204, 'batman': 1205, 'kelli': 1206, 'r': 1207, 'doubl': 1208, 'boyfriend': 1209, 'initi': 1210, 'frame': 1211, 'gem': 1212, 'opera': 1213, 'affect': 1214, 'challeng': 1215, 'drawn': 1216, 'cinemat': 1217, 'church': 1218, 'evid': 1219, 'nightmar': 1220, 'j': 1221, 'seek': 1222, 'fulli': 1223, 'l': 1224, 'arm': 1225, 'conflict': 1226, 'essenti': 1227, 'wind': 1228, 'henri': 1229, 'christoph': 1230, 'grace': 1231, 'assum': 1232, 'narrat': 1233, 'witch': 1234, 'push': 1235, 
'hunt': 1236, 'wise': 1237, 'chri': 1238, 'repres': 1239, 'month': 1240, 'nomin': 1241, 'avail': 1242, 'sceneri': 1243, 'affair': 1244, 'hide': 1245, 'smart': 1246, 'justic': 1247, 'thu': 1248, 'bond': 1249, 'interview': 1250, 'flashback': 1251, 'outstand': 1252, 'constantli': 1253, 'presenc': 1254, 'satisfi': 1255, 'central': 1256, 'bed': 1257, 'iron': 1258, 'sell': 1259, 'content': 1260, 'everybodi': 1261, 'gag': 1262, 'slowli': 1263, 'hotel': 1264, 'hire': 1265, 'system': 1266, 'adam': 1267, 'individu': 1268, 'charl': 1269, 'thrown': 1270, 'hey': 1271, 'allen': 1272, 'mediocr': 1273, 'jone': 1274, 'lesson': 1275, 'billi': 1276, 'ray': 1277, 'cameo': 1278, 'photographi': 1279, 'fellow': 1280, 'pari': 1281, 'strike': 1282, 'rise': 1283, 'absurd': 1284, 'brief': 1285, 'independ': 1286, 'neg': 1287, 'impact': 1288, 'phone': 1289, 'model': 1290, 'born': 1291, 'ill': 1292, 'spoil': 1293, 'angl': 1294, 'fresh': 1295, 'likabl': 1296, 'abus': 1297, 'discuss': 1298, 'hill': 1299, 'ahead': 1300, 'sight': 1301, 'photograph': 1302, 'sent': 1303, 'logic': 1304, 'occur': 1305, 'blame': 1306, 'shine': 1307, 'mainli': 1308, 'bruce': 1309, 'forev': 1310, 'commerci': 1311, 'skip': 1312, 'held': 1313, 'surround': 1314, 'segment': 1315, 'teacher': 1316, 'blond': 1317, 'zero': 1318, 'trap': 1319, 'satir': 1320, 'summer': 1321, 'resembl': 1322, 'queen': 1323, 'six': 1324, 'ball': 1325, 'fool': 1326, 'twice': 1327, 'sub': 1328, 'tragedi': 1329, 'reaction': 1330, 'pack': 1331, 'bomb': 1332, 'will': 1333, 'protagonist': 1334, 'hospit': 1335, 'sport': 1336, 'mile': 1337, 'drink': 1338, 'trust': 1339, 'vote': 1340, 'mom': 1341, 'jerri': 1342, 'encount': 1343, 'plane': 1344, 'program': 1345, 'current': 1346, 'station': 1347, 'al': 1348, 'celebr': 1349, 'martin': 1350, 'choos': 1351, 'join': 1352, 'favourit': 1353, 'lord': 1354, 'tragic': 1355, 'round': 1356, 'field': 1357, 'robot': 1358, 'vision': 1359, 'jean': 1360, 'tie': 1361, 'arthur': 1362, 'fortun': 1363, 'random': 1364, 'roger': 1365, 'dread': 1366, 'psycholog': 1367, 'intern': 1368, 'epic': 1369, 'nonsens': 1370, 'prefer': 1371, 'improv': 1372, 'formula': 1373, 'pleasur': 1374, 'legend': 1375, 'highlight': 1376, '11': 1377, 'tape': 1378, 'dollar': 1379, 'porn': 1380, 'wide': 1381, 'object': 1382, 'fox': 1383, 'thin': 1384, 'gorgeou': 1385, 'ugli': 1386, 'buddi': 1387, 'influenc': 1388, 'prepar': 1389, 'nasti': 1390, 'ii': 1391, 'progress': 1392, 'supposedli': 1393, 'reflect': 1394, 'warm': 1395, 'youth': 1396, 'worthi': 1397, 'unusu': 1398, 'length': 1399, 'latter': 1400, 'crash': 1401, 'superior': 1402, 'shop': 1403, 'seven': 1404, 'childhood': 1405, 'theatr': 1406, 'remot': 1407, 'funniest': 1408, 'disgust': 1409, 'pilot': 1410, 'paid': 1411, 'trick': 1412, 'fell': 1413, 'convers': 1414, 'castl': 1415, 'rob': 1416, 'establish': 1417, 'disast': 1418, 'gangster': 1419, 'suicid': 1420, 'disappear': 1421, 'heaven': 1422, 'ident': 1423, 'mine': 1424, 'forgotten': 1425, 'singer': 1426, 'decis': 1427, 'mask': 1428, 'tend': 1429, 'heroin': 1430, 'brian': 1431, 'partner': 1432, 'desert': 1433, 'alan': 1434, 'recogn': 1435, 'p': 1436, 'ms': 1437, 'thoroughli': 1438, 'stuck': 1439, 'sky': 1440, 'replac': 1441, 'accur': 1442, 'market': 1443, 'commentari': 1444, 'seemingli': 1445, 'andi': 1446, 'uncl': 1447, 'clue': 1448, 'eddi': 1449, 'danni': 1450, 'devil': 1451, 'jackson': 1452, 'that': 1453, 'pair': 1454, 'refus': 1455, 'therefor': 1456, 'ed': 1457, 'unit': 1458, 'accid': 1459, 'fault': 1460, 'river': 1461, 'fate': 1462, 'tune': 1463, 'afraid': 1464, 'russian': 
...(word_dict output truncated here: the mapping continues in the same fashion through 'macabr': 4999, assigning integer indices 1465 through 4999 to the remaining frequently occurring stemmed tokens)...}\n" ] ], [ [ "### Save `word_dict`\n\nLater on when we construct an endpoint which processes a submitted review we will need to make use of the `word_dict` which we have created. 
As such, we will save it to a file now for future use.", "_____no_output_____" ] ], [ [ "data_dir = '../data/pytorch' # The folder we will use for storing data\nif not os.path.exists(data_dir): # Make sure that the folder exists\n    os.makedirs(data_dir)", "_____no_output_____" ], [ "with open(os.path.join(data_dir, 'word_dict.pkl'), \"wb\") as f:\n    pickle.dump(word_dict, f)", "_____no_output_____" ] ], [ [ "### Transform the reviews\n\nNow that we have our word dictionary which allows us to transform the words appearing in the reviews into integers, it is time to make use of it and convert our reviews to their integer sequence representation, making sure to pad or truncate to a fixed length, which in our case is `500`.", "_____no_output_____" ] ], [ [ "def convert_and_pad(word_dict, sentence, pad=500):\n    NOWORD = 0 # We will use 0 to represent the 'no word' category\n    INFREQ = 1 # and we use 1 to represent the infrequent words, i.e., words not appearing in word_dict\n    \n    working_sentence = [NOWORD] * pad\n    \n    for word_index, word in enumerate(sentence[:pad]):\n        if word in word_dict:\n            working_sentence[word_index] = word_dict[word]\n        else:\n            working_sentence[word_index] = INFREQ\n    \n    return working_sentence, min(len(sentence), pad)\n\ndef convert_and_pad_data(word_dict, data, pad=500):\n    result = []\n    lengths = []\n    \n    for sentence in data:\n        converted, leng = convert_and_pad(word_dict, sentence, pad)\n        result.append(converted)\n        lengths.append(leng)\n    \n    return np.array(result), np.array(lengths)", "_____no_output_____" ], [ "train_X, train_X_len = convert_and_pad_data(word_dict, train_X)\ntest_X, test_X_len = convert_and_pad_data(word_dict, test_X)", "_____no_output_____" ] ], [ [ "As a quick check to make sure that things are working as intended, check to see what one of the reviews in the training set looks like after having been processed. Does this look reasonable? 
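Before looking at a real review, here is a toy illustration of what `convert_and_pad` does — a minimal sketch in which the two-word vocabulary is invented purely for demonstration and is not part of the project data:\n\n```python\ntoy_dict = {'movi': 2, 'great': 3}      # hypothetical mini vocabulary (0 and 1 are reserved)\nsentence = ['movi', 'great', 'unseen']  # 'unseen' does not appear in the vocabulary\nprint(convert_and_pad(toy_dict, sentence, pad=10))\n# ([2, 3, 1, 0, 0, 0, 0, 0, 0, 0], 3): known words map to their indices, the unknown\n# word maps to INFREQ (1), and the remainder is padded with NOWORD (0).\n```\n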
What is the length of a review in the training set?", "_____no_output_____" ] ], [ [ "# Use this cell to examine one of the processed reviews to make sure everything is working as intended.\nprint(train_X[100])\nprint()\nprint(len(train_X[100]))", "[ 38 21 1 947 172 68 11 144 35 371 11 124 267 1\n 47 304 1046 123 2433 2216 761 841 2 17 7 81 32 26\n 9 346 39 729 17 796 831 354 794 37 729 1 399 355\n 1 1737 60 188 11 121 440 30 356 947 172 11 22 171\n 236 226 7 287 761 426 541 1 727 1837 4974 124 922 796\n 243 9 58 99 60 53 2 106 7 2538 1 1 856 542\n 399 503 18 346 1431 25 33 2539 1 593 4 4 6 223\n 12 18 141 1327 59 30 32 108 4 319 1 286 241 973\n 211 78 62 346 593 1330 207 30 313 152 203 763 124 142\n 2433 2216 33 1174 327 68 10 11 99 26 2 2216 47 770\n 218 11 30 56 47 281 2216 2 32 1712 2 4 95 26\n 60 1 2382 1364 5 32 7 19 152 9 2 563 1948 908\n 152 2216 354 4445 2318 557 6 172 27 1930 84 208 14 29\n 611 1 1099 950 34 50 974 168 1 161 9 2216 33 2\n 27 20 7 2216 32 2 34 683 33 9 7 32 278 210\n 426 36 38 16 38 129 47 22 47 2 979 1 251 1378\n 1 189 251 146 236 231 50 936 99 2 99 66 16 558\n 66 39 1687 1946 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]\n\n500\n" ] ], [ [ "**Question:** In the cells above we use the `preprocess_data` and `convert_and_pad_data` methods to process both the training and testing set. Might this be a problem? Why or why not?", "_____no_output_____" ] ], [ [ "**Answer:** Passing both the training and testing sets through these functions has advantages and disadvantages. For instance, preprocessing removes noise from the datasets, which makes the model more accurate and the training process more computationally efficient, since it does not have to deal with useless data. However, truncating the reviews might hurt the model's understanding of a review's sentiment, since the words that indicate the sentiment might appear at the very end of the review.", "_____no_output_____" ] ], [ [ "## Step 3: Upload the data to S3\n\nAs in the XGBoost notebook, we will need to upload the training dataset to S3 in order for our training code to access it. For now we will save it locally, and we will upload it to S3 later on.\n\n### Save the processed training dataset locally\n\nIt is important to note the format of the data that we are saving, as we will need to know it when we write the training code. 
In our case, each row of the dataset has the form `label`, `length`, `review[500]`, where `review[500]` is a sequence of `500` integers representing the words in the review.", "_____no_output_____" ] ], [ [ "import pandas as pd\n \npd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X_len), pd.DataFrame(train_X)], axis=1) \\\n        .to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)", "_____no_output_____" ] ], [ [ "### Uploading the training data\n\nNext, we need to upload the training data to the SageMaker default S3 bucket so that we can provide access to it while training our model.", "_____no_output_____" ] ], [ [ "import sagemaker\n\nsagemaker_session = sagemaker.Session()\n\nbucket = sagemaker_session.default_bucket()\nprefix = 'sagemaker/sentiment_rnn'\n\nrole = sagemaker.get_execution_role()", "_____no_output_____" ], [ "input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)", "_____no_output_____" ] ], [ [ "**NOTE:** The cell above uploads the entire contents of our data directory. This includes the `word_dict.pkl` file. This is fortunate, as we will need it later on when we create an endpoint that accepts an arbitrary review. For now, we will just take note of the fact that it resides in the data directory (and so also in the S3 training bucket) and that we will need to make sure it gets saved in the model directory.", "_____no_output_____" ] ], [ [ "## Step 4: Build and Train the PyTorch Model\n\nIn the XGBoost notebook we discussed what a model is in the SageMaker framework. In particular, a model comprises three objects:\n\n - Model Artifacts,\n - Training Code, and\n - Inference Code,\n\nwhich interact with one another. In the XGBoost example we used training and inference code that was provided by Amazon. Here we will still be using containers provided by Amazon, with the added benefit of being able to include our own custom code.\n\nWe will start by implementing our own neural network in PyTorch along with a training script. For the purposes of this project we have provided the necessary model object in the `model.py` file, inside of the `train` folder. 
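Looking ahead, the training script will also need to read the `train.csv` we just uploaded back into tensors. Here is a rough sketch of that step (the function name and defaults are assumptions for illustration — the provided `train/train.py` implements this in its own way):\n\n```python\nimport os\nimport pandas as pd\nimport torch\nimport torch.utils.data\n\ndef build_train_loader(data_dir, batch_size=512):\n    # Each row of train.csv is: label, length, then 500 integer word ids (no header).\n    df = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None, names=None)\n    train_y = torch.from_numpy(df[[0]].values).float().squeeze()    # sentiment labels\n    train_X = torch.from_numpy(df.drop([0], axis=1).values).long()  # length + padded review\n    dataset = torch.utils.data.TensorDataset(train_X, train_y)\n    return torch.utils.data.DataLoader(dataset, batch_size=batch_size)\n```\n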
You can see the provided model implementation by running the cell below.", "_____no_output_____" ] ], [ [ "!pygmentize train/model.py", "import torch.nn as nn\n\nclass LSTMClassifier(nn.Module):\n    \"\"\"\n    This is the simple RNN model we will be using to perform Sentiment Analysis.\n    \"\"\"\n\n    def __init__(self, embedding_dim, hidden_dim, vocab_size):\n        \"\"\"\n        Initialize the model by setting up the various layers.\n        \"\"\"\n        super(LSTMClassifier, self).__init__()\n\n        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)\n        self.lstm = nn.LSTM(embedding_dim, hidden_dim)\n        self.dense = nn.Linear(in_features=hidden_dim, out_features=1)\n        self.sig = nn.Sigmoid()\n        \n        self.word_dict = None\n\n    def forward(self, x):\n        \"\"\"\n        Perform a forward pass of our model on some input.\n        \"\"\"\n        x = x.t()\n        lengths = x[0,:]\n        reviews = x[1:,:]\n        embeds = self.embedding(reviews)\n        lstm_out, _ = self.lstm(embeds)\n        out = self.dense(lstm_out)\n        out = out[lengths - 1, range(len(lengths))]\n        return self.sig(out.squeeze())\n" ] ], [ [ "The important takeaway from the implementation provided is that there are three parameters that we may wish to tweak to improve the performance of our model: the embedding dimension, the hidden dimension, and the size of the vocabulary. We will likely want to make these parameters configurable in the training script so that, if we wish to modify them, we do not need to modify the script itself. We will see how to do this later on. To start, we will write some of the training code in the notebook so that we can more easily diagnose any issues that arise.\n\nFirst we will load a small portion of the training dataset to use as a sample. It would be very time-consuming to try to train the model completely in the notebook, as we do not have access to a GPU and the compute instance that we are using is not particularly powerful. 
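To put a rough number on that, here is a quick back-of-the-envelope size check — a sketch assuming the `embedding_dim=32`, `hidden_dim=100`, `vocab_size=5000` configuration used for the sample run below:\n\n```python\nfrom train.model import LSTMClassifier\n\n# Count the trainable weights of the model at the sample-run configuration.\ntmp_model = LSTMClassifier(32, 100, 5000)\nprint(sum(p.numel() for p in tmp_model.parameters()))\n# roughly 2.1e5 parameters, dominated by the 5000 x 32 embedding table\n```\n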
However, we can work on a small bit of the data to get a feel for how our training script is behaving.", "_____no_output_____" ] ], [ [ "import torch\nimport torch.utils.data\n\n# Read in only the first 250 rows\ntrain_sample = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None, names=None, nrows=250)\n\n# Turn the input pandas dataframe into tensors\ntrain_sample_y = torch.from_numpy(train_sample[[0]].values).float().squeeze()\ntrain_sample_X = torch.from_numpy(train_sample.drop([0], axis=1).values).long()\n\n# Build the dataset\ntrain_sample_ds = torch.utils.data.TensorDataset(train_sample_X, train_sample_y)\n# Build the dataloader\ntrain_sample_dl = torch.utils.data.DataLoader(train_sample_ds, batch_size=50)", "_____no_output_____" ] ], [ [ "### (TODO) Writing the training method\n\nNext we need to write the training code itself. This should be very similar to training methods that you have written before to train PyTorch models. We will leave any difficult aspects such as model saving / loading and parameter loading until a little later.", "_____no_output_____" ] ], [ [ "def train(model, train_loader, epochs, optimizer, loss_fn, device):\n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n model.zero_grad()\n output = model(batch_X)\n loss = loss_fn(output, batch_y)\n loss.backward()\n optimizer.step()\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))", "_____no_output_____" ] ], [ [ "Supposing we have the training method above, we will test that it is working by writing a bit of code in the notebook that executes our training method on the small sample training set that we loaded earlier. The reason for doing this in the notebook is so that we have an opportunity to fix any errors that arise early when they are easier to diagnose.", "_____no_output_____" ] ], [ [ "import torch.optim as optim\nfrom train.model import LSTMClassifier\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = LSTMClassifier(32, 100, 5000).to(device)\noptimizer = optim.Adam(model.parameters())\nloss_fn = torch.nn.BCELoss()\n\ntrain(model, train_sample_dl, 5, optimizer, loss_fn, device)", "Epoch: 1, BCELoss: 0.6950652122497558\nEpoch: 2, BCELoss: 0.6860353350639343\nEpoch: 3, BCELoss: 0.678533959388733\nEpoch: 4, BCELoss: 0.6703558683395385\nEpoch: 5, BCELoss: 0.6605463266372681\n" ] ], [ [ "In order to construct a PyTorch model using SageMaker we must provide SageMaker with a training script. We may optionally include a directory which will be copied to the container and from which our training code will be run. When the training container is executed it will check the uploaded directory (if there is one) for a `requirements.txt` file and install any required Python libraries, after which the training script will be run.", "_____no_output_____" ], [ "### (TODO) Training the model\n\nWhen a PyTorch model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained. Inside of the `train` directory is a file called `train.py` which has been provided and which contains most of the necessary code to train our model. 
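For orientation, the overall shape of such an entry point is sketched below (a sketch only: the authoritative version is the provided file, and the `SM_*` environment variables are the standard ones that SageMaker sets inside the training container):\n\n```python\nimport argparse\nimport os\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # Hyperparameters arrive as command line arguments\n    parser.add_argument('--epochs', type=int, default=10)\n    parser.add_argument('--hidden_dim', type=int, default=100)\n    # SageMaker tells the container where the data lives and where to save the model\n    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n    parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])\n    args = parser.parse_args()\n```\n\n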
The only thing that is missing is the implementation of the `train()` method which you wrote earlier in this notebook.\n\n**TODO**: Copy the `train()` method written above and paste it into the `train/train.py` file where required.\n\nThe way that SageMaker passes hyperparameters to the training script is by way of arguments. These arguments can then be parsed and used in the training script. To see how this is done, take a look at the provided `train/train.py` file.", "_____no_output_____" ] ], [ [ "from sagemaker.pytorch import PyTorch\n\nestimator = PyTorch(entry_point=\"train.py\",\n                    source_dir=\"train\",\n                    role=role,\n                    framework_version='0.4.0',\n                    train_instance_count=1,\n                    train_instance_type='ml.p2.xlarge',\n                    hyperparameters={\n                        'epochs': 9,\n                        'hidden_dim': 200,\n                    })", "_____no_output_____" ], [ "estimator.fit({'training': input_data})", "'create_image_uri' will be deprecated in favor of 'ImageURIProvider' class in SageMaker Python SDK v2.\n's3_input' class will be renamed to 'TrainingInput' in SageMaker Python SDK v2.\n'create_image_uri' will be deprecated in favor of 'ImageURIProvider' class in SageMaker Python SDK v2.\n" ] ], [ [ "## Step 5: Testing the model\n\nAs mentioned at the top of this notebook, we will be testing this model by first deploying it and then sending the testing data to the deployed endpoint. We will do this so that we can make sure that the deployed model is working correctly.\n\n## Step 6: Deploy the model for testing\n\nNow that we have trained our model, we would like to test it to see how it performs. Currently our model takes input of the form `review_length, review[500]` where `review[500]` is a sequence of `500` integers which describe the words present in the review, encoded using `word_dict`. Fortunately for us, SageMaker provides built-in inference code for models with simple inputs such as this.\n\nThere is one thing that we need to provide, however, and that is a function which loads the saved model. This function must be called `model_fn()` and takes as its only parameter a path to the directory where the model artifacts are stored. This function must also be present in the Python file which we specified as the entry point. In our case the model loading function has been provided and so no changes need to be made.\n\n**NOTE**: When the built-in inference code is run it must import the `model_fn()` method from the `train.py` file. This is why the training code is wrapped in a main guard (i.e., `if __name__ == '__main__':`).\n\nSince we don't need to change anything in the code that was uploaded during training, we can simply deploy the current model as-is.\n\n**NOTE:** When deploying a model you are asking SageMaker to launch a compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. 
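If you ever lose track of which endpoints are still live, one way to check (sketched here with the standard `boto3` client, nothing specific to this notebook) is:\n\n```python\nimport boto3\n\n# List every SageMaker endpoint in the account, together with its status\nfor ep in boto3.client('sagemaker').list_endpoints()['Endpoints']:\n    print(ep['EndpointName'], ep['EndpointStatus'])\n```\n\n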
This lifecycle is important to keep in mind, since the cost of a deployed endpoint depends on how long it has been running for.\n\nIn other words, **if you are no longer using a deployed endpoint, shut it down!**\n\n**TODO:** Deploy the trained model.", "_____no_output_____" ] ], [ [ "# TODO: Deploy the trained model\npredictor = estimator.deploy(initial_instance_count=1, instance_type='ml.p2.xlarge')", "Parameter image will be renamed to image_uri in SageMaker Python SDK v2.\n'create_image_uri' will be deprecated in favor of 'ImageURIProvider' class in SageMaker Python SDK v2.\n" ] ], [ [ "## Step 7 - Use the model for testing\n\nOnce deployed, we can read in the test data and send it off to our deployed model to get some results. Once we collect all of the results we can determine how accurate our model is.", "_____no_output_____" ] ], [ [ "test_X = pd.concat([pd.DataFrame(test_X_len), pd.DataFrame(test_X)], axis=1)", "_____no_output_____" ], [ "# We split the data into chunks and send each chunk separately, accumulating the results.\n\ndef predict(data, rows=512):\n    split_array = np.array_split(data, int(data.shape[0] / float(rows) + 1))\n    predictions = np.array([])\n    for array in split_array:\n        predictions = np.append(predictions, predictor.predict(array))\n    \n    return predictions", "_____no_output_____" ], [ "predictions = predict(test_X.values)\npredictions = [round(num) for num in predictions]", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\naccuracy_score(test_y, predictions)", "_____no_output_____" ] ], [ [ "**Question:** How does this model compare to the XGBoost model you created earlier? Why might these two models perform differently on this dataset? Which do *you* think is better for sentiment analysis?", "_____no_output_____" ], [ "**Answer:** Recall that the XGBoost model's accuracy was 0.85696, so the difference between the two models in terms of accuracy isn't that great. However, I'd prefer the LSTM implementation over the BoW implementation, since LSTMs keep track of previous inputs.", "_____no_output_____" ], [ "### (TODO) More testing\n\nWe now have a trained, deployed model to which we can send processed reviews and get back the predicted sentiment. However, ultimately we would like to be able to send our model an unprocessed review. That is, we would like to send the review itself as a string. For example, suppose we wish to send the following review to our model.", "_____no_output_____" ] ], [ [ "test_review = 'The simplest pleasures in life are the best, and this film is one of them. Combining a rather basic storyline of love and adventure this movie transcends the usual weekend fair with wit and unmitigated charm.'", "_____no_output_____" ] ], [ [ "The question we now need to answer is, how do we send this review to our model?\n\nRecall in the first section of this notebook we did a bunch of data processing to the IMDb dataset. In particular, we did two specific things to the provided reviews:\n - Removed any HTML tags and stemmed the input\n - Encoded the review as a sequence of integers using `word_dict`\n \nIn order to process the review we will need to repeat these two steps.\n\n**TODO**: Using the `review_to_words` and `convert_and_pad` methods from section one, convert `test_review` into a numpy array `test_data` suitable to send to our model. 
Remember that our model expects input of the form `review_length, review[500]`.", "_____no_output_____" ] ], [ [ "# TODO: Convert test_review into a form usable by the model and save the results in test_data\ntest_review_words = review_to_words(test_review)\ntest_review_words, length = convert_and_pad(word_dict, test_review_words)\ntest_data = np.array([[length] + test_review_words])", "_____no_output_____" ] ], [ [ "Now that we have processed the review, we can send the resulting array to our model to predict the sentiment of the review.", "_____no_output_____" ] ], [ [ "predictor.predict(test_data)", "_____no_output_____" ] ], [ [ "Since the return value of our model is close to `1`, we can be certain that the review we submitted is positive.", "_____no_output_____" ], [ "### Delete the endpoint\n\nOf course, just like in the XGBoost notebook, once we've deployed an endpoint it continues to run until we tell it to shut down. Since we are done using our endpoint for now, we can delete it.", "_____no_output_____" ] ], [ [ "estimator.delete_endpoint()", "estimator.delete_endpoint() will be deprecated in SageMaker Python SDK v2. Please use the delete_endpoint() function on your predictor instead.\n" ] ], [ [ "## Step 6 (again) - Deploy the model for the web app\n\nNow that we know that our model is working, it's time to create some custom inference code so that we can send the model a review which has not been processed and have it determine the sentiment of the review.\n\nAs we saw above, by default the estimator which we created, when deployed, will use the entry script and directory which we provided when creating the model. However, since we now wish to accept a string as input and our model expects a processed review, we need to write some custom inference code.\n\nWe will store the code that we write in the `serve` directory. Provided in this directory is the `model.py` file that we used to construct our model, a `utils.py` file which contains the `review_to_words` and `convert_and_pad` pre-processing functions which we used during the initial data processing, and `predict.py`, the file which will contain our custom inference code. Note also that `requirements.txt` is present which will tell SageMaker what Python libraries are required by our custom inference code.\n\nWhen deploying a PyTorch model in SageMaker, you are expected to provide four functions which the SageMaker inference container will use.\n - `model_fn`: This function is the same function that we used in the training script and it tells SageMaker how to load our model.\n - `input_fn`: This function receives the raw serialized input that has been sent to the model's endpoint and its job is to de-serialize and make the input available for the inference code.\n - `output_fn`: This function takes the output of the inference code and its job is to serialize this output and return it to the caller of the model's endpoint.\n - `predict_fn`: The heart of the inference script, this is where the actual prediction is done and is the function which you will need to complete.\n\nFor the simple website that we are constructing during this project, the `input_fn` and `output_fn` methods are relatively straightforward. We only require being able to accept a string as input and we expect to return a single value as output. 
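In the simplest case they can be as small as the sketch below (a simplified sketch of the idea; the provided file, shown next, also validates the content type):\n\n```python\ndef input_fn(serialized_input_data, content_type):\n    # The raw request body arrives as bytes; decode it into a plain string\n    return serialized_input_data.decode('utf-8')\n\ndef output_fn(prediction_output, accept):\n    # Return the single numeric result as plain text\n    return str(prediction_output)\n```\n\n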
You might imagine though that in a more complex application the input or output may be image data or some other binary data which would require some effort to serialize.\n\n### (TODO) Writing inference code\n\nBefore writing our custom inference code, we will begin by taking a look at the code which has been provided.", "_____no_output_____" ] ], [ [ "!pygmentize serve/predict.py", "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36margparse\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mjson\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mos\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mpickle\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36msys\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36msagemaker_containers\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mpandas\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mpd\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mnumpy\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mnp\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch\u001b[39;49;00m\u001b[04m\u001b[36m.\u001b[39;49;00m\u001b[04m\u001b[36mnn\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mnn\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch\u001b[39;49;00m\u001b[04m\u001b[36m.\u001b[39;49;00m\u001b[04m\u001b[36moptim\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36moptim\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch\u001b[39;49;00m\u001b[04m\u001b[36m.\u001b[39;49;00m\u001b[04m\u001b[36mutils\u001b[39;49;00m\u001b[04m\u001b[36m.\u001b[39;49;00m\u001b[04m\u001b[36mdata\u001b[39;49;00m\n\n\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mmodel\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m LSTMClassifier\n\n\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mutils\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m review_to_words, convert_and_pad\n\n\u001b[34mdef\u001b[39;49;00m \u001b[32mmodel_fn\u001b[39;49;00m(model_dir):\n \u001b[33m\"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\u001b[39;49;00m\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mLoading model.\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\n\n \u001b[37m# First, load the parameters used to create the model.\u001b[39;49;00m\n model_info = {}\n model_info_path = os.path.join(model_dir, \u001b[33m'\u001b[39;49;00m\u001b[33mmodel_info.pth\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(model_info_path, \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\n model_info = torch.load(f)\n\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mmodel_info: \u001b[39;49;00m\u001b[33m{}\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m.format(model_info))\n\n \u001b[37m# Determine the device and construct the model.\u001b[39;49;00m\n device = torch.device(\u001b[33m\"\u001b[39;49;00m\u001b[33mcuda\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m \u001b[34mif\u001b[39;49;00m torch.cuda.is_available() \u001b[34melse\u001b[39;49;00m \u001b[33m\"\u001b[39;49;00m\u001b[33mcpu\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\n model = 
LSTMClassifier(model_info[\u001b[33m'\u001b[39;49;00m\u001b[33membedding_dim\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m], model_info[\u001b[33m'\u001b[39;49;00m\u001b[33mhidden_dim\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m], model_info[\u001b[33m'\u001b[39;49;00m\u001b[33mvocab_size\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m])\n\n \u001b[37m# Load the store model parameters.\u001b[39;49;00m\n model_path = os.path.join(model_dir, \u001b[33m'\u001b[39;49;00m\u001b[33mmodel.pth\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(model_path, \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\n model.load_state_dict(torch.load(f))\n\n \u001b[37m# Load the saved word_dict.\u001b[39;49;00m\n word_dict_path = os.path.join(model_dir, \u001b[33m'\u001b[39;49;00m\u001b[33mword_dict.pkl\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(word_dict_path, \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mDone loading model.\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\n \u001b[34mreturn\u001b[39;49;00m model\n\n\u001b[34mdef\u001b[39;49;00m \u001b[32minput_fn\u001b[39;49;00m(serialized_input_data, content_type):\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mDeserializing the input data.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n \u001b[34mif\u001b[39;49;00m content_type == \u001b[33m'\u001b[39;49;00m\u001b[33mtext/plain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m:\n data = serialized_input_data.decode(\u001b[33m'\u001b[39;49;00m\u001b[33mutf-8\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n \u001b[34mreturn\u001b[39;49;00m data\n \u001b[34mraise\u001b[39;49;00m \u001b[36mException\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mRequested unsupported ContentType in content_type: \u001b[39;49;00m\u001b[33m'\u001b[39;49;00m + content_type)\n\n\u001b[34mdef\u001b[39;49;00m \u001b[32moutput_fn\u001b[39;49;00m(prediction_output, accept):\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mSerializing the generated output.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n \u001b[34mreturn\u001b[39;49;00m \u001b[36mstr\u001b[39;49;00m(prediction_output)\n\n\u001b[34mdef\u001b[39;49;00m \u001b[32mpredict_fn\u001b[39;49;00m(input_data, model):\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mInferring sentiment of input data.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n\n device = torch.device(\u001b[33m\"\u001b[39;49;00m\u001b[33mcuda\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m \u001b[34mif\u001b[39;49;00m torch.cuda.is_available() \u001b[34melse\u001b[39;49;00m \u001b[33m\"\u001b[39;49;00m\u001b[33mcpu\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\n \n \u001b[34mif\u001b[39;49;00m model.word_dict \u001b[35mis\u001b[39;49;00m \u001b[34mNone\u001b[39;49;00m:\n \u001b[34mraise\u001b[39;49;00m \u001b[36mException\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mModel has not been loaded properly, no word_dict.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n \n \u001b[37m# TODO: Process input_data so that it is ready to be sent to our model.\u001b[39;49;00m\n \u001b[37m# You should produce two variables:\u001b[39;49;00m\n \u001b[37m# data_X - A sequence of length 500 which 
represents the converted review\u001b[39;49;00m\n    \u001b[37m# data_len - The length of the review\u001b[39;49;00m\n\n    data_X, data_len = convert_and_pad(model.word_dict, review_to_words(input_data))\n\n    \u001b[37m# Using data_X and data_len we construct an appropriate input tensor. Remember\u001b[39;49;00m\n    \u001b[37m# that our model expects input data of the form 'len, review[500]'.\u001b[39;49;00m\n    data_pack = np.hstack((data_len, data_X))\n    data_pack = data_pack.reshape(\u001b[34m1\u001b[39;49;00m, -\u001b[34m1\u001b[39;49;00m)\n    \n    data = torch.from_numpy(data_pack)\n    data = data.to(device)\n\n    \u001b[37m# Make sure to put the model into evaluation mode\u001b[39;49;00m\n    model.eval()\n\n    \u001b[37m# TODO: Compute the result of applying the model to the input data. The variable `result` should\u001b[39;49;00m\n    \u001b[37m# be a numpy array which contains a single integer which is either 1 or 0\u001b[39;49;00m\n\n    \u001b[34mwith\u001b[39;49;00m torch.no_grad():\n        output = model.forward(data)\n        result = np.round(output.numpy())\n\n    \u001b[34mreturn\u001b[39;49;00m result\n" ] ], [ [ "As mentioned earlier, the `model_fn` method is the same as the one provided in the training code and the `input_fn` and `output_fn` methods are very simple, so your task will be to complete the `predict_fn` method. Make sure that you save the completed file as `predict.py` in the `serve` directory.\n\n**TODO**: Complete the `predict_fn()` method in the `serve/predict.py` file.", "_____no_output_____" ], [ "### Deploying the model\n\nNow that the custom inference code has been written, we will create and deploy our model. To begin with, we need to construct a new PyTorchModel object which points to the model artifacts created during training and also points to the inference code that we wish to use. Then we can call the deploy method to launch the deployment container.\n\n**NOTE**: The default behaviour for a deployed PyTorch model is to assume that any input passed to the predictor is a `numpy` array. In our case we want to send a string, so we need to construct a simple wrapper around the `RealTimePredictor` class to accommodate simple strings. In a more complicated situation you may want to provide a serialization object, for example if you wanted to send image data.", "_____no_output_____" ] ], [ [ "from sagemaker.predictor import RealTimePredictor\nfrom sagemaker.pytorch import PyTorchModel\n\nclass StringPredictor(RealTimePredictor):\n    def __init__(self, endpoint_name, sagemaker_session):\n        super(StringPredictor, self).__init__(endpoint_name, sagemaker_session, content_type='text/plain')\n\nmodel = PyTorchModel(model_data=estimator.model_data,\n                     role = role,\n                     framework_version='0.4.0',\n                     entry_point='predict.py',\n                     source_dir='serve',\n                     predictor_cls=StringPredictor)\npredictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')", "Parameter image will be renamed to image_uri in SageMaker Python SDK v2.\n'create_image_uri' will be deprecated in favor of 'ImageURIProvider' class in SageMaker Python SDK v2.\n" ] ], [ [ "### Testing the model\n\nNow that we have deployed our model with the custom inference code, we should test to see if everything is working. Here we test our model by loading the first `250` positive and negative reviews, sending them to the endpoint, and collecting the results. 
The reason for only sending some of the data is that the amount of time it takes for our model to process the input and then perform inference is quite long, so testing the entire data set would be prohibitive.", "_____no_output_____" ] ], [ [ "import glob\n\ndef test_reviews(data_dir='../data/aclImdb', stop=250):\n    \n    results = []\n    ground = []\n    \n    # We make sure to test both positive and negative reviews    \n    for sentiment in ['pos', 'neg']:\n        \n        path = os.path.join(data_dir, 'test', sentiment, '*.txt')\n        files = glob.glob(path)\n        \n        files_read = 0\n        \n        print('Starting ', sentiment, ' files')\n        \n        # Iterate through the files and send them to the predictor\n        for f in files:\n            with open(f) as review:\n                # First, we store the ground truth (was the review positive or negative)\n                if sentiment == 'pos':\n                    ground.append(1)\n                else:\n                    ground.append(0)\n                # Read in the review and convert to 'utf-8' for transmission via HTTP\n                review_input = review.read().encode('utf-8')\n                # Send the review to the predictor and store the results\n                results.append(float(predictor.predict(review_input)))\n                \n            # Sending reviews to our endpoint one at a time takes a while so we\n            # only send a small number of reviews\n            files_read += 1\n            if files_read == stop:\n                break\n            \n    return ground, results", "_____no_output_____" ], [ "ground, results = test_reviews()", "Starting  pos  files\nStarting  neg  files\n" ], [ "from sklearn.metrics import accuracy_score\naccuracy_score(ground, results)", "_____no_output_____" ] ], [ [ "As an additional test, we can try sending the `test_review` that we looked at earlier.", "_____no_output_____" ] ], [ [ "predictor.predict(test_review)", "_____no_output_____" ] ], [ [ "Now that we know our endpoint is working as expected, we can set up the web page that will interact with it. If you don't have time to finish the project now, make sure to skip down to the end of this notebook and shut down your endpoint. You can deploy it again when you come back.", "_____no_output_____" ], [ "## Step 7 (again): Use the model for the web app\n\n> **TODO:** This entire section and the next contain tasks for you to complete, mostly using the AWS console.\n\nSo far we have been accessing our model endpoint by constructing a predictor object which uses the endpoint and then just using the predictor object to perform inference. What if we wanted to create a web app which accessed our model? The way things are set up currently makes that not possible, since in order to access a SageMaker endpoint the app would first have to authenticate with AWS using an IAM role which includes access to SageMaker endpoints. However, there is an easier way! We just need to use some additional AWS services.\n\n<img src=\"Web App Diagram.svg\">\n\nThe diagram above gives an overview of how the various services will work together. On the far right is the model which we trained above and which is deployed using SageMaker. On the far left is our web app that collects a user's movie review, sends it off and expects a positive or negative sentiment in return.\n\nIn the middle is where some of the magic happens. We will construct a Lambda function, which you can think of as a straightforward Python function that can be executed whenever a specified event occurs. We will give this function permission to send and receive data from a SageMaker endpoint.\n\nLastly, the method we will use to execute the Lambda function is a new endpoint that we will create using API Gateway. This endpoint will be a URL that listens for data to be sent to it. 
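For example, once everything below is wired up, the endpoint can be exercised from any Python session (the invoke URL here is a made-up placeholder; yours will differ):\n\n```python\nimport requests\n\n# POST a raw review string to the (hypothetical) API Gateway invoke URL\nr = requests.post('https://abc123.execute-api.us-east-1.amazonaws.com/prod',\n                  data='The simplest pleasures in life are the best!')\nprint(r.text)  # the sentiment value returned by the model\n```\n\n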
Once it gets some data it will pass that data on to the Lambda function and then return whatever the Lambda function returns. Essentially it will act as an interface that lets our web app communicate with the Lambda function.\n\n### Setting up a Lambda function\n\nThe first thing we are going to do is set up a Lambda function. This Lambda function will be executed whenever our public API has data sent to it. When it is executed it will receive the data, perform any sort of processing that is required, send the data (the review) to the SageMaker endpoint we've created and then return the result.\n\n#### Part A: Create an IAM Role for the Lambda function\n\nSince we want the Lambda function to call a SageMaker endpoint, we need to make sure that it has permission to do so. To do this, we will construct a role that we can later give the Lambda function.\n\nUsing the AWS Console, navigate to the **IAM** page and click on **Roles**. Then, click on **Create role**. Make sure that the **AWS service** is the type of trusted entity selected and choose **Lambda** as the service that will use this role, then click **Next: Permissions**.\n\nIn the search box type `sagemaker` and select the check box next to the **AmazonSageMakerFullAccess** policy. Then, click on **Next: Review**.\n\nLastly, give this role a name. Make sure you use a name that you will remember later on, for example `LambdaSageMakerRole`. Then, click on **Create role**.\n\n#### Part B: Create a Lambda function\n\nNow it is time to actually create the Lambda function.\n\nUsing the AWS Console, navigate to the AWS Lambda page and click on **Create a function**. When you get to the next page, make sure that **Author from scratch** is selected. Now, name your Lambda function, using a name that you will remember later on, for example `sentiment_analysis_func`. Make sure that the **Python 3.6** runtime is selected and then choose the role that you created in the previous part. Then, click on **Create Function**.\n\nOn the next page you will see some information about the Lambda function you've just created. If you scroll down you should see an editor in which you can write the code that will be executed when your Lambda function is triggered. In our example, we will use the code below. \n\n```python\n# We need to use the low-level library to interact with SageMaker since the SageMaker API\n# is not available natively through Lambda.\nimport boto3\n\ndef lambda_handler(event, context):\n\n    # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n    runtime = boto3.Session().client('sagemaker-runtime')\n\n    # Now we use the SageMaker runtime to invoke our endpoint, sending the review we were given\n    response = runtime.invoke_endpoint(EndpointName = '**ENDPOINT NAME HERE**',    # The name of the endpoint we created\n                                       ContentType = 'text/plain',                 # The data format that is expected\n                                       Body = event['body'])                       # The actual review\n\n    # The response is an HTTP response whose body contains the result of our inference\n    result = response['Body'].read().decode('utf-8')\n\n    return {\n        'statusCode' : 200,\n        'headers' : { 'Content-Type' : 'text/plain', 'Access-Control-Allow-Origin' : '*' },\n        'body' : result\n    }\n```\n\nOnce you have copied and pasted the code above into the Lambda code editor, replace the `**ENDPOINT NAME HERE**` portion with the name of the endpoint that we deployed earlier. 
You can determine the name of the endpoint using the code cell below.", "_____no_output_____" ] ], [ [ "predictor.endpoint", "_____no_output_____" ] ], [ [ "Once you have added the endpoint name to the Lambda function, click on **Save**. Your Lambda function is now up and running. Next we need to create a way for our web app to execute the Lambda function.\n\n### Setting up API Gateway\n\nNow that our Lambda function is set up, it is time to create a new API using API Gateway that will trigger the Lambda function we have just created.\n\nUsing AWS Console, navigate to **Amazon API Gateway** and then click on **Get started**.\n\nOn the next page, make sure that **New API** is selected and give the new api a name, for example, `sentiment_analysis_api`. Then, click on **Create API**.\n\nNow we have created an API, however it doesn't currently do anything. What we want it to do is to trigger the Lambda function that we created earlier.\n\nSelect the **Actions** dropdown menu and click **Create Method**. A new blank method will be created, select its dropdown menu and select **POST**, then click on the check mark beside it.\n\nFor the integration point, make sure that **Lambda Function** is selected and click on the **Use Lambda Proxy integration**. This option makes sure that the data that is sent to the API is then sent directly to the Lambda function with no processing. It also means that the return value must be a proper response object as it will also not be processed by API Gateway.\n\nType the name of the Lambda function you created earlier into the **Lambda Function** text entry box and then click on **Save**. Click on **OK** in the pop-up box that then appears, giving permission to API Gateway to invoke the Lambda function you created.\n\nThe last step in creating the API Gateway is to select the **Actions** dropdown and click on **Deploy API**. You will need to create a new Deployment stage and name it anything you like, for example `prod`.\n\nYou have now successfully set up a public API to access your SageMaker model. Make sure to copy or write down the URL provided to invoke your newly created public API as this will be needed in the next step. This URL can be found at the top of the page, highlighted in blue next to the text **Invoke URL**.", "_____no_output_____" ], [ "## Step 4: Deploying our web app\n\nNow that we have a publicly available API, we can start using it in a web app. For our purposes, we have provided a simple static html file which can make use of the public api you created earlier.\n\nIn the `website` folder there should be a file called `index.html`. Download the file to your computer and open that file up in a text editor of your choice. There should be a line which contains **\\*\\*REPLACE WITH PUBLIC API URL\\*\\***. Replace this string with the url that you wrote down in the last step and then save the file.\n\nNow, if you open `index.html` on your local computer, your browser will behave as a local web server and you can use the provided site to interact with your SageMaker model.\n\nIf you'd like to go further, you can host this html file anywhere you'd like, for example using github or hosting a static site on Amazon's S3. Once you have done this you can share the link with anyone you'd like and have them play with it too!\n\n> **Important Note** In order for the web app to communicate with the SageMaker endpoint, the endpoint has to actually be deployed and running. This means that you are paying for it. 
Make sure that the endpoint is running when you want to use the web app but that you shut it down when you don't need it, otherwise you will end up with a surprisingly large AWS bill.\n\n**TODO:** Make sure that you include the edited `index.html` file in your project submission.", "_____no_output_____" ], [ "Now that your web app is working, try playing around with it and see how well it works.\n\n**Question**: Give an example of a review that you entered into your web app. What was the predicted sentiment of your example review?", "_____no_output_____" ], [ "**Answer:** \n**_Example Review:_** In my opinion the greatest comedy ever made! Daniels and Carrey work magic together in this film that more than 20 years later is finally considered a classic. When it first came out it was labeled as complete garbage and toilet humour. But to those people I say it is the best garbage and toilet humour you could ask for. What do you expect to see when the title of the film is ''Dumb and Dumber''. The quick back and forth between the two lead actors and not so subtle chirping often goes unnoticed because your'e laughing so hard from the previous scene.the jokes and dialogue are so good and Carey and Daniels deliver them with such authenticity. It's because of this reason that I am able to watch this movie countless times and still be entertained, as I listen to funny remarks that I missed on the previous 100 viewings. What's truly great about this film is that even people who say they hate it will always without fail crack a laugh or two when re-watching it. They just don't want to admit that they find this nonsense to be funny...but it is. More than 20 years later and I still have not seen a buddy comedy that comes close to matching it. _[Source: Dumb and Dumber, IMDB]_ \n**_Predicted Sentiment:_** POSITIVE", "_____no_output_____" ], [ "### Delete the endpoint\n\nRemember to always shut down your endpoint if you are no longer using it. You are charged for the length of time that the endpoint is running, so if you forget and leave it on you could end up with an unexpectedly large bill.", "_____no_output_____" ] ], [ [ "predictor.delete_endpoint()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
cb6395411ba85f32de7aedf103dd6f3007edcac1
276,988
ipynb
Jupyter Notebook
1_1_Image_Representation/6_4. Classification.ipynb
fatmirboja/CVND_Exercises
c4b0b3bd1b75f9d75f44dc04e74a1a12f7cbabd1
[ "MIT" ]
null
null
null
1_1_Image_Representation/6_4. Classification.ipynb
fatmirboja/CVND_Exercises
c4b0b3bd1b75f9d75f44dc04e74a1a12f7cbabd1
[ "MIT" ]
null
null
null
1_1_Image_Representation/6_4. Classification.ipynb
fatmirboja/CVND_Exercises
c4b0b3bd1b75f9d75f44dc04e74a1a12f7cbabd1
[ "MIT" ]
null
null
null
673.93674
177,654
0.940062
[ [ [ "# Day and Night Image Classifier\n---\n\nThe day/night image dataset consists of 200 RGB color images in two categories: day and night. There are equal numbers of each example: 100 day images and 100 night images.\n\nWe'd like to build a classifier that can accurately label these images as day or night, and that relies on finding distinguishing features between the two types of images!\n\n*Note: All images come from the [AMOS dataset](http://cs.uky.edu/~jacobs/datasets/amos/) (Archive of Many Outdoor Scenes).*\n", "_____no_output_____" ], [ "### Import resources\n\nBefore you get started on the project code, import the libraries and resources that you'll need.", "_____no_output_____" ] ], [ [ "import cv2 # computer vision library\nimport helpers\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Training and Testing Data\nThe 200 day/night images are separated into training and testing datasets. \n\n* 60% of these images are training images, for you to use as you create a classifier.\n* 40% are test images, which will be used to test the accuracy of your classifier.\n\nFirst, we set some variables to keep track of where our images are stored:\n\n    image_dir_training: the directory where our training image data is stored\n    image_dir_test: the directory where our test image data is stored", "_____no_output_____" ] ], [ [ "# Image data directories\nimage_dir_training = \"day_night_images/training/\"\nimage_dir_test = \"day_night_images/test/\"", "_____no_output_____" ] ], [ [ "## Load the datasets\n\nThese first few lines of code will load the training day/night images and store all of them in a variable, `IMAGE_LIST`. This list contains the images and their associated label (\"day\" or \"night\"). \n\nFor example, the first image-label pair in `IMAGE_LIST` can be accessed by index: \n``` IMAGE_LIST[0][:]```.\n", "_____no_output_____" ] ], [ [ "# Using the load_dataset function in helpers.py\n# Load training data\nIMAGE_LIST = helpers.load_dataset(image_dir_training)\n", "_____no_output_____" ] ], [ [ "## Construct a `STANDARDIZED_LIST` of input images and output labels.\n\nThis function takes in a list of image-label pairs and outputs a **standardized** list of resized images and numerical labels.", "_____no_output_____" ] ], [ [ "# Standardize all training images\nSTANDARDIZED_LIST = helpers.standardize(IMAGE_LIST)", "_____no_output_____" ] ], [ [ "## Visualize the standardized data\n\nDisplay a standardized image from STANDARDIZED_LIST.", "_____no_output_____" ] ], [ [ "# Display a standardized image and its label\n\n# Select an image by index\nimage_num = 0\nselected_image = STANDARDIZED_LIST[image_num][0]\nselected_label = STANDARDIZED_LIST[image_num][1]\n\n# Display image and data about it\nplt.imshow(selected_image)\nprint(\"Shape: \"+str(selected_image.shape))\nprint(\"Label [1 = day, 0 = night]: \" + str(selected_label))\n", "Shape: (600, 1100, 3)\nLabel [1 = day, 0 = night]: 1\n" ] ], [ [ "# Feature Extraction\n\nCreate a feature that represents the brightness in an image. We'll be extracting the **average brightness** using HSV colorspace. 
Specifically, we'll use the V channel (a measure of brightness), add up the pixel values in the V channel, then divide that sum by the area of the image to get the average Value of the image.\n", "_____no_output_____" ], [ "---\n### Find the average brightness using the V channel\n\nThis function takes in a **standardized** RGB image and returns a feature (a single value) that represent the average level of brightness in the image. We'll use this value to classify the image as day or night.", "_____no_output_____" ] ], [ [ "# Find the average Value or brightness of an image\ndef avg_brightness(rgb_image):\n # Convert image to HSV\n hsv = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)\n\n # Add up all the pixel values in the V channel\n sum_brightness = np.sum(hsv[:,:,2])\n area = 600*1100.0 # pixels\n \n # find the avg\n avg = sum_brightness/area\n \n return avg", "_____no_output_____" ], [ "# Testing average brightness levels\n# Look at a number of different day and night images and think about \n# what average brightness value separates the two types of images\n\n# As an example, a \"night\" image is loaded in and its avg brightness is displayed\nimage_num = 190\ntest_im = STANDARDIZED_LIST[image_num][0]\n\navg = avg_brightness(test_im)\nprint('Avg brightness: ' + str(avg))\nplt.imshow(test_im)", "Avg brightness: 119.6223\n" ] ], [ [ "# Classification and Visualizing Error\n\nIn this section, we'll turn our average brightness feature into a classifier that takes in a standardized image and returns a `predicted_label` for that image. This `estimate_label` function should return a value: 0 or 1 (night or day, respectively).", "_____no_output_____" ], [ "---\n### TODO: Build a complete classifier \n\nSet a threshold that you think will separate the day and night images by average brightness.", "_____no_output_____" ] ], [ [ "# This function should take in RGB image input\ndef estimate_label(rgb_image):\n \n ## TODO: extract average brightness feature from an RGB image \n # Use the avg brightness feature to predict a label (0, 1)\n predicted_label = 0\n \n ## TODO: set the value of a threshold that will separate day and night images\n threshold = 100\n \n ## TODO: Return the predicted_label (0 or 1) based on whether the avg is \n # above or below the threshold\n if avg_brightness(rgb_image) > threshold:\n predicted_label = 1\n \n return predicted_label \n ", "_____no_output_____" ], [ "## Test out your code by calling the above function and seeing \n# how some of your training data is classified\nestimate_label(test_im)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
cb63a0258eae0e70b7d702454ec38bc86bdfbd3a
757,532
ipynb
Jupyter Notebook
notebooks/T42_issue.ipynb
pzharrington/WeatherBench
244b8452c928d825af388ba62e0c3c21affb32d3
[ "MIT" ]
2
2020-10-20T09:38:55.000Z
2021-02-16T04:53:47.000Z
notebooks/T42_issue.ipynb
pzharrington/WeatherBench
244b8452c928d825af388ba62e0c3c21affb32d3
[ "MIT" ]
8
2020-04-28T08:21:21.000Z
2020-12-08T06:07:52.000Z
notebooks/T42_issue.ipynb
sagar-garg/WeatherBench
fdf208d9af6a896ccb012146dfef268722380a0d
[ "MIT" ]
4
2020-03-10T08:34:47.000Z
2022-01-31T12:39:37.000Z
1,114.017647
100,200
0.956705
[ [ [ "Here is an illustration of the IFS T42 issue.", "_____no_output_____" ] ], [ [ "import xarray as xr\nimport matplotlib.pyplot as plt\nfrom src.score import *", "_____no_output_____" ], [ "# This is the regridded ERA data\nDATADIR = '/data/weather-benchmark/5.625deg/'\nz500_valid = load_test_data(f'{DATADIR}geopotential_500', 'z')\nt850_valid = load_test_data(f'{DATADIR}temperature_850', 't')\nera = xr.merge([z500_valid, t850_valid]).drop('level')", "_____no_output_____" ], [ "era", "_____no_output_____" ], [ "# This is the data that was regridded by Peter\nt42_raw = xr.open_dataset(f'/media/rasp/Elements/weather-benchmark/IFS_T42/output_42_pl_5.625.nc')", "_____no_output_____" ], [ "# Make longitude dimensions match \nt42_raw['lat'] = -era.lat\nt42_raw = t42_raw.roll(lon=32)\nt42_raw['lon'] = era.lon", "/home/rasp/miniconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: FutureWarning: roll_coords will be set to False in the future. Explicitly set roll_coords to silence warning.\n  This is separate from the ipykernel package so we can avoid doing imports until\n" ], [ "t42_raw", "_____no_output_____" ] ], [ [ "Let's now plot the initial conditions of the first forecast.", "_____no_output_____" ] ], [ [ "# Plot for Z500 with difference\nfig, axs = plt.subplots(1, 3, figsize=(15, 4))\nt42_raw.z.isel(time=0).sel(lev=5e4).plot(ax=axs[0]);\nera.z.isel(time=0).plot(ax=axs[1])\n(t42_raw.z.isel(time=0).sel(lev=5e4)-era.z.isel(time=0)).plot(ax=axs[2]);", "_____no_output_____" ], [ "# Same for T850\nfig, axs = plt.subplots(1, 3, figsize=(15, 4))\nt42_raw.t.isel(time=0).sel(lev=8.5e4).plot(ax=axs[0]);\nera.t.isel(time=0).plot(ax=axs[1])\n(t42_raw.t.isel(time=0).sel(lev=8.5e4)-era.t.isel(time=0)).plot(ax=axs[2]);", "_____no_output_____" ] ], [ [ "We can see that the ERA field is a lot noisier than the smooth T42 field. This is obviously worse for T than for Z, which causes the RMSE for T to be much worse. ", "_____no_output_____" ] ], [ [ "# Now for a 5 day forecast\nfig, axs = plt.subplots(1, 3, figsize=(15, 4))\nt42_raw.z.isel(time=5*24//6).sel(lev=5e4).plot(ax=axs[0]);\nera.z.isel(time=5*24).plot(ax=axs[1])\n(t42_raw.z.isel(time=5*24//6).sel(lev=5e4)-era.z.isel(time=5*24)).plot(ax=axs[2]);", "_____no_output_____" ], [ "# Same for T850\nfig, axs = plt.subplots(1, 3, figsize=(15, 4))\nt42_raw.t.isel(time=5*24//6).sel(lev=8.5e4).plot(ax=axs[0]);\nera.t.isel(time=5*24).plot(ax=axs[1])\n(t42_raw.t.isel(time=5*24//6).sel(lev=8.5e4)-era.t.isel(time=5*24)).plot(ax=axs[2]);", "_____no_output_____" ] ], [ [ "So one weird thing here is that we have a 30(!) degree temperature error in the forecast. That doesn't seem physical, right?", "_____no_output_____" ], [ "Since T42 is started from ERA ICs, the question is: Why is it so much smoother? Does it have to do with the interpolation? 
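As a quick aside (this snippet is an illustrative addition, not part of the original analysis), the visual impression can be quantified by the mean absolute difference between neighbouring grid points of the two T850 fields, since a rougher field has larger neighbour-to-neighbour jumps:\n\n```python\nrough = lambda da: float(abs(da.diff('lon')).mean())\nprint('ERA:', rough(era.t.isel(time=0)))\nprint('T42:', rough(t42_raw.t.isel(time=0).sel(lev=8.5e4)))\n```\n\n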
To check that, let's do the same analysis for the 2.8125 degree data.", "_____no_output_____" ] ], [ [ "# This is the regridded ERA data\nDATADIR = '/media/rasp/Elements/weather-benchmark/2.8125deg/'\nz500_valid = load_test_data(f'{DATADIR}geopotential', 'z')\nt850_valid = load_test_data(f'{DATADIR}temperature', 't')\nera = xr.merge([z500_valid, t850_valid])", "_____no_output_____" ], [ "era", "_____no_output_____" ], [ "# This is the data that was regridded by Peter\nt42_raw = xr.open_dataset(f'/media/rasp/Elements/weather-benchmark/IFS_T42/output_42_pl_2.8125.nc')", "_____no_output_____" ], [ "# Make longitude dimensions match \nt42_raw['lat'] = -era.lat\nt42_raw = t42_raw.roll(lon=64)\nt42_raw['lon'] = era.lon", "/home/rasp/miniconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: FutureWarning: roll_coords will be set to False in the future. Explicitly set roll_coords to silence warning.\n This is separate from the ipykernel package so we can avoid doing imports until\n" ], [ "t42_raw", "_____no_output_____" ] ], [ [ "Let's now plot the initial conditions of the first forecast.", "_____no_output_____" ] ], [ [ "# Plot for Z500 with difference\nfig, axs = plt.subplots(1, 3, figsize=(15, 4))\nt42_raw.z.isel(time=0).sel(lev=5e4).plot(ax=axs[0]);\nera.z.isel(time=0).plot(ax=axs[1])\n(t42_raw.z.isel(time=0).sel(lev=5e4)-era.z.isel(time=0)).plot(ax=axs[2]);", "_____no_output_____" ], [ "# Same for T850\nfig, axs = plt.subplots(1, 3, figsize=(15, 4))\nt42_raw.t.isel(time=0).sel(lev=8.5e4).plot(ax=axs[0]);\nera.t.isel(time=0).plot(ax=axs[1])\n(t42_raw.t.isel(time=0).sel(lev=8.5e4)-era.t.isel(time=0)).plot(ax=axs[2]);", "_____no_output_____" ] ], [ [ "As you can see the T42 forecasts are still much smoother. So why is that?", "_____no_output_____" ] ], [ [ "# Now for a 5 day forecast\nfig, axs = plt.subplots(1, 3, figsize=(15, 4))\nt42_raw.z.isel(time=5*24//6).sel(lev=5e4).plot(ax=axs[0]);\nera.z.isel(time=5*24).plot(ax=axs[1])\n(t42_raw.z.isel(time=5*24//6).sel(lev=5e4)-era.z.isel(time=5*24)).plot(ax=axs[2]);", "_____no_output_____" ], [ "# Same for T850\nfig, axs = plt.subplots(1, 3, figsize=(15, 4))\nt42_raw.t.isel(time=5*24//6).sel(lev=8.5e4).plot(ax=axs[0]);\nera.t.isel(time=5*24).plot(ax=axs[1])\n(t42_raw.t.isel(time=5*24//6).sel(lev=8.5e4)-era.t.isel(time=5*24)).plot(ax=axs[2]);", "_____no_output_____" ], [ "# Same for T850; now for 1 forecast lead time\nt = 24\nfig, axs = plt.subplots(1, 3, figsize=(15, 4))\nt42_raw.t.isel(time=t//6).sel(lev=8.5e4).plot(ax=axs[0]);\nera.t.isel(time=t).plot(ax=axs[1])\n(t42_raw.t.isel(time=t//6).sel(lev=8.5e4)-era.t.isel(time=t)).plot(ax=axs[2]);", "_____no_output_____" ] ], [ [ "We still have that huge temperature error. Let's check where that is.", "_____no_output_____" ] ], [ [ "import cartopy.crs as ccrs", "_____no_output_____" ], [ "diff = t42_raw.t.isel(time=5*24//6).sel(lev=8.5e4)-era.t.isel(time=5*24).load()", "_____no_output_____" ], [ "ax = plt.axes(projection=ccrs.PlateCarree())\ndiff.plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_global(); ax.coastlines()", "_____no_output_____" ] ], [ [ "So the huge error is over Eastern China? I almost suspect that this is the main reason for the bad RMSEs. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
cb63a5f885fa205e11d871e324a9e93af1bcbaa8
131,295
ipynb
Jupyter Notebook
seminars/coordinate_descent.ipynb
amkatrutsa/cvx_opt_sda2021
2e2547075b47dd55370c40e28527bc03faf4ae70
[ "MIT" ]
3
2021-12-06T07:31:28.000Z
2021-12-22T20:48:53.000Z
seminars/coordinate_descent.ipynb
amkatrutsa/cvx_opt_sda2021
2e2547075b47dd55370c40e28527bc03faf4ae70
[ "MIT" ]
null
null
null
seminars/coordinate_descent.ipynb
amkatrutsa/cvx_opt_sda2021
2e2547075b47dd55370c40e28527bc03faf4ae70
[ "MIT" ]
null
null
null
331.55303
79,740
0.927149
[ [ [ "# Introduction to coordinate descent: theory and applications", "_____no_output_____" ], [ "## Problem statement and main assumption\n\n$$\n\min_{x \in \mathbb{R}^n} f(x)\n$$ \n\n- $f$ is a convex function\n- If $f(x + \varepsilon e_i) \geq f(x)$ holds along every coordinate, does this mean that $x$ is a minimum point?", "_____no_output_____" ], [ "- If $f$ is smooth, then yes, by the first-order optimality criterion $f'(x) = 0$\n- If $f$ is nonsmooth, then no, since the condition can hold at \"corner\" points that are not minimum points\n- If $f$ is nonsmooth but composite with a separable nonsmooth part, that is\n$$\nf(x) = g(x) + \sum_{i=1}^n h_i(x_i),\n$$\nthen yes. Why?", "_____no_output_____" ], [ "- For any $y$ and any $x$ at which the optimality condition holds along every direction, we have\n$$\nf(y) - f(x) = g(y) - g(x) + \sum_{i=1}^n (h_i(y_i) - h_i(x_i)) \geq \langle g'(x), y - x \rangle + \sum_{i=1}^n (h_i(y_i) - h_i(x_i)) = \sum_{i=1}^n [g'_i(x)(y_i - x_i) + h_i(y_i) - h_i(x_i)] \geq 0\n$$\n- Hence for functions of this form the minimum can be sought coordinate by coordinate, and the result is still a minimum point", "_____no_output_____" ], [ "### Computational details\n\n- In a sequential sweep over the coordinates, the computation of coordinate $i+1$ uses the already updated values of coordinates $1, 2, \ldots, i$\n- Recall the difference between the Jacobi and Gauss-Seidel methods for solving linear systems!\n- The order in which the coordinates are chosen matters\n- The cost of updating the full vector is $\sim$ the cost of updating its $n$ components, i.e. a coordinate-wise update of the variable does not require operating with the full gradient!", "_____no_output_____" ], [ "## A simple example\n\n- $f(x) = \frac12 \|Ax - b\|_2^2$, where $A \in \mathbb{R}^{m \times n}$ and $m \gg n$\n- Pick some coordinate $i$\n- Then the coordinate-wise optimality condition is $[f'(x)]_i = A^{\top}_i(Ax - b) = A^{\top}_i(A_{-i} x_{-i} + A_ix_i - b) = 0$\n- Hence $x_i = \dfrac{A^{\top}_i (b - A_{-i} x_{-i})}{\|A_i\|_2^2}$ - the cost is $O(nm)$, which is comparable to computing the full gradient. Can we do it faster?", "_____no_output_____" ], [ "- Yes, we can! 
To see this, note the following\n\n$$\nx_i = \dfrac{A^{\top}_i (b - A_{-i} x_{-i})}{\|A_i\|_2^2} = \dfrac{A^{\top}_i (b - Ax + A_{i}x_i)}{\|A_i\|_2^2} = x_i - \dfrac{A^{\top}_i r}{\|A_i\|_2^2},\n$$\n\nwhere $r = Ax - b$\n\n- Updating $r$ costs $\mathcal{O}(m)$, and computing $A^{\top}_i r$ costs $\mathcal{O}(m)$\n- In total, updating one coordinate costs $\mathcal{O}(m)$, so the cost of updating all the coordinates is comparable to computing the full gradient, $\mathcal{O}(mn)$", "_____no_output_____" ], [ "## How should the coordinates be chosen?\n\n- Cyclically, from 1 to $n$\n- By a random permutation\n- The Gauss-Southwell rule: $i = \arg\max_k |f'_k(x)|$ - potentially more expensive than the others", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nplt.rc(\"text\", usetex=True)\nm = 1000\nn = 100\nA = np.random.randn(m, n)\nu, s, v = np.linalg.svd(A, compute_uv=True, full_matrices=False)\nprint(s)\ns[-1] = 2\nA = u @ np.diag(s) @ v\nprint(np.linalg.cond(A))\nprint(np.linalg.cond(A.T @ A))\nx_true = np.random.randn(n)\nb = A @ x_true + 1e-7 * np.random.randn(m)", "[41.87167483 41.03208743 40.36712508 40.23613463 39.91280404 39.63874066\n 39.25346063 38.92924047 38.68883908 38.55809077 38.19017354 37.9954093\n 37.94243042 37.49896097 37.37641485 37.15264153 36.8067076  36.7826022\n 36.43746092 36.3130849  36.1581416  36.03280555 35.7131184  35.33921468\n 35.2146035  35.13086186 35.07834455 34.94445099 34.86345103 34.56454075\n 34.28215637 34.16519047 34.05672161 33.82230445 33.77635319 33.49227821\n 33.21382778 33.12755524 32.92605038 32.79201737 32.65550406 32.6225619\n 32.28944389 32.26616643 31.83082353 31.75568384 31.58771585 31.4659162\n 31.37748167 31.25681662 31.13332148 30.86272368 30.70214634 30.48319815\n 30.32923434 30.29713524 30.09469343 30.02074692 29.83461261 29.75773493\n 29.4212749  29.23740239 29.11117629 28.9255292  28.71829837 28.64553319\n 28.49634468 28.36642841 28.27627387 28.09394303 27.96487807 27.80108696\n 27.67798045 27.5004477  27.41908909 27.04310194 26.82992493 26.71257956\n 26.52113597 26.50753516 26.4480462  26.17834483 26.09759035 25.65641831\n 25.53386382 25.2707256  25.19583341 24.79536708 24.58534307 24.51633181\n 24.27358375 24.02999027 23.76836534 23.5249375  23.33974285 23.08111155\n 22.96845801 22.76345933 22.5731046  21.90935154]\n20.935837414907557\n438.3092882634393\n" ], [ "def coordinate_descent_lsq(x0, num_iter, sampler=\"sequential\"):\n    conv = [x0]\n    x = x0.copy()\n    r = A @ x0 - b\n    grad = A.T @ r\n    if sampler == \"sequential\" or sampler == \"GS\":\n        perm = np.arange(x.shape[0])\n    elif sampler == \"random\":\n        perm = np.random.permutation(x.shape[0])\n    else:\n        raise ValueError(\"Unknown sampler!\")\n    \n    for i in range(num_iter):\n        for idx in perm:\n            if sampler == \"GS\":\n                idx = np.argmax(np.abs(grad))\n            new_x_idx = x[idx] - A[:, idx] @ r / (A[:, idx] @ A[:, idx])\n            r = r + A[:, idx] * (new_x_idx - x[idx])\n            if sampler == \"GS\":\n                grad = A.T @ r\n            x[idx] = new_x_idx\n        if sampler == \"random\":\n            perm = np.random.permutation(x.shape[0])\n        conv.append(x.copy())\n#         print(np.linalg.norm(A @ x - b))\n    return x, conv", "_____no_output_____" ], [ "x0 = np.random.randn(n)\nnum_iter = 500\nx_cd_seq, conv_cd_seq = coordinate_descent_lsq(x0, num_iter)\nx_cd_rand, conv_cd_rand = coordinate_descent_lsq(x0, num_iter, \"random\")\nx_cd_gs, conv_cd_gs = coordinate_descent_lsq(x0, num_iter, \"GS\")", "_____no_output_____" ], [ "# !pip install git+https://github.com/amkatrutsa/liboptpy\nimport liboptpy.unconstr_solvers as methods\nimport 
liboptpy.step_size as ss\n\ndef f(x):\n res = A @ x - b\n return 0.5 * res @ res\n\ndef gradf(x):\n res = A @ x - b\n return A.T @ res\n\nL = np.max(np.linalg.eigvalsh(A.T @ A))\n\ngd = methods.fo.GradientDescent(f, gradf, ss.ConstantStepSize(1 / L))\nx_gd = gd.solve(x0=x0, max_iter=num_iter)\n\nacc_gd = methods.fo.AcceleratedGD(f, gradf, ss.ConstantStepSize(1 / L))\nx_accgd = acc_gd.solve(x0=x0, max_iter=num_iter)", "_____no_output_____" ], [ "plt.figure(figsize=(15, 10))\nplt.semilogy([np.linalg.norm(A @ x - b) for x in conv_cd_rand], label=\"Random\")\nplt.semilogy([np.linalg.norm(A @ x - b) for x in conv_cd_seq], label=\"Sequential\")\nplt.semilogy([np.linalg.norm(A @ x - b) for x in conv_cd_gs], label=\"GS\")\nplt.semilogy([np.linalg.norm(A @ x - b) for x in gd.get_convergence()], label=\"GD\")\nplt.semilogy([np.linalg.norm(A @ x - b) for x in acc_gd.get_convergence()], label=\"Nesterov\")\n\nplt.legend(fontsize=20)\nplt.xlabel(\"Number of iterations\", fontsize=24)\nplt.ylabel(\"$\\|Ax - b\\|_2$\", fontsize=24)\nplt.grid(True)\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.show()", "_____no_output_____" ], [ "plt.semilogy([np.linalg.norm(x - x_true) for x in conv_cd_rand], label=\"Random\")\nplt.semilogy([np.linalg.norm(x - x_true) for x in conv_cd_seq], label=\"Sequential\")\nplt.semilogy([np.linalg.norm(x - x_true) for x in conv_cd_gs], label=\"GS\")\nplt.semilogy([np.linalg.norm(x - x_true) for x in gd.get_convergence()], label=\"GD\")\nplt.semilogy([np.linalg.norm(x - x_true) for x in acc_gd.get_convergence()], label=\"Nesterov\")\nplt.legend(fontsize=20)\nplt.xlabel(\"Number of iterations\", fontsize=24)\nplt.ylabel(\"$\\|x - x^*\\|_2$\", fontsize=24)\nplt.grid(True)\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.show()", "_____no_output_____" ] ], [ [ "## Сходимость\n\n- Сублинейная для выпуклых гладких с Липшицевым градиентом\n- Линейная для сильно выпуклых функций\n- Прямая аналогия с градиентным спуском\n- Но много особенностей использования", "_____no_output_____" ], [ "## Типичные примеры использования\n\n- Lasso (снова)\n- SMO метод обучения SVM - блочный координатный спуск с размером блока равным 2\n- Вывод в графических моделях", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
cb63ab27307f15b187cba8b4ba34cb7dd01ac68c
5,982
ipynb
Jupyter Notebook
01.Python-Basics/10. Strings/03.Concatenation-of-strings.ipynb
PramitSahoo/Python-with-Data-Structures-and-Algorithms
f0004e2f5f981da2ae9c2b81c36659b1b7d92cc8
[ "Apache-2.0" ]
null
null
null
01.Python-Basics/10. Strings/03.Concatenation-of-strings.ipynb
PramitSahoo/Python-with-Data-Structures-and-Algorithms
f0004e2f5f981da2ae9c2b81c36659b1b7d92cc8
[ "Apache-2.0" ]
null
null
null
01.Python-Basics/10. Strings/03.Concatenation-of-strings.ipynb
PramitSahoo/Python-with-Data-Structures-and-Algorithms
f0004e2f5f981da2ae9c2b81c36659b1b7d92cc8
[ "Apache-2.0" ]
null
null
null
17.963964
461
0.452023
[ [ [ "a = \"red\"", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "a = a + \"blue\"\na", "_____no_output_____" ], [ "a = a + \"Blue\" + \"Green\"\na", "_____no_output_____" ], [ "a += \"Red\"", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "id(a)", "_____no_output_____" ], [ "a += \"Yellow\"\na", "_____no_output_____" ], [ "id(a)", "_____no_output_____" ], [ "a = \"blue\"\na *= 3", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "id(a)", "_____no_output_____" ], [ "a = a* 2\nid(a)", "_____no_output_____" ], [ "a = 'red'\na = a + 3", "_____no_output_____" ], [ "a = a + str(3)", "_____no_output_____" ], [ "a", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb63ac5ba60061e1115a0fe533d3c5dbb9757272
4,473
ipynb
Jupyter Notebook
notebooks/drh382/knn-eval.ipynb
gmhurtado/fake-review-detection-project
2c8b0e1be5cdb23d4c946b99c6033c0819b9c891
[ "MIT" ]
null
null
null
notebooks/drh382/knn-eval.ipynb
gmhurtado/fake-review-detection-project
2c8b0e1be5cdb23d4c946b99c6033c0819b9c891
[ "MIT" ]
null
null
null
notebooks/drh382/knn-eval.ipynb
gmhurtado/fake-review-detection-project
2c8b0e1be5cdb23d4c946b99c6033c0819b9c891
[ "MIT" ]
2
2020-05-22T19:39:18.000Z
2021-09-08T00:22:47.000Z
34.674419
513
0.568746
[ [ [ "from sklearn.metrics import accuracy_score, roc_auc_score, average_precision_score\n\ndef ClassifierMetrics (X_train, Y_train, X_test, Y_test, fitted_model):\n \"\"\"\n X_train: training set features\n Y_train: training set labels\n X_test: dev/test set features\n Y_test: dev/test set labels\n fitted_model: \n \"\"\"\n Y_score = fitted_model.decision_function(X_test)\n # If your classifier doesn't output decision_function, use predict_proba. \n # Make sure it is taking the prob of the '1' class\n Y_score = fitted_model.predict_proba(X_test)[:,1]\n metrics = {'train_accuracy': fitted_model.score(X_train, Y_train),\n 'test_accuracy': fitted_model.score(X_test, Y_test),\n 'test_auc_pred': roc_auc_score(Y_test, Y_pred),\n 'test_auc_score': roc_auc_score(Y_test, Y_score),\n 'test_ap_pred': average_precision_score(Y_test, Y_pred),\n 'test_ap_score': average_precision_score(Y_test, Y_score)}\n return metrics", "_____no_output_____" ], [ "import json\nbase = '../data/processed/dev/'\ndef writeJsonFile(fname, data, base=base):\n with open(base + fname +'.json', 'w') as outfile:\n json.dump(data, outfile)\n print('Successfully written to {}'.format(fname))\n \ndef readJsonFile(fname, base=base):\n with open(base + fname + '.json', 'r') as f:\n data = json.load(f)\n return data", "_____no_output_____" ], [ "# Add more of your fitted models to my_models if there are any\n# assuming that the suffix for each model is the n_neighbors \n# and n_neighbors is the only parameter you adjusted.\n\nmy_models = [(10, classifier10), (11, classifier11), \n(13, classifier13), (14, classifier14)]", "_____no_output_____" ], [ "all_attempts = []\nfor (n, fitted_model) in my_models:\n params = {'n_neighbors': n}\n metrics = ClassifierMetrics (cnt_X_train, Y_train, cnt_X_dev, Y_dev, fitted_model)\n model_attempt_details = {'params': params, 'metrics': metrics}\n all_attempts.append(model_attempt_details)", "_____no_output_____" ], [ "# File name of the model attempts/results\nfname = 'all_attempts_drh382'\nwriteJsonFile(fname, all_attempts)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb63af2343e1d74bfdec29cd728fab132dffa4b8
2,571
ipynb
Jupyter Notebook
examples/sample.ipynb
andrewnyu/optimstruct
c4b13cdcae21ef2f97a23c519549ccc054f08c56
[ "MIT" ]
2
2021-09-28T20:02:31.000Z
2021-10-30T08:03:27.000Z
examples/sample.ipynb
andrewnyu/scipy-optimstruct
c4b13cdcae21ef2f97a23c519549ccc054f08c56
[ "MIT" ]
null
null
null
examples/sample.ipynb
andrewnyu/scipy-optimstruct
c4b13cdcae21ef2f97a23c519549ccc054f08c56
[ "MIT" ]
null
null
null
19.044444
89
0.466744
[ [ [ "import numpy as np\nfrom optimstruct.optim_dict import optim_dict", "_____no_output_____" ] ], [ [ "Initialize the optim_dict", "_____no_output_____" ] ], [ [ "my_vars = optim_dict()", "_____no_output_____" ] ], [ [ "Add a variable", "_____no_output_____" ] ], [ [ "foo1 = np.array([[1,2,3], [2,4,5], [3,5,7]])\nmy_vars.add_var(\"foo1\", foo1)\n\nfoo2 = np.array([[1,12,3], [2,1,5], [3,55,7]])\nmy_vars.add_var(\"foo2\", foo2)", "_____no_output_____" ] ], [ [ "flatten into np.array ready to be used with Scipy Minimize", "_____no_output_____" ] ], [ [ "x = my_vars.toVector()\nprint(type(x), x)", "<class 'numpy.ndarray'> [ 1 2 3 2 4 5 3 5 7 1 12 3 2 1 5 3 55 7]\n" ] ], [ [ "return np.array into easily accessible dictionary", "_____no_output_____" ] ], [ [ "var_dict = my_vars.toDict(x)\nprint(var_dict)", "{'foo1': array([[1, 2, 3],\n [2, 4, 5],\n [3, 5, 7]]), 'foo2': array([[ 1, 12, 3],\n [ 2, 1, 5],\n [ 3, 55, 7]])}\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb63b0689ff9e54729d94836e1b5d163ac326966
49,717
ipynb
Jupyter Notebook
doc/ipython-notebooks/pca/pca_notebook.ipynb
avramidis/shogun
2cab19a30bb4405e9bb1f6177488969b1a36a1ef
[ "BSD-3-Clause" ]
2
2015-01-13T15:18:27.000Z
2015-05-01T13:28:48.000Z
doc/ipython-notebooks/pca/pca_notebook.ipynb
raamana/shogun
55f57c10f99dc948c0eeedeed28de9ddebd82a45
[ "BSD-3-Clause" ]
null
null
null
doc/ipython-notebooks/pca/pca_notebook.ipynb
raamana/shogun
55f57c10f99dc948c0eeedeed28de9ddebd82a45
[ "BSD-3-Clause" ]
null
null
null
38.811085
835
0.610777
[ [ [ "# Principal Component Analysis in Shogun", "_____no_output_____" ], [ "#### By Abhijeet Kislay (GitHub ID: <a href='https://github.com/kislayabhi'>kislayabhi</a>)", "_____no_output_____" ], [ "This notebook is about finding Principal Components (<a href=\"http://en.wikipedia.org/wiki/Principal_component_analysis\">PCA</a>) of data (<a href=\"http://en.wikipedia.org/wiki/Unsupervised_learning\">unsupervised</a>) in Shogun. Its <a href=\"http://en.wikipedia.org/wiki/Dimensionality_reduction\">dimensional reduction</a> capabilities are further utilised to show its application in <a href=\"http://en.wikipedia.org/wiki/Data_compression\">data compression</a>, image processing and <a href=\"http://en.wikipedia.org/wiki/Facial_recognition_system\">face recognition</a>. ", "_____no_output_____" ] ], [ [ "%pylab inline\n%matplotlib inline\nimport os\nSHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')\n# import all shogun classes\nfrom shogun import *", "_____no_output_____" ] ], [ [ "## Some Formal Background (Skip if you just want code examples)", "_____no_output_____" ], [ "PCA is a useful statistical technique that has found application in fields such as face recognition and image compression, and is a common technique for finding patterns in data of high dimension.\n\nIn machine learning problems data is often high dimensional - images, bag-of-word descriptions etc. In such cases we cannot expect the training data to densely populate the space, meaning that there will be large parts in which little is known about the data. Hence it is expected that only a small number of directions are relevant for describing the data to a reasonable accuracy.\n\nThe data vectors may be very high dimensional, they will therefore typically lie closer to a much lower dimensional 'manifold'.\nHere we concentrate on linear dimensional reduction techniques. In this approach a high dimensional datapoint $\\mathbf{x}$ is 'projected down' to a lower dimensional vector $\\mathbf{y}$ by:\n$$\\mathbf{y}=\\mathbf{F}\\mathbf{x}+\\text{const}.$$\nwhere the matrix $\\mathbf{F}\\in\\mathbb{R}^{\\text{M}\\times \\text{D}}$, with $\\text{M}<\\text{D}$. Here $\\text{M}=\\dim(\\mathbf{y})$ and $\\text{D}=\\dim(\\mathbf{x})$.\n\nFrom the above scenario, we assume that\n\n* The number of principal components to use is $\\text{M}$.\n* The dimension of each data point is $\\text{D}$.\n* The number of data points is $\\text{N}$.\n\nWe express the approximation for datapoint $\\mathbf{x}^n$ as:$$\\mathbf{x}^n \\approx \\mathbf{c} + \\sum\\limits_{i=1}^{\\text{M}}y_i^n \\mathbf{b}^i \\equiv \\tilde{\\mathbf{x}}^n.$$\n* Here the vector $\\mathbf{c}$ is a constant and defines a point in the lower dimensional space.\n* The $\\mathbf{b}^i$ define vectors in the lower dimensional space (also known as 'principal component coefficients' or 'loadings').\n* The $y_i^n$ are the low dimensional co-ordinates of the data.\n\nOur motive is to find the reconstruction $\\tilde{\\mathbf{x}}^n$ given the lower dimensional representation $\\mathbf{y}^n$(which has components $y_i^n,i = 1,...,\\text{M})$. 
For a data space of dimension $\\dim(\\mathbf{x})=\\text{D}$, we hope to accurately describe the data using only a small number $(\\text{M}\\ll \\text{D})$ of coordinates of $\\mathbf{y}$.\nTo determine the best lower dimensional representation it is convenient to use the square distance error between $\\mathbf{x}$ and its reconstruction $\\tilde{\\mathbf{x}}$:$$\\text{E}(\\mathbf{B},\\mathbf{Y},\\mathbf{c})=\\sum\\limits_{n=1}^{\\text{N}}\\sum\\limits_{i=1}^{\\text{D}}[x_i^n - \\tilde{x}_i^n]^2.$$\n* Here the basis vectors are defined as $\\mathbf{B} = [\\mathbf{b}^1,...,\\mathbf{b}^\\text{M}]$ (defining $[\\text{B}]_{i,j} = b_i^j$).\n* Corresponding low dimensional coordinates are defined as $\\mathbf{Y} = [\\mathbf{y}^1,...,\\mathbf{y}^\\text{N}].$\n* Also, $x_i^n$ and $\\tilde{x}_i^n$ represents the coordinates of the data points for the original and the reconstructed data respectively.\n* The bias $\\mathbf{c}$ is given by the mean of the data $\\sum_n\\mathbf{x}^n/\\text{N}$.\n\nTherefore, for simplification purposes we centre our data, so as to set $\\mathbf{c}$ to zero. Now we concentrate on finding the optimal basis $\\mathbf{B}$( which has the components $\\mathbf{b}^i, i=1,...,\\text{M} $).\n", "_____no_output_____" ], [ "#### Deriving the optimal linear reconstruction", "_____no_output_____" ], [ "To find the best basis vectors $\\mathbf{B}$ and corresponding low dimensional coordinates $\\mathbf{Y}$, we may minimize the sum of squared differences between each vector $\\mathbf{x}$ and its reconstruction $\\tilde{\\mathbf{x}}$:\n\n$\\text{E}(\\mathbf{B},\\mathbf{Y}) = \\sum\\limits_{n=1}^{\\text{N}}\\sum\\limits_{i=1}^{\\text{D}}\\left[x_i^n - \\sum\\limits_{j=1}^{\\text{M}}y_j^nb_i^j\\right]^2 = \\text{trace} \\left( (\\mathbf{X}-\\mathbf{B}\\mathbf{Y})^T(\\mathbf{X}-\\mathbf{B}\\mathbf{Y}) \\right)$\n\nwhere $\\mathbf{X} = [\\mathbf{x}^1,...,\\mathbf{x}^\\text{N}].$\nConsidering the above equation under the orthonormality constraint $\\mathbf{B}^T\\mathbf{B} = \\mathbf{I}$ (i.e the basis vectors are mutually orthogonal and of unit length), we differentiate it w.r.t $y_k^n$. The squared error $\\text{E}(\\mathbf{B},\\mathbf{Y})$ therefore has zero derivative when: \n\n$y_k^n = \\sum_i b_i^kx_i^n$\n\nBy substituting this solution in the above equation, the objective becomes\n\n$\\text{E}(\\mathbf{B}) = (\\text{N}-1)\\left[\\text{trace}(\\mathbf{S}) - \\text{trace}\\left(\\mathbf{S}\\mathbf{B}\\mathbf{B}^T\\right)\\right],$\n\nwhere $\\mathbf{S}$ is the sample covariance matrix of the data.\nTo minimise equation under the constraint $\\mathbf{B}^T\\mathbf{B} = \\mathbf{I}$, we use a set of Lagrange Multipliers $\\mathbf{L}$, so that the objective is to minimize: \n\n$-\\text{trace}\\left(\\mathbf{S}\\mathbf{B}\\mathbf{B}^T\\right)+\\text{trace}\\left(\\mathbf{L}\\left(\\mathbf{B}^T\\mathbf{B} - \\mathbf{I}\\right)\\right).$\n\nSince the constraint is symmetric, we can assume that $\\mathbf{L}$ is also symmetric. Differentiating with respect to $\\mathbf{B}$ and equating to zero we obtain that at the optimum \n\n$\\mathbf{S}\\mathbf{B} = \\mathbf{B}\\mathbf{L}$.\n\nThis is a form of eigen-equation so that a solution is given by taking $\\mathbf{L}$ to be diagonal and $\\mathbf{B}$ as the matrix whose columns are the corresponding eigenvectors of $\\mathbf{S}$. In this case,\n\n$\\text{trace}\\left(\\mathbf{S}\\mathbf{B}\\mathbf{B}^T\\right) =\\text{trace}(\\mathbf{L}),$\n\nwhich is the sum of the eigenvalues corresponding to the eigenvectors forming $\\mathbf{B}$. 
Since we wish to minimise $\\text{E}(\\mathbf{B})$, we take the eigenvectors with the largest corresponding eigenvalues.\nWhilst the solution to this eigen-problem is unique, this only serves to define the solution subspace since one may rotate and scale $\\mathbf{B}$ and $\\mathbf{Y}$ such that the value of the squared loss is exactly the same. The justification for choosing the non-rotated eigen solution is given by the additional requirement that the principal components corresponds to directions of maximal variance.", "_____no_output_____" ], [ "#### Maximum variance criterion", "_____no_output_____" ], [ "We aim to find that single direction $\\mathbf{b}$ such that, when the data is projected onto this direction, the variance of this projection is maximal amongst all possible such projections.\nThe projection of a datapoint onto a direction $\\mathbf{b}$ is $\\mathbf{b}^T\\mathbf{x}^n$ for a unit length vector $\\mathbf{b}$. Hence the sum of squared projections is: $$\\sum\\limits_{n}\\left(\\mathbf{b}^T\\mathbf{x}^n\\right)^2 = \\mathbf{b}^T\\left[\\sum\\limits_{n}\\mathbf{x}^n(\\mathbf{x}^n)^T\\right]\\mathbf{b} = (\\text{N}-1)\\mathbf{b}^T\\mathbf{S}\\mathbf{b} = \\lambda(\\text{N} - 1)$$ \nwhich ignoring constants, is simply the negative of the equation for a single retained eigenvector $\\mathbf{b}$(with $\\mathbf{S}\\mathbf{b} = \\lambda\\mathbf{b}$). Hence the optimal single $\\text{b}$ which maximises the projection variance is given by the eigenvector corresponding to the largest eigenvalues of $\\mathbf{S}.$ The second largest eigenvector corresponds to the next orthogonal optimal direction and so on. This explains why, despite the squared loss equation being invariant with respect to arbitrary rotation of the basis vectors, the ones given by the eigen-decomposition have the additional property that they correspond to directions of maximal variance. These maximal variance directions found by PCA are called the $\\text{principal} $ $\\text{directions}.$\n\nThere are two eigenvalue methods through which shogun can perform PCA namely\n* Eigenvalue Decomposition Method.\n* Singular Value Decomposition.\n", "_____no_output_____" ], [ "#### EVD vs SVD", "_____no_output_____" ], [ "* The EVD viewpoint requires that one compute the eigenvalues and eigenvectors of the covariance matrix, which is the product of $\\mathbf{X}\\mathbf{X}^\\text{T}$, where $\\mathbf{X}$ is the data matrix. Since the covariance matrix is symmetric, the matrix is diagonalizable, and the eigenvectors can be normalized such that they are orthonormal:\n\n$\\mathbf{S}=\\frac{1}{\\text{N}-1}\\mathbf{X}\\mathbf{X}^\\text{T},$\n\nwhere the $\\text{D}\\times\\text{N}$ matrix $\\mathbf{X}$ contains all the data vectors: $\\mathbf{X}=[\\mathbf{x}^1,...,\\mathbf{x}^\\text{N}].$\nWriting the $\\text{D}\\times\\text{N}$ matrix of eigenvectors as $\\mathbf{E}$ and the eigenvalues as an $\\text{N}\\times\\text{N}$ diagonal matrix $\\mathbf{\\Lambda}$, the eigen-decomposition of the covariance $\\mathbf{S}$ is\n\n$\\mathbf{X}\\mathbf{X}^\\text{T}\\mathbf{E}=\\mathbf{E}\\mathbf{\\Lambda}\\Longrightarrow\\mathbf{X}^\\text{T}\\mathbf{X}\\mathbf{X}^\\text{T}\\mathbf{E}=\\mathbf{X}^\\text{T}\\mathbf{E}\\mathbf{\\Lambda}\\Longrightarrow\\mathbf{X}^\\text{T}\\mathbf{X}\\tilde{\\mathbf{E}}=\\tilde{\\mathbf{E}}\\mathbf{\\Lambda},$\n\nwhere we defined $\\tilde{\\mathbf{E}}=\\mathbf{X}^\\text{T}\\mathbf{E}$. 
The final expression above represents the eigenvector equation for $\\mathbf{X}^\\text{T}\\mathbf{X}.$ This is a matrix of dimensions $\\text{N}\\times\\text{N}$ so that calculating the eigen-decomposition takes $\\mathcal{O}(\\text{N}^3)$ operations, compared with $\\mathcal{O}(\\text{D}^3)$ operations in the original high-dimensional space. We can therefore calculate the eigenvectors $\\tilde{\\mathbf{E}}$ and eigenvalues $\\mathbf{\\Lambda}$ of this matrix more easily. Once found, we use the fact that the eigenvalues of $\\mathbf{S}$ are given by the diagonal entries of $\\mathbf{\\Lambda}$ and the eigenvectors by\n\n$\\mathbf{E}=\\mathbf{X}\\tilde{\\mathbf{E}}\\mathbf{\\Lambda}^{-1}$\n\n\n\n\n* On the other hand, applying SVD to the data matrix $\\mathbf{X}$ proceeds as follows:\n\n$\\mathbf{X}=\\mathbf{U}\\mathbf{\\Sigma}\\mathbf{V}^\\text{T}$\n\nwhere $\\mathbf{U}^\\text{T}\\mathbf{U}=\\mathbf{I}_\\text{D}$ and $\\mathbf{V}^\\text{T}\\mathbf{V}=\\mathbf{I}_\\text{N}$ and $\\mathbf{\\Sigma}$ is a diagonal matrix of the (positive) singular values. We assume that the decomposition has ordered the singular values so that the upper left diagonal element of $\\mathbf{\\Sigma}$ contains the largest singular value.\n\nAttempting to construct the covariance matrix $(\\mathbf{X}\\mathbf{X}^\\text{T})$ from this decomposition gives:\n\n$\\mathbf{X}\\mathbf{X}^\\text{T} = \\left(\\mathbf{U}\\mathbf{\\Sigma}\\mathbf{V}^\\text{T}\\right)\\left(\\mathbf{U}\\mathbf{\\Sigma}\\mathbf{V}^\\text{T}\\right)^\\text{T}$\n\n$\\mathbf{X}\\mathbf{X}^\\text{T} = \\left(\\mathbf{U}\\mathbf{\\Sigma}\\mathbf{V}^\\text{T}\\right)\\left(\\mathbf{V}\\mathbf{\\Sigma}\\mathbf{U}^\\text{T}\\right)$\n\nand since $\\mathbf{V}$ is an orthogonal matrix $\\left(\\mathbf{V}^\\text{T}\\mathbf{V}=\\mathbf{I}\\right),$\n\n$\\mathbf{X}\\mathbf{X}^\\text{T}=\\left(\\mathbf{U}\\mathbf{\\Sigma}^\\mathbf{2}\\mathbf{U}^\\text{T}\\right)$\n\nSince it is in the form of an eigen-decomposition, the PCA solution is given by performing the SVD decomposition of $\\mathbf{X}$, for which the eigenvectors are then given by $\\mathbf{U}$, and corresponding eigenvalues by the square of the singular values.\n\n", "_____no_output_____" ], [ "#### [CPCA](http://www.shogun-toolbox.org/doc/en/3.0.0/classshogun_1_1CPCA.html) Class Reference (Shogun) ", "_____no_output_____" ], [ "CPCA class of Shogun inherits from the [CPreprocessor](http://www.shogun-toolbox.org/doc/en/3.0.0/classshogun_1_1CPreprocessor.html) class. Preprocessors are transformation functions that don't change the domain of the input features. Specifically, CPCA performs principal component analysis on the input vectors and keeps only the specified number of eigenvectors. 
On preprocessing, the stored covariance matrix is used to project vectors into eigenspace.\n\nPerformance of PCA depends on the algorithm used according to the situation in hand.\nOur PCA preprocessor class provides 3 method options to compute the transformation matrix:\n\n* $\\text{PCA(EVD)}$ sets $\\text{PCAmethod == EVD}$ : Eigen Value Decomposition of Covariance Matrix $(\\mathbf{XX^T}).$\nThe covariance matrix $\\mathbf{XX^T}$ is first formed internally and then\nits eigenvectors and eigenvalues are computed using QR decomposition of the matrix.\nThe time complexity of this method is $\\mathcal{O}(D^3)$ and should be used when $\\text{N > D.}$\n\n\n* $\\text{PCA(SVD)}$ sets $\\text{PCAmethod == SVD}$ : Singular Value Decomposition of feature matrix $\\mathbf{X}$.\nThe transpose of feature matrix, $\\mathbf{X^T}$, is decomposed using SVD. $\\mathbf{X^T = UDV^T}.$\nThe matrix V in this decomposition contains the required eigenvectors and\nthe diagonal entries of the diagonal matrix D correspond to the non-negative\neigenvalues.The time complexity of this method is $\\mathcal{O}(DN^2)$ and should be used when $\\text{N < D.}$\n\n\n* $\\text{PCA(AUTO)}$ sets $\\text{PCAmethod == AUTO}$ : This mode automagically chooses one of the above modes for the user based on whether $\\text{N>D}$ (chooses $\\text{EVD}$) or $\\text{N<D}$ (chooses $\\text{SVD}$)", "_____no_output_____" ], [ "## PCA on 2D data", "_____no_output_____" ], [ "#### Step 1: Get some data", "_____no_output_____" ], [ "We will generate the toy data by adding orthogonal noise to a set of points lying on an arbitrary 2d line. We expect PCA to recover this line, which is a one-dimensional linear sub-space.", "_____no_output_____" ] ], [ [ "#number of data points.\nn=100\n\n#generate a random 2d line(y1 = mx1 + c)\nm = random.randint(1,10)\nc = random.randint(1,10)\nx1 = random.random_integers(-20,20,n)\ny1=m*x1+c\n\n#generate the noise.\nnoise=random.random_sample([n]) * random.random_integers(-35,35,n)\n\n#make the noise orthogonal to the line y=mx+c and add it.\nx=x1 + noise*m/sqrt(1+square(m))\ny=y1 + noise/sqrt(1+square(m))\n\ntwoD_obsmatrix=array([x,y])", "_____no_output_____" ], [ "#to visualise the data we must plot it.\n\nrcParams['figure.figsize'] = 7, 7 \nfigure,axis=subplots(1,1)\nxlim(-50,50)\nylim(-50,50)\naxis.plot(twoD_obsmatrix[0,:],twoD_obsmatrix[1,:],'o',color='green',markersize=6)\n\n#the line from which we generated the data is plotted in red\naxis.plot(x1[:],y1[:],linewidth=0.3,color='red')\ntitle('One-Dimensional sub-space with noise')\nxlabel(\"x axis\")\n_=ylabel(\"y axis\")", "_____no_output_____" ] ], [ [ "#### Step 2: Subtract the mean.", "_____no_output_____" ], [ "For PCA to work properly, we must subtract the mean from each of the data dimensions. The mean subtracted is the average across each dimension. So, all the $x$ values have $\\bar{x}$ subtracted, and all the $y$ values have $\\bar{y}$ subtracted from them, where:$$\\bar{\\mathbf{x}} = \\frac{\\sum\\limits_{i=1}^{n}x_i}{n}$$ $\\bar{\\mathbf{x}}$ denotes the mean of the $x_i^{'s}$", "_____no_output_____" ], [ "##### Shogun's way of doing things :", "_____no_output_____" ], [ "Preprocessor PCA performs principial component analysis on input feature vectors/matrices. 
It provides an interface to set the target dimension by $\\text{put('target_dim', target_dim) method}.$ When the $\\text{init()}$ method in $\\text{PCA}$ is called with proper\nfeature matrix $\\text{X}$ (with say $\\text{N}$ number of vectors and $\\text{D}$ feature dimension), a transformation matrix is computed and stored internally.It inherenty also centralizes the data by subtracting the mean from it.", "_____no_output_____" ] ], [ [ "#convert the observation matrix into dense feature matrix.\ntrain_features = features(twoD_obsmatrix)\n\n#PCA(EVD) is choosen since N=100 and D=2 (N>D).\n#However we can also use PCA(AUTO) as it will automagically choose the appropriate method. \npreprocessor = PCA(EVD)\n\n#since we are projecting down the 2d data, the target dim is 1. But here the exhaustive method is detailed by\n#setting the target dimension to 2 to visualize both the eigen vectors.\n#However, in future examples we will get rid of this step by implementing it directly.\npreprocessor.put('target_dim', 2)\n\n#Centralise the data by subtracting its mean from it.\npreprocessor.init(train_features)\n\n#get the mean for the respective dimensions.\nmean_datapoints=preprocessor.get_real_vector('mean_vector')\nmean_x=mean_datapoints[0]\nmean_y=mean_datapoints[1]", "_____no_output_____" ] ], [ [ "#### Step 3: Calculate the covariance matrix", "_____no_output_____" ], [ "To understand the relationship between 2 dimension we define $\\text{covariance}$. It is a measure to find out how much the dimensions vary from the mean $with$ $respect$ $to$ $each$ $other.$$$cov(X,Y)=\\frac{\\sum\\limits_{i=1}^{n}(X_i-\\bar{X})(Y_i-\\bar{Y})}{n-1}$$\nA useful way to get all the possible covariance values between all the different dimensions is to calculate them all and put them in a matrix.\n\nExample: For a 3d dataset with usual dimensions of $x,y$ and $z$, the covariance matrix has 3 rows and 3 columns, and the values are this:\n$$\\mathbf{S} = \\quad\\begin{pmatrix}cov(x,x)&cov(x,y)&cov(x,z)\\\\cov(y,x)&cov(y,y)&cov(y,z)\\\\cov(z,x)&cov(z,y)&cov(z,z)\\end{pmatrix}$$\n\n\n", "_____no_output_____" ], [ "#### Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix", "_____no_output_____" ], [ "Find the eigenvectors $e^1,....e^M$ of the covariance matrix $\\mathbf{S}$.", "_____no_output_____" ], [ "##### Shogun's way of doing things :", "_____no_output_____" ], [ "Step 3 and Step 4 are directly implemented by the PCA preprocessor of Shogun toolbar. The transformation matrix is essentially a $\\text{D}$$\\times$$\\text{M}$ matrix, the columns of which correspond to the eigenvectors of the covariance matrix $(\\text{X}\\text{X}^\\text{T})$ having top $\\text{M}$ eigenvalues.", "_____no_output_____" ] ], [ [ "#Get the eigenvectors(We will get two of these since we set the target to 2). 
\nE = preprocessor.get_real_matrix('transformation_matrix')\n\n#Get all the eigenvalues returned by PCA.\neig_value=preprocessor.get_real_vector('eigenvalues_vector')\n\ne1 = E[:,0]\ne2 = E[:,1]\neig_value1 = eig_value[0]\neig_value2 = eig_value[1]", "_____no_output_____" ] ], [ [ "#### Step 5: Choosing components and forming a feature vector.", "_____no_output_____" ], [ "Let's visualize the eigenvectors and decide upon which to choose as the $principal$ $component$ of the data set.", "_____no_output_____" ] ], [ [ "#find out the M eigenvectors corresponding to top M number of eigenvalues and store it in E\n#Here M=1\n\n#slope of e1 & e2\nm1=e1[1]/e1[0]\nm2=e2[1]/e2[0]\n\n#generate the two lines\nx1=range(-50,50)\nx2=x1\ny1=multiply(m1,x1)\ny2=multiply(m2,x2)", "_____no_output_____" ], [ "#plot the data along with those two eigenvectors\nfigure, axis = subplots(1,1)\nxlim(-50, 50)\nylim(-50, 50)\naxis.plot(x[:], y[:],'o',color='green', markersize=5, label=\"green\")\naxis.plot(x1[:], y1[:], linewidth=0.7, color='black')\naxis.plot(x2[:], y2[:], linewidth=0.7, color='blue')\np1 = Rectangle((0, 0), 1, 1, fc=\"black\")\np2 = Rectangle((0, 0), 1, 1, fc=\"blue\")\nlegend([p1,p2],[\"1st eigenvector\",\"2nd eigenvector\"],loc='center left', bbox_to_anchor=(1, 0.5))\ntitle('Eigenvectors selection')\nxlabel(\"x axis\")\n_=ylabel(\"y axis\")", "_____no_output_____" ] ], [ [ "In the above figure, the blue line is a good fit of the data. It shows the most significant relationship between the data dimensions.\nIt turns out that the eigenvector with the $highest$ eigenvalue is the $principal$ $component$ of the data set.\nForm the matrix $\\mathbf{E}=[\\mathbf{e}^1,...,\\mathbf{e}^M].$\nHere $\\text{M}$ represents the target dimension of our final projection", "_____no_output_____" ] ], [ [ "#The eigenvector corresponding to the higher eigenvalue (i.e eig_value2) is chosen (i.e e2).\n#E is the feature vector.\nE=e2", "_____no_output_____" ] ], [ [ "#### Step 6: Projecting the data to its Principal Components.", "_____no_output_____" ], [ "This is the final step in PCA. Once we have chosen the components (eigenvectors) that we wish to keep in our data and formed a feature vector, we simply take the vector and multiply it on the left of the original dataset.\nThe lower dimensional representation of each data point $\\mathbf{x}^n$ is given by \n\n$\\mathbf{y}^n=\\mathbf{E}^T(\\mathbf{x}^n-\\mathbf{m})$\n\nHere the $\\mathbf{E}^T$ is the matrix with the eigenvectors in rows, with the most significant eigenvector at the top. 
The mean adjusted data, with data items in each column, with each row holding a separate dimension, is multiplied to it.", "_____no_output_____" ], [ "##### Shogun's way of doing things :", "_____no_output_____" ], [ "Step 6 can be performed by shogun's PCA preprocessor as follows:\n\nThe transformation matrix that we got after $\\text{init()}$ is used to transform all $\\text{D-dim}$ feature matrices (with $\\text{D}$ feature dimensions) supplied, via $\\text{apply_to_feature_matrix methods}$. This transformation outputs the $\\text{M-Dim}$ approximation of all these input vectors and matrices (where $\\text{M}$ $\\leq$ $\\text{min(D,N)}$).", "_____no_output_____" ] ], [ [ "#transform all 2-dimensional feature matrices to target-dimensional approximations.\nyn=preprocessor.apply_to_feature_matrix(train_features)\n\n#Since, here we are manually trying to find the eigenvector corresponding to the top eigenvalue.\n#The 2nd row of yn is chosen as it corresponds to the required eigenvector e2.\nyn1=yn[1,:]", "_____no_output_____" ] ], [ [ "Step 5 and Step 6 can be applied directly with Shogun's PCA preprocessor (from next example). It has been done manually here to show the exhaustive nature of Principal Component Analysis.", "_____no_output_____" ], [ "#### Step 7: Form the approximate reconstruction of the original data $\\mathbf{x}^n$", "_____no_output_____" ], [ "The approximate reconstruction of the original datapoint $\\mathbf{x}^n$ is given by : $\\tilde{\\mathbf{x}}^n\\approx\\text{m}+\\mathbf{E}\\mathbf{y}^n$", "_____no_output_____" ] ], [ [ "x_new=(yn1 * E[0]) + tile(mean_x,[n,1]).T[0]\ny_new=(yn1 * E[1]) + tile(mean_y,[n,1]).T[0]", "_____no_output_____" ] ], [ [ "The new data is plotted below", "_____no_output_____" ] ], [ [ "figure, axis = subplots(1,1)\nxlim(-50, 50)\nylim(-50, 50)\n\naxis.plot(x[:], y[:],'o',color='green', markersize=5, label=\"green\")\naxis.plot(x_new, y_new, 'o', color='blue', markersize=5, label=\"red\")\ntitle('PCA Projection of 2D data into 1D subspace')\nxlabel(\"x axis\")\nylabel(\"y axis\")\n\n#add some legend for information\np1 = Rectangle((0, 0), 1, 1, fc=\"r\")\np2 = Rectangle((0, 0), 1, 1, fc=\"g\")\np3 = Rectangle((0, 0), 1, 1, fc=\"b\")\nlegend([p1,p2,p3],[\"normal projection\",\"2d data\",\"1d projection\"],loc='center left', bbox_to_anchor=(1, 0.5))\n\n#plot the projections in red:\nfor i in range(n):\n    axis.plot([x[i],x_new[i]],[y[i],y_new[i]] , color='red')", "_____no_output_____" ] ], [ [ "## PCA on a 3d data.", "_____no_output_____" ], [ "#### Step1: Get some data", "_____no_output_____" ], [ "We generate points from a plane and then add random noise orthogonal to it. 
The general equation of a plane is: $$\\text{a}\\mathbf{x}+\\text{b}\\mathbf{y}+\\text{c}\\mathbf{z}+\\text{d}=0$$", "_____no_output_____" ] ], [ [ "rcParams['figure.figsize'] = 8,8 \n#number of points\nn=100\n\n#generate the data\na=random.randint(1,20)\nb=random.randint(1,20)\nc=random.randint(1,20)\nd=random.randint(1,20)\n\nx1=random.random_integers(-20,20,n)\ny1=random.random_integers(-20,20,n)\nz1=-(a*x1+b*y1+d)/c\n\n#generate the noise\nnoise=random.random_sample([n])*random.random_integers(-30,30,n)\n\n#the normal unit vector is [a,b,c]/magnitude\nmagnitude=sqrt(square(a)+square(b)+square(c))\nnormal_vec=array([a,b,c]/magnitude)\n\n#add the noise orthogonally\nx=x1+noise*normal_vec[0]\ny=y1+noise*normal_vec[1]\nz=z1+noise*normal_vec[2]\nthreeD_obsmatrix=array([x,y,z])", "_____no_output_____" ], [ "#to visualize the data, we must plot it.\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfig = pyplot.figure()\nax=fig.add_subplot(111, projection='3d')\n\n#plot the noisy data generated by distorting a plane\nax.scatter(x, y, z,marker='o', color='g')\n\nax.set_xlabel('x label')\nax.set_ylabel('y label')\nax.set_zlabel('z label')\nlegend([p2],[\"3d data\"],loc='center left', bbox_to_anchor=(1, 0.5))\ntitle('Two dimensional subspace with noise')\nxx, yy = meshgrid(range(-30,30), range(-30,30))\nzz=-(a * xx + b * yy + d) / c", "_____no_output_____" ] ], [ [ "#### Step 2: Subtract the mean.", "_____no_output_____" ] ], [ [ "#convert the observation matrix into dense feature matrix.\ntrain_features = features(threeD_obsmatrix)\n\n#PCA(EVD) is choosen since N=100 and D=3 (N>D).\n#However we can also use PCA(AUTO) as it will automagically choose the appropriate method. \npreprocessor = PCA(EVD)\n\n#If we set the target dimension to 2, Shogun would automagically preserve the required 2 eigenvectors(out of 3) according to their\n#eigenvalues.\npreprocessor.put('target_dim', 2)\npreprocessor.init(train_features)\n\n#get the mean for the respective dimensions.\nmean_datapoints=preprocessor.get_real_vector('mean_vector')\nmean_x=mean_datapoints[0]\nmean_y=mean_datapoints[1]\nmean_z=mean_datapoints[2]", "_____no_output_____" ] ], [ [ "#### Step 3 & Step 4: Calculate the eigenvectors of the covariance matrix", "_____no_output_____" ] ], [ [ "#get the required eigenvectors corresponding to top 2 eigenvalues.\nE = preprocessor.get_real_matrix('transformation_matrix')", "_____no_output_____" ] ], [ [ "#### Steps 5: Choosing components and forming a feature vector.", "_____no_output_____" ], [ "Since we performed PCA for a target $\\dim = 2$ for the $3 \\dim$ data, we are directly given \nthe two required eigenvectors in $\\mathbf{E}$", "_____no_output_____" ], [ "E is automagically filled by setting target dimension = M. 
This is different from the 2d data example where we implemented this step manually.", "_____no_output_____" ], [ "#### Step 6: Projecting the data to its Principal Components.", "_____no_output_____" ] ], [ [ "#This can be performed by shogun's PCA preprocessor as follows:\nyn=preprocessor.apply_to_feature_matrix(train_features)", "_____no_output_____" ] ], [ [ "#### Step 7: Form the approximate reconstruction of the original data $\\mathbf{x}^n$", "_____no_output_____" ], [ "The approximate reconstruction of the original datapoint $\\mathbf{x}^n$ is given by: $\\tilde{\\mathbf{x}}^n\\approx\\text{m}+\\mathbf{E}\\mathbf{y}^n$", "_____no_output_____" ] ], [ [ "new_data=dot(E,yn)\n\nx_new=new_data[0,:]+tile(mean_x,[n,1]).T[0]\ny_new=new_data[1,:]+tile(mean_y,[n,1]).T[0]\nz_new=new_data[2,:]+tile(mean_z,[n,1]).T[0]", "_____no_output_____" ], [ "#all the above points lie on the same plane. To make it more clear we will plot the projection also.\n\nfig=pyplot.figure()\nax=fig.add_subplot(111, projection='3d')\nax.scatter(x, y, z,marker='o', color='g')\nax.set_xlabel('x label')\nax.set_ylabel('y label')\nax.set_zlabel('z label')\nlegend([p1,p2,p3],[\"normal projection\",\"3d data\",\"2d projection\"],loc='center left', bbox_to_anchor=(1, 0.5))\ntitle('PCA Projection of 3D data into 2D subspace')\n\nfor i in range(100):\n    ax.scatter(x_new[i], y_new[i], z_new[i],marker='o', color='b')\n    ax.plot([x[i],x_new[i]],[y[i],y_new[i]],[z[i],z_new[i]],color='r') ", "_____no_output_____" ] ], [ [ "#### PCA Performance", "_____no_output_____" ], [ "Up till now, we were using the Eigenvalue Decomposition method to compute the transformation matrix $\\text{(N>D)}$, but for the next example $\\text{(N<D)}$ we will be using Singular Value Decomposition.", "_____no_output_____" ], [ "## Practical Example: Eigenfaces", "_____no_output_____" ], [ "The problem with the image representation we are given is its high dimensionality. Two-dimensional $\\text{p} \\times \\text{q}$ grayscale images span a $\\text{m=pq}$ dimensional vector space, so an image with $\\text{100}\\times\\text{100}$ pixels lies in a $\\text{10,000}$ dimensional image space already. \n\nThe question is, are all dimensions really useful for us?\n \n$\\text{Eigenfaces}$ are based on the dimensional reduction approach of $\\text{Principal Component Analysis(PCA)}$. The basic idea is to treat each image as a vector in a high dimensional space. Then, $\\text{PCA}$ is applied to the set of images to produce a new reduced subspace that captures most of the variability between the input images. The $\\text{Principal Component Vectors}$ (eigenvectors of the sample covariance matrix) are called the $\\text{Eigenfaces}$. Every input image can be represented as a linear combination of these eigenfaces by projecting the image onto the new eigenfaces space. Thus, we can perform the identification process by matching in this reduced space. 
An input image is transformed into the $\\text{eigenspace,}$ and the nearest face is identified using a $\\text{Nearest Neighbour approach.}$", "_____no_output_____" ], [ "#### Step 1: Get some data.", "_____no_output_____" ], [ "Here data means those Images which will be used for training purposes.", "_____no_output_____" ] ], [ [ "rcParams['figure.figsize'] = 10, 10 \nimport os\ndef get_imlist(path):\n \"\"\" Returns a list of filenames for all jpg images in a directory\"\"\"\n return [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.pgm')]\n\n#set path of the training images\npath_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/training/')\n#set no. of rows that the images will be resized.\nk1=100\n#set no. of columns that the images will be resized.\nk2=100\n\nfilenames = get_imlist(path_train)\nfilenames = array(filenames)\n\n#n is total number of images that has to be analysed.\nn=len(filenames)", "_____no_output_____" ] ], [ [ "Lets have a look on the data:", "_____no_output_____" ] ], [ [ "# we will be using this often to visualize the images out there.\ndef showfig(image):\n imgplot=imshow(image, cmap='gray')\n imgplot.axes.get_xaxis().set_visible(False)\n imgplot.axes.get_yaxis().set_visible(False)\n \nfrom PIL import Image\nfrom scipy import misc\n\n# to get a hang of the data, lets see some part of the dataset images.\nfig = pyplot.figure()\ntitle('The Training Dataset')\n\nfor i in range(49):\n fig.add_subplot(7,7,i+1)\n train_img=array(Image.open(filenames[i]).convert('L'))\n train_img=misc.imresize(train_img, [k1,k2])\n showfig(train_img)", "_____no_output_____" ] ], [ [ "Represent every image $I_i$ as a vector $\\Gamma_i$", "_____no_output_____" ] ], [ [ "#To form the observation matrix obs_matrix.\n#read the 1st image.\ntrain_img = array(Image.open(filenames[0]).convert('L'))\n\n#resize it to k1 rows and k2 columns\ntrain_img=misc.imresize(train_img, [k1,k2])\n\n#since features accepts only data of float64 datatype, we do a type conversion\ntrain_img=array(train_img, dtype='double')\n\n#flatten it to make it a row vector.\ntrain_img=train_img.flatten()\n\n# repeat the above for all images and stack all those vectors together in a matrix\nfor i in range(1,n):\n temp=array(Image.open(filenames[i]).convert('L')) \n temp=misc.imresize(temp, [k1,k2])\n temp=array(temp, dtype='double')\n temp=temp.flatten()\n train_img=vstack([train_img,temp])\n\n#form the observation matrix \nobs_matrix=train_img.T", "_____no_output_____" ] ], [ [ "#### Step 2: Subtract the mean", "_____no_output_____" ], [ "It is very important that the face images $I_1,I_2,...,I_M$ are $centered$ and of the $same$ size", "_____no_output_____" ], [ "We observe here that the no. of $\\dim$ for each image is far greater than no. of training images. 
This calls for the use of $\\text{SVD}$.\n\nSetting the $\\text{PCA}$ in the $\\text{AUTO}$ mode does this automagically according to the situation.", "_____no_output_____" ] ], [ [ "train_features = features(obs_matrix)\npreprocessor=PCA(AUTO)\n\npreprocessor.put('target_dim', 100)\npreprocessor.init(train_features)\n\nmean=preprocessor.get_real_vector('mean_vector')", "_____no_output_____" ] ], [ [ "#### Step 3 & Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix.", "_____no_output_____" ] ], [ [ "#get the required eigenvectors corresponding to top 100 eigenvalues\nE = preprocessor.get_real_matrix('transformation_matrix')", "_____no_output_____" ], [ "#lets see how these eigenfaces/eigenvectors look like:\nfig1 = pyplot.figure()\ntitle('Top 20 Eigenfaces')\n\nfor i in range(20):\n a = fig1.add_subplot(5,4,i+1)\n eigen_faces=E[:,i].reshape([k1,k2])\n showfig(eigen_faces)\n \n", "_____no_output_____" ] ], [ [ "These 20 eigenfaces are not sufficient for a good image reconstruction. Having more eigenvectors gives us the most flexibility in the number of faces we can reconstruct. Though we are adding vectors with low variance, they are in directions of change nonetheless, and an external image that is not in our database could in fact need these eigenvectors to get even relatively close to it. But at the same time we must also keep in mind that adding excessive eigenvectors results in addition of little or no variance, slowing down the process.\n\nClearly a tradeoff is required.\n\nWe here set for M=100.", "_____no_output_____" ], [ "#### Step 5: Choosing components and forming a feature vector.", "_____no_output_____" ], [ "Since we set target $\\dim = 100$ for this $n \\dim$ data, we are directly given the $100$ required eigenvectors in $\\mathbf{E}$", "_____no_output_____" ], [ "E is automagically filled. This is different from the 2d data example where we implemented this step manually.", "_____no_output_____" ], [ "#### Step 6: Projecting the data to its Principal Components.", "_____no_output_____" ], [ "The lower dimensional representation of each data point $\\mathbf{x}^n$ is given by $$\\mathbf{y}^n=\\mathbf{E}^T(\\mathbf{x}^n-\\mathbf{m})$$", "_____no_output_____" ] ], [ [ "#we perform the required dot product.\nyn=preprocessor.apply_to_feature_matrix(train_features)", "_____no_output_____" ] ], [ [ "#### Step 7: Form the approximate reconstruction of the original image $I_n$", "_____no_output_____" ], [ "The approximate reconstruction of the original datapoint $\\mathbf{x}^n$ is given by : $\\mathbf{x}^n\\approx\\text{m}+\\mathbf{E}\\mathbf{y}^n$", "_____no_output_____" ] ], [ [ "re=tile(mean,[n,1]).T[0] + dot(E,yn)", "_____no_output_____" ], [ "#lets plot the reconstructed images.\nfig2 = pyplot.figure()\ntitle('Reconstructed Images from 100 eigenfaces')\nfor i in range(1,50):\n re1 = re[:,i].reshape([k1,k2])\n fig2.add_subplot(7,7,i)\n showfig(re1)", "_____no_output_____" ] ], [ [ "## Recognition part.", "_____no_output_____" ], [ "In our face recognition process using the Eigenfaces approach, in order to recognize an unseen image, we proceed with the same preprocessing steps as applied to the training images.\nTest images are represented in terms of eigenface coefficients by projecting them into face space$\\text{(eigenspace)}$ calculated during training. Test sample is recognized by measuring the similarity distance between the test sample and all samples in the training. The similarity measure is a metric of distance calculated between two vectors. 
The traditional Eigenface approach utilizes $\\text{Euclidean distance}$.", "_____no_output_____" ] ], [ [ "#set path of the training images\npath_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/testing/')\ntest_files=get_imlist(path_train)\ntest_img=array(Image.open(test_files[0]).convert('L'))\n\nrcParams.update({'figure.figsize': (3, 3)})\n#we plot the test image, for which we have to identify a good match from the training images we already have\nfig = pyplot.figure()\ntitle('The Test Image')\nshowfig(test_img)", "_____no_output_____" ], [ "#We flatten out our test image just the way we have done for the other images\ntest_img=misc.imresize(test_img, [k1,k2])\ntest_img=array(test_img, dtype='double')\ntest_img=test_img.flatten()\n\n#We centralise the test image by subtracting the mean from it.\ntest_f=test_img-mean", "_____no_output_____" ] ], [ [ "Here we have to project our training image as well as the test image on the PCA subspace.", "_____no_output_____" ], [ "The Eigenfaces method then performs face recognition by:\n1. Projecting all training samples into the PCA subspace.\n2. Projecting the query image into the PCA subspace.\n3. Finding the nearest neighbour between the projected training images and the projected query image.", "_____no_output_____" ] ], [ [ "#We have already projected our training images into pca subspace as yn.\ntrain_proj = yn\n\n#Projecting our test image into pca subspace\ntest_proj = dot(E.T, test_f)", "_____no_output_____" ] ], [ [ "##### Shogun's way of doing things:", "_____no_output_____" ], [ "Shogun uses the [CEuclideanDistance](http://www.shogun-toolbox.org/doc/en/3.0.0/classshogun_1_1CEuclideanDistance.html) class to compute the familiar Euclidean distance for real valued features. It computes the square root of the sum of squared disparity between the corresponding feature dimensions of two data points.\n\n$\\mathbf{d(x,x')=}$$\\sqrt{\\mathbf{\\sum\\limits_{i=0}^{n}}|\\mathbf{x_i}-\\mathbf{x'_i}|^2}$", "_____no_output_____" ] ], [ [ "#To get Euclidean Distance as the distance measure use EuclideanDistance.\nworkfeat = features(mat(train_proj))\ntestfeat = features(mat(test_proj).T)\nRaRb=EuclideanDistance(testfeat, workfeat)\n\n#The distance between one test image w.r.t all the training is stacked in matrix d.\nd=empty([n,1])\nfor i in range(n):\n    d[i]= RaRb.distance(0,i)\n    \n#The one having the minimum distance is found out\nmin_distance_index = d.argmin()\niden=array(Image.open(filenames[min_distance_index]))\ntitle('Identified Image')\nshowfig(iden)", "_____no_output_____" ] ], [ [ "## References:", "_____no_output_____" ], [ "[1] David Barber. Bayesian Reasoning and Machine Learning.\n\n[2] Lindsay I Smith. A tutorial on Principal Component Analysis.\n\n[3] Philipp Wagner. Face Recognition with GNU Octave/MATLAB.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb63b4df889f41677d615d803dac52f4b5241874
19,490
ipynb
Jupyter Notebook
python3/learn-python/metaprogramming/Object Oriented Python.ipynb
Nahid-Hassan/code-snippets
24bd4b81564887822a0801a696001fcbeb6a7a75
[ "MIT" ]
2
2020-09-29T04:09:41.000Z
2020-10-18T13:33:36.000Z
python3/learn-python/metaprogramming/Object Oriented Python.ipynb
Nahid-Hassan/code-snippets
24bd4b81564887822a0801a696001fcbeb6a7a75
[ "MIT" ]
null
null
null
python3/learn-python/metaprogramming/Object Oriented Python.ipynb
Nahid-Hassan/code-snippets
24bd4b81564887822a0801a696001fcbeb6a7a75
[ "MIT" ]
1
2021-12-26T04:55:55.000Z
2021-12-26T04:55:55.000Z
23.174792
919
0.467317
[ [ [ "# Making our own objects", "_____no_output_____" ], [ "class Foo:\n def hi(self): # self is the first parameter by convention\n print(self) # self is a pointer to the object", "_____no_output_____" ], [ "f = Foo() # create Foo class object", "_____no_output_____" ], [ "f.hi()", "<__main__.Foo object at 0x7f8cbe18aca0>\n" ], [ "f", "_____no_output_____" ], [ "Foo.hi", "_____no_output_____" ], [ "# Constructor", "_____no_output_____" ], [ "class Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n \n def __str__(self):\n return f'{self.name} is {self.age} years old.'", "_____no_output_____" ], [ "person = Person('Mahin', 22)", "_____no_output_____" ], [ "str(person) # note: call str(object) calls obj.__str__()", "_____no_output_____" ], [ "dir(person) # showing all of the methods in person object.", "_____no_output_____" ], [ "# litle test\nclass Person:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n \"\"\"__len__ return integer \"\"\"\n def __len__(self):\n return len(self.name) + 10\n\n\nperson = Person('Mahin', 23)\nprint(len(person)) # Haha 15. That's works.", "15\n" ], [ "# Fields", "_____no_output_____" ], [ "class Person:\n\n \"\"\"name and age are fileds, that are accessed by dot\"\"\"\n def __init__(self, name, age):\n self.name = name\n self.age = age\n \n def grow_up(self):\n self.age = self.age + 1", "_____no_output_____" ], [ "person = Person('Mahin', 22)", "_____no_output_____" ], [ "person.age # access by dot", "_____no_output_____" ], [ "person.grow_up()", "_____no_output_____" ], [ "person.age", "_____no_output_____" ], [ "# __init__ vs __new__", "_____no_output_____" ], [ "################### 1 #################################\nclass Test:\n\n \"\"\" cls: class Test itself. Not object of class Test. It class itself \"\"\" \n def __new__(cls, x):\n print(f'__new__, cls={cls}')\n # return super().__new__(cls)\n \n def __init__(self, x):\n print(f'__init__, self={self}')\n self.x = x\n ", "_____no_output_____" ], [ "test = Test(2)", "__new__, cls=<class '__main__.Test'>\n" ], [ "test.x", "_____no_output_____" ], [ "# see the difference", "_____no_output_____" ], [ "############################### 2 ######################\nclass Test:\n\n \"\"\" cls: class Test itself. Not object of class Test. It class itself \"\"\" \n def __new__(cls, x):\n print(f'__new__, cls={cls}')\n return super().__new__(cls)\n \n def __init__(self, x):\n print(f'__init__, self={self}')\n self.x = x", "_____no_output_____" ], [ "test = Test(3)", "__new__, cls=<class '__main__.Test'>\n__init__, self=<__main__.Test object at 0x7f8cbe44f700>\n" ], [ "test.x", "_____no_output_____" ], [ "######################## 3 ####################\nclass Test:\n\n \"\"\" cls: class Test itself. Not object of class Test. It class itself \"\"\" \n def __new__(cls, x):\n print(f'__new__, cls={cls}')\n return super().__new__(cls)\n \n def __init__(self, x):\n print(f'__init__, self={self}')\n self.x = x\n \n def __repr__(self):\n return 'Are you kidding me!!!'", "_____no_output_____" ], [ "test = Test(4)", "__new__, cls=<class '__main__.Test'>\n__init__, self=Are you kidding me!!!\n" ], [ "test.x", "_____no_output_____" ], [ "# eveything is an object", "_____no_output_____" ], [ "type(1)", "_____no_output_____" ], [ "type(1).mro() # Method Resolution Order. 
Show Inheritance Hierarchy ", "_____no_output_____" ], [ "type('name').mro()", "_____no_output_____" ], [ "type(print).mro()", "_____no_output_____" ], [ "'hi' == 'hi'", "_____no_output_____" ], [ "id('hi')", "_____no_output_____" ], [ "'hi'.__eq__('hi')", "_____no_output_____" ], [ "1 == 2", "_____no_output_____" ], [ "(1).__eq__(2)", "_____no_output_____" ], [ "# Duck Typing", "_____no_output_____" ], [ "def reverse(string):\n out = str()\n \n for i in string:\n out = i + out\n return out", "_____no_output_____" ], [ "print(reverse('hello'))", "olleh\n" ], [ "print(reverse(343))", "_____no_output_____" ], [ "print(reverse(['a', 'b', 'cd'])) # unexpected behavior. Did you get it???", "cdba\n" ], [ "type(dict()).__dict__", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb63b678af33665dc50f4cc8e6de2d9fe85889c6
4,656
ipynb
Jupyter Notebook
Section 7/Notebooks/7.6 Performance Comparision with Dask.ipynb
PacktPublishing/High-Performance-Computing-with-Python-3.x
0053977a3dc37a0d709d7aeede85181a85e3dc84
[ "MIT" ]
13
2019-03-01T09:23:23.000Z
2022-01-06T17:38:43.000Z
Section 7/Notebooks/7.6 Performance Comparision with Dask.ipynb
PacktPublishing/High-Performance-Computing-with-Python-3.x
0053977a3dc37a0d709d7aeede85181a85e3dc84
[ "MIT" ]
null
null
null
Section 7/Notebooks/7.6 Performance Comparision with Dask.ipynb
PacktPublishing/High-Performance-Computing-with-Python-3.x
0053977a3dc37a0d709d7aeede85181a85e3dc84
[ "MIT" ]
11
2019-11-15T17:25:21.000Z
2021-09-03T03:36:42.000Z
20.068966
89
0.491409
[ [ [ "# This function takes two arrays X, Y as input, and returns a new array Z where \n# Z[i] = X[i] + Y[i] + i\n\n# Sample Input: X = [1, 2, 3], Y = [4, 5, 6]\n# Sample Output: Z = [1+4+0, 2+5+1, 3+6+2] = [5, 8, 11]\n\ndef do_some_op(arr1, arr2):\n arr3 = []\n for idx in range(0, len(arr1)):\n arr3.append(arr1[idx] + arr2[idx] + idx)\n return arr3", "_____no_output_____" ], [ "arr1 = list(range(0, 5000000))\narr2 = list(range(0, 5000000))", "_____no_output_____" ], [ "%timeit do_some_op(arr1, arr2)", "840 ms ± 19.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] ], [ [ "# Using Numpy", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "def do_some_op_np(arr1, arr2):\n idx_array = np.arange(arr1.size)\n arr3 = arr1 + arr2 + idx_array\n return arr3\narr1_np = np.random.random(50000000)\narr2_np = np.random.random(50000000)\n", "_____no_output_____" ], [ "%timeit do_some_op_np(arr1_np, arr2_np)", "569 ms ± 37 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] ], [ [ "# Using numpy add", "_____no_output_____" ] ], [ [ "idx_array = np.arange(arr1_np.size)", "_____no_output_____" ], [ "%timeit np.add(np.add(arr1_np, arr2_np), idx_array)", "449 ms ± 26.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] ], [ [ "# Using Dask array", "_____no_output_____" ] ], [ [ "import dask.array as da\n\ndef do_some_op_da1(arr1, arr2):\n idx_array = da.arange(arr1.size, chunks= 1000)\n arr3 = arr1 + arr2 + idx_array\n return arr3\narr1_da = da.random.random(50000000, chunks=1000)\narr2_da = da.random.random(50000000, chunks=1000)", "_____no_output_____" ], [ "%timeit do_some_op_da1(arr1_da, arr2_da)", "127 ms ± 1.25 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] ], [ [ "# Using dask array add ", "_____no_output_____" ] ], [ [ "idx_array = da.arange(arr1_da.size, chunks=1000)", "_____no_output_____" ], [ "%timeit da.add(da.add(arr1_da, arr2_da), idx_array)", "34.1 ms ± 827 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb63be341af21d7036741f214c35227d97d8b91a
23,881
ipynb
Jupyter Notebook
examples/notebooks/statespace_forecasting.ipynb
alexlyttle/statsmodels
3ff6fac6d45195e8e93079b9d4cdd934d8e2394b
[ "BSD-3-Clause" ]
76
2019-12-28T08:37:10.000Z
2022-03-29T02:19:41.000Z
examples/notebooks/statespace_forecasting.ipynb
alexlyttle/statsmodels
3ff6fac6d45195e8e93079b9d4cdd934d8e2394b
[ "BSD-3-Clause" ]
null
null
null
examples/notebooks/statespace_forecasting.ipynb
alexlyttle/statsmodels
3ff6fac6d45195e8e93079b9d4cdd934d8e2394b
[ "BSD-3-Clause" ]
35
2020-02-04T14:46:25.000Z
2022-03-24T03:56:17.000Z
32.315291
531
0.611448
[ [ [ "# Forecasting in statsmodels\n\nThis notebook describes forecasting using time series models in statsmodels.\n\n**Note**: this notebook applies only to the state space model classes, which are:\n\n- `sm.tsa.SARIMAX`\n- `sm.tsa.UnobservedComponents`\n- `sm.tsa.VARMAX`\n- `sm.tsa.DynamicFactor`", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\n\nmacrodata = sm.datasets.macrodata.load_pandas().data\nmacrodata.index = pd.period_range('1959Q1', '2009Q3', freq='Q')", "_____no_output_____" ] ], [ [ "## Basic example\n\nA simple example is to use an AR(1) model to forecast inflation. Before forecasting, let's take a look at the series:", "_____no_output_____" ] ], [ [ "endog = macrodata['infl']\nendog.plot(figsize=(15, 5))", "_____no_output_____" ] ], [ [ "### Constructing and estimating the model", "_____no_output_____" ], [ "The next step is to formulate the econometric model that we want to use for forecasting. In this case, we will use an AR(1) model via the `SARIMAX` class in statsmodels.\n\nAfter constructing the model, we need to estimate its parameters. This is done using the `fit` method. The `summary` method produces several convenient tables showing the results.", "_____no_output_____" ] ], [ [ "# Construct the model\nmod = sm.tsa.SARIMAX(endog, order=(1, 0, 0), trend='c')\n# Estimate the parameters\nres = mod.fit()\n\nprint(res.summary())", "_____no_output_____" ] ], [ [ "### Forecasting", "_____no_output_____" ], [ "Out-of-sample forecasts are produced using the `forecast` or `get_forecast` methods from the results object.\n\nThe `forecast` method gives only point forecasts.", "_____no_output_____" ] ], [ [ "# The default is to get a one-step-ahead forecast:\nprint(res.forecast())", "_____no_output_____" ] ], [ [ "The `get_forecast` method is more general, and also allows constructing confidence intervals.", "_____no_output_____" ] ], [ [ "# Here we construct a more complete results object.\nfcast_res1 = res.get_forecast()\n\n# Most results are collected in the `summary_frame` attribute.\n# Here we specify that we want a confidence level of 90%\nprint(fcast_res1.summary_frame(alpha=0.10))", "_____no_output_____" ] ], [ [ "The default confidence level is 95%, but this can be controlled by setting the `alpha` parameter, where the confidence level is defined as $(1 - \\alpha) \\times 100\\%$. In the example above, we specified a confidence level of 90%, using `alpha=0.10`.", "_____no_output_____" ], [ "### Specifying the number of forecasts\n\nBoth of the functions `forecast` and `get_forecast` accept a single argument indicating how many forecasting steps are desired. 
One option for this argument is always to provide an integer describing the number of steps ahead you want.", "_____no_output_____" ] ], [ [ "print(res.forecast(steps=2))", "_____no_output_____" ], [ "fcast_res2 = res.get_forecast(steps=2)\n# Note: since we did not specify the alpha parameter, the\n# confidence level is at the default, 95%\nprint(fcast_res2.summary_frame())", "_____no_output_____" ] ], [ [ "However, **if your data included a Pandas index with a defined frequency** (see the section at the end on Indexes for more information), then you can alternatively specify the date through which you want forecasts to be produced:", "_____no_output_____" ] ], [ [ "print(res.forecast('2010Q2'))", "_____no_output_____" ], [ "fcast_res3 = res.get_forecast('2010Q2')\nprint(fcast_res3.summary_frame())", "_____no_output_____" ] ], [ [ "### Plotting the data, forecasts, and confidence intervals\n\nOften it is useful to plot the data, the forecasts, and the confidence intervals. There are many ways to do this, but here's one example", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(15, 5))\n\n# Plot the data (here we are subsetting it to get a better look at the forecasts)\nendog.loc['1999':].plot(ax=ax)\n\n# Construct the forecasts\nfcast = res.get_forecast('2011Q4').summary_frame()\nfcast['mean'].plot(ax=ax, style='k--')\nax.fill_between(fcast.index, fcast['mean_ci_lower'], fcast['mean_ci_upper'], color='k', alpha=0.1);", "_____no_output_____" ] ], [ [ "### Note on what to expect from forecasts\n\nThe forecast above may not look very impressive, as it is almost a straight line. This is because this is a very simple, univariate forecasting model. Nonetheless, keep in mind that these simple forecasting models can be extremely competitive.", "_____no_output_____" ], [ "## Prediction vs Forecasting\n\nThe results objects also contain two methods that all for both in-sample fitted values and out-of-sample forecasting. They are `predict` and `get_prediction`. The `predict` method only returns point predictions (similar to `forecast`), while the `get_prediction` method also returns additional results (similar to `get_forecast`).\n\nIn general, if your interest is out-of-sample forecasting, it is easier to stick to the `forecast` and `get_forecast` methods.", "_____no_output_____" ], [ "## Cross validation\n\n**Note**: some of the functions used in this section were first introduced in statsmodels v0.11.0.\n\nA common use case is to cross-validate forecasting methods by performing h-step-ahead forecasts recursively using the following process:\n\n1. Fit model parameters on a training sample\n2. Produce h-step-ahead forecasts from the end of that sample\n3. Compare forecasts against test dataset to compute error rate\n4. Expand the sample to include the next observation, and repeat\n\nEconomists sometimes call this a pseudo-out-of-sample forecast evaluation exercise, or time-series cross-validation.", "_____no_output_____" ], [ "### Example", "_____no_output_____" ], [ "We will conduct a very simple exercise of this sort using the inflation dataset above. 
The full dataset contains 203 observations, and for expositional purposes we'll use the first 80% as our training sample and only consider one-step-ahead forecasts.", "_____no_output_____" ], [ "A single iteration of the above procedure looks like the following:", "_____no_output_____" ] ], [ [ "# Step 1: fit model parameters w/ training sample\ntraining_obs = int(len(endog) * 0.8)\n\ntraining_endog = endog[:training_obs]\ntraining_mod = sm.tsa.SARIMAX(\n training_endog, order=(1, 0, 0), trend='c')\ntraining_res = training_mod.fit()\n\n# Print the estimated parameters\nprint(training_res.params)", "_____no_output_____" ], [ "# Step 2: produce one-step-ahead forecasts\nfcast = training_res.forecast()\n\n# Step 3: compute root mean square forecasting error\ntrue = endog.reindex(fcast.index)\nerror = true - fcast\n\n# Print out the results\nprint(pd.concat([true.rename('true'),\n fcast.rename('forecast'),\n error.rename('error')], axis=1))", "_____no_output_____" ] ], [ [ "To add on another observation, we can use the `append` or `extend` results methods. Either method can produce the same forecasts, but they differ in the other results that are available:\n\n- `append` is the more complete method. It always stores results for all training observations, and it optionally allows refitting the model parameters given the new observations (note that the default is *not* to refit the parameters).\n- `extend` is a faster method that may be useful if the training sample is very large. It *only* stores results for the new observations, and it does not allow refitting the model parameters (i.e. you have to use the parameters estimated on the previous sample).\n\nIf your training sample is relatively small (less than a few thousand observations, for example) or if you want to compute the best possible forecasts, then you should use the `append` method. However, if that method is infeasible (for example, because you have a very large training sample) or if you are okay with slightly suboptimal forecasts (because the parameter estimates will be slightly stale), then you can consider the `extend` method.", "_____no_output_____" ], [ "A second iteration, using the `append` method and refitting the parameters, would go as follows (note again that the default for `append` does not refit the parameters, but we have overridden that with the `refit=True` argument):", "_____no_output_____" ] ], [ [ "# Step 1: append a new observation to the sample and refit the parameters\nappend_res = training_res.append(endog[training_obs:training_obs + 1], refit=True)\n\n# Print the re-estimated parameters\nprint(append_res.params)", "_____no_output_____" ] ], [ [ "Notice that these estimated parameters are slightly different than those we originally estimated. 
With the new results object, `append_res`, we can compute forecasts starting from one observation further than the previous call:", "_____no_output_____" ] ], [ [ "# Step 2: produce one-step-ahead forecasts\nfcast = append_res.forecast()\n\n# Step 3: compute root mean square forecasting error\ntrue = endog.reindex(fcast.index)\nerror = true - fcast\n\n# Print out the results\nprint(pd.concat([true.rename('true'),\n                 fcast.rename('forecast'),\n                 error.rename('error')], axis=1))", "_____no_output_____" ] ], [ [ "Putting it all together, we can perform the recursive forecast evaluation exercise as follows:", "_____no_output_____" ] ], [ [ "# Setup forecasts\nnforecasts = 3\nforecasts = {}\n\n# Get the number of initial training observations\nnobs = len(endog)\nn_init_training = int(nobs * 0.8)\n\n# Create model for initial training sample, fit parameters\ninit_training_endog = endog.iloc[:n_init_training]\nmod = sm.tsa.SARIMAX(training_endog, order=(1, 0, 0), trend='c')\nres = mod.fit()\n\n# Save initial forecast\nforecasts[training_endog.index[-1]] = res.forecast(steps=nforecasts)\n\n# Step through the rest of the sample\nfor t in range(n_init_training, nobs):\n    # Update the results by appending the next observation\n    updated_endog = endog.iloc[t:t+1]\n    res = res.append(updated_endog, refit=False)\n    \n    # Save the new set of forecasts\n    forecasts[updated_endog.index[0]] = res.forecast(steps=nforecasts)\n\n# Combine all forecasts into a dataframe\nforecasts = pd.concat(forecasts, axis=1)\n\nprint(forecasts.iloc[:5, :5])", "_____no_output_____" ] ], [ [ "We now have a set of three forecasts made at each point in time from 1999Q2 through 2009Q3. We can construct the forecast errors by subtracting each forecast from the actual value of `endog` at that point.", "_____no_output_____" ] ], [ [ "# Construct the forecast errors\nforecast_errors = forecasts.apply(lambda column: endog - column).reindex(forecasts.index)\n\nprint(forecast_errors.iloc[:5, :5])", "_____no_output_____" ] ], [ [ "To evaluate our forecasts, we often want to look at a summary value like the root mean square error. Here we can compute that for each horizon by first flattening the forecast errors so that they are indexed by horizon and then computing the root mean square error for each horizon.", "_____no_output_____" ] ], [ [ "# Reindex the forecasts by horizon rather than by date\ndef flatten(column):\n    return column.dropna().reset_index(drop=True)\n\nflattened = forecast_errors.apply(flatten)\nflattened.index = (flattened.index + 1).rename('horizon')\n\nprint(flattened.iloc[:3, :5])", "_____no_output_____" ], [ "# Compute the root mean square error\nrmse = (flattened**2).mean(axis=1)**0.5\n\nprint(rmse)", "_____no_output_____" ] ], [ [ "#### Using `extend`\n\nWe can check that we get similar forecasts if we instead use the `extend` method, but that they are not exactly the same as when we use `append` with the `refit=True` argument. 
This is because `extend` does not re-estimate the parameters given the new observation.", "_____no_output_____" ] ], [ [ "# Setup forecasts\nnforecasts = 3\nforecasts = {}\n\n# Get the number of initial training observations\nnobs = len(endog)\nn_init_training = int(nobs * 0.8)\n\n# Create model for initial training sample, fit parameters\ninit_training_endog = endog.iloc[:n_init_training]\nmod = sm.tsa.SARIMAX(training_endog, order=(1, 0, 0), trend='c')\nres = mod.fit()\n\n# Save initial forecast\nforecasts[training_endog.index[-1]] = res.forecast(steps=nforecasts)\n\n# Step through the rest of the sample\nfor t in range(n_init_training, nobs):\n # Update the results by appending the next observation\n updated_endog = endog.iloc[t:t+1]\n res = res.extend(updated_endog)\n \n # Save the new set of forecasts\n forecasts[updated_endog.index[0]] = res.forecast(steps=nforecasts)\n\n# Combine all forecasts into a dataframe\nforecasts = pd.concat(forecasts, axis=1)\n\nprint(forecasts.iloc[:5, :5])", "_____no_output_____" ], [ "# Construct the forecast errors\nforecast_errors = forecasts.apply(lambda column: endog - column).reindex(forecasts.index)\n\nprint(forecast_errors.iloc[:5, :5])", "_____no_output_____" ], [ "# Reindex the forecasts by horizon rather than by date\ndef flatten(column):\n return column.dropna().reset_index(drop=True)\n\nflattened = forecast_errors.apply(flatten)\nflattened.index = (flattened.index + 1).rename('horizon')\n\nprint(flattened.iloc[:3, :5])", "_____no_output_____" ], [ "# Compute the root mean square error\nrmse = (flattened**2).mean(axis=1)**0.5\n\nprint(rmse)", "_____no_output_____" ] ], [ [ "By not re-estimating the parameters, our forecasts are slightly worse (the root mean square error is higher at each horizon). However, the process is faster, even with only 200 datapoints. Using the `%%timeit` cell magic on the cells above, we found a runtime of 570ms using `extend` versus 1.7s using `append` with `refit=True`. (Note that using `extend` is also faster than using `append` with `refit=False`).", "_____no_output_____" ], [ "## Indexes\n\nThroughout this notebook, we have been making use of Pandas date indexes with an associated frequency. As you can see, this index marks our data as at a quarterly frequency, between 1959Q1 and 2009Q3.", "_____no_output_____" ] ], [ [ "print(endog.index)", "_____no_output_____" ] ], [ [ "In most cases, if your data has an associated data/time index with a defined frequency (like quarterly, monthly, etc.), then it is best to make sure your data is a Pandas series with the appropriate index. Here are three examples of this:", "_____no_output_____" ] ], [ [ "# Annual frequency, using a PeriodIndex\nindex = pd.period_range(start='2000', periods=4, freq='A')\nendog1 = pd.Series([1, 2, 3, 4], index=index)\nprint(endog1.index)", "_____no_output_____" ], [ "# Quarterly frequency, using a DatetimeIndex\nindex = pd.date_range(start='2000', periods=4, freq='QS')\nendog2 = pd.Series([1, 2, 3, 4], index=index)\nprint(endog2.index)", "_____no_output_____" ], [ "# Monthly frequency, using a DatetimeIndex\nindex = pd.date_range(start='2000', periods=4, freq='M')\nendog3 = pd.Series([1, 2, 3, 4], index=index)\nprint(endog3.index)", "_____no_output_____" ] ], [ [ "In fact, if your data has an associated date/time index, it is best to use that even if does not have a defined frequency. 
An example of that kind of index is as follows - notice that it has `freq=None`:", "_____no_output_____" ] ], [ [ "index = pd.DatetimeIndex([\n '2000-01-01 10:08am', '2000-01-01 11:32am',\n '2000-01-01 5:32pm', '2000-01-02 6:15am'])\nendog4 = pd.Series([0.2, 0.5, -0.1, 0.1], index=index)\nprint(endog4.index)", "_____no_output_____" ] ], [ [ "You can still pass this data to statsmodels' model classes, but you will get the following warning, that no frequency data was found:", "_____no_output_____" ] ], [ [ "mod = sm.tsa.SARIMAX(endog4)\nres = mod.fit()", "_____no_output_____" ] ], [ [ "What this means is that you cannot specify forecasting steps by dates, and the output of the `forecast` and `get_forecast` methods will not have associated dates. The reason is that without a given frequency, there is no way to determine what date each forecast should be assigned to. In the example above, there is no pattern to the date/time stamps of the index, so there is no way to determine what the next date/time should be (should it be in the morning of 2000-01-02? the afternoon? or maybe not until 2000-01-03?).\n\nFor example, if we forecast one-step-ahead:", "_____no_output_____" ] ], [ [ "res.forecast(1)", "_____no_output_____" ] ], [ [ "The index associated with the new forecast is `4`, because if the given data had an integer index, that would be the next value. A warning is given letting the user know that the index is not a date/time index.\n\nIf we try to specify the steps of the forecast using a date, we will get the following exception:\n\n KeyError: 'The `end` argument could not be matched to a location related to the index of the data.'\n", "_____no_output_____" ] ], [ [ "# Here we'll catch the exception to prevent printing too much of\n# the exception trace output in this notebook\ntry:\n res.forecast('2000-01-03')\nexcept KeyError as e:\n print(e)", "_____no_output_____" ] ], [ [ "Ultimately there is nothing wrong with using data that does not have an associated date/time frequency, or even using data that has no index at all, like a Numpy array. However, if you can use a Pandas series with an associated frequency, you'll have more options for specifying your forecasts and get back results with a more useful index.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb63e137085a8bdfd3a9d39215ae02ce8c60aef2
19,987
ipynb
Jupyter Notebook
01_Getting_&_Knowing_Your_Data/[ ] Chipotle 1/Exercise_with_Solutions.ipynb
apoboldon/pandas_exercises
e2c0a73cb7e4a020db07577555c57f027de83466
[ "BSD-3-Clause" ]
null
null
null
01_Getting_&_Knowing_Your_Data/[ ] Chipotle 1/Exercise_with_Solutions.ipynb
apoboldon/pandas_exercises
e2c0a73cb7e4a020db07577555c57f027de83466
[ "BSD-3-Clause" ]
null
null
null
01_Getting_&_Knowing_Your_Data/[ ] Chipotle 1/Exercise_with_Solutions.ipynb
apoboldon/pandas_exercises
e2c0a73cb7e4a020db07577555c57f027de83466
[ "BSD-3-Clause" ]
null
null
null
22.81621
195
0.421874
[ [ [ "# Ex2 - Getting and Knowing your Data\n\nCheck out [Chipotle Exercises Video Tutorial](https://www.youtube.com/watch?v=lpuYZ5EUyS8&list=PLgJhDSE2ZLxaY_DigHeiIDC1cD09rXgJv&index=2) to watch a data scientist go through the exercises", "_____no_output_____" ], [ "This time we are going to pull data directly from the internet.\nSpecial thanks to: https://github.com/justmarkham for sharing the dataset and materials.\n\n### Step 1. Import the necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv). ", "_____no_output_____" ], [ "### Step 3. Assign it to a variable called chipo.", "_____no_output_____" ] ], [ [ "url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv'\n \nchipo = pd.read_csv(url, sep = '\\t')", "_____no_output_____" ] ], [ [ "### Step 4. See the first 10 entries", "_____no_output_____" ] ], [ [ "chipo.head(10)", "_____no_output_____" ] ], [ [ "### Step 5. What is the number of observations in the dataset?", "_____no_output_____" ] ], [ [ "# Solution 1\n\nchipo.shape[0] # entries <= 4622 observations", "_____no_output_____" ], [ "# Solution 2\n\nchipo.info() # entries <= 4622 observations", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4622 entries, 0 to 4621\nData columns (total 5 columns):\norder_id 4622 non-null int64\nquantity 4622 non-null int64\nitem_name 4622 non-null object\nchoice_description 3376 non-null object\nitem_price 4622 non-null object\ndtypes: int64(2), object(3)\nmemory usage: 180.6+ KB\n" ] ], [ [ "### Step 6. What is the number of columns in the dataset?", "_____no_output_____" ] ], [ [ "chipo.shape[1]", "_____no_output_____" ] ], [ [ "### Step 7. Print the name of all the columns.", "_____no_output_____" ] ], [ [ "chipo.columns", "_____no_output_____" ] ], [ [ "### Step 8. How is the dataset indexed?", "_____no_output_____" ] ], [ [ "chipo.index", "_____no_output_____" ] ], [ [ "### Step 9. Which was the most-ordered item? ", "_____no_output_____" ] ], [ [ "c = chipo.groupby('item_name')\nc = c.sum()\nc = c.sort_values(['quantity'], ascending=False)\nc.head(1)", "_____no_output_____" ] ], [ [ "### Step 10. For the most-ordered item, how many items were ordered?", "_____no_output_____" ] ], [ [ "c = chipo.groupby('item_name')\nc = c.sum()\nc = c.sort_values(['quantity'], ascending=False)\nc.head(1)", "_____no_output_____" ] ], [ [ "### Step 11. What was the most ordered item in the choice_description column?", "_____no_output_____" ] ], [ [ "c = chipo.groupby('choice_description').sum()\nc = c.sort_values(['quantity'], ascending=False)\nc.head(1)\n# Diet Coke 159", "_____no_output_____" ] ], [ [ "### Step 12. How many items were orderd in total?", "_____no_output_____" ] ], [ [ "total_items_orders = chipo.quantity.sum()\ntotal_items_orders", "_____no_output_____" ] ], [ [ "### Step 13. Turn the item price into a float", "_____no_output_____" ], [ "#### Step 13.a. Check the item price type", "_____no_output_____" ] ], [ [ "chipo.item_price.dtype", "_____no_output_____" ] ], [ [ "#### Step 13.b. Create a lambda function and change the type of item price", "_____no_output_____" ] ], [ [ "dollarizer = lambda x: float(x[1:-1])\nchipo.item_price = chipo.item_price.apply(dollarizer)", "_____no_output_____" ] ], [ [ "#### Step 13.c. 
Check the item price type", "_____no_output_____" ] ], [ [ "chipo.item_price.dtype", "_____no_output_____" ] ], [ [ "### Step 14. How much was the revenue for the period in the dataset?", "_____no_output_____" ] ], [ [ "revenue = (chipo['quantity']* chipo['item_price']).sum()\n\nprint('Revenue was: $' + str(np.round(revenue,2)))", "Revenue was: $39237.02\n" ] ], [ [ "### Step 15. How many orders were made in the period?", "_____no_output_____" ] ], [ [ "orders = chipo.order_id.value_counts().count()\norders", "_____no_output_____" ] ], [ [ "### Step 16. What is the average revenue amount per order?", "_____no_output_____" ] ], [ [ "# Solution 1\n\nchipo['revenue'] = chipo['quantity'] * chipo['item_price']\norder_grouped = chipo.groupby(by=['order_id']).sum()\norder_grouped.mean()['revenue']", "_____no_output_____" ], [ "# Solution 2\n\nchipo.groupby(by=['order_id']).sum().mean()['revenue']", "_____no_output_____" ] ], [ [ "### Step 17. How many different items are sold?", "_____no_output_____" ] ], [ [ "chipo.item_name.value_counts().count()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb63e2c602defbf6490b890ef5b2928fe8fb3dd1
36,990
ipynb
Jupyter Notebook
tutorials/c2_tensor_shape.ipynb
wuaalb/project-NN-Pytorch-scripts
3434179a919fe67a614d80761104010255eff752
[ "BSD-3-Clause" ]
null
null
null
tutorials/c2_tensor_shape.ipynb
wuaalb/project-NN-Pytorch-scripts
3434179a919fe67a614d80761104010255eff752
[ "BSD-3-Clause" ]
null
null
null
tutorials/c2_tensor_shape.ipynb
wuaalb/project-NN-Pytorch-scripts
3434179a919fe67a614d80761104010255eff752
[ "BSD-3-Clause" ]
null
null
null
155.420168
11,396
0.885969
[ [ [ "# Tensor shape\n\nAfter the raw and waveform data are loaded from external files, they are stored as Numpy array. However, to use those data in Pytorch, we need to further convert the Numpy arrays to Pytorch tensors. \n\nConversion from Numpy array to Pytorch tensor is straightforward (see [Pytorch tutorial](https://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.html#numpy-bridge)). However, what is difficult is that different Pytorch APIs may expect different tensor shapes.\n\nIn this notebook, we mainly explain the semantics of tensor dimension for this NSF Pytorch project.\n\n**Tensor shape**: we assume all tensors are in shape **(batchsize, length, dim-1, dim-2, ...)**, where \n * batchsize: batch size of a data batch;\n * length: maximum length of data sequences in the batch;\n * dim-1: dimension of feature vector in one time step;\n * dim-2: when a feature vector per time step has more than 1 dimension;\n \nNote that *Length* is equivalent to the number of frames or number of waveform sampling points.\n \n<!-- Hidden layers should not change **batchsize** and **length** of input tensors unless specified (e.g., in down-sampling, up-sampling layers) -->\n", "_____no_output_____" ], [ "### 1. Examples on tensor shape", "_____no_output_____" ] ], [ [ "# At the begining, let's load packages \nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport sys\nimport numpy as np\nimport torch\n\nimport tool_lib\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['figure.figsize'] = (10, 5)\n", "_____no_output_____" ], [ "# load mel and F0\nmel_dim = 80\ninput_mel = tool_lib.read_raw_mat(\"data_models/acoustic_features/hn_nsf/slt_arctic_b0474.mfbsp\", mel_dim)\n\n# convert it into the required tensor format\ninput_mel_tensor = torch.tensor(input_mel).unsqueeze(0)\n\nprint(\"Shape of original data: \" + str(input_mel.shape))\nprint(\"Shape of data as tensor: \" + str(input_mel_tensor.shape))", "Shape of original data: (554, 80)\nShape of data as tensor: torch.Size([1, 554, 80])\n" ], [ "input_mel_tensor[0] - input_mel", "_____no_output_____" ] ], [ [ "In the example above, the input_mel_tensor has shape (1, 554, 80), where\n* 1: this batch has only one data\n* 554: the data has 554 frames\n* 80: each frame has 80 dimensions", "_____no_output_____" ], [ "### 2. Note\n\nIn the tutorial notebooks, we manually add the dimension corresponding to batch and create tensors from the Numpy array. \n\nIn NSF project-NN-Pytorch-scripts, the default data io wrapped over [torch.utils.data.Dataset](https://pytorch.org/docs/stable/data.html#map-style-datasets) and [torch.utils.data.DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) will automatically create tensor in (batchsize, N, M). Users just need to store the raw data in correct float32 format, the default data IO will automatically handle the conversion.\n\n<!-- When all the data files in the dataset have the same shape \\[N, M\\], the default IO can return a mini-batch (batchsize>1, N, M). Otherwise, it can only put a single sample in each mini-batch. -->\n\nIn the tutorial notebooks, we may use batchsize>1 for demonstrations. In NSF project-NN-Pytorch-scripts, we only use batchsize=1 for NSF models training. \n", "_____no_output_____" ], [ "### 3. 
Misc\n\nHere is one function to plot tensors of shape (batchsize, length, dim)", "_____no_output_____" ] ], [ [ "import plot_lib\nimport torch\ndata = torch.zeros([2, 5, 3])\ndata[0] = torch.tensor([[1,2,3], [4,5,6], [7,8,9], [10,11,12],[13,14,15]])\ndata[1] = torch.tensor([[1,2,3], [4,5,6], [7,8,9], [10,11,12],[13,14,15]])\n\n# example 1\nplot_lib.plot_tensor(data.numpy(), color_on_value=True, shift=0.1, title='data in shape %s' % (str(data.shape)))\nplot_lib.plot_tensor(data[0:1], color_on_value=True, title='data[0:1]')\nplot_lib.plot_tensor(data[0:1].view(1, -1).unsqueeze(0), color_on_value=True, \n title='data[0:1].view(1,-1).unsqueeze(0)')\n\n# example 2\n# plot_lib.plot_tensor(torch.tensor([[1,2,3,4,5]]).unsqueeze(0).permute(0, 2, 1), color_on_value=True)", "_____no_output_____" ] ], [ [ "The end", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb63f02fdd3cb02debeb45f0701b6dfa1a0f211c
9,046
ipynb
Jupyter Notebook
_docs/nbs/reco-tut-srr-99-01-basics.ipynb
sparsh-ai/recohut
4121f665761ffe38c9b6337eaa9293b26bee2376
[ "Apache-2.0" ]
null
null
null
_docs/nbs/reco-tut-srr-99-01-basics.ipynb
sparsh-ai/recohut
4121f665761ffe38c9b6337eaa9293b26bee2376
[ "Apache-2.0" ]
1
2022-01-12T05:40:57.000Z
2022-01-12T05:40:57.000Z
_docs/nbs/reco-tut-srr-99-01-basics.ipynb
RecoHut-Projects/recohut
4121f665761ffe38c9b6337eaa9293b26bee2376
[ "Apache-2.0" ]
null
null
null
34.659004
249
0.364139
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "pd.DataFrame ([['Ivan', 'Borodinsky Bread', 1],\n ['Ivan', 'White Bread', 0],\n ['Vasily', 'Epica Yogurt', 1]],\n columns = ['user', 'item', 'purchase_fact'])", "_____no_output_____" ] ], [ [ "Wait, you can add features to user (average bill, number of purchases in categories, etc.), to item (price, number of sales per week, etc.), and solve the classification problem. What is the difference between RecSys and classification?\n\n- Many predictions for 1 user (extreme classification)\n- Much larger amount of data: 100K users, 10K items -> 1B predictions\n- Most of the products the user has never seen -> Did not interact -> 0 does not mean \"did not like\"\n- There is no explicit target. It is not clear what it means \"(not) liked\"\n- Feedback loop\n- The order of recommendations is always important", "_____no_output_____" ] ], [ [ "pd.DataFrame([['Ivan','BreadBorodinsky',1],\n ['Ivan','WhiteBread',0],\n ['Ivan','EpicaYogurt',\"?\"],\n ['Vasily','BorodinskyBread',\"?\"],\n ['Vasily','WhiteBread',\"?\"],\n ['Vasily','EpicaYogurt',1]],\n columns = ['user','item','purchase_fact'])", "_____no_output_____" ] ], [ [ "The main types of tasks: \n- Recommended top-K products : Given a list of products. Recommend K products to the user that they like\n - e-mail newsletter\n - push notifications\n - Recommendations in a separate window on the site\n - Ranking of goods : Given a list of goods. You need to rank it in descending order of interest for the user\n- Ranking of the product catalog\n - Ranking feed\n - Search engine ranking\n - Ranking of \"carousels\" of goods\n- Search for similar products : Given 1 product. You need to find the most similar products\n - \"You May Also Like\"\n - Similar users liked\n - You may be familiar\n- Additional product recommendation . Given 1 product. Find products that are buying with this product\n - Frequently bought with this product", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb63f2fab6a30d047b4aff0e0a96f14f943df86d
738,040
ipynb
Jupyter Notebook
demos/sketchgraphs_demo.ipynb
JoeLambourne/SketchGraphs
183c65f82d71d82c62b253092e9b7fa65846a3e6
[ "MIT" ]
204
2020-07-17T15:00:31.000Z
2022-03-23T19:24:37.000Z
demos/sketchgraphs_demo.ipynb
JoeLambourne/SketchGraphs
183c65f82d71d82c62b253092e9b7fa65846a3e6
[ "MIT" ]
17
2020-08-20T18:04:37.000Z
2021-09-01T09:07:20.000Z
demos/sketchgraphs_demo.ipynb
JoeLambourne/SketchGraphs
183c65f82d71d82c62b253092e9b7fa65846a3e6
[ "MIT" ]
34
2020-07-18T17:38:31.000Z
2022-03-13T02:56:58.000Z
2,236.484848
236,487
0.885779
[ [ [ "### SketchGraphs demo\n\nIn this notebook, we'll first go through various ways of representing and inspecting sketches in SketchGraphs. We'll then take a look at using Onshape's API in order to solve sketch constraints.", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2\n\nimport os\nimport json\nfrom copy import deepcopy\n%matplotlib inline\nfrom matplotlib import pyplot as plt\nimport numpy as np\n# cd to top-level directory\nif os.path.isdir('../sketchgraphs/'): \n os.chdir('../')\n\nimport sketchgraphs.data as datalib\nfrom sketchgraphs.data import flat_array\nimport sketchgraphs.onshape.call as onshape_call", "_____no_output_____" ] ], [ [ "Let's first load in some sketch construction sequences. In this example, we'll be using the [validation set](https://sketchgraphs.cs.princeton.edu/sequence/sg_t16_validation.npy) (see [documentation](https://princetonlips.github.io/SketchGraphs/data) for details). This notebook assumes the data file is already downloaded and located in a directory `sequence_data`.", "_____no_output_____" ] ], [ [ "seq_data = flat_array.load_dictionary_flat('sequence_data/sg_t16_validation.npy')\nseq_data['sequences']", "_____no_output_____" ] ], [ [ "This file has 315,228 sequences. Let's take a look at some of the operations in one of the sequences.", "_____no_output_____" ] ], [ [ "seq = seq_data['sequences'][1327]\nprint(*seq[:20], sep='\\n')", "NodeOp(label=<EntityType.External: 7>, parameters={})\nNodeOp(label=<EntityType.Line: 1>, parameters={'isConstruction': False, 'dirX': 1.0, 'dirY': 0.0, 'pntX': -0.0011842120438814163, 'pntY': 0.0, 'startParam': -0.01264495235582774, 'endParam': 0.02118421204388142})\nEdgeOp(label=<ConstraintType.Horizontal: 4>, references=(1,), parameters={})\nNodeOp(label=<SubnodeType.SN_Start: 101>, parameters={})\nEdgeOp(label=<ConstraintType.Subnode: 101>, references=(2, 1), parameters={})\nEdgeOp(label=<ConstraintType.Horizontal: 4>, references=(2, 0), parameters={})\nNodeOp(label=<SubnodeType.SN_End: 102>, parameters={})\nEdgeOp(label=<ConstraintType.Subnode: 101>, references=(3, 1), parameters={})\nNodeOp(label=<EntityType.Line: 1>, parameters={'isConstruction': False, 'dirX': 0.0, 'dirY': 1.0, 'pntX': 0.020000000000000004, 'pntY': 0.004046052694320679, 'startParam': -0.004046052694320679, 'endParam': 0.0009539473056793214})\nEdgeOp(label=<ConstraintType.Vertical: 6>, references=(4,), parameters={})\nNodeOp(label=<SubnodeType.SN_Start: 101>, parameters={})\nEdgeOp(label=<ConstraintType.Subnode: 101>, references=(5, 4), parameters={})\nEdgeOp(label=<ConstraintType.Coincident: 0>, references=(5, 3), parameters={})\nNodeOp(label=<SubnodeType.SN_End: 102>, parameters={})\nEdgeOp(label=<ConstraintType.Subnode: 101>, references=(6, 4), parameters={})\nNodeOp(label=<EntityType.Line: 1>, parameters={'isConstruction': False, 'dirX': -1.0, 'dirY': -1.224646799147353e-16, 'pntX': 0.015093873720616102, 'pntY': 0.004999999999999999, 'startParam': -0.004906126279383902, 'endParam': 0.0050938737206161})\nEdgeOp(label=<ConstraintType.Horizontal: 4>, references=(7,), parameters={})\nNodeOp(label=<SubnodeType.SN_Start: 101>, parameters={})\nEdgeOp(label=<ConstraintType.Subnode: 101>, references=(8, 7), parameters={})\nEdgeOp(label=<ConstraintType.Coincident: 0>, references=(8, 6), parameters={})\n" ] ], [ [ "We see that a construction sequence is a list of `NodeOp` and `EdgeOp` instances denoting the addition of primitives (also referred to as entities) and constraints, respectively.\n\nNow let's instantiate a `Sketch` 
object from this sequence and render it.", "_____no_output_____" ] ], [ [ "sketch = datalib.sketch_from_sequence(seq)\ndatalib.render_sketch(sketch);", "_____no_output_____" ] ], [ [ "We can also render the sketch with a hand-drawn appearance using matplotlib's xkcd drawing mode.", "_____no_output_____" ] ], [ [ "datalib.render_sketch(sketch, hand_drawn=True);", "_____no_output_____" ] ], [ [ "Next, we'll build a graph representation of the sketch and visualize it with pygraphviz.", "_____no_output_____" ] ], [ [ "G = datalib.pgvgraph_from_sequence(seq)\ndatalib.render_graph(G, '/tmp/my_graph.png')", "_____no_output_____" ], [ "img = plt.imread('/tmp/my_graph.png')\nfig = plt.figure(dpi=500)\nplt.imshow(img[:, 500:1700])\nplt.axis('off');", "_____no_output_____" ] ], [ [ "The full graph image for this example is large so we only display a portion of it above. Node labels that begin with `SN` are _subnodes_, specifying a point on some primitive (e.g., an endpoint of a line segment).", "_____no_output_____" ], [ "### Solving\n\nWe'll now take a look at how we can interact with Onshape's API in order to pass sketches to a geometric constraint solver. Various command line utilities for the API are defined in `sketchgraphs/onshape/call.py`.\n\nOnshape developer credentials are required for this. Visit https://princetonlips.github.io/SketchGraphs/onshape_setup for directions. The default path for credentials is `sketchgraphs/onshape/creds/creds.json`.", "_____no_output_____" ], [ "We need to specify the URL of the Onshape document/PartStudio we'll be using. You should set the following `url` for your own document accordingly.", "_____no_output_____" ] ], [ [ "url = R'https://cad.onshape.com/documents/6f6d14f8facf0bba02184e88/w/66a5db71489c81f4893101ed/e/120c56983451157d26a7102d'", "_____no_output_____" ] ], [ [ "Let's test out Onshape's solver. We'll first make a copy of our sketch, remove its constraints, and manually add noise to the entity positions within Onshape's GUI.", "_____no_output_____" ] ], [ [ "no_constraint_sketch = deepcopy(sketch)\nno_constraint_sketch.constraints.clear()\nonshape_call.add_feature(url, no_constraint_sketch.to_dict(), 'No_Constraints_Sketch')", "_____no_output_____" ] ], [ [ "Before running the next code block, manually \"mess up\" the entities a bit in the GUI, i.e., drag the entities in order to leave the original constraints unsatisfied. The more drastic the change, the more difficult it will be for the solver to find a solution.\n\nNow we retrieve the noisy sketch.", "_____no_output_____" ] ], [ [ "unsolved_sketch_info = onshape_call.get_info(url, 'No_Constraints_Sketch')\nunsolved_sketch = datalib.Sketch.from_info(unsolved_sketch_info['geomEntities'])\ndatalib.render_sketch(unsolved_sketch);", "_____no_output_____" ] ], [ [ "Next, let's add the constraints back in and (attempt to) solve them.", "_____no_output_____" ] ], [ [ "with_constraints_sketch = deepcopy(unsolved_sketch)\nwith_constraints_sketch.constraints = sketch.constraints\nonshape_call.add_feature(url, with_constraints_sketch.to_dict(), 'With_Constraints_Sketch')\nsolved_sketch_info = onshape_call.get_info(url, 'With_Constraints_Sketch')\nsolved_sketch = datalib.Sketch.from_info(solved_sketch_info['geomEntities'])\ndatalib.render_sketch(solved_sketch);", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb63f52e17e0e11315bcdb953b4e46694b2b7955
37,058
ipynb
Jupyter Notebook
Relational Database.ipynb
DanielHLH/Where-is-the-Mosquito---HK-Dengue-Fever-Frontline
1f98fd07c5f211665551f01495064519b195bc91
[ "MIT" ]
1
2021-01-30T11:45:22.000Z
2021-01-30T11:45:22.000Z
Relational Database.ipynb
DanielHLH/Where-is-the-Mosquito---HK-Dengue-Fever-Frontline
1f98fd07c5f211665551f01495064519b195bc91
[ "MIT" ]
null
null
null
Relational Database.ipynb
DanielHLH/Where-is-the-Mosquito---HK-Dengue-Fever-Frontline
1f98fd07c5f211665551f01495064519b195bc91
[ "MIT" ]
null
null
null
31.431722
139
0.384559
[ [ [ "import sqlite3\nimport pandas as pd", "_____no_output_____" ], [ "def run_query(query):\n with sqlite3.connect('AreaOvitrap.db') as conn:\n return pd.read_sql(query,conn)\n\ndef run_command(command):\n with sqlite3.connect('AreaOvitrap.db') as conn:\n conn.execute('PRAGMA foreign_keys = ON;')\n conn.isolation_level = None\n conn.execute(command)\n\ndef show_tables(): \n query = '''\n SELECT name,\n type\n From sqlite_master\n WHERE type IN (\"type\",\"view\");\n '''\n return run_query(query)\n\ndef district_code_generator(area):\n num_range = len(district_only[district_only['area_id'] == area])\n for i in range(1,num_range+1):\n yield area + \"D{:02d}\".format(i)\n\ndef location_code_generator(district_id):\n num_range = len(locations[locations['District_id'] == district_id])\n for i in range(1,num_range+1):\n yield district_id + \"{:02d}\".format(i)\n \ndef match_district(area_districts):\n for index,value in enumerate(locations['Eng']):\n for key, item in area_districts.items():\n if value in item:\n locations.loc[index,'District'] = key\n return locations", "_____no_output_____" ], [ "data = pd.read_csv(\"Area_Ovitrap_Index_Jan2008-Jul2018.csv\")", "_____no_output_____" ], [ "all_districts = {\n 'HK':{\n 'Central Western':{'Central and Admiralty','Sai Wan','Sheung Wan and Sai Ying Pun'},\n 'Eastern':{'Chai Wan West','Shau Kei Wan & Sai Wan Ho','North Point'},\n 'Southern':{'Aberdeen and Ap Lei Chau','Pokfulam','Deep Water Bay & Repulse Bay'},\n 'Wanchai':{'Tin Hau','Wan Chai North','Happy Valley'}\n },\n \n 'KL':{\n 'Yau Tsim':{'Tsim Sha Tsui','Tsim Sha Tsui East'},\n 'Mong Kok':{'Mong Kok'},\n 'Sham Shui Po':{'Cheung Sha Wan','Lai Chi Kok','Sham Shui Po East'},\n 'Kowloon City':{'Ho Man Tin','Kowloon City North','Hung Hom','Lok Fu West','Kai Tak North'},\n 'Wong Tai Sin':{'Wong Tai Sin Central','Diamond Hill','Ngau Chi Wan'},\n 'Kwun Tong':{'Kwun Tong Central','Lam Tin','Yau Tong','Kowloon Bay'}\n },\n \n 'NT':{\n 'Sai Kung':{'Tseung Kwan O South','Tseung Kwan O North','Sai Kung Town'},\n 'Sha Tin':{'Tai Wai','Yuen Chau Kok','Ma On Shan','Lek Yuen','Wo Che'},\n 'Tai Po':{'Tai Po'},\n 'North':{'Fanling','Sheung Shui'},\n 'Yuen Long':{'Tin Shui Wai','Yuen Kong','Yuen Long Town'},\n 'Tuen Mun':{'Tuen Mun North','Tuen Mun South','Tuen Mun West','So Kwun Wat'},\n 'Tsuen Wan':{'Tsuen Wan Town','Tsuen Wan West','Ma Wan','Sheung Kwai Chung'},\n 'Kwai Tsing':{'Kwai Chung','Lai King','Tsing Yi North','Tsing Yi South'}\n },\n \n 'IL':{\n 'Islands':{'Cheung Chau','Tung Chung'}\n }\n}\n", "_____no_output_____" ], [ "# matching the Chinese and English names of the districts into variable \"translations\"\nchi_district = ['中西區','東區','南區','灣仔區','油尖區','旺角區','深水埗區','九龍城區','黃大仙區','觀塘區','西貢區','沙田區','大埔區','北區','元朗區','屯門區','荃灣區','葵青區','離島區']\neng_district = []\nfor area, district in all_districts.items():\n for key, _ in district.items():\n eng_district.append(key)\ntranslations = list(zip(eng_district,chi_district))", "_____no_output_____" ], [ "# group the districts into their corresponding area\narea_district = []\nfor area, district in all_districts.items():\n for key,value in district.items():\n area_district.append([area,key])\n\nfor index, value in enumerate(translations):\n area_district[index].append(value[1])\n", "_____no_output_____" ], [ "area_district", "_____no_output_____" ], [ "# create a pandas dataframe for the data of all districts\ndistrict_only = pd.DataFrame(area_district,columns=['area_id','eng_district','chi_district']) \nhk_code = district_code_generator('HK') # generate ID 
for main area \"Hong Kong Island\"\nkl_code = district_code_generator('KL') # generate ID for main area \"Kowloon\"\nnt_code = district_code_generator('NT') # generate ID for main area \"New Territories\"\nil_code = district_code_generator('IL') # generate ID for main area \"Islands\"\ndistrict_code = [hk_code,kl_code,nt_code,il_code]\n\nfor index,value in enumerate(district_only['area_id']):\n for i, area in enumerate(['HK','KL','NT','IL']):\n if value == area:\n district_only.loc[index,'District_id'] = next(district_code[i]) \n ", "_____no_output_____" ], [ "cols = district_only.columns.tolist()\ncols = cols[-1:]+cols[:-1]\ndistrict_only = district_only[cols]", "_____no_output_____" ], [ "area_dict = {'area_id':['HK','KL','IL','NT'],\n 'eng_area':['Hong Kong Island','Kowloon','Islands','New Territories'],\n 'chi_area':['香港島','九龍','離島','新界']}\n\nt_area = '''\n CREATE TABLE IF NOT EXISTS area(\n area_id TEXT PRIMARY KEY,\n eng_area TEXT,\n chi_area TEXT\n )\n'''\n\nrun_command(t_area)\n\narea = pd.DataFrame(area_dict)\nwith sqlite3.connect(\"AreaOviTrap.db\") as conn:\n area.to_sql('area',conn,if_exists='append',index=False)", "_____no_output_____" ], [ "run_query(\"SELECT * FROM area\")", "_____no_output_____" ], [ "t_district = '''\n CREATE TABLE IF NOT EXISTS district(\n district_id TEXT PRIMARY KEY,\n area_id TEXT,\n eng_district TEXT,\n chi_district TEXT,\n FOREIGN KEY (area_id) REFERENCES area(area_id)\n )\n'''\n\nrun_command(t_district)\nwith sqlite3.connect(\"AreaOviTrap.db\") as conn:\n district_only.to_sql('district',conn,if_exists='append',index=False)", "_____no_output_____" ], [ "run_query(\"SELECT * FROM district\")", "_____no_output_____" ], [ "# extracting unique location from the data\neng_loc = data['Eng'].unique()\nchi_loc = data['Chi'].unique()\nlocations = pd.DataFrame({'District':'','Eng':eng_loc,'Chi':chi_loc})\n\nhk_districts = all_districts['HK']\nkl_districts = all_districts['KL']\nnt_districts = all_districts['NT']\nil_districts = all_districts['IL']\nfour_district = [hk_districts,kl_districts,nt_districts,il_districts]\n\n# match the location with the correpsonding district\nfor each in four_district:\n locations = match_district(each)\n\n# match the location with corresponding district_id\nfor index, value in enumerate(locations['District']):\n for i, district in enumerate(district_only['eng_district']):\n if value == district:\n locations.loc[index,'District_id'] = district_only.loc[i,'District_id']\n \n# generate Location_id by using location_code_generator\nunique_district_id = locations['District_id'].unique().tolist() \nfor each in unique_district_id:\n code = location_code_generator(each)\n for index,value in enumerate(locations['District_id']):\n if value == each:\n locations.loc[index,'Location_id'] = next(code) ", "_____no_output_____" ], [ "locations.head()", "_____no_output_____" ], [ "for index,value in enumerate(data['Eng']):\n for i, name in enumerate(locations['Eng']):\n if value == name:\n data.loc[index,'District_id'] = locations.loc[i,'District_id']\n data.loc[index,'Location_id'] = locations.loc[i,'Location_id']\n \nwith sqlite3.connect('AreaOvitrap.db') as conn:\n data.to_sql('origin',conn,index=False) ", "_____no_output_____" ], [ "data.head(20) ", "_____no_output_____" ], [ "with sqlite3.connect('AreaOvitrap.db') as conn:\n data.to_sql('origin',conn,index=False)", "_____no_output_____" ], [ "t_location = '''\n CREATE TABLE IF NOT EXISTS location(\n location_id TEXT PRIMARY KEY,\n eng_location TEXT,\n chi_location TEXT,\n district_id,\n 
FOREIGN KEY (district_id) REFERENCES district(district_id)\n )\n'''\nlocation_data = '''\n INSERT OR IGNORE INTO location\n SELECT\n DISTINCT Location_id,\n Eng,\n Chi,\n District_id\n FROM origin\n'''\nrun_command(t_location)\nrun_command(location_data)", "_____no_output_____" ], [ "t_aoi = '''\n CREATE TABLE IF NOT EXISTS area_ovitrap_index(\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n location_id TEXT,\n date TEXT,\n AOI FLOAT,\n Classification INTEGER,\n FOREIGN KEY (location_id) REFERENCES location(location_id)\n )\n'''\naoi_data = '''\n INSERT OR IGNORE INTO area_ovitrap_index (location_id, date, AOI, Classification)\n SELECT\n Location_id,\n DATE,\n AOI,\n Classification\n FROM origin\n'''\nrun_command(t_aoi)\nrun_command(aoi_data)", "_____no_output_____" ], [ "run_command(\"DROP TABLE IF EXISTS origin\")\nshow_tables()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb63f778ba25b9e6b0919f42d65da189cf9d9e1e
81,891
ipynb
Jupyter Notebook
eda.ipynb
qqullar/coursework
514876bfce0d55f0e99f8605c6a1d7b9bc3ae25b
[ "MIT" ]
null
null
null
eda.ipynb
qqullar/coursework
514876bfce0d55f0e99f8605c6a1d7b9bc3ae25b
[ "MIT" ]
null
null
null
eda.ipynb
qqullar/coursework
514876bfce0d55f0e99f8605c6a1d7b9bc3ae25b
[ "MIT" ]
null
null
null
81.646062
9,552
0.761378
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "data = pd.read_csv(r\"csvs\\df_all.csv\")", "_____no_output_____" ], [ "data.describe()", "_____no_output_____" ], [ "data.drop_duplicates(inplace=True)", "_____no_output_____" ], [ "data.describe()", "_____no_output_____" ], [ "data.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3151 entries, 0 to 3535\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 background_max_distance 3151 non-null float64\n 1 kernel_max_distance 3151 non-null float64\n 2 textures_max_distance 3151 non-null float64\n 3 textures_min_distance 3151 non-null float64\n 4 barycenter_distance 3151 non-null float64\n 5 group 3151 non-null int64 \ndtypes: float64(5), int64(1)\nmemory usage: 172.3 KB\n" ], [ "data[\"textures_max_distance\"].hist(bins=200)\nplt.title(\"textures_max_distance hist\")\nplt.savefig(\"textures_max_distance.png\", dpi=200)", "_____no_output_____" ], [ "data[\"textures_min_distance\"].hist(bins=30)\nplt.title(\"textures_min_distance hist\")\nplt.savefig(\"textures_min_distance.png\", dpi=200)", "_____no_output_____" ], [ "data.background_max_distance.hist(bins=200)\nplt.title(\"background_max_distance hist\")\nplt.savefig(\"background_max_distance.png\", dpi=200)", "_____no_output_____" ], [ "data.kernel_max_distance.hist(bins=200)\nplt.title(\"kernel_max_distance hist\")\nplt.savefig(\"kernel_max_distance.png\", dpi=200)", "_____no_output_____" ], [ "data.barycenter_distance.hist(bins=200)\nplt.title(\"barycenter_distance hist\")\nplt.savefig(\"barycenter_distance.png\", dpi=200)", "_____no_output_____" ], [ "#?pd.DataFrame.drop", "_____no_output_____" ], [ "data.drop([\"textures_min_distance\"], axis=1)", "_____no_output_____" ], [ "index_delete = data[data.barycenter_distance > 40].index\ndata.drop(index_delete, inplace=True)", "_____no_output_____" ], [ "data.barycenter_distance.hist(bins=200)", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ], [ "n_control = len(data[data.group == 0].index)\nn_bc = len(data[data.group == 1].index)\nn_fam = len(data[data.group == 2].index)\n\nprint(n_control, n_bc, n_fam)", "1396 726 899\n" ], [ "index_delete = data[data.barycenter_distance > 27].index\ndata.drop(index_delete, inplace=True)", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ], [ "index_delete = data[data.background_max_distance > 25].index\ndata.drop(index_delete, inplace=True)", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ], [ "index_delete = data[data.kernel_max_distance > 25].index\ndata.drop(index_delete, inplace=True)", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ], [ "feat = [\"background_max_distance\", \"kernel_max_distance\", \"textures_max_distance\", \"barycenter_distance\"]\ndata[feat].describe()", "_____no_output_____" ], [ "data.to_csv(\"last.csv\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb640eb61e7bf470cd18dac46ff9aa6ebcc8aebf
8,775
ipynb
Jupyter Notebook
notebooks/2020-04-23_th_load_data.ipynb
hummelriegel/meter_readings
5497d3a5dff6039a113b295e70a03da22d05f49f
[ "FTL" ]
null
null
null
notebooks/2020-04-23_th_load_data.ipynb
hummelriegel/meter_readings
5497d3a5dff6039a113b295e70a03da22d05f49f
[ "FTL" ]
null
null
null
notebooks/2020-04-23_th_load_data.ipynb
hummelriegel/meter_readings
5497d3a5dff6039a113b295e70a03da22d05f49f
[ "FTL" ]
null
null
null
24.579832
146
0.541538
[ [ [ "# Load Data\nBilder einlesen und Dateinamen anpassen", "_____no_output_____" ] ], [ [ "# OPTIONAL: Load the \"autoreload\" extension so that code can change\n%load_ext autoreload\n\n# OPTIONAL: always reload modules so that as you change code in src, it gets loaded\n%autoreload 2\n\nfrom src.data import make_dataset\n\nimport warnings\n", "_____no_output_____" ], [ "import pandas as pd # Deals with data", "_____no_output_____" ], [ "excel_readings = pd.read_excel('../data/raw/moroweg_strom_gas.xlsx')\nexcel_readings.head()", "_____no_output_____" ], [ "manual_readings = excel_readings.iloc[:,[0,1,3,5]]\nmanual_readings = manual_readings.melt(id_vars=[\"Date\", \"Kommentar\"], \n var_name=\"Meter Type\", \n value_name=\"Value\")\nmanual_readings[[\"Meter Type\", \"Unit\"]] = manual_readings['Meter Type'].str.split(' ',expand=True)\nmanual_readings = manual_readings[[\"Date\", \"Meter Type\", \"Unit\", \"Value\", \"Kommentar\"]]\nmanual_readings", "_____no_output_____" ], [ "## Code Snippet for exifread for a sample image\nimport exifread\nimport os\n\n#path_name = os.path.join(os.pardir, 'data', 'raw', '2017', '2017-03-03 15.06.47.jpg')\npath_name = \"..\\data\\processed\\gas\\IMG_20200405_173910.jpg\"\n\n# Open image file for reading (binary mode)\nf = open(path_name, 'rb')\n\n# Return Exif tags\ntags = exifread.process_file(f)\n\n# Show Tags\nfor tag in tags.keys():\n if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename', 'EXIF MakerNote'):\n print(\"Key: %s, value %s\" % (tag, tags[tag]))", "_____no_output_____" ], [ "def extract_file_meta(file_path):\n basename = os.path.basename(file_path)\n \n # Open image file for reading (binary mode)\n f = open(file_path, 'rb')\n\n # Read EXIF\n tags = exifread.process_file(f)\n \n try:\n exif_datetime = str(tags[\"EXIF DateTimeOriginal\"])\n except KeyError:\n warnings.warn(\"File {file_path} does not appear to have a date in EXIF Tags.\".format(file_path=file_path))\n return()\n #exif_datetime = \"2020:01:01 00:00:00\"\n \n # Format Date\n datetime = pd.to_datetime(exif_datetime, format = \"%Y:%m:%d %H:%M:%S\")\n date = pd.to_datetime(datetime.date())\n \n return(basename, datetime, date, file_path)", "_____no_output_____" ], [ "def meta_from_files(files):\n files_meta = []\n for file_path in files:\n files_meta.append(extract_file_meta(file_path))\n df = pd.DataFrame.from_records(files_meta, columns = (\"Filename\", \"Datetime\", \"Date\", \"Filepath\"))\n return(df)", "_____no_output_____" ], [ "def meta_from_dir(dir_path):\n files = [top + os.sep + f for top, dirs, files in os.walk(dir_path) for f in files]\n files_meta = meta_from_files(files)\n return(files_meta)", "_____no_output_____" ], [ "gas_dir = os.path.join(os.pardir, \"data\", \"processed\", \"gas\")\ngas_files_meta = meta_from_dir(gas_dir)", "_____no_output_____" ], [ "gas_files_meta", "_____no_output_____" ], [ "strom_dir = os.path.join(os.pardir, \"data\", \"processed\", \"strom\")\nstrom_files_meta = meta_from_dir(strom_dir)", "_____no_output_____" ], [ "strom_files_meta", "_____no_output_____" ] ], [ [ "## Add Flag if Picture has been Labelled", "_____no_output_____" ] ], [ [ "gas_label_dir = os.path.join(os.pardir, \"data\", \"labelled\", \"gas\", \"vott-json-export\")\ngas_labelled_files = [os.path.basename(f) for top, dirs, files in os.walk(gas_label_dir) for f in files]\ngas_files_meta[\"Labelled\"] = gas_files_meta.apply(lambda row: True if row[\"Filename\"] in gas_labelled_files else False, axis=1)\ngas_files_meta", "_____no_output_____" ], [ "strom_label_dir 
= os.path.join(os.pardir, \"data\", \"labelled\", \"strom\", \"vott-json-export\")\nstrom_labelled_files = [os.path.basename(f) for top, dirs, files in os.walk(strom_label_dir) for f in files]\nstrom_files_meta[\"Labelled\"] = strom_files_meta.apply(lambda row: True if row[\"Filename\"] in strom_labelled_files else False, axis=1)\nstrom_files_meta", "_____no_output_____" ] ], [ [ "## Join Picture Data with Manual Readings", "_____no_output_____", "### Strom", "_____no_output_____" ] ], [ [ "manual_readings.head(2)", "_____no_output_____" ], [ "strom_readings_manual = manual_readings[manual_readings[\"Meter Type\"] == \"Strom\"]\nstrom_readings_manual.head(2)", "_____no_output_____" ], [ "strom_files_meta.head(2)", "_____no_output_____" ], [ "strom = strom_files_meta.merge(strom_readings_manual, left_on=\"Date\", right_on=\"Date\")\nstrom", "_____no_output_____" ] ], [ [ "### Gas", "_____no_output_____" ] ], [ [ "manual_readings.head(2)", "_____no_output_____" ], [ "gas_readings_manual = manual_readings[manual_readings[\"Meter Type\"] == \"Gas\"]\ngas_readings_manual.head(2)", "_____no_output_____" ], [ "gas_files_meta.head(2)", "_____no_output_____" ], [ "gas = gas_files_meta.merge(gas_readings_manual, left_on=\"Date\", right_on=\"Date\")\ngas", "_____no_output_____" ] ], [ [ "## Return a single dataframe at the end", "_____no_output_____" ] ], [ [ "dataset = pd.concat([strom, gas])\ndataset", "_____no_output_____" ], [ "dataset.to_csv(\"../data/processed/dataset.csv\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
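A natural follow-up to the merged meter-readings notebook above is turning cumulative meter values into per-interval consumption. The snippet below is a hypothetical continuation, not part of the original notebook; it assumes the `dataset` frame built there, with its "Meter Type", "Date" and "Value" columns:

import pandas as pd

def consumption_per_interval(dataset: pd.DataFrame) -> pd.DataFrame:
    # Sort each meter's readings chronologically, then difference the
    # cumulative register values to get usage between consecutive readings.
    out = dataset.sort_values(["Meter Type", "Date"]).copy()
    out["Consumption"] = out.groupby("Meter Type")["Value"].diff()
    return out

The first reading of each meter yields NaN, and a negative delta would flag a meter swap or a typo in the manual Excel sheet.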
cb64165e2ebacfa97c19945a233bee3825853091
9,381
ipynb
Jupyter Notebook
locale/examples/01-filter/slicing.ipynb
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
4
2020-08-07T08:19:19.000Z
2020-12-04T09:51:11.000Z
locale/examples/01-filter/slicing.ipynb
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
19
2020-08-06T00:24:30.000Z
2022-03-30T19:22:24.000Z
locale/examples/01-filter/slicing.ipynb
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
1
2021-03-09T07:50:40.000Z
2021-03-09T07:50:40.000Z
34.237226
584
0.558256
[ [ [ "%matplotlib inline\nfrom pyvista import set_plot_theme\nset_plot_theme('document')", "_____no_output_____" ] ], [ [ "Slicing {#slice_example}\n=======\n\nExtract thin planar slices from a volume.\n", "_____no_output_____" ] ], [ [ "# sphinx_gallery_thumbnail_number = 2\nimport pyvista as pv\nfrom pyvista import examples\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ], [ [ "PyVista meshes have several slicing filters bound directly to all\ndatasets. These filters allow you to slice through a volumetric dataset\nto extract and view sections through the volume of data.\n\nOne of the most common slicing filters used in PyVista is the\n`pyvista.DataSetFilters.slice_orthogonal`{.interpreted-text role=\"func\"}\nfilter which creates three orthogonal slices through the dataset\nparallel to the three Cartesian planes. For example, let\\'s slice\nthrough the sample geostatistical training image volume. First, load up\nthe volume and preview it:\n", "_____no_output_____" ] ], [ [ "mesh = examples.load_channels()\n# define a categorical colormap\ncmap = plt.cm.get_cmap(\"viridis\", 4)\n\n\nmesh.plot(cmap=cmap)", "_____no_output_____" ] ], [ [ "Note that this dataset is a 3D volume and there might be regions within\nthis volume that we would like to inspect. We can create slices through\nthe mesh to gain further insight about the internals of the volume.\n", "_____no_output_____" ] ], [ [ "slices = mesh.slice_orthogonal()\n\nslices.plot(cmap=cmap)", "_____no_output_____" ] ], [ [ "The orthogonal slices can be easily translated throughout the volume:\n", "_____no_output_____" ] ], [ [ "slices = mesh.slice_orthogonal(x=20, y=20, z=30)\nslices.plot(cmap=cmap)", "_____no_output_____" ] ], [ [ "We can also add just a single slice of the volume by specifying the\norigin and normal of the slicing plane with the\n`pyvista.DataSetFilters.slice`{.interpreted-text role=\"func\"} filter:\n", "_____no_output_____" ] ], [ [ "# Single slice - origin defaults to the center of the mesh\nsingle_slice = mesh.slice(normal=[1, 1, 0])\n\np = pv.Plotter()\np.add_mesh(mesh.outline(), color=\"k\")\np.add_mesh(single_slice, cmap=cmap)\np.show()", "_____no_output_____" ] ], [ [ "Adding slicing planes uniformly across an axial direction can also be\nautomated with the\n`pyvista.DataSetFilters.slice_along_axis`{.interpreted-text role=\"func\"}\nfilter:\n", "_____no_output_____" ] ], [ [ "slices = mesh.slice_along_axis(n=7, axis=\"y\")\n\nslices.plot(cmap=cmap)", "_____no_output_____" ] ], [ [ "Slice Along Line\n================\n\nWe can also slice a dataset along a `pyvista.Spline`{.interpreted-text\nrole=\"func\"} or `pyvista.Line`{.interpreted-text role=\"func\"} using the\n`DataSetFilters.slice_along_line`{.interpreted-text role=\"func\"} filter.\n\nFirst, define a line source through the dataset of interest. 
Please note\nthat this type of slicing is computationally expensive and might take a\nwhile if there are a lot of points in the line - try to keep the\nresolution of the line low.\n", "_____no_output_____" ] ], [ [ "model = examples.load_channels()\n\n\ndef path(y):\n \"\"\"Equation: x = a(y-h)^2 + k\"\"\"\n a = 110.0 / 160.0 ** 2\n x = a * y ** 2 + 0.0\n return x, y\n\n\nx, y = path(np.arange(model.bounds[2], model.bounds[3], 15.0))\nzo = np.linspace(9.0, 11.0, num=len(y))\npoints = np.c_[x, y, zo]\nspline = pv.Spline(points, 15)\nspline", "_____no_output_____" ] ], [ [ "Then run the filter\n", "_____no_output_____" ] ], [ [ "slc = model.slice_along_line(spline)\nslc", "_____no_output_____" ], [ "p = pv.Plotter()\np.add_mesh(slc, cmap=cmap)\np.add_mesh(model.outline())\np.show(cpos=[1, -1, 1])", "_____no_output_____" ] ], [ [ "Multiple Slices in Vector Direction\n===================================\n\nSlice a mesh along a vector direction perpendicularly.\n", "_____no_output_____" ] ], [ [ "mesh = examples.download_brain()\n\n# Create vector\nvec = np.random.rand(3)\n# Normalize the vector\nnormal = vec / np.linalg.norm(vec)\n\n# Make points along that vector for the extent of your slices\na = mesh.center + normal * mesh.length / 3.0\nb = mesh.center - normal * mesh.length / 3.0\n\n# Define the line/points for the slices\nn_slices = 5\nline = pv.Line(a, b, n_slices)\n\n# Generate all of the slices\nslices = pv.MultiBlock()\nfor point in line.points:\n slices.append(mesh.slice(normal=normal, origin=point))", "_____no_output_____" ], [ "p = pv.Plotter()\np.add_mesh(mesh.outline(), color=\"k\")\np.add_mesh(slices, opacity=0.75)\np.add_mesh(line, color=\"red\", line_width=5)\np.show()", "_____no_output_____" ] ], [ [ "Slice At Different Bearings\n===========================\n\nFrom\n[pyvista-support\\#23](https://github.com/pyvista/pyvista-support/issues/23)\n\nAn example of how to get many slices at different bearings all centered\naround a user-chosen location.\n\nCreate a point to orient slices around\n", "_____no_output_____" ] ], [ [ "ranges = np.array(model.bounds).reshape(-1, 2).ptp(axis=1)\npoint = np.array(model.center) - ranges*0.25", "_____no_output_____" ] ], [ [ "Now generate a few normal vectors to rotate a slice around the z-axis.\nUse the equation for a circle, since it's about the Z-axis.\n", "_____no_output_____" ] ], [ [ "increment = np.pi/6.\n# use a container to hold all the slices\nslices = pv.MultiBlock() # treat like a dictionary/list\nfor theta in np.arange(0, np.pi, increment):\n normal = np.array([np.cos(theta), np.sin(theta), 0.0]).dot(np.pi/2.)\n name = f'Bearing: {np.rad2deg(theta):.2f}'\n slices[name] = model.slice(origin=point, normal=normal)\nslices", "_____no_output_____" ] ], [ [ "And now display it!\n", "_____no_output_____" ] ], [ [ "p = pv.Plotter()\np.add_mesh(slices, cmap=cmap)\np.add_mesh(model.outline())\np.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
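The axis and line slicing filters in the notebook above all reduce to repeated calls of the single-plane `slice` filter. A minimal sketch of that equivalence, reusing the same `examples.load_channels()` volume (7 evenly spaced y-planes, mirroring `slice_along_axis(n=7, axis="y")`):

import numpy as np
import pyvista as pv
from pyvista import examples

mesh = examples.load_channels()
ymin, ymax = mesh.bounds[2], mesh.bounds[3]
slices = pv.MultiBlock()
for yc in np.linspace(ymin, ymax, 7):
    # One planar cut per origin, all sharing the +y plane normal
    slices.append(mesh.slice(normal="y", origin=(mesh.center[0], yc, mesh.center[2])))
slices.plot()

Planes placed exactly on the outer bounds can come back empty, which is one reason to prefer the built-in filter and its tolerance handling.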
cb6419a75a4c7e773c640731ecc816381d7ba066
45,314
ipynb
Jupyter Notebook
graphistry.ipynb
experoinc/notebooks
5e9de424a514512e32ae60fcaceb6310794b38f8
[ "Apache-2.0" ]
3
2018-11-26T13:33:08.000Z
2020-11-01T14:33:30.000Z
graphistry.ipynb
experoinc/notebooks
5e9de424a514512e32ae60fcaceb6310794b38f8
[ "Apache-2.0" ]
null
null
null
graphistry.ipynb
experoinc/notebooks
5e9de424a514512e32ae60fcaceb6310794b38f8
[ "Apache-2.0" ]
null
null
null
63.199442
22,180
0.696805
[ [ [ "# Exploring Graph Datasets in Jupyter\n\nJupyter notebooks are perfect environments for both carrying out and capturing exploratory work. Even on moderately sized datasets they provide an interactive environment that can drive both local and remote computational tasks.\n\nIn this example, we will load a dataset using pandas, visualise it using the Graphistry graph service and import that into NetworkX so we can examine the data and run analytics on the graph.\n\n### Python Package Network\nOur raw python module requirements data comes in the form of a csv, which we use pandas to load and create a DataFrame for us. Each python module (Node) is related to another via a version number (Edge).", "_____no_output_____" ] ], [ [ "import pandas\nrawgraph = pandas.read_csv('./requirements.csv')", "_____no_output_____" ] ], [ [ "We also print out the first 15 rows of the data so we can see what it contains.", "_____no_output_____" ] ], [ [ "print('Number of Entries', rawgraph.count())\nrawgraph.head(15)", "Number of Entries package 110104\nrequirement 73773\npackage_name 108872\npackage_version 108872\ndtype: int64\n" ] ], [ [ "We notice straight away that our dataset has some NaN values for packages that have no requirements; this is a shortcoming of our dataset and we want to prevent those NaNs from propagating.\n\nThere are a few ways to handle this depending on whether we want to preserve the nodes in the graph or not; in this example we'll just drop that data using pandas.", "_____no_output_____" ] ], [ [ "rawgraph.dropna(inplace=True)\nrawgraph.head(15)", "_____no_output_____" ] ], [ [ "## Visualizing the Graph\n\nEfficient visualisations of anything but small graphs can be challenging in a local python environment; there are multiple ways around this, but here we'll use a library and cloud-based service called Graphistry.\n\nFirst we'll start up Graphistry using our API key in order to access the cloud-based rendering service.", "_____no_output_____" ] ], [ [ "from os import environ\nfrom dotenv import load_dotenv, find_dotenv\nimport graphistry\n\nload_dotenv(find_dotenv())\ngraphistry.register(key=environ.get(\"GRAPHISTRY_API_KEY\"))", "A new version of PyGraphistry is available (installed=0.9.43 latest=0.9.49).\n" ] ], [ [ "Next we'll plot the raw graph. Graphistry provides an awesome interactive plot widget in Jupyter that of course allows us to interact with the graph itself, but there are more options. If you have time to play, check out in particular:\n\n- Full screen mode\n- Layout settings (via the cogs icon)\n- Histograms and Data Table\n- The Workbook, which launches a cloud-based instance of Graphistry outside of Jupyter\n- Visual Clustering!", "_____no_output_____" ] ], [ [ "plotter = graphistry.bind(source=\"requirement\", destination=\"package_name\")\nplotter.plot(rawgraph)", "_____no_output_____" ] ], [ [ "Next we'll load our raw graph data into a NetworkX graph and run some analytics on the network. This dataset is heavily weighted by packages with a few requirements. 
Note: We are loading this as a DirectedGraph which will allow the direction of dependencies to be preserved.", "_____no_output_____" ] ], [ [ "import networkx as nx\nG = nx.from_pandas_dataframe(rawgraph, 'package_name', 'requirement', \n edge_attr='package_version', create_using=nx.DiGraph())\nprint('Nodes:', G.number_of_nodes())\nprint('Edges:', G.number_of_edges())", "Nodes: 26234\nEdges: 72252\n" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n\ndegrees = np.array(nx.degree_histogram(G))\n\nplt.bar(range(1,20), degrees[1:20])\nplt.xlabel('# requirements')\nplt.ylabel('# packages')\nplt.title('Degree - Dependencies per Package')\nplt.grid(True)\nplt.show()", "_____no_output_____" ] ], [ [ "We can see this network is dominated by packages with a single requirement, accounting for 37% of the nodes.", "_____no_output_____" ] ], [ [ "print('% of packages with only 1 requirement', \n '{:.1f}%'.format(100 * degrees[1] / G.number_of_nodes()), ',', degrees[1], 'packages total')", "% of packages with only 1 requirement 37.0% , 9707 packages total\n" ], [ "highestDegree = len(degrees) - 1\nnodesByDegree = G.degree()\n\nmostConnectedNode = [n for n in nodesByDegree if nodesByDegree[n] == highestDegree][0]\n\nprint(mostConnectedNode)\n\nprint('The package with most requirements is >',mostConnectedNode,\n '< having a total of', len(degrees), 'first order dependencies')", "requests\nThe package with most requirements is > requests < having a total of 3081 first order dependencies\n" ] ], [ [ "However, we are looking at all connections to requests in this directed graph, so this is a combination of its dependencies and packages that are dependent on it. We can see how that is split by looking at the in- and out-degree.", "_____no_output_____" ] ], [ [ "print('Dependencies:', G.out_degree([mostConnectedNode])[mostConnectedNode])\nprint('Dependants:', G.in_degree([mostConnectedNode])[mostConnectedNode])", "Dependencies: 6\nDependants: 3074\n" ] ], [ [ "So rather than having a lot of requirements, we've discovered that `requests` actually has few requirements and is instead a heavily used module. We can take a closer look by extracting a sub-graph of `requests`' immediate neighbours and visualising this.", "_____no_output_____" ] ], [ [ "R = G.subgraph([mostConnectedNode]+G.neighbors(mostConnectedNode)+G.predecessors(mostConnectedNode))\ngraphistry.bind(source=\"requirement\", destination=\"package_name\").plot(R)", "WARNING: \"node\" is unbound, automatically binding it to \"__nodeid__\".\n" ] ], [ [ "Note the visualisation above was created by plotting the sub-graph then using Graphistry's Visual Clustering to do its stuff.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
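Because edges in the notebook above run from a package to the thing it requires, a node's in-degree counts its direct dependants. A short sketch of ranking packages that way (it assumes the directed graph `G` from the notebook; note that newer NetworkX releases return `(node, degree)` pairs from `in_degree()` rather than a dict):

import networkx as nx

def top_depended_upon(G: nx.DiGraph, k: int = 5):
    # Highest in-degree = most directly depended-upon packages
    return sorted(G.in_degree(), key=lambda pair: pair[1], reverse=True)[:k]

nx.pagerank(G) would go a step further and also reward packages required by other heavily required packages.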
cb6430d4ca7856c61252f49fe84c778fd13a985b
73,421
ipynb
Jupyter Notebook
Python/Hands-on ML with Scikit-Learn, Keras and TensorFlow.ipynb
DANancy/SQL-Playground
b82e3689ccc4771ee59c3472db78333ba17671b9
[ "MIT" ]
null
null
null
Python/Hands-on ML with Scikit-Learn, Keras and TensorFlow.ipynb
DANancy/SQL-Playground
b82e3689ccc4771ee59c3472db78333ba17671b9
[ "MIT" ]
null
null
null
Python/Hands-on ML with Scikit-Learn, Keras and TensorFlow.ipynb
DANancy/SQL-Playground
b82e3689ccc4771ee59c3472db78333ba17671b9
[ "MIT" ]
null
null
null
158.576674
58,276
0.854878
[ [ [ "import os\nimport tarfile\nfrom six.moves import urllib", "_____no_output_____" ], [ "DOWNLOAD_ROOT = \"https://github.com/ageron/handson-ml2/raw/master/\"\nHOUSING_PATH = os.path.join(\"datasets\",\"housing\")\nHOUSING_URL = DOWNLOAD_ROOT + \"datasets/housing/housing.tgz\"", "_____no_output_____" ], [ "def fetch_housing_data(housing_url=HOUSING_URL,housing_path=HOUSING_PATH):\n if not os.path.isdir(housing_path):\n os.makedirs(housing_path)\n tgz_path = os.path.join(housing_path,\"housing.tgz\")\n urllib.request.urlretrieve(housing_url,tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()", "_____no_output_____" ], [ "import pandas as pd\ndef load_housing_data(housing_path=HOUSING_PATH):\n csv_path=os.path.join(housing_path,\"housing.csv\")\n return pd.read_csv(csv_path)", "_____no_output_____" ], [ "fetch_housing_data()\nhousing=load_housing_data()\nhousing.head(5)", "_____no_output_____" ], [ "housing.describe()", "_____no_output_____" ], [ "housing.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 20640 entries, 0 to 20639\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 longitude 20640 non-null float64\n 1 latitude 20640 non-null float64\n 2 housing_median_age 20640 non-null float64\n 3 total_rooms 20640 non-null float64\n 4 total_bedrooms 20433 non-null float64\n 5 population 20640 non-null float64\n 6 households 20640 non-null float64\n 7 median_income 20640 non-null float64\n 8 median_house_value 20640 non-null float64\n 9 ocean_proximity 20640 non-null object \ndtypes: float64(9), object(1)\nmemory usage: 1.6+ MB\n" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nhousing.hist(bins=50,figsize=(20,15))\nplt.show()", "_____no_output_____" ], [ "import numpy as np\ndef split_train_test(data,test_ratio):\n # Shuffle the row indices, then carve off the first test_ratio fraction as the test set\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return data.iloc[train_indices], data.iloc[test_indices]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
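The `split_train_test` helper above reshuffles on every run, so over repeated sessions the model eventually sees the whole dataset. One widely used remedy, sketched below rather than taken from the notebook, is to derive set membership from a hash of a stable row identifier, so an instance's train/test assignment never changes:

import numpy as np
import pandas as pd
from zlib import crc32

def split_train_test_by_id(data: pd.DataFrame, test_ratio: float, id_column: str):
    # A row lands in the test set iff the hash of its id falls in the
    # lowest test_ratio fraction of the 32-bit hash space.
    ids = data[id_column]
    in_test = ids.apply(lambda i: crc32(np.int64(i)) & 0xffffffff < test_ratio * 2**32)
    return data.loc[~in_test], data.loc[in_test]

For the housing frame, `housing.reset_index()` supplies an `index` column to hash, at the cost of assuming new data is only ever appended.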
cb643a6a2458d49f6d924a0e43e169cb795015d6
122,038
ipynb
Jupyter Notebook
src/ForestFiresAnalysis.ipynb
cemsinano/ForestFireData-ML
030ab7338b8d3edd187b01027cb5baa501e5e3a3
[ "MIT" ]
null
null
null
src/ForestFiresAnalysis.ipynb
cemsinano/ForestFireData-ML
030ab7338b8d3edd187b01027cb5baa501e5e3a3
[ "MIT" ]
null
null
null
src/ForestFiresAnalysis.ipynb
cemsinano/ForestFireData-ML
030ab7338b8d3edd187b01027cb5baa501e5e3a3
[ "MIT" ]
null
null
null
85.281621
17,138
0.775185
[ [ [ "# Forest Fire Mini Project \n\n> In this file, you can find the analysis for this project. For the final report please visit `Report.ipynb`", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score, mean_squared_error\n\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom sklearn.feature_selection import RFE\n\nfrom sklearn.svm import SVR\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Introduction\n\n\n### Data Information\n\nThe Forest Fires data is available at UCI; to reach it, please click [here](http://archive.ics.uci.edu/ml/datasets/Forest+Fires).\n\nThe citation to this data set: \n\n[Cortez and Morais, 2007] P. Cortez and A. Morais. A Data Mining Approach to Predict Forest Fires using Meteorological Data. In J. Neves, M. F. Santos and J. Machado Eds., New Trends in Artificial Intelligence, Proceedings of the 13th EPIA 2007 - Portuguese Conference on Artificial Intelligence, December, Guimarães, Portugal, pp. 512-523, 2007. APPIA, ISBN-13 978-989-95618-0-9. Available at: [http://www.dsi.uminho.pt/~pcortez/fires.pdf](http://www3.dsi.uminho.pt/pcortez/fires.pdf)\n \n#### Attributes:\n\n1. `X` - x-axis spatial coordinate within the Montesinho park map: 1 to 9 \n2. `Y` - y-axis spatial coordinate within the Montesinho park map: 2 to 9 \n3. `month` - month of the year: 'jan' to 'dec' \n4. `day` - day of the week: 'mon' to 'sun' \n5. `FFMC` - FFMC index from the FWI system: 18.7 to 96.20 \n6. `DMC` - DMC index from the FWI system: 1.1 to 291.3 \n7. `DC` - DC index from the FWI system: 7.9 to 860.6 \n8. `ISI` - ISI index from the FWI system: 0.0 to 56.10 \n9. `temp` - temperature in Celsius degrees: 2.2 to 33.30 \n10. `RH` - relative humidity in %: 15.0 to 100 \n11. `wind` - wind speed in km/h: 0.40 to 9.40 \n12. `rain` - outside rain in mm/m2 : 0.0 to 6.4 \n13. `area` - the burned area of the forest (in ha): 0.00 to 1090.84 \n\n\n#### Model and Feature Selection Process:\n\nI will also try to predict the `area` variable via regression models.\n\n - First, I fit the data with all features to Random Forest Regression with pruned `depth` hyperparameters.\n - Then I will use Lasso (L1 regularization) Regression and ElasticNet (L1+L2 regularization) Regression to select features. 
I will not use Ridge (L2 regularization) since it does not produce any exactly zero-weighted features.\n - As a last step, I will fit the data to Random Forest Regression with pruned `depth` hyperparameters onto both feature sets selected by Lasso and ElasticNet.\n\n\n", "_____no_output_____" ] ], [ [ "# load the dataset: \nforestfires = pd.read_csv(\"http://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv\")\n\n# Write the data frame to a csv file:\nforestfires.to_csv(\"../data/forestfires.csv\", encoding='utf-8', index=False)", "_____no_output_____" ], [ "forestfires.head()", "_____no_output_____" ], [ "forestfires.describe()", "_____no_output_____" ], [ "forestfires.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 517 entries, 0 to 516\nData columns (total 13 columns):\nX 517 non-null int64\nY 517 non-null int64\nmonth 517 non-null object\nday 517 non-null object\nFFMC 517 non-null float64\nDMC 517 non-null float64\nDC 517 non-null float64\nISI 517 non-null float64\ntemp 517 non-null float64\nRH 517 non-null int64\nwind 517 non-null float64\nrain 517 non-null float64\narea 517 non-null float64\ndtypes: float64(8), int64(3), object(2)\nmemory usage: 52.6+ KB\n" ] ], [ [ "### Response Variable and Predictors:\n\n**Response Variable:** `area`, which is the burned area of the forest. \n- We see the original paper used this variable after log transformation since the *variable is very skewed towards 0.0*. After fitting the models, the outputs were post-processed with the inverse of the ln(x+1) transform.\n\n**Predictors:** We need to assign dummy variables for the categorical variables `month` and `day`. ", "_____no_output_____" ] ], [ [ "# histogram for area response variable:\nplt.hist(forestfires.area)\nplt.title(\"Burned Area\")\nplt.savefig('../results/AreaBeforeTransformation.png')", "_____no_output_____" ], [ "## after log transformation : \nplt.hist(np.log(forestfires.area+1))\nplt.title(\"Log Transformed Burned Area\")\nplt.savefig('../results/AreaAfterTransformation.png')", "_____no_output_____" ] ], [ [ " > As we can see from the histograms, the log transformation helps the area variable to spread out. 
", "_____no_output_____" ] ], [ [ "## Encode the categorical variables:\n\n# one way is: \n#le = LabelEncoder()\n#forestfires[\"month\"] = le.fit_transform(forestfires[\"month\"])\n#forestfires[\"day\"] = le.fit_transform(forestfires[\"day\"])\n\n\nforestfires = pd.get_dummies(forestfires, prefix='m', columns=['month'])\nforestfires = pd.get_dummies(forestfires, prefix='d', columns=['day'])", "_____no_output_____" ], [ "## after encoding: \nforestfires.head()", "_____no_output_____" ], [ "## X is the predictors' dataframe\n## y is the response vector\nX = forestfires.loc[:, forestfires.columns != \"area\"]\ny = np.log(forestfires.area +1)\n\n## save them into csv\nX.to_csv(\"../results/X.csv\", encoding='utf-8', index=False)\ny.to_csv(\"../results/y.csv\", encoding='utf-8', index=False)", "_____no_output_____" ], [ "## split the data: \n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=19)", "_____no_output_____" ] ], [ [ "### Random Forest with all features:", "_____no_output_____" ] ], [ [ "randomforestscores = []", "_____no_output_____" ], [ "randf = RandomForestRegressor(max_depth=5)\nrandf.fit(X_train,y_train)\nrandomforestscores.append(randf.score(X_test,y_test))", "_____no_output_____" ] ], [ [ "### Lasso Regression (L1 Regularization): ", "_____no_output_____" ] ], [ [ "alpha_settings = [10**-4,10**-3,10**-2,10**-1,1,10**1,10**2,10**3,10**4]\nTrain_errorL = []\nValid_errorL = []\nNonzeros = []\n\nfor alp in alpha_settings:\n clfLasso = Lasso(alpha=alp)\n clfLasso.fit(X_train, y_train) \n \n Train_errorL.append(np.sqrt(mean_squared_error(y_train, clfLasso.predict(X_train))))\n Valid_errorL.append(np.sqrt(mean_squared_error(y_test, clfLasso.predict(X_test))))\n #Train_error.append(1- clf.score(X,y))\n #Valid_error.append(1- clf.score(Xvalidate,yvalidate))\n print(\"For alpha value of \", alp)\n print(\"RMSE Training error:\", np.sqrt(mean_squared_error(y_train, clfLasso.predict(X_train))))\n print(\"RMSE Validation error:\", np.sqrt(mean_squared_error(y_test, clfLasso.predict(X_test))))\n print(\"Number of non-zero features\",np.count_nonzero(clfLasso.coef_))\n Nonzeros.append(np.count_nonzero(clfLasso.coef_))\n print(\"-------------------------------------------\")\n\nplt.semilogx(alpha_settings, Train_errorL, label=\"training error\")\nplt.semilogx(alpha_settings, Valid_errorL, label=\"test error\")\nplt.legend()\nplt.ylabel(\"RMSE\")\nplt.xlabel(\"Alpha\")\nplt.title(\"Lasso\")\nplt.savefig('../results/LassoError.png')\n\nprint(\"---Optimal alpha for Lasso is\",alpha_settings[np.argmin(Valid_errorL)])", "For alpha value of 0.0001\nRMSE Training error: 1.32581403332\nRMSE Validation error: 1.47423953797\nNumber of non-zero features 27\n-------------------------------------------\nFor alpha value of 0.001\nRMSE Training error: 1.32680200412\nRMSE Validation error: 1.4533743819\nNumber of non-zero features 25\n-------------------------------------------\nFor alpha value of 0.01\nRMSE Training error: 1.34442253977\nRMSE Validation error: 1.3981621192\nNumber of non-zero features 18\n-------------------------------------------\nFor alpha value of 0.1\nRMSE Training error: 1.37631711496\nRMSE Validation error: 1.40485183101\nNumber of non-zero features 8\n-------------------------------------------\nFor alpha value of 1\nRMSE Training error: 1.38765527583\nRMSE Validation error: 1.4052674921\nNumber of non-zero features 3\n-------------------------------------------\nFor alpha value of 10\nRMSE Training error: 1.39149771914\nRMSE Validation error: 
1.40836237248\nNumber of non-zero features 0\n-------------------------------------------\nFor alpha value of 100\nRMSE Training error: 1.39149771914\nRMSE Validation error: 1.40836237248\nNumber of non-zero features 0\n-------------------------------------------\nFor alpha value of 1000\nRMSE Training error: 1.39149771914\nRMSE Validation error: 1.40836237248\nNumber of non-zero features 0\n-------------------------------------------\nFor alpha value of 10000\nRMSE Training error: 1.39149771914\nRMSE Validation error: 1.40836237248\nNumber of non-zero features 0\n-------------------------------------------\n---Optimal alpha for Lasso is 0.01\n" ], [ "plt.figure()\nplt.semilogx(alpha_settings,Nonzeros)\nplt.title(\"Number of Non-zero Features vs Alpha\")\nplt.xlabel(\"Alpha\")\nplt.ylabel(\"Number of non-zero Features\")", "_____no_output_____" ] ], [ [ " > Lasso gives `alpha =0.01` as optimum value. So, let's choose alpha as 0.01 and select variables which are not zero with this value of alpha.", "_____no_output_____" ] ], [ [ "clfLasso = Lasso(alpha=0.01)\nclfLasso.fit(X_train, y_train) \nnp.count_nonzero(clfLasso.coef_)", "_____no_output_____" ], [ "# Lasso Selected Variables:\nrfe = RFE(Lasso(alpha=0.01),n_features_to_select = 18 )\nrfe.fit(X_train,y_train)\nrfe.score(X_test,y_test)", "_____no_output_____" ], [ "X.columns[rfe.support_]", "_____no_output_____" ], [ "Xlasso = X[(X.columns[rfe.support_])]\n#save Xlasso into csv\nXlasso.to_csv(\"../results/Xlasso.csv\", encoding='utf-8', index=False)", "_____no_output_____" ] ], [ [ "### ElasticNet Regression (L1+L2 Regularization): ", "_____no_output_____" ] ], [ [ "alpha_settings = [10**-4,10**-3,10**-2,10**-1,1,10**1,10**2,10**3,10**4]\nTrain_errorEN = []\nValid_errorEN = []\nNonzeros = []\n\nfor alp in alpha_settings:\n clfElasticNet = ElasticNet(alpha=alp,normalize =True)\n clfElasticNet.fit(X_train, y_train) \n # mean_squared_err = lambda y, yhat: np.mean((y-yhat)**2)\n \n Train_errorEN.append(np.sqrt(mean_squared_error(y_train, clfElasticNet.predict(X_train))))\n Valid_errorEN.append(np.sqrt(mean_squared_error(y_test, clfElasticNet.predict(X_test))))\n #Train_error.append(1- clf.score(X,y))\n #Valid_error.append(1- clf.score(Xvalidate,yvalidate))\n print(\"For alpha value of \", alp)\n print(\"RMSE Training error:\", np.sqrt(mean_squared_error(y_train, clfElasticNet.predict(X_train))))\n print(\"RMSE Validation error:\", np.sqrt(mean_squared_error(y_test, clfElasticNet.predict(X_test))))\n print(\"Number of non-zero features\",np.count_nonzero(clfElasticNet.coef_))\n Nonzeros.append(np.count_nonzero(clfElasticNet.coef_))\n print(\"-----------------------\")\n\nplt.semilogx(alpha_settings, Train_errorEN, label=\"training error\")\nplt.semilogx(alpha_settings, Valid_errorEN, label=\"test error\")\nplt.legend()\nplt.ylabel(\"RMSE\")\nplt.xlabel(\"Alpha\")\nplt.title(\"ElasticNet\")\nplt.savefig('../results/ElasticNetError.png')\n\nprint(\"---Optimal alpha for Elastic Net is\",alpha_settings[np.argmin(Valid_errorEN)])", "For alpha value of 0.0001\nRMSE Training error: 1.32852981904\nRMSE Validation error: 1.46517227674\nNumber of non-zero features 28\n-----------------------\nFor alpha value of 0.001\nRMSE Training error: 1.34089582112\nRMSE Validation error: 1.43086081709\nNumber of non-zero features 26\n-----------------------\nFor alpha value of 0.01\nRMSE Training error: 1.38692234617\nRMSE Validation error: 1.40371842361\nNumber of non-zero features 5\n-----------------------\nFor alpha value of 0.1\nRMSE Training error: 
1.39149771914\nRMSE Validation error: 1.40836237248\nNumber of non-zero features 0\n-----------------------\nFor alpha value of 1\nRMSE Training error: 1.39149771914\nRMSE Validation error: 1.40836237248\nNumber of non-zero features 0\n-----------------------\nFor alpha value of 10\nRMSE Training error: 1.39149771914\nRMSE Validation error: 1.40836237248\nNumber of non-zero features 0\n-----------------------\nFor alpha value of 100\nRMSE Training error: 1.39149771914\nRMSE Validation error: 1.40836237248\nNumber of non-zero features 0\n-----------------------\nFor alpha value of 1000\nRMSE Training error: 1.39149771914\nRMSE Validation error: 1.40836237248\nNumber of non-zero features 0\n-----------------------\nFor alpha value of 10000\nRMSE Training error: 1.39149771914\nRMSE Validation error: 1.40836237248\nNumber of non-zero features 0\n-----------------------\n---Optimal alpha for Elastic Net is 0.01\n" ], [ "plt.figure()\nplt.semilogx(alpha_settings,Nonzeros)\nplt.title(\"Number of Non-zero Features vs Alpha for ElasticNet\")\nplt.xlabel(\"Alpha\")\nplt.ylabel(\"Number of non-zero Features\")\n", "_____no_output_____" ], [ "clfElasticNet = ElasticNet(alpha=0.01)\nclfElasticNet.fit(X_train, y_train) \nnp.count_nonzero(clfElasticNet.coef_)", "_____no_output_____" ], [ "# ElasticNet Selected Variables:\nrfe = RFE(ElasticNet(alpha=0.01), n_features_to_select=22)\nrfe.fit(X_train,y_train)\nrfe.score(X_test,y_test)", "_____no_output_____" ], [ "X.columns[rfe.support_]", "_____no_output_____" ], [ "XelasticNet = X[(X.columns[rfe.support_])]\n#save Xlasso into csv\nXelasticNet.to_csv(\"../results/XElasticNet.csv\", encoding='utf-8', index=False)\n", "_____no_output_____" ] ], [ [ "### Random Forest Regressions with only Selected Features from Lasso and ElasticNet", "_____no_output_____" ] ], [ [ "## for Lasso features:\nX_train, X_test, y_train, y_test = train_test_split(Xlasso, y, test_size=0.33, random_state=19)", "_____no_output_____" ], [ "randf = RandomForestRegressor(max_depth=5)\nrandf.fit(X_train,y_train)\nrandomforestscores.append(randf.score(X_test,y_test))\n", "_____no_output_____" ], [ "## for ElasticNet features:\n\nX_train, X_test, y_train, y_test = train_test_split(XelasticNet, y, test_size=0.33, random_state=19)", "_____no_output_____" ], [ "randf = RandomForestRegressor(max_depth=5)\nrandf.fit(X_train,y_train)\nrandomforestscores.append(randf.score(X_test,y_test))\nrandomforestscores = pd.DataFrame(randomforestscores)", "_____no_output_____" ], [ "randomforestscores = randomforestscores.rename(index={0: 'RandomForest with all features(29)'})\nrandomforestscores = randomforestscores.rename(index={1: 'RandomForest with Lasso features(18)'})\nrandomforestscores = randomforestscores.rename(index={2: 'RandomForest with ElasticNet features(22)'})\nrandomforestscores", "_____no_output_____" ], [ "pd.DataFrame(np.transpose(randomforestscores)).to_csv(\"../results/RandomForestScores.csv\", encoding='utf-8', index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
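Since the notebook above fits every model on ln(area + 1), its RMSE figures are in log units; reporting burned area in hectares means pushing predictions back through the inverse transform, as the cited paper does. A minimal sketch, assuming the fitted `randf` regressor and the train/test split from the notebook:

import numpy as np
from sklearn.metrics import mean_squared_error

# log space -> hectares: expm1 is the inverse of log1p, i.e. of ln(x + 1)
area_pred = np.expm1(randf.predict(X_test))
area_true = np.expm1(y_test)
print("RMSE in hectares:", np.sqrt(mean_squared_error(area_true, area_pred)))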
cb643f0a108da0c196529045280c4c064bb3d63d
156,061
ipynb
Jupyter Notebook
notebook.ipynb
camilabga/lit
ea31a66186ae844001bea8efef28c62b143ebd53
[ "Apache-2.0" ]
null
null
null
notebook.ipynb
camilabga/lit
ea31a66186ae844001bea8efef28c62b143ebd53
[ "Apache-2.0" ]
null
null
null
notebook.ipynb
camilabga/lit
ea31a66186ae844001bea8efef28c62b143ebd53
[ "Apache-2.0" ]
null
null
null
143.306703
46,126
0.609845
[ [ [ "# MARATONA BEHIND THE CODE 2020\n\n## CHALLENGE 6 - LIT", "_____no_output_____", "<hr>", "_____no_output_____", "## Installing Libs", "_____no_output_____" ] ], [ [ "!pip install scikit-learn --upgrade", "_____no_output_____" ], [ "!pip install xgboost --upgrade", "_____no_output_____" ], [ "!pip install imblearn --upgrade", "Collecting imblearn\n Downloading imblearn-0.0-py2.py3-none-any.whl (1.9 kB)\nRequirement already satisfied, skipping upgrade: imbalanced-learn in /home/barbosa/Libraries/anaconda3/envs/mlenv/lib/python3.8/site-packages (from imblearn) (0.7.0)\nRequirement already satisfied, skipping upgrade: joblib>=0.11 in /home/barbosa/Libraries/anaconda3/envs/mlenv/lib/python3.8/site-packages (from imbalanced-learn->imblearn) (0.16.0)\nRequirement already satisfied, skipping upgrade: scikit-learn>=0.23 in /home/barbosa/Libraries/anaconda3/envs/mlenv/lib/python3.8/site-packages (from imbalanced-learn->imblearn) (0.23.1)\nRequirement already satisfied, skipping upgrade: scipy>=0.19.1 in /home/barbosa/Libraries/anaconda3/envs/mlenv/lib/python3.8/site-packages (from imbalanced-learn->imblearn) (1.5.0)\nRequirement already satisfied, skipping upgrade: numpy>=1.13.3 in /home/barbosa/Libraries/anaconda3/envs/mlenv/lib/python3.8/site-packages (from imbalanced-learn->imblearn) (1.19.1)\nRequirement already satisfied, skipping upgrade: threadpoolctl>=2.0.0 in /home/barbosa/Libraries/anaconda3/envs/mlenv/lib/python3.8/site-packages (from scikit-learn>=0.23->imbalanced-learn->imblearn) (2.1.0)\nInstalling collected packages: imblearn\nSuccessfully installed imblearn-0.0\n" ] ], [ [ "<hr>", "_____no_output_____", "## Downloading the datasets in .csv format", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "!wget --no-check-certificate --content-disposition https://raw.githubusercontent.com/vanderlei-test/dataset-3/master/training_dataset.csv\ndf_training_dataset = pd.read_csv(r'training_dataset.csv')\ndf_training_dataset.tail()", "--2020-09-04 17:09:43-- https://raw.githubusercontent.com/vanderlei-test/dataset-3/master/training_dataset.csv\nResolving raw.githubusercontent.com (raw.githubusercontent.com)...151.101.192.133, 151.101.128.133, 151.101.64.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.192.133|:443...connected.\nHTTP request sent, awaiting response...200 OK\nLength: 1736600 (1,7M) [text/plain]\nSaving to: ‘training_dataset.csv.4’\n\ntraining_dataset.cs 100%[===================>] 1,66M 1,74MB/s in 1,0s \n\n2020-09-04 17:09:45 (1,74 MB/s) - ‘training_dataset.csv.4’ saved [1736600/1736600]\n\n" ] ], [ [ "In the \"training_dataset.csv\" file we have some general information about the platform's users:\n\n**id**\n\n**graduacao**\n\n**universidade**\n\n**profissao**\n\n**organizacao**\n\n**pretende_fazer_cursos_lit**\n\n**interesse_mba_lit**\n\n**importante_ter_certificado**\n\n**horas_semanais_estudo**\n\n**como_conheceu_lit**\n\n**total_modulos**\n\n**modulos_iniciados**\n\n**modulos_finalizados**\n\n**certificados**\n\n**categoria**", "_____no_output_____" ] ], [ [ "df_training_dataset.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 15857 entries, 0 to 15856\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 13945 non-null float64\n 1 graduacao 13950 non-null object \n 2 universidade 13920 non-null object \n 3 profissao 13977 non-null object \n 4 organizacao 13961 non-null object \n 5 pretende_fazer_cursos_lit 13989 non-null float64\n 6 interesse_mba_lit 14003 non-null float64\n 7 importante_ter_certificado 13918 non-null float64\n 8 horas_semanais_estudo 13959 non-null float64\n 9 como_conheceu_lit 13915 non-null object \n 10 total_modulos 13987 non-null float64\n 11 modulos_iniciados 14044 non-null float64\n 12 modulos_finalizados 13924 non-null float64\n 13 certificados 13979 non-null float64\n 14 categoria 15857 non-null object \ndtypes: float64(9), object(6)\nmemory usage: 1.8+ MB\n" ], [ "df_training_dataset.nunique()", "_____no_output_____" ] ], [ [ "<hr>\n\n## Challenge details: multiclass classification\n\nThis is a challenge whose business goal is the segmentation of the users of a learning platform. For that, we can use two approaches: supervised machine learning (classification) or unsupervised learning (clustering). In this challenge classification will be applied, since a dataset with \"labels\" is available, or in other words, one that already has data examples together with the target variable.\n\nThe scikit-learn library offers several algorithms for classification. Participants are free to use whichever framework they prefer to complete this challenge.\n\nThis notebook shows an example of using the \"Decision Tree\" algorithm to classify part of the students into six different profiles.", "_____no_output_____", "# Attention!\n\nThe target column in this challenge is the ``categoria`` column", "_____no_output_____", "<hr>", "_____no_output_____" ] ], [ [ "for column in ['horas_semanais_estudo','total_modulos','modulos_iniciados','modulos_finalizados','certificados']:\n df_training_dataset[column] = df_training_dataset[column].fillna(df_training_dataset.groupby('categoria')[column].transform('mean'))", "_____no_output_____" ], [ "most_common = df_training_dataset[[\n 'categoria',\n 'graduacao','universidade','profissao','organizacao','pretende_fazer_cursos_lit', 'interesse_mba_lit', 'importante_ter_certificado','como_conheceu_lit'\n]].groupby(['categoria']).agg(lambda x:x.value_counts().index[0])\n\nfor column in ['graduacao','universidade','profissao','organizacao','pretende_fazer_cursos_lit', 'interesse_mba_lit', 'importante_ter_certificado','como_conheceu_lit']:\n df_training_dataset[column] = df_training_dataset.apply(\n lambda row: most_common.loc[row['categoria']][column] if pd.isna(row[column]) else row[column],\n axis=1\n )", "_____no_output_____" ] ], [ [ "## Pre-processing the dataset before training", "_____no_output_____", "### Removing all rows that have null values in certain columns", "_____no_output_____", "Using the Pandas **DataFrame.dropna()** method you can remove all null rows from the dataset.\n\nDocs: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.dropna.html", "_____no_output_____" ] ], [ [ "# Showing the missing data in the dataset before the first transformation (df)\nprint(\"Null values in df_training_dataset before the DropNA transformation: \\n\\n{}\\n\".format(df_training_dataset.isnull().sum(axis = 0)))", "Null values in df_training_dataset before the DropNA transformation: \n\nid 1912\ngraduacao 0\nuniversidade 0\nprofissao 0\norganizacao 0\npretende_fazer_cursos_lit 0\ninteresse_mba_lit 0\nimportante_ter_certificado 0\nhoras_semanais_estudo 0\ncomo_conheceu_lit 0\ntotal_modulos 0\nmodulos_iniciados 0\nmodulos_finalizados 0\ncertificados 0\ncategoria 0\ndtype: int64\n\n" ], [ "# Applying the function to delete all rows with NaN values in the ``certificados'' and ``total_modulos'' columns:\ndf_training_dataset = df_training_dataset.dropna(axis='index', how='any', subset=['certificados', 'total_modulos'])", "_____no_output_____" ], [ "# Showing the missing data in the dataset after the first transformation (df)\nprint(\"Null values in df_training_dataset after the DropNA transformation: \\n\\n{}\\n\".format(df_training_dataset.isnull().sum(axis = 0)))", "Null values in df_training_dataset after the DropNA transformation: \n\nid 1912\ngraduacao 0\nuniversidade 0\nprofissao 0\norganizacao 0\npretende_fazer_cursos_lit 0\ninteresse_mba_lit 0\nimportante_ter_certificado 0\nhoras_semanais_estudo 0\ncomo_conheceu_lit 0\ntotal_modulos 0\nmodulos_iniciados 0\nmodulos_finalizados 0\ncertificados 0\ncategoria 0\ndtype: int64\n\n" ] ], [ [ "### Processing NaN values with sklearn's SimpleImputer\n\nFor the NaN values, we will use replacement by the constant 0 as an **example**.\n\nYou can choose whichever strategy you think is best to handle the null values :)\n\nDocs: https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html?highlight=simpleimputer#sklearn.impute.SimpleImputer", "_____no_output_____" ] ], [ [ "from sklearn.impute import SimpleImputer\nimport numpy as np\n\n\nimpute_zeros = SimpleImputer(\n missing_values=np.nan,\n strategy='constant',\n fill_value=0,\n verbose=0,\n copy=True\n)", "_____no_output_____" ], [ "# Showing the missing data in the dataset before the first transformation (df)\nprint(\"Null values in df_training_dataset before the SimpleImputer transformation: \\n\\n{}\\n\".format(df_training_dataset.isnull().sum(axis = 0)))\n\n# Applying the ``SimpleImputer`` transformation to the base dataset\nimpute_zeros.fit(X=df_training_dataset)\n\n# Rebuilding a Pandas DataFrame with the results\ndf_training_dataset_imputed = pd.DataFrame.from_records(\n data=impute_zeros.transform(\n X=df_training_dataset\n ),\n columns=df_training_dataset.columns\n)\n\n# Showing the missing data in the dataset after the first transformation (df)\nprint(\"Null values in df_training_dataset after the SimpleImputer transformation: \\n\\n{}\\n\".format(df_training_dataset_imputed.isnull().sum(axis = 0)))", "Null values in df_training_dataset before the SimpleImputer transformation: \n\nid 1912\ngraduacao 0\nuniversidade 0\nprofissao 0\norganizacao 0\npretende_fazer_cursos_lit 0\ninteresse_mba_lit 0\nimportante_ter_certificado 0\nhoras_semanais_estudo 0\ncomo_conheceu_lit 0\ntotal_modulos 0\nmodulos_iniciados 0\nmodulos_finalizados 0\ncertificados 0\ncategoria 0\ndtype: int64\n\nNull values in df_training_dataset after the SimpleImputer transformation: \n\nid 0\ngraduacao 0\nuniversidade 0\nprofissao 0\norganizacao 0\npretende_fazer_cursos_lit 0\ninteresse_mba_lit 0\nimportante_ter_certificado 0\nhoras_semanais_estudo 0\ncomo_conheceu_lit 0\ntotal_modulos 0\nmodulos_iniciados 0\nmodulos_finalizados 0\ncertificados 0\ncategoria 0\ndtype: int64\n\n" ] ], [ [ "### Removing unwanted columns\n\nBelow we **demonstrate** how to use the **DataFrame.drop()** method.\n\nDocs: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop.html", "_____no_output_____" ] ], [ [ "df_training_dataset_imputed.tail()", "_____no_output_____" ], [ "df_training_dataset_rmcolumns = df_training_dataset_imputed.drop(columns=['id'], inplace=False)", "_____no_output_____" ], [ "df_training_dataset_rmcolumns.tail()", "_____no_output_____" ] ], [ [ "# Attention!\n\nThe columns removed above are only an example; you can use whichever columns you want, and even create new columns with data you find important!\n", "_____no_output_____", "### Handling categorical variables\n\nAs mentioned before, computers are not good with \"categorical\" variables (or strings).\n\nGiven a column with a categorical variable, what we can do is encode that column into multiple columns containing binary variables. This process is called \"one-hot-encoding\" or \"dummy encoding\". If you are not familiar with these terms, you can read more about them on the internet :)", "_____no_output_____" ] ], [ [ "# Handling categorical variables with the Pandas ``get_dummies()'' method\ndf_training = pd.get_dummies(df_training_dataset_rmcolumns, columns=['graduacao','universidade','profissao','organizacao','como_conheceu_lit'])\ndf_training.tail()", "_____no_output_____" ] ], [ [ "# Attention!\n\nThe **categoria** column must be kept as a string. You do not need to process/encode the target variable.", "_____no_output_____", "<hr>", "_____no_output_____", "## Training a classifier based on a decision tree", "_____no_output_____", "### Selecting the FEATURES and defining the TARGET variable", "_____no_output_____" ] ], [ [ "df_training.columns", "_____no_output_____" ], [ "features = df_training[\n [\n 'pretende_fazer_cursos_lit', 'interesse_mba_lit',\n 'importante_ter_certificado', 'horas_semanais_estudo', 'total_modulos',\n 'modulos_iniciados', 'modulos_finalizados', 'certificados',\n 'graduacao_Bacharelado', 'graduacao_Especialização',\n 'graduacao_Licenciatura', 'graduacao_MBA', 'graduacao_SEM FORMAÇÃO',\n 'graduacao_Tecnólogo',\n 'universidade_CENTRO UNIVERSITÁRIO ESTÁCIO DA SÁ',\n 'universidade_Escola Paulista de Direito',\n 'universidade_FACULDADE ANHANGUERA', 'universidade_FATEC',\n 'universidade_FGV-RJ',\n 'universidade_INSPER INSTITUTO DE ENSINO E PESQUISA',\n 'universidade_UEPB', 'universidade_UFF', 'universidade_UFPE',\n 'universidade_UFRJ', 'universidade_UFRN', 'universidade_UFSCar',\n 'universidade_UNICAMP', 'universidade_UNIP',\n 'universidade_UNIVERSIDADE CRUZEIRO DO SUL',\n 'universidade_UNIVERSIDADE ESTADUAL DE PONTA GROSSA',\n 'universidade_UNIVERSIDADE NOVE DE JULHO',\n 'universidade_UNIVERSIDADE PRESBITERIANA MACKENZIE', 'universidade_USP',\n 'universidade_Unesp',\n 'universidade_Universidade Metodista de Sao Paulo',\n 'profissao_Advogado', 'profissao_Analista', 'profissao_Analista Senior',\n 'profissao_Assessor', 'profissao_Coordenador', 'profissao_Diretor',\n 'profissao_Engenheiro', 'profissao_Gerente', 'profissao_Outros',\n 'profissao_SEM EXPERIÊNCIA', 'profissao_Supervisor',\n 'profissao_Sócio/Dono/Proprietário', 'organizacao_Borracha',\n 'organizacao_Eletrodomesticos', 'organizacao_Eletroeletronicos',\n 'organizacao_Entretenimento', 'organizacao_Estado',\n 'organizacao_Laminados', 'organizacao_Montadora',\n 'organizacao_Oleo e Gas', 'organizacao_Siderurgica',\n 'organizacao_e-commerce', 'organizacao_servicos',\n 'como_conheceu_lit_Facebook', 'como_conheceu_lit_Google',\n 'como_conheceu_lit_Instagram', 'como_conheceu_lit_Linkedin',\n 'como_conheceu_lit_Minha empresa - benefício LITpass',\n 'como_conheceu_lit_Mídia (revista/jornal/web)',\n 'como_conheceu_lit_Outros', 'como_conheceu_lit_Saint Paul',\n 'como_conheceu_lit_YouTube'\n ]\n]\ntarget = df_training['categoria'] ## DO NOT CHANGE THE NAME OF THE TARGET VARIABLE.", "_____no_output_____" ] ], [ [ "### Splitting our dataset into training and test sets", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.00001, random_state=133)", "_____no_output_____" ], [ "from imblearn.over_sampling import SMOTE\nsmt = SMOTE(random_state=0)\nX_train_SMOTE, y_train_SMOTE = smt.fit_sample(X_train, y_train)", "_____no_output_____" ] ], [ [ "### Training a decision tree", "_____no_output_____" ] ], [ [ "# Method for creating decision-tree-based models\nfrom sklearn.tree import DecisionTreeClassifier\nimport xgboost as xgb\ndtc = xgb.XGBClassifier(learning_rate=0.005, \n n_estimators=1000, \n max_depth=50, \n gamma=10).fit(X_train_SMOTE, y_train_SMOTE)", "_____no_output_____" ] ], [ [ "### Making predictions on the test sample", "_____no_output_____" ] ], [ [ "y_pred = dtc.predict(X_test)\nprint(y_pred)", "['perfil2']\n" ] ], [ [ "### Analyzing the quality of the model through the confusion matrix", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\n\n\ndef plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=True):\n accuracy = np.trace(cm) / float(np.sum(cm))\n misclass = 1 - accuracy\n if cmap is None:\n cmap = plt.get_cmap('Blues')\n plt.figure(figsize=(8, 6))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n if target_names is not None:\n tick_marks = np.arange(len(target_names))\n plt.xticks(tick_marks, target_names, rotation=45)\n plt.yticks(tick_marks, target_names)\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n else:\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label\\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))\n plt.show()", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix\n\n\nplot_confusion_matrix(confusion_matrix(y_test, y_pred), ['perfil1', 'perfil2', 'perfil3', 'perfil4', 'perfil5', 'perfil6'])", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ] ], [ [ "from sklearn.metrics import classification_report\ntarget_names = ['perfil1', 'perfil2','perfil3','perfil4','perfil5','perfil6']\nprint(classification_report(y_test, y_pred, target_names=target_names))", "_____no_output_____" ] ], [ [ "## Scoring the data needed to deliver the solution", "_____no_output_____", "As the deliverable of your solution, we expect the classified results for the following dataset, called \"to_be_scored.csv\":", "_____no_output_____", "### Downloading the \"answer sheet\"", "_____no_output_____" ] ], [ [ "!wget --no-check-certificate --content-disposition https://raw.githubusercontent.com/vanderlei-test/dataset-3/master/to_be_scored.csv\ndf_to_be_scored = pd.read_csv(r'to_be_scored.csv')\ndf_to_be_scored.tail()", "--2020-09-04 18:39:19-- https://raw.githubusercontent.com/vanderlei-test/dataset-3/master/to_be_scored.csv\nResolving raw.githubusercontent.com (raw.githubusercontent.com)...151.101.0.133\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443...connected.\nHTTP request sent, awaiting response...200 OK\nLength: 96291 (94K) [text/plain]\nSaving to: ‘to_be_scored.csv.2’\n\nto_be_scored.csv.2 100%[===================>] 94,03K 481KB/s in 0,2s \n\n2020-09-04 18:39:19 (481 KB/s) - ‘to_be_scored.csv.2’ saved [96291/96291]\n\n" ] ], [ [ "# Attention!\n\nThe ``to_be_scored`` dataframe is your \"answer sheet\". Note that the \"categoria\" column does not exist in this sample, so it cannot be used to train supervised learning models.", "_____no_output_____" ] ], [ [ "df_to_be_scored.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000 entries, 0 to 999\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 1000 non-null int64 \n 1 graduacao 1000 non-null object\n 2 universidade 1000 non-null object\n 3 profissao 1000 non-null object\n 4 organizacao 1000 non-null object\n 5 pretende_fazer_cursos_lit 1000 non-null int64 \n 6 interesse_mba_lit 1000 non-null int64 \n 7 importante_ter_certificado 1000 non-null int64 \n 8 horas_semanais_estudo 1000 non-null int64 \n 9 como_conheceu_lit 1000 non-null object\n 10 total_modulos 1000 non-null int64 \n 11 modulos_iniciados 1000 non-null int64 \n 12 modulos_finalizados 1000 non-null int64 \n 13 certificados 1000 non-null int64 \ndtypes: int64(9), object(5)\nmemory usage: 109.5+ KB\n" ] ], [ [ "<hr>\n\n# Attention!\n\n# To apply your model and classify the answer sheet, you first need to apply the same column transformations that you applied to the training dataset.\n\n# Do not remove or add rows in the answer sheet. \n\n# Do not change the order of the rows in the answer sheet.\n\n# In the end, all 1000 entries must be classified, with the predicted values in a column named \"target\"\n\n<hr>", "_____no_output_____", "In the cell below, we quickly repeat the same pre-processing steps used in the decision tree example", "_____no_output_____" ] ], [ [ "\n# 1 - Removing rows with NaN values in \"certificados\" and \"total_modulos\"\ndf_to_be_scored_1 = df_to_be_scored.dropna(axis='index', how='any', subset=['certificados', 'total_modulos'])\n\n# 2 - Imputing zeros into the missing values\nimpute_zeros.fit(X=df_to_be_scored_1)\ndf_to_be_scored_2 = pd.DataFrame.from_records(\n data=impute_zeros.transform(\n X=df_to_be_scored_1\n ),\n columns=df_to_be_scored_1.columns\n)\n\n# 3 - Removing columns\ndf_to_be_scored_3 = df_to_be_scored_2.drop(columns=['id'], inplace=False)\n\n# 4 - Encoding with \"dummy variables\"\ndf_to_be_scored_4 = pd.get_dummies(df_to_be_scored_3, columns=['graduacao','universidade','profissao','organizacao','como_conheceu_lit'])\n\ndf_to_be_scored_4.tail()", "_____no_output_____" ] ], [ [ "<hr>\n\nIt can be verified below that the columns of the answer sheet are now identical to the ones used to train the model:", "_____no_output_____" ] ], [ [ "df_training[\n [\n 'pretende_fazer_cursos_lit', 'interesse_mba_lit',\n 'importante_ter_certificado', 'horas_semanais_estudo', 'total_modulos',\n 'modulos_iniciados', 'modulos_finalizados', 'certificados',\n 'graduacao_Bacharelado', 'graduacao_Especialização',\n 'graduacao_Licenciatura', 'graduacao_MBA', 'graduacao_SEM FORMAÇÃO',\n 'graduacao_Tecnólogo',\n 'universidade_CENTRO UNIVERSITÁRIO ESTÁCIO DA SÁ',\n 'universidade_Escola Paulista de Direito',\n 'universidade_FACULDADE ANHANGUERA', 'universidade_FATEC',\n 'universidade_FGV-RJ',\n 'universidade_INSPER INSTITUTO DE ENSINO E PESQUISA',\n 'universidade_UEPB', 'universidade_UFF', 'universidade_UFPE',\n 'universidade_UFRJ', 'universidade_UFRN', 'universidade_UFSCar',\n 'universidade_UNICAMP', 'universidade_UNIP',\n 'universidade_UNIVERSIDADE CRUZEIRO DO SUL',\n 'universidade_UNIVERSIDADE ESTADUAL DE PONTA GROSSA',\n 'universidade_UNIVERSIDADE NOVE DE JULHO',\n 'universidade_UNIVERSIDADE PRESBITERIANA MACKENZIE', 'universidade_USP',\n 'universidade_Unesp',\n 'universidade_Universidade Metodista de Sao Paulo',\n 'profissao_Advogado', 'profissao_Analista', 'profissao_Analista Senior',\n 'profissao_Assessor', 'profissao_Coordenador', 'profissao_Diretor',\n 'profissao_Engenheiro', 'profissao_Gerente', 'profissao_Outros',\n 'profissao_SEM EXPERIÊNCIA', 'profissao_Supervisor',\n 'profissao_Sócio/Dono/Proprietário', 'organizacao_Borracha',\n 'organizacao_Eletrodomesticos', 'organizacao_Eletroeletronicos',\n 'organizacao_Entretenimento', 'organizacao_Estado',\n 'organizacao_Laminados', 'organizacao_Montadora',\n 'organizacao_Oleo e Gas', 'organizacao_Siderurgica',\n 'organizacao_e-commerce', 'organizacao_servicos',\n 'como_conheceu_lit_Facebook', 'como_conheceu_lit_Google',\n 'como_conheceu_lit_Instagram', 'como_conheceu_lit_Linkedin',\n 'como_conheceu_lit_Minha empresa - benefício LITpass',\n 'como_conheceu_lit_Mídia (revista/jornal/web)',\n 'como_conheceu_lit_Outros', 'como_conheceu_lit_Saint Paul',\n 'como_conheceu_lit_YouTube'\n ]\n].columns", "_____no_output_____" ], [ "df_to_be_scored_4.columns", "_____no_output_____" ] ], [ [ "# Attention\n\nFor any columns that do not exist in \"df_to_be_scored\", you can use the technique below to add them:", "_____no_output_____" ] ], [ [ "y_pred = dtc.predict(df_to_be_scored_4)\ndf_to_be_scored_4['target'] = y_pred\ndf_to_be_scored_4.tail()", "_____no_output_____" ] ], [ [ "### Saving the answer sheet as a .csv file to be submitted", "_____no_output_____" ] ], [ [ "df_to_be_scored_4.to_csv(\"results1.csv\")", "_____no_output_____" ] ], [ [ "# Attention\n\n# Running the cell above will create a new \"data asset\" in your project in Watson Studio. You will need to download this file together with this notebook and create a zip file with the **results.csv** and **notebook.ipynb** files for submission. (the files must be named this way)", "_____no_output_____", "<hr>\n\n## Congratulations!\n\nIf you are already satisfied with your solution, go to the page below and send the files required for submission.\n\n# https://lit.maratona.dev\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "markdown" ], [ "code" ] ]
cb644cc10f4821b5a4120814aeee95a3d2d8d167
429,022
ipynb
Jupyter Notebook
assignments/HW1/HW1.ipynb
mycicle/cpe695
0f343dbc7674ec67fff9df5189ee1c17f9cf750a
[ "MIT" ]
null
null
null
assignments/HW1/HW1.ipynb
mycicle/cpe695
0f343dbc7674ec67fff9df5189ee1c17f9cf750a
[ "MIT" ]
null
null
null
assignments/HW1/HW1.ipynb
mycicle/cpe695
0f343dbc7674ec67fff9df5189ee1c17f9cf750a
[ "MIT" ]
null
null
null
628.143485
24,192
0.945488
[ [ [ "## Michael DiGregorio\n- Homework 1\n- CPE/EE 695 Applied Machine Learning", "_____no_output_____" ], [ "## Imports", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as pt\n\nfrom typing import List, Tuple", "_____no_output_____" ] ], [ [ "## Function Definitions", "_____no_output_____" ] ], [ [ "def get_polynomial(x):\n return 5*x + 20*x**2 + 1*x**3", "_____no_output_____" ], [ "def get_dataset(number_of_samples, noise_scale) -> Tuple[np.ndarray]:\n x_rand = np.sort(25*(np.random.rand(number_of_samples, 1) - 0.8), -1).reshape((number_of_samples,))\n noise = (noise_scale*np.random.randn(number_of_samples, 1)).reshape((number_of_samples,))\n \n x = np.sort(x_rand)\n y = np.add(get_polynomial(x), noise)\n \n x_plot = np.linspace(x[0], x[-1], 100)\n y_plot = get_polynomial(x_plot)\n \n return (x, y, x_plot, y_plot, noise)", "_____no_output_____" ], [ "def plot_arbitrary_polynomial(x, coef, title: str = f\"Polynomial\"):\n x_plot = np.linspace(x[0], x[-1], 100)\n y_seq = np.zeros(len(x_plot))\n for i in range(len(coef)):\n y_seq = np.add(y_seq, coef[i]*x_plot**i)\n pt.plot(x_plot, y_seq)\n pt.title(title)\n pt.show()\n ", "_____no_output_____" ], [ "def polyfit(x: np.array, y: np.array, order: int, title: str = None) -> Tuple[np.ndarray]:\n poly = np.polyfit(x, y, order, full=True)\n coefficients = poly[0]\n residuals = poly[1]\n pt.plot(x, y, 'ro')\n if title is not None:\n plot_arbitrary_polynomial(x, coefficients[::-1], title=title)\n else:\n plot_arbitrary_polynomial(x, coefficients[::-1])\n return (coefficients, residuals)", "_____no_output_____" ] ], [ [ "## Assignment Main Logic Starts Here", "_____no_output_____" ] ], [ [ "pt.style.use('seaborn-whitegrid')\n\n(x, y, x_plot, y_plot, noise) = get_dataset(100, 50)\npt.plot(x_plot, y_plot, 'b')\npt.plot(x, y, 'ro')", "_____no_output_____" ] ], [ [ "### Question 1\n1) Please plot the noisy data and the polynomial you found (in the same figure). You can use any\nvalue of m selected from 2, 3, 4, 5, 6.\n", "_____no_output_____" ] ], [ [ "mse: List[float] = []\norder: List[int] = [1, 2, 3, 4, 5, 6, 7, 8]\nfor m in order:\n (coef, res) = polyfit(x, y, m, title=f\"Polynomial of order: {m}\")\n function_string = \"\"\n for i in range(len(coef)):\n function_string += f\" {coef[i]}*x^{len(coef)-i-1} \"\n if i != len(coef)-1:\n function_string += '+'\n print(f\"Function: {function_string}\\n\")\n print(f\"Coefficients: {coef}\")\n print(f\"Residuals: {res}\")\n mse += res.tolist()", "_____no_output_____" ] ], [ [ "### Question 2\n2) Plot MSE versus order m, for m = 1, 2, 3, 4, 5, 6, 7, 8 respectively. Identify the best choice of m.\n", "_____no_output_____" ] ], [ [ "pt.plot(order, mse)", "_____no_output_____" ], [ "best_order = order[mse.index(min(mse))]\nprint(f\"The best order for our function was {best_order}, however, we can see that the leading coefficients for all polynomials of orders higher than 3 are close to 0. This means that they are essentially order 3 polynomials. Additionally, the mse shows next to no improvement past order 3. 
Because of this, I will be using 3 as my 'best order'\n" ] ], [ [ "### Question 3\n3) Change variable noise_scale to 150, 200, 400, 600, 1000 respectively, re-run the algorithm and\nplot the polynomials with the m found in 2). Discuss the impact of noise scale to the accuracy of\nthe returned parameters. [You need to plot a figure like in 1) for each choice of noise_scale.]\n", "_____no_output_____" ] ], [ [ "noise_scales: List[int] = [150, 200, 400, 600, 1000]\n\nfor i in range(len(noise_scales)):\n (x, y, x_plot, y_plot, noise) = get_dataset(50, noise_scales[i])\n (coef, res) = polyfit(x, y, 3, title=f\"Order = 3, noise_scale = {noise_scales[i]}\")\n function_string = \"\"\n for i in range(len(coef)):\n function_string += f\" {coef[i]}*x^{len(coef)-i-1} \"\n if i != len(coef)-1:\n function_string += '+'\n print(f\"Function: {function_string}\\n\")\n print(f\"Coefficients: {coef}\")\n print(f\"Residuals: {res}\")", "_____no_output_____" ] ], [ [ "### Question 4\n4) Change variable number_of_samples to 40, 30, 20, 10 respectively, re-ran the algorithm and plot\nthe polynomials with the m found in 2). Discuss the impact of the number of samples to the\naccuracy of the returned parameters. [You need to plot a figure like in 1) for each choice of\nnumber_of_samples.]", "_____no_output_____" ] ], [ [ "samples: List[int] = [40, 30, 20, 10]\n\nfor i in range(len(samples)):\n (x, y, x_plot, y_plot, noise) = get_dataset(samples[i], 100)\n (coef, res) = polyfit(x, y, 3, title=f\"Order = 3, num_samples = {samples[i]}\")\n function_string = \"\"\n for i in range(len(coef)):\n function_string += f\" {coef[i]}*x^{len(coef)-i-1} \"\n if i != len(coef)-1:\n function_string += '+'\n print(f\"Function: {function_string}\\n\")\n print(f\"Coefficients: {coef}\")\n print(f\"Residuals: {res}\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb644f3cfc85b69a86a40dfadddb2ce52689b3a8
4,225
ipynb
Jupyter Notebook
_notebooks/2020-10-15-p-hacking.ipynb
nocibambi/ds_blog
50022f35c7e49c6380714b696d67d473165dd88d
[ "Apache-2.0" ]
null
null
null
_notebooks/2020-10-15-p-hacking.ipynb
nocibambi/ds_blog
50022f35c7e49c6380714b696d67d473165dd88d
[ "Apache-2.0" ]
1
2020-10-03T18:10:40.000Z
2020-10-03T19:23:56.000Z
_notebooks/2020-10-15-p-hacking.ipynb
nocibambi/ds_blog
50022f35c7e49c6380714b696d67d473165dd88d
[ "Apache-2.0" ]
null
null
null
26.080247
377
0.578698
[ [ [ "# Programmatic p-hacking from scratch with Python\n", "_____no_output_____" ], [ "Below is a short script to demonstrate the 'process of p-hacking'.\n\nFrom the [Data Science from Scratch book](https://www.oreilly.com/library/view/data-science-from/9781492041122/).", "_____no_output_____" ] ], [ [ "import random\nfrom typing import List", "_____no_output_____" ] ], [ [ "First we define a usual experiment consisting of 1000 binomial trials with 0.5 probability.", "_____no_output_____" ] ], [ [ "def run_experiment(trials) -> List[bool]:\n return [random.random() < 0.5 for _ in range(trials)]\n\nexperiment = run_experiment(1000)\n\nprint(\"Proportion of heads:\", sum(experiment) / len(experiment))\nprint(\"First 10 elements:\", experiment[:10])", "Proportion of heads: 0.51\nFirst 10 elements: [True, True, True, False, False, False, False, True, True, True]\n" ] ], [ [ "Then we examine whether the outcome of an experiment is beyond the 95% confidence levels around p = 0.5, that is, the hypothesis of having a fair coin.", "_____no_output_____" ] ], [ [ "def reject_fairness(experiment: List[bool]) -> bool:\n num_heads = sum(experiment)\n return num_heads < 469 or num_heads > 531\n\nreject_fairness(experiment)", "_____no_output_____" ] ], [ [ "We run 1000 independent experiments with the exact same parameters.", "_____no_output_____" ] ], [ [ "random.seed(42)\nexperiments = [run_experiment(1000) for _ in range(1000)]", "_____no_output_____" ] ], [ [ "Now we can simply pick those experiments which fall outside the confidence bounds.", "_____no_output_____" ] ], [ [ "number_of_unfair = sum([reject_fairness(experiment) for experiment in experiments])\n\nprint(\"Number of experiments 'showing' that the coin is unfair:\", number_of_unfair)\nprint(\"\\nProbabilities:\")\nprint(\"\\t\".join([str(sum(experiment) / len(experiment)) for experiment in experiments if reject_fairness(experiment)]))", "Number of experiments 'showing' that the coin is unfair: 42\n\nProbabilities:\n0.532\t0.539\t0.535\t0.461\t0.466\t0.539\t0.467\t0.468\t0.54\t0.458\t0.468\t0.463\t0.467\t0.46\t0.461\t0.463\t0.541\t0.464\t0.538\t0.542\t0.461\t0.465\t0.468\t0.538\t0.466\t0.46\t0.468\t0.534\t0.535\t0.468\t0.537\t0.468\t0.535\t0.538\t0.451\t0.537\t0.463\t0.466\t0.46\t0.536\t0.466\t0.467\n" ] ],
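[ [ "This count is in line with what chance alone predicts: even for a fair coin, each experiment independently falls outside the 469-531 acceptance band with probability of roughly 5%, so out of 1000 experiments we should expect on the order of 50 false positives, close to the 42 found above. A minimal sketch of that calculation, reusing the thresholds from `reject_fairness` (standard library only; `math.comb` needs Python 3.8+):", "_____no_output_____" ] ], [ [ "from math import comb\n\n# Probability that a single fair-coin experiment is (wrongly) flagged as unfair,\n# using the same 469/531 thresholds as reject_fairness above.\np_within_band = sum(comb(1000, k) for k in range(469, 532)) / 2**1000\np_reject = 1 - p_within_band\nprint(\"Per-experiment false positive rate:\", p_reject)\nprint(\"Expected 'unfair' experiments out of 1000:\", 1000 * p_reject)", "_____no_output_____" ] ] ]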
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb64691578f0e71d859e669b2de9e49c6224ac56
22,952
ipynb
Jupyter Notebook
transformers_doc/training.ipynb
hellosamstuart/notebooks
3a3918acb0143dd034e53418738f755046883528
[ "Apache-2.0" ]
null
null
null
transformers_doc/training.ipynb
hellosamstuart/notebooks
3a3918acb0143dd034e53418738f755046883528
[ "Apache-2.0" ]
null
null
null
transformers_doc/training.ipynb
hellosamstuart/notebooks
3a3918acb0143dd034e53418738f755046883528
[ "Apache-2.0" ]
null
null
null
31.312415
178
0.619467
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb6481f67049286016436657bddbad9c42ca9e74
11,210
ipynb
Jupyter Notebook
tensorflow-intro/notebooks/4-tensorboard.ipynb
alexandrnikitin/workshops
94dbdbdaf37aebb1689b13f96e911dd0362e617c
[ "MIT" ]
null
null
null
tensorflow-intro/notebooks/4-tensorboard.ipynb
alexandrnikitin/workshops
94dbdbdaf37aebb1689b13f96e911dd0362e617c
[ "MIT" ]
null
null
null
tensorflow-intro/notebooks/4-tensorboard.ipynb
alexandrnikitin/workshops
94dbdbdaf37aebb1689b13f96e911dd0362e617c
[ "MIT" ]
1
2017-05-01T01:33:46.000Z
2017-05-01T01:33:46.000Z
32.305476
117
0.553702
[ [ [ "# TensorBoard", "_____no_output_____" ] ], [ [ "import tensorflow as tf", "_____no_output_____" ], [ "from tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('/tmp/tensorflow/alex/mnist/input_data', one_hot=True)", "Extracting /tmp/tensorflow/alex/mnist/input_data/train-images-idx3-ubyte.gz\nExtracting /tmp/tensorflow/alex/mnist/input_data/train-labels-idx1-ubyte.gz\nExtracting /tmp/tensorflow/alex/mnist/input_data/t10k-images-idx3-ubyte.gz\nExtracting /tmp/tensorflow/alex/mnist/input_data/t10k-labels-idx1-ubyte.gz\n" ], [ "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "_____no_output_____" ] ], [ [ "### Graph", "_____no_output_____" ] ], [ [ "run_id = 'initial'\nlog_dir = '/home/jovyan/work/logs/alex/' + run_id\n\nwith tf.name_scope('input'):\n x = tf.placeholder(tf.float32, [None, 784], name='x-input')\n y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')\n\nwith tf.name_scope('input_reshape'):\n image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])\n tf.summary.image('input', image_shaped_input, max_outputs=10)\n\nwith tf.name_scope('weights'):\n W = tf.Variable(initial_value=tf.zeros([784, 10], dtype=tf.float32), name='weights', trainable=True)\n variable_summaries(W)\nwith tf.name_scope('biases'):\n b = tf.Variable(initial_value=tf.zeros([10]), dtype=tf.float32, name='bias', trainable=True)\n variable_summaries(b)\nwith tf.name_scope('Wx_plus_b'):\n y = tf.matmul(x, W) + b\n tf.summary.histogram('predictions', y)\n\nwith tf.name_scope('cross_entropy'):\n diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)\n with tf.name_scope('total'):\n cross_entropy = tf.reduce_mean(diff)\n\ntf.summary.scalar('cross_entropy', cross_entropy)\n\nwith tf.name_scope('train'):\n train_step = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cross_entropy)\n\n\nwith tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\ntf.summary.scalar('accuracy', accuracy)\n\nmerged = tf.summary.merge_all()", "_____no_output_____" ] ], [ [ "### Initialize", "_____no_output_____" ] ], [ [ "sess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\ntrain_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)\ntest_writer = tf.summary.FileWriter(log_dir + '/test')\n\nprint(log_dir)", "/home/jovyan/work/logs/alex/initial\n" ] ], [ [ "### Train", "_____no_output_____" ] ], [ [ "for i in range(1000):\n if i % 10 == 0: # Record summaries and test-set accuracy\n summary, acc = sess.run([merged, accuracy], feed_dict={x: mnist.test.images, y_: mnist.test.labels})\n test_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, acc))\n else: # Record train set summaries, and train\n xs, ys = mnist.train.next_batch(100)\n if i % 100 == 99: # Record execution stats\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, _ = sess.run([merged, train_step],\n feed_dict={x: xs, y_: ys},\n options=run_options,\n run_metadata=run_metadata)\n 
train_writer.add_run_metadata(run_metadata, 'step%03d' % i)\n train_writer.add_summary(summary, i)\n print('Adding run metadata for', i)\n else: # Record a summary\n summary, _ = sess.run([merged, train_step], feed_dict={x: xs, y_: ys})\n train_writer.add_summary(summary, i)\n\ntrain_writer.close()\ntest_writer.close()", "Accuracy at step 0: 0.098\nAccuracy at step 10: 0.719\nAccuracy at step 20: 0.8312\nAccuracy at step 30: 0.8688\nAccuracy at step 40: 0.8703\nAccuracy at step 50: 0.8703\nAccuracy at step 60: 0.8666\nAccuracy at step 70: 0.8828\nAccuracy at step 80: 0.8787\nAccuracy at step 90: 0.8911\nAdding run metadata for 99\nAccuracy at step 100: 0.8918\nAccuracy at step 110: 0.8922\nAccuracy at step 120: 0.8925\nAccuracy at step 130: 0.8975\nAccuracy at step 140: 0.8943\nAccuracy at step 150: 0.8995\nAccuracy at step 160: 0.8986\nAccuracy at step 170: 0.9007\nAccuracy at step 180: 0.9052\nAccuracy at step 190: 0.898\nAdding run metadata for 199\nAccuracy at step 200: 0.9035\nAccuracy at step 210: 0.9061\nAccuracy at step 220: 0.9047\nAccuracy at step 230: 0.8998\nAccuracy at step 240: 0.9078\nAccuracy at step 250: 0.8985\nAccuracy at step 260: 0.9082\nAccuracy at step 270: 0.9088\nAccuracy at step 280: 0.9051\nAccuracy at step 290: 0.9053\nAdding run metadata for 299\nAccuracy at step 300: 0.9088\nAccuracy at step 310: 0.9061\nAccuracy at step 320: 0.9082\nAccuracy at step 330: 0.9071\nAccuracy at step 340: 0.9077\nAccuracy at step 350: 0.9096\nAccuracy at step 360: 0.909\nAccuracy at step 370: 0.9096\nAccuracy at step 380: 0.9118\nAccuracy at step 390: 0.9097\nAdding run metadata for 399\nAccuracy at step 400: 0.9084\nAccuracy at step 410: 0.9116\nAccuracy at step 420: 0.9038\nAccuracy at step 430: 0.9101\nAccuracy at step 440: 0.9117\nAccuracy at step 450: 0.9118\nAccuracy at step 460: 0.9163\nAccuracy at step 470: 0.9135\nAccuracy at step 480: 0.9134\nAccuracy at step 490: 0.9127\nAdding run metadata for 499\nAccuracy at step 500: 0.9137\nAccuracy at step 510: 0.9128\nAccuracy at step 520: 0.9137\nAccuracy at step 530: 0.9126\nAccuracy at step 540: 0.9144\nAccuracy at step 550: 0.9113\nAccuracy at step 560: 0.9123\nAccuracy at step 570: 0.9165\nAccuracy at step 580: 0.9102\nAccuracy at step 590: 0.9174\nAdding run metadata for 599\nAccuracy at step 600: 0.9133\nAccuracy at step 610: 0.9141\nAccuracy at step 620: 0.9165\nAccuracy at step 630: 0.9167\nAccuracy at step 640: 0.9155\nAccuracy at step 650: 0.9125\nAccuracy at step 660: 0.9186\nAccuracy at step 670: 0.9167\nAccuracy at step 680: 0.9048\nAccuracy at step 690: 0.9142\nAdding run metadata for 699\nAccuracy at step 700: 0.9152\nAccuracy at step 710: 0.9153\nAccuracy at step 720: 0.9145\nAccuracy at step 730: 0.9066\nAccuracy at step 740: 0.9178\nAccuracy at step 750: 0.9168\nAccuracy at step 760: 0.9147\nAccuracy at step 770: 0.9108\nAccuracy at step 780: 0.9162\nAccuracy at step 790: 0.9154\nAdding run metadata for 799\nAccuracy at step 800: 0.9159\nAccuracy at step 810: 0.9111\nAccuracy at step 820: 0.9192\nAccuracy at step 830: 0.9179\nAccuracy at step 840: 0.9172\nAccuracy at step 850: 0.9185\nAccuracy at step 860: 0.9171\nAccuracy at step 870: 0.9184\nAccuracy at step 880: 0.9156\nAccuracy at step 890: 0.9158\nAdding run metadata for 899\nAccuracy at step 900: 0.9174\nAccuracy at step 910: 0.915\nAccuracy at step 920: 0.914\nAccuracy at step 930: 0.9195\nAccuracy at step 940: 0.9176\nAccuracy at step 950: 0.9175\nAccuracy at step 960: 0.918\nAccuracy at step 970: 0.9174\nAccuracy at step 980: 0.9188\nAccuracy at step 
990: 0.9152\nAdding run metadata for 999\n" ], [ "sess.close()", "_____no_output_____" ], [ "print('tensorboard --logdir=' + log_dir)", "tensorboard --logdir=/home/jovyan/work/logs/alex/initial\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb6486492597f38807d11a8804831f1191d926eb
44,687
ipynb
Jupyter Notebook
examples/f1-mel-spectrogram.ipynb
shubham-kulkarniS/jaafe
7f22707daeab0f3bc9e547618e35e7fbb7076f81
[ "MIT" ]
null
null
null
examples/f1-mel-spectrogram.ipynb
shubham-kulkarniS/jaafe
7f22707daeab0f3bc9e547618e35e7fbb7076f81
[ "MIT" ]
null
null
null
examples/f1-mel-spectrogram.ipynb
shubham-kulkarniS/jaafe
7f22707daeab0f3bc9e547618e35e7fbb7076f81
[ "MIT" ]
null
null
null
39.970483
172
0.535972
[ [ [ "from types import SimpleNamespace\nfrom functools import lru_cache\nimport os\nimport time\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score\nimport pandas as pd\nimport numpy as np\nimport scipy.io.wavfile\nimport scipy.fftpack\nimport scipy.linalg\nimport torch\nimport torch.utils.data as data\nimport torch.nn as nn\nimport torch.optim as optim\nimport math", "_____no_output_____" ], [ "@lru_cache(maxsize=10)\ndef get_window(n, type='hamming'):\n coefs = np.arange(n)\n window = 0.54 - 0.46 * np.cos(2 * np.pi * coefs / (n - 1))\n return window\n\ndef apply_preemphasis(y, preemCoef=0.97):\n y[1:] = y[1:] - preemCoef*y[:-1]\n y[0] *= (1 - preemCoef)\n return y\n\ndef freq_to_mel(freq):\n return 2595.0 * np.log10(1.0 + freq / 700.0)\n\ndef mel_to_freq(mels):\n return 700.0 * (np.power(10.0, mels / 2595.0) - 1.0)\n\n@lru_cache(maxsize=10)\ndef get_filterbank(numfilters, filterLen, lowFreq, highFreq, samplingFreq):\n minwarpfreq = freq_to_mel(lowFreq)\n maxwarpfreq = freq_to_mel(highFreq)\n dwarp = (maxwarpfreq - minwarpfreq) / (numfilters + 1)\n f = mel_to_freq(np.arange(numfilters + 2) * dwarp + minwarpfreq) * (filterLen - 1) * 2.0 / samplingFreq\n i = np.arange(filterLen)[None, :]\n f = f[:, None]\n hislope = (i - f[:numfilters]) / (f[1:numfilters+1] - f[:numfilters])\n loslope = (f[2:numfilters+2] - i) / (f[2:numfilters+2] - f[1:numfilters+1])\n H = np.maximum(0, np.minimum(hislope, loslope))\n return H\n\ndef normalized(y, threshold=0):\n y -= y.mean()\n stddev = y.std()\n if stddev > threshold:\n y /= stddev\n return y\n\ndef mfsc(y, sfr, window_size=0.025, window_stride=0.010, window='hamming', normalize=True, log=True, n_mels=80, preemCoef=0.97, melfloor=1.0):\n win_length = int(sfr * window_size)\n hop_length = int(sfr * window_stride)\n n_fft = 2048\n lowfreq = 0\n highfreq = sfr/2\n \n # get window\n window = get_window(win_length)\n padded_window = np.pad(window, (0, n_fft - win_length), mode='constant')[:, None]\n \n # preemphasis\n y = apply_preemphasis(y, preemCoef)\n\n # scale wave signal\n y *= 32768\n \n # get frames and scale input\n num_frames = 1 + (len(y) - win_length) // hop_length\n pad_after = num_frames*hop_length + (n_fft - hop_length) - len(y)\n if pad_after > 0:\n y = np.pad(y, (0, pad_after), mode='constant')\n frames = np.lib.stride_tricks.as_strided(y, shape=(n_fft, num_frames), strides=(y.itemsize, hop_length * y.itemsize), writeable=False)\n windowed_frames = padded_window * frames\n D = np.abs(np.fft.rfft(windowed_frames, axis=0))\n\n # mel filterbank\n filterbank = get_filterbank(n_mels, n_fft/2 + 1, lowfreq, highfreq, sfr)\n mf = np.dot(filterbank, D)\n mf = np.maximum(melfloor, mf)\n if log:\n mf = np.log(mf)\n if normalize:\n mf = normalized(mf)\n\n return mf", "_____no_output_____" ], [ "def make_dataset(kaldi_path, class_to_id):\n text_path = os.path.join(kaldi_path, 'text')\n wav_path = os.path.join(kaldi_path, 'wav.scp')\n\n key_to_word = dict()\n key_to_wav = dict()\n \n with open(wav_path, 'rt') as wav_scp:\n for line in wav_scp:\n key, wav = line.strip().split(' ', 1)\n key_to_wav[key] = wav\n key_to_word[key] = None # default\n\n if os.path.isfile(text_path):\n with open(text_path, 'rt') as text:\n for line in text:\n key, word = line.strip().split(' ', 1)\n key_to_word[key] = word\n\n wavs = []\n for key, wav_command in key_to_wav.items():\n word = key_to_word[key]\n word_id = class_to_id[word] if word is not None else -1 # default for test\n wav_item = [key, wav_command, word_id]\n 
wavs.append(wav_item)\n\n return wavs", "_____no_output_____" ], [ "def wav_read(path):\n sr, y = scipy.io.wavfile.read(path)\n y = y/32768 # Normalize to -1..1\n y -= y.mean()\n return y, sr", "_____no_output_____" ], [ "def param_loader(path, window_size, window_stride, window, normalize, max_len):\n y, sfr = wav_read(path)\n\n param = mfsc(y, sfr, window_size=window_size, window_stride=window_stride, window=window, normalize=normalize, log=False, n_mels=60, preemCoef=0, melfloor=1.0)\n\n # Add zero padding to make all params have the same dims\n if param.shape[1] < max_len:\n pad = np.zeros((param.shape[0], max_len - param.shape[1]))\n param = np.hstack((pad, param))\n\n # If exceeds max_len keep last samples\n elif param.shape[1] > max_len:\n param = param[:, -max_len:]\n\n param = torch.FloatTensor(param)\n\n return param", "_____no_output_____" ], [ "def get_classes():\n classes = ['neg', 'pos']\n weight = None\n class_to_id = {label: i for i, label in enumerate(classes)}\n return classes, weight, class_to_id", "_____no_output_____" ], [ "class Loader(data.Dataset):\n \"\"\"Data set loader::\n Args:\n root (string): Kaldi directory path.\n transform (callable, optional): A function/transform that takes in a spectrogram\n and returns a transformed version. E.g., ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n window_size: window size for the stft, default value is .02\n window_stride: window stride for the stft, default value is .01\n window_type: type of window to extract the stft, default value is 'hamming'\n normalize: boolean, whether or not to normalize the param to have zero mean and one std\n max_len: the maximum length of frames to use\n Attributes:\n classes (list): List of the class names.\n class_to_id (dict): Dict with items (class_name, class_index).\n wavs (list): List of (wavs path, class_index) tuples\n STFT parameters: window_size, window_stride, window_type, normalize\n \"\"\"\n\n def __init__(self, root, transform=None, target_transform=None, window_size=.02,\n window_stride=.01, window_type='hamming', normalize=True, max_len=1000):\n\n classes, weight, class_to_id = get_classes()\n self.root = root\n self.wavs = make_dataset(root, class_to_id)\n self.classes = classes\n self.weight = weight\n self.class_to_id = class_to_id\n self.transform = transform\n self.target_transform = target_transform\n self.loader = param_loader\n self.window_size = window_size\n self.window_stride = window_stride\n self.window_type = window_type\n self.normalize = normalize\n self.max_len = max_len\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (key, params, target) where target is class_index of the target class.\n \"\"\"\n key, path, target = self.wavs[index]\n path = '../input/covid/wavs16k/' + path\n params = self.loader(path, self.window_size, self.window_stride, self.window_type, self.normalize, self.max_len) # pylint: disable=line-too-long\n if self.transform is not None:\n params = self.transform(params)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return key, params, target\n\n def __len__(self):\n return len(self.wavs)", "_____no_output_____" ], [ "class VGG(nn.Module):\n\n def __init__(self, vgg_name, hidden=64, dropout=0.4):\n super(VGG, self).__init__()\n self.features = make_layers(cfg[vgg_name])\n self.classifier = nn.Sequential(\n nn.Dropout(dropout),\n nn.Linear(2*512, hidden),\n nn.ReLU(),\n 
nn.Dropout(dropout),\n nn.Linear(hidden, hidden),\n nn.ReLU(),\n nn.Dropout(dropout),\n nn.Linear(hidden, 1),\n )\n self._initialize_weights()\n\n def forward(self, x):\n x.unsqueeze_(1)\n x = self.features(x)\n x1, _ = x.max(dim=-1)\n x2 = x.mean(dim=-1)\n x = torch.cat((x1, x2), dim=-1)\n # print(x.shape)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x.squeeze(-1)\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\ndef make_layers(cfg, batch_norm=True):\n layers = []\n in_channels = 1\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\ncfg = {\n 'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}", "_____no_output_____" ], [ "def train(loader, model, criterion, optimizer, epoch, cuda, log_interval, weight=None, verbose=True):\n model.train()\n global_epoch_loss = 0\n samples = 0\n for batch_idx, (_, data, target) in enumerate(loader):\n if cuda:\n data, target = data.cuda(), target.cuda()\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target.float())\n loss.backward()\n optimizer.step()\n global_epoch_loss += loss.data.item() * len(target)\n samples += len(target)\n if verbose:\n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, samples, len(loader.dataset), 100 * samples / len(loader.dataset), global_epoch_loss / samples))\n return global_epoch_loss / samples", "_____no_output_____" ], [ "def test(loader, model, criterion, cuda, verbose=True, data_set='Test', save=None):\n model.eval()\n test_loss = 0\n tpred = []\n ttarget = []\n\n if save is not None:\n csv = open(save, 'wt')\n print('index,prob', file=csv)\n\n with torch.no_grad():\n for keys, data, target in loader:\n if cuda:\n data, target = data.cuda(), target.cuda()\n output = model(data)\n pred = output.sigmoid()\n tpred.append(pred.cpu().numpy())\n\n if target[0] != -1:\n loss = criterion(output, target.float()).data.item()\n test_loss += loss * len(target) # sum up batch loss \n ttarget.append(target.cpu().numpy())\n\n if save is not None:\n for i, key in enumerate(keys):\n print(f'{key},{pred[i]}', file=csv)\n \n if len(ttarget) > 0:\n test_loss /= len(loader.dataset)\n auc = roc_auc_score(np.concatenate(ttarget), np.concatenate(tpred))\n if verbose:\n print('\\n{} set: Average loss: {:.4f}, AUC: ({:.1f}%)\\n'.format(data_set, test_loss, 100 * auc))\n\n return test_loss, auc", "_____no_output_____" ], [ "args = SimpleNamespace(\n # general options\n train_path = '../input/covid/train', # train data folder\n valid_path = '../input/covid/valid', # valid 
data folder\n test_path = '../input/covid/test', # test data folder\n batch_size = 32, # training and valid batch size\n test_batch_size = 32, # batch size for testing\n arc = 'VGG13', # VGG11, VGG13, VGG16, VGG19\n epochs = 100, # maximum number of epochs to train\n lr = 0.0001, # learning rate\n momentum = 0.9, # SGD momentum, for SGD only\n optimizer = 'adam', # optimization method: sgd | adam\n seed = 1234, # random seed\n log_interval = 5, # how many batches to wait before logging training status\n patience = 5, # how many epochs of no loss improvement should we wait before stopping training\n checkpoint = '.', # checkpoints directory\n train = True, # train before testing\n cuda = True, # use gpu\n\n # feature extraction options\n window_size = .04, # window size for the stft\n window_stride = .02, # window stride for the stft\n window_type = 'hamming', # window type for the stft\n normalize = True, # use spect normalization\n num_workers = 2, # how many subprocesses to use for data loading\n)", "_____no_output_____" ], [ "args.cuda = args.cuda and torch.cuda.is_available()\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n print('Using CUDA with {0} GPUs'.format(torch.cuda.device_count()))\n\n\n# build model\nmodel = VGG(args.arc)\nif args.cuda:\n model.cuda()\n\n# Define criterion\ncriterion = nn.BCEWithLogitsLoss(reduction='mean') # This loss combines a Sigmoid layer and the BCELoss in one single class.", "Using CUDA with 1 GPUs\n" ] ], [ [ "## Train model", "_____no_output_____" ] ], [ [ "# loading data\nif args.train:\n train_dataset = Loader(args.train_path, window_size=args.window_size, window_stride=args.window_stride,\n window_type=args.window_type, normalize=args.normalize)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=args.cuda, sampler=None)\n\n valid_dataset = Loader(args.valid_path, window_size=args.window_size, window_stride=args.window_stride,\n window_type=args.window_type, normalize=args.normalize)\n valid_loader = torch.utils.data.DataLoader(\n valid_dataset, batch_size=args.batch_size, shuffle=None,\n num_workers=args.num_workers, pin_memory=args.cuda, sampler=None)\n\n # define optimizer\n if args.optimizer.lower() == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n else:\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n best_valid_auc = 0\n iteration = 0\n epoch = 1\n\n # training with early stopping\n t0 = time.time()\n while (epoch < args.epochs + 1) and (iteration < args.patience):\n train(train_loader, model, criterion, optimizer, epoch, args.cuda, args.log_interval,\n weight=train_dataset.weight)\n valid_loss, valid_auc = test(valid_loader, model, criterion, args.cuda, data_set='Validation')\n if not os.path.isdir(args.checkpoint):\n os.mkdir(args.checkpoint)\n torch.save(model.state_dict(), './{}/model{:03d}.pt'.format(args.checkpoint, epoch))\n if valid_auc <= best_valid_auc:\n iteration += 1\n print('AUC was not improved, iteration {0}'.format(str(iteration)))\n else:\n print('Saving state')\n iteration = 0\n best_valid_auc = valid_auc\n state = {\n 'valid_auc': valid_auc,\n 'valid_loss': valid_loss,\n 'epoch': epoch,\n }\n if not os.path.isdir(args.checkpoint):\n os.mkdir(args.checkpoint)\n torch.save(state, './{}/ckpt.pt'.format(args.checkpoint))\n epoch += 1\n print(f'Elapsed seconds: ({time.time() - t0:.0f}s)')", "Train Epoch: 1 [32/2160 (1%)]\tLoss: 0.692940\nTrain 
Epoch: 1 [192/2160 (9%)]\tLoss: 0.693021\nTrain Epoch: 1 [352/2160 (16%)]\tLoss: 0.693135\nTrain Epoch: 1 [512/2160 (24%)]\tLoss: 0.692742\nTrain Epoch: 1 [672/2160 (31%)]\tLoss: 0.692705\nTrain Epoch: 1 [832/2160 (39%)]\tLoss: 0.692613\nTrain Epoch: 1 [992/2160 (46%)]\tLoss: 0.692540\nTrain Epoch: 1 [1152/2160 (53%)]\tLoss: 0.692520\nTrain Epoch: 1 [1312/2160 (61%)]\tLoss: 0.692795\nTrain Epoch: 1 [1472/2160 (68%)]\tLoss: 0.692748\nTrain Epoch: 1 [1632/2160 (76%)]\tLoss: 0.692740\nTrain Epoch: 1 [1792/2160 (83%)]\tLoss: 0.692135\nTrain Epoch: 1 [1952/2160 (90%)]\tLoss: 0.692509\nTrain Epoch: 1 [2112/2160 (98%)]\tLoss: 0.691998\n\nValidation set: Average loss: 0.6900, AUC: (61.0%)\n\nSaving state\nElapsed seconds: (67s)\nTrain Epoch: 2 [32/2160 (1%)]\tLoss: 0.687474\nTrain Epoch: 2 [192/2160 (9%)]\tLoss: 0.687831\nTrain Epoch: 2 [352/2160 (16%)]\tLoss: 0.691497\nTrain Epoch: 2 [512/2160 (24%)]\tLoss: 0.688524\nTrain Epoch: 2 [672/2160 (31%)]\tLoss: 0.687683\nTrain Epoch: 2 [832/2160 (39%)]\tLoss: 0.687783\nTrain Epoch: 2 [992/2160 (46%)]\tLoss: 0.687280\nTrain Epoch: 2 [1152/2160 (53%)]\tLoss: 0.689333\nTrain Epoch: 2 [1312/2160 (61%)]\tLoss: 0.688343\nTrain Epoch: 2 [1472/2160 (68%)]\tLoss: 0.688599\nTrain Epoch: 2 [1632/2160 (76%)]\tLoss: 0.689523\nTrain Epoch: 2 [1792/2160 (83%)]\tLoss: 0.688990\nTrain Epoch: 2 [1952/2160 (90%)]\tLoss: 0.689407\nTrain Epoch: 2 [2112/2160 (98%)]\tLoss: 0.688826\n\nValidation set: Average loss: 0.6852, AUC: (64.0%)\n\nSaving state\nElapsed seconds: (131s)\nTrain Epoch: 3 [32/2160 (1%)]\tLoss: 0.671406\nTrain Epoch: 3 [192/2160 (9%)]\tLoss: 0.679401\nTrain Epoch: 3 [352/2160 (16%)]\tLoss: 0.679308\nTrain Epoch: 3 [512/2160 (24%)]\tLoss: 0.678467\nTrain Epoch: 3 [672/2160 (31%)]\tLoss: 0.682635\nTrain Epoch: 3 [832/2160 (39%)]\tLoss: 0.681060\nTrain Epoch: 3 [992/2160 (46%)]\tLoss: 0.678921\nTrain Epoch: 3 [1152/2160 (53%)]\tLoss: 0.678960\nTrain Epoch: 3 [1312/2160 (61%)]\tLoss: 0.680909\nTrain Epoch: 3 [1472/2160 (68%)]\tLoss: 0.679136\nTrain Epoch: 3 [1632/2160 (76%)]\tLoss: 0.677021\nTrain Epoch: 3 [1792/2160 (83%)]\tLoss: 0.678754\nTrain Epoch: 3 [1952/2160 (90%)]\tLoss: 0.678569\nTrain Epoch: 3 [2112/2160 (98%)]\tLoss: 0.678173\n\nValidation set: Average loss: 0.6694, AUC: (64.4%)\n\nSaving state\nElapsed seconds: (192s)\nTrain Epoch: 4 [32/2160 (1%)]\tLoss: 0.698799\nTrain Epoch: 4 [192/2160 (9%)]\tLoss: 0.645726\nTrain Epoch: 4 [352/2160 (16%)]\tLoss: 0.656489\nTrain Epoch: 4 [512/2160 (24%)]\tLoss: 0.659085\nTrain Epoch: 4 [672/2160 (31%)]\tLoss: 0.658998\nTrain Epoch: 4 [832/2160 (39%)]\tLoss: 0.659342\nTrain Epoch: 4 [992/2160 (46%)]\tLoss: 0.661950\nTrain Epoch: 4 [1152/2160 (53%)]\tLoss: 0.658065\nTrain Epoch: 4 [1312/2160 (61%)]\tLoss: 0.658362\nTrain Epoch: 4 [1472/2160 (68%)]\tLoss: 0.657321\nTrain Epoch: 4 [1632/2160 (76%)]\tLoss: 0.656842\nTrain Epoch: 4 [1792/2160 (83%)]\tLoss: 0.657103\nTrain Epoch: 4 [1952/2160 (90%)]\tLoss: 0.656834\nTrain Epoch: 4 [2112/2160 (98%)]\tLoss: 0.655910\n\nValidation set: Average loss: 0.6898, AUC: (63.0%)\n\nAUC was not improved, iteration 1\nElapsed seconds: (255s)\nTrain Epoch: 5 [32/2160 (1%)]\tLoss: 0.663623\nTrain Epoch: 5 [192/2160 (9%)]\tLoss: 0.669114\nTrain Epoch: 5 [352/2160 (16%)]\tLoss: 0.666796\nTrain Epoch: 5 [512/2160 (24%)]\tLoss: 0.665851\nTrain Epoch: 5 [672/2160 (31%)]\tLoss: 0.670298\nTrain Epoch: 5 [832/2160 (39%)]\tLoss: 0.666080\nTrain Epoch: 5 [992/2160 (46%)]\tLoss: 0.660614\nTrain Epoch: 5 [1152/2160 (53%)]\tLoss: 0.660838\nTrain Epoch: 5 [1312/2160 (61%)]\tLoss: 0.659048\nTrain 
Epoch: 5 [1472/2160 (68%)]\tLoss: 0.654148\nTrain Epoch: 5 [1632/2160 (76%)]\tLoss: 0.649897\nTrain Epoch: 5 [1792/2160 (83%)]\tLoss: 0.651849\nTrain Epoch: 5 [1952/2160 (90%)]\tLoss: 0.648903\nTrain Epoch: 5 [2112/2160 (98%)]\tLoss: 0.649096\n\nValidation set: Average loss: 0.6669, AUC: (66.1%)\n\nSaving state\nElapsed seconds: (315s)\nTrain Epoch: 6 [32/2160 (1%)]\tLoss: 0.620860\nTrain Epoch: 6 [192/2160 (9%)]\tLoss: 0.656582\nTrain Epoch: 6 [352/2160 (16%)]\tLoss: 0.646915\nTrain Epoch: 6 [512/2160 (24%)]\tLoss: 0.647643\nTrain Epoch: 6 [672/2160 (31%)]\tLoss: 0.646524\nTrain Epoch: 6 [832/2160 (39%)]\tLoss: 0.635932\nTrain Epoch: 6 [992/2160 (46%)]\tLoss: 0.630001\nTrain Epoch: 6 [1152/2160 (53%)]\tLoss: 0.626126\nTrain Epoch: 6 [1312/2160 (61%)]\tLoss: 0.622514\nTrain Epoch: 6 [1472/2160 (68%)]\tLoss: 0.621734\nTrain Epoch: 6 [1632/2160 (76%)]\tLoss: 0.626502\nTrain Epoch: 6 [1792/2160 (83%)]\tLoss: 0.629545\nTrain Epoch: 6 [1952/2160 (90%)]\tLoss: 0.628871\nTrain Epoch: 6 [2112/2160 (98%)]\tLoss: 0.629745\n\nValidation set: Average loss: 0.6806, AUC: (67.1%)\n\nSaving state\nElapsed seconds: (377s)\nTrain Epoch: 7 [32/2160 (1%)]\tLoss: 0.561015\nTrain Epoch: 7 [192/2160 (9%)]\tLoss: 0.586453\nTrain Epoch: 7 [352/2160 (16%)]\tLoss: 0.586383\nTrain Epoch: 7 [512/2160 (24%)]\tLoss: 0.580895\nTrain Epoch: 7 [672/2160 (31%)]\tLoss: 0.588245\nTrain Epoch: 7 [832/2160 (39%)]\tLoss: 0.584126\nTrain Epoch: 7 [992/2160 (46%)]\tLoss: 0.579670\nTrain Epoch: 7 [1152/2160 (53%)]\tLoss: 0.588139\nTrain Epoch: 7 [1312/2160 (61%)]\tLoss: 0.588468\nTrain Epoch: 7 [1472/2160 (68%)]\tLoss: 0.593185\nTrain Epoch: 7 [1632/2160 (76%)]\tLoss: 0.595115\nTrain Epoch: 7 [1792/2160 (83%)]\tLoss: 0.595115\nTrain Epoch: 7 [1952/2160 (90%)]\tLoss: 0.593977\nTrain Epoch: 7 [2112/2160 (98%)]\tLoss: 0.594131\n\nValidation set: Average loss: 0.6614, AUC: (66.5%)\n\nAUC was not improved, iteration 1\nElapsed seconds: (439s)\nTrain Epoch: 8 [32/2160 (1%)]\tLoss: 0.579526\nTrain Epoch: 8 [192/2160 (9%)]\tLoss: 0.536440\nTrain Epoch: 8 [352/2160 (16%)]\tLoss: 0.524517\nTrain Epoch: 8 [512/2160 (24%)]\tLoss: 0.531187\nTrain Epoch: 8 [672/2160 (31%)]\tLoss: 0.523053\nTrain Epoch: 8 [832/2160 (39%)]\tLoss: 0.534029\nTrain Epoch: 8 [992/2160 (46%)]\tLoss: 0.540785\nTrain Epoch: 8 [1152/2160 (53%)]\tLoss: 0.564756\nTrain Epoch: 8 [1312/2160 (61%)]\tLoss: 0.575440\nTrain Epoch: 8 [1472/2160 (68%)]\tLoss: 0.580846\nTrain Epoch: 8 [1632/2160 (76%)]\tLoss: 0.585792\nTrain Epoch: 8 [1792/2160 (83%)]\tLoss: 0.583858\nTrain Epoch: 8 [1952/2160 (90%)]\tLoss: 0.586461\nTrain Epoch: 8 [2112/2160 (98%)]\tLoss: 0.586144\n\nValidation set: Average loss: 0.6576, AUC: (67.3%)\n\nSaving state\nElapsed seconds: (500s)\nTrain Epoch: 9 [32/2160 (1%)]\tLoss: 0.486425\nTrain Epoch: 9 [192/2160 (9%)]\tLoss: 0.555339\nTrain Epoch: 9 [352/2160 (16%)]\tLoss: 0.513918\nTrain Epoch: 9 [512/2160 (24%)]\tLoss: 0.511796\nTrain Epoch: 9 [672/2160 (31%)]\tLoss: 0.522759\nTrain Epoch: 9 [832/2160 (39%)]\tLoss: 0.519817\nTrain Epoch: 9 [992/2160 (46%)]\tLoss: 0.530947\nTrain Epoch: 9 [1152/2160 (53%)]\tLoss: 0.522198\nTrain Epoch: 9 [1312/2160 (61%)]\tLoss: 0.517988\nTrain Epoch: 9 [1472/2160 (68%)]\tLoss: 0.524123\nTrain Epoch: 9 [1632/2160 (76%)]\tLoss: 0.517983\nTrain Epoch: 9 [1792/2160 (83%)]\tLoss: 0.516840\nTrain Epoch: 9 [1952/2160 (90%)]\tLoss: 0.518370\nTrain Epoch: 9 [2112/2160 (98%)]\tLoss: 0.525810\n\nValidation set: Average loss: 0.7156, AUC: (63.5%)\n\nAUC was not improved, iteration 1\nElapsed seconds: (562s)\nTrain Epoch: 10 [32/2160 
(1%)]\tLoss: 0.689886\nTrain Epoch: 10 [192/2160 (9%)]\tLoss: 0.595654\nTrain Epoch: 10 [352/2160 (16%)]\tLoss: 0.566257\nTrain Epoch: 10 [512/2160 (24%)]\tLoss: 0.563074\nTrain Epoch: 10 [672/2160 (31%)]\tLoss: 0.552662\nTrain Epoch: 10 [832/2160 (39%)]\tLoss: 0.542966\nTrain Epoch: 10 [992/2160 (46%)]\tLoss: 0.538365\nTrain Epoch: 10 [1152/2160 (53%)]\tLoss: 0.530672\nTrain Epoch: 10 [1312/2160 (61%)]\tLoss: 0.518773\nTrain Epoch: 10 [1472/2160 (68%)]\tLoss: 0.509750\nTrain Epoch: 10 [1632/2160 (76%)]\tLoss: 0.510615\nTrain Epoch: 10 [1792/2160 (83%)]\tLoss: 0.505780\nTrain Epoch: 10 [1952/2160 (90%)]\tLoss: 0.501523\nTrain Epoch: 10 [2112/2160 (98%)]\tLoss: 0.493545\n\nValidation set: Average loss: 0.9092, AUC: (66.4%)\n\nAUC was not improved, iteration 2\nElapsed seconds: (625s)\nTrain Epoch: 11 [32/2160 (1%)]\tLoss: 0.318452\nTrain Epoch: 11 [192/2160 (9%)]\tLoss: 0.380606\nTrain Epoch: 11 [352/2160 (16%)]\tLoss: 0.401958\nTrain Epoch: 11 [512/2160 (24%)]\tLoss: 0.410194\nTrain Epoch: 11 [672/2160 (31%)]\tLoss: 0.423455\nTrain Epoch: 11 [832/2160 (39%)]\tLoss: 0.426671\nTrain Epoch: 11 [992/2160 (46%)]\tLoss: 0.427439\nTrain Epoch: 11 [1152/2160 (53%)]\tLoss: 0.431935\nTrain Epoch: 11 [1312/2160 (61%)]\tLoss: 0.433284\nTrain Epoch: 11 [1472/2160 (68%)]\tLoss: 0.432811\nTrain Epoch: 11 [1632/2160 (76%)]\tLoss: 0.428141\nTrain Epoch: 11 [1792/2160 (83%)]\tLoss: 0.422615\nTrain Epoch: 11 [1952/2160 (90%)]\tLoss: 0.425415\nTrain Epoch: 11 [2112/2160 (98%)]\tLoss: 0.425537\n\nValidation set: Average loss: 1.3312, AUC: (39.1%)\n\nAUC was not improved, iteration 3\nElapsed seconds: (686s)\nTrain Epoch: 12 [32/2160 (1%)]\tLoss: 0.347860\nTrain Epoch: 12 [192/2160 (9%)]\tLoss: 0.369722\nTrain Epoch: 12 [352/2160 (16%)]\tLoss: 0.349409\nTrain Epoch: 12 [512/2160 (24%)]\tLoss: 0.350681\nTrain Epoch: 12 [672/2160 (31%)]\tLoss: 0.363364\nTrain Epoch: 12 [832/2160 (39%)]\tLoss: 0.368395\nTrain Epoch: 12 [992/2160 (46%)]\tLoss: 0.378292\nTrain Epoch: 12 [1152/2160 (53%)]\tLoss: 0.398189\nTrain Epoch: 12 [1312/2160 (61%)]\tLoss: 0.396637\nTrain Epoch: 12 [1472/2160 (68%)]\tLoss: 0.394447\nTrain Epoch: 12 [1632/2160 (76%)]\tLoss: 0.389685\nTrain Epoch: 12 [1792/2160 (83%)]\tLoss: 0.383875\nTrain Epoch: 12 [1952/2160 (90%)]\tLoss: 0.379776\nTrain Epoch: 12 [2112/2160 (98%)]\tLoss: 0.372151\n\nValidation set: Average loss: 1.1057, AUC: (66.5%)\n\nAUC was not improved, iteration 4\nElapsed seconds: (747s)\nTrain Epoch: 13 [32/2160 (1%)]\tLoss: 0.287454\nTrain Epoch: 13 [192/2160 (9%)]\tLoss: 0.325877\nTrain Epoch: 13 [352/2160 (16%)]\tLoss: 0.367565\nTrain Epoch: 13 [512/2160 (24%)]\tLoss: 0.336093\nTrain Epoch: 13 [672/2160 (31%)]\tLoss: 0.332688\nTrain Epoch: 13 [832/2160 (39%)]\tLoss: 0.314812\nTrain Epoch: 13 [992/2160 (46%)]\tLoss: 0.304224\nTrain Epoch: 13 [1152/2160 (53%)]\tLoss: 0.296127\nTrain Epoch: 13 [1312/2160 (61%)]\tLoss: 0.288650\nTrain Epoch: 13 [1472/2160 (68%)]\tLoss: 0.290329\nTrain Epoch: 13 [1632/2160 (76%)]\tLoss: 0.285095\nTrain Epoch: 13 [1792/2160 (83%)]\tLoss: 0.290124\nTrain Epoch: 13 [1952/2160 (90%)]\tLoss: 0.289550\nTrain Epoch: 13 [2112/2160 (98%)]\tLoss: 0.296263\n\nValidation set: Average loss: 1.5524, AUC: (45.8%)\n\nAUC was not improved, iteration 5\nElapsed seconds: (809s)\n" ] ], [ [ "## Test Model", "_____no_output_____" ] ], [ [ "test_dataset = Loader(args.test_path, window_size=args.window_size, window_stride=args.window_stride,\n window_type=args.window_type, normalize=args.normalize)\ntest_loader = torch.utils.data.DataLoader(\n test_dataset, 
batch_size=args.test_batch_size, shuffle=None,\n num_workers=args.num_workers, pin_memory=args.cuda, sampler=None)\n\n# get best epoch\nstate = torch.load('./{}/ckpt.pt'.format(args.checkpoint))\nepoch = state['epoch']\nprint(\"Testing model (epoch {})\".format(epoch))\nmodel.load_state_dict(torch.load('./{}/model{:03d}.pt'.format(args.checkpoint, epoch)))\nif args.cuda:\n model.cuda()\n\nresults = 'submission.csv'\nprint(\"Saving results in {}\".format(results))\ntest(test_loader, model, criterion, args.cuda, save=results)", "Testing model (epoch 8)\nSaving results in submission.csv\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb6487833120a0b04f07c3360b084dd5a1cb9b34
26,371
ipynb
Jupyter Notebook
doc/Programs/JupyterFiles/Examples/Intro to ML Examples/Flowers.ipynb
mortele/MachineLearning
86eeaed5c7f31ab0f37d451aaf5a5c311ffb7f19
[ "CC0-1.0" ]
1
2020-08-24T18:42:36.000Z
2020-08-24T18:42:36.000Z
doc/Programs/JupyterFiles/Examples/Intro to ML Examples/Flowers.ipynb
jingsun8803/MachineLearning
6c1d550402ee0ea620d8ddac1afdde4deaabba17
[ "CC0-1.0" ]
null
null
null
doc/Programs/JupyterFiles/Examples/Intro to ML Examples/Flowers.ipynb
jingsun8803/MachineLearning
6c1d550402ee0ea620d8ddac1afdde4deaabba17
[ "CC0-1.0" ]
1
2021-09-04T16:21:16.000Z
2021-09-04T16:21:16.000Z
133.186869
19,966
0.853817
[ [ [ "import mglearn\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport IPython\nimport sklearn\nfrom sklearn.datasets import load_iris\niris=load_iris()\n\nprint (iris['target'])\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test=train_test_split(iris['data'], iris['target'], random_state=0)\n\nfrom sklearn.neighbors import KNeighborsClassifier\nknn=KNeighborsClassifier(n_neighbors=1)\n\nprint (knn.fit(X_train, y_train))\nX_new=np.array([[4.9,2.9,1,0.2]])\nX_new.shape\n\nX_new2=np.array([[5,3,2,0.5]])\nX_new2.shape\nprediction=knn.predict(X_new2)\nprediction2=knn.predict(X_new)\n\nprint (\"Iris 1 is a \" +str(iris['target_names'][prediction]))\nprint (\"Iris 2 is a \" +str(iris['target_names'][prediction2]))\n\ny_pred=knn.predict(X_test)\nnp.mean(y_pred==y_test)\nprint(\"-----------------LASSO---------------------\")\nfrom sklearn.linear_model import Lasso\nlasso = Lasso().fit(X_train, y_train)\nprint(\"training set score: %f\" % lasso.score(X_train, y_train))\nprint(\"test set score: %f\" % lasso.score(X_test, y_test))\nprint(\"number of features used: %d\" % np.sum(lasso.coef_ != 3))\n\n\nprint (\"---------------SVC AND LOGISTIC REGRESSION-----------\")\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\n\nX, y=mglearn.datasets.make_forge()\n\nfig, axes= plt.subplots(1,2,figsize=(10,3))\n\nfor model, ax in zip([LinearSVC(), LogisticRegression()], axes):\n clf = model.fit(X, y)\n mglearn.plots.plot_2d_separator(clf, X, fill=False, eps=0.5, ax=ax, alpha=.7)\n ax.scatter(X[:, 0], X[:, 1], c=y, s=60, cmap=mglearn.cm2)\n ax.set_title(\"%s\" % clf.__class__.__name__)\nplt.show()\n\nprint (\"----------Multi-Class Uncertainty-------\")\nfrom sklearn.ensemble import GradientBoostingClassifier\ngbrt = GradientBoostingClassifier(learning_rate=0.01, random_state=0)\ngbrt.fit(X_train, y_train)\nGradientBoostingClassifier(init=None, learning_rate=0.01, loss='deviance',\n max_depth=3, max_features=None, max_leaf_nodes=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=100,\n presort='auto', random_state=0, subsample=1.0, verbose=0,\n warm_start=False)\nprint(gbrt.decision_function(X_test).shape)\n# plot the first few entries of the decision function\nprint(gbrt.decision_function(X_test)[:6, :])\nprint(np.argmax(gbrt.decision_function(X_test), axis=1))\nprint(gbrt.predict(X_test))\n# show the first few entries of predict_proba\nprint(gbrt.predict_proba(X_test)[:6])", "[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2\n 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2\n 2 2]\nKNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',\n metric_params=None, n_jobs=1, n_neighbors=1, p=2,\n weights='uniform')\nIris 1 is a ['setosa']\nIris 2 is a ['setosa']\n-----------------LASSO---------------------\ntraining set score: 0.464937\ntest set score: 0.430838\nnumber of features used: 4\n---------------SVC AND LOGISTIC REGRESSION-----------\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
cb648b0909945a231d8e78c3bdcf387078bcfc92
131,111
ipynb
Jupyter Notebook
boston_housing.ipynb
guptashrey/Udacity-Predicting-Boston-Housing-Prices
e7b24740ab85fa9700436794999bfd0c8f228bce
[ "Apache-2.0" ]
null
null
null
boston_housing.ipynb
guptashrey/Udacity-Predicting-Boston-Housing-Prices
e7b24740ab85fa9700436794999bfd0c8f228bce
[ "Apache-2.0" ]
null
null
null
boston_housing.ipynb
guptashrey/Udacity-Predicting-Boston-Housing-Prices
e7b24740ab85fa9700436794999bfd0c8f228bce
[ "Apache-2.0" ]
null
null
null
158.346618
56,260
0.86815
[ [ [ "# Machine Learning Engineer Nanodegree\n## Model Evaluation & Validation\n## Project: Predicting Boston Housing Prices\n\nWelcome to the first project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!\n\nIn addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. \n\n>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.", "_____no_output_____" ], [ "## Getting Started\nIn this project, you will evaluate the performance and predictive power of a model that has been trained and tested on data collected from homes in suburbs of Boston, Massachusetts. A model trained on this data that is seen as a *good fit* could then be used to make certain predictions about a home — in particular, its monetary value. This model would prove to be invaluable for someone like a real estate agent who could make use of such information on a daily basis.\n\nThe dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Housing). The Boston housing data was collected in 1978 and each of the 506 entries represent aggregated data about 14 features for homes from various suburbs in Boston, Massachusetts. For the purposes of this project, the following preprocessing steps have been made to the dataset:\n- 16 data points have an `'MEDV'` value of 50.0. These data points likely contain **missing or censored values** and have been removed.\n- 1 data point has an `'RM'` value of 8.78. This data point can be considered an **outlier** and has been removed.\n- The features `'RM'`, `'LSTAT'`, `'PTRATIO'`, and `'MEDV'` are essential. The remaining **non-relevant features** have been excluded.\n- The feature `'MEDV'` has been **multiplicatively scaled** to account for 35 years of market inflation.\n\nRun the code cell below to load the Boston housing dataset, along with a few of the necessary Python libraries required for this project. 
You will know the dataset loaded successfully if the size of the dataset is reported.", "_____no_output_____" ] ], [ [ "# Import libraries necessary for this project\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cross_validation import ShuffleSplit\n\n# Import supplementary visualizations code visuals.py\nimport visuals as vs\n\n# Pretty display for notebooks\n%matplotlib inline\n\n# Load the Boston housing dataset\ndata = pd.read_csv('housing.csv')\nprices = data['MEDV']\nfeatures = data.drop('MEDV', axis = 1)\n \n# Success\nprint \"Boston housing dataset has {} data points with {} variables each.\".format(*data.shape)", "Boston housing dataset has 489 data points with 4 variables each.\n" ] ], [ [ "## Data Exploration\nIn this first section of this project, you will make a cursory investigation about the Boston housing data and provide your observations. Familiarizing yourself with the data through an explorative process is a fundamental practice to help you better understand and justify your results.\n\nSince the main goal of this project is to construct a working model which has the capability of predicting the value of houses, we will need to separate the dataset into **features** and the **target variable**. The **features**, `'RM'`, `'LSTAT'`, and `'PTRATIO'`, give us quantitative information about each data point. The **target variable**, `'MEDV'`, will be the variable we seek to predict. These are stored in `features` and `prices`, respectively.", "_____no_output_____" ], [ "### Implementation: Calculate Statistics\nFor your very first coding implementation, you will calculate descriptive statistics about the Boston housing prices. Since `numpy` has already been imported for you, use this library to perform the necessary calculations. These statistics will be extremely important later on to analyze various prediction results from the constructed model.\n\nIn the code cell below, you will need to implement the following:\n- Calculate the minimum, maximum, mean, median, and standard deviation of `'MEDV'`, which is stored in `prices`.\n - Store each calculation in their respective variable.", "_____no_output_____" ] ], [ [ "# TODO: Minimum price of the data\nminimum_price = np.min(prices)\n\n# TODO: Maximum price of the data\nmaximum_price = np.max(prices)\n\n# TODO: Mean price of the data\nmean_price = np.mean(prices)\n\n# TODO: Median price of the data\nmedian_price = np.median(prices)\n\n# TODO: Standard deviation of prices of the data\nstd_price = np.std(prices)\n\n# Show the calculated statistics\nprint \"Statistics for Boston housing dataset:\\n\"\nprint \"Minimum price: ${:,.2f}\".format(minimum_price)\nprint \"Maximum price: ${:,.2f}\".format(maximum_price)\nprint \"Mean price: ${:,.2f}\".format(mean_price)\nprint \"Median price ${:,.2f}\".format(median_price)\nprint \"Standard deviation of prices: ${:,.2f}\".format(std_price)", "Statistics for Boston housing dataset:\n\nMinimum price: $105,000.00\nMaximum price: $1,024,800.00\nMean price: $454,342.94\nMedian price $438,900.00\nStandard deviation of prices: $165,171.13\n" ] ], [ [ "### Question 1 - Feature Observation\nAs a reminder, we are using three features from the Boston housing dataset: `'RM'`, `'LSTAT'`, and `'PTRATIO'`. 
For each data point (neighborhood):\n- `'RM'` is the average number of rooms among homes in the neighborhood.\n- `'LSTAT'` is the percentage of homeowners in the neighborhood considered \"lower class\" (working poor).\n- `'PTRATIO'` is the ratio of students to teachers in primary and secondary schools in the neighborhood.\n\n\n** Using your intuition, for each of the three features above, do you think that an increase in the value of that feature would lead to an **increase** in the value of `'MEDV'` or a **decrease** in the value of `'MEDV'`? Justify your answer for each.**\n\n**Hint:** This problem can be phrased using examples like below. \n* Would you expect a home that has an `'RM'` value (number of rooms) of 6 to be worth more or less than a home that has an `'RM'` value of 7?\n* Would you expect a neighborhood that has an `'LSTAT'` value (percent of lower class workers) of 15 to have home prices worth more or less than a neighborhood that has an `'LSTAT'` value of 20?\n* Would you expect a neighborhood that has a `'PTRATIO'` value (ratio of students to teachers) of 10 to have home prices worth more or less than a neighborhood that has a `'PTRATIO'` value of 15?", "_____no_output_____" ], [ "**Answer: **\n\nThe values of these features will affect the value of MEDV as follows:\n* RM: In my view, as the number of rooms increases, the house is larger. As a result, it will have a greater price. \n* LSTAT: I expect that the greater the percentage of \"lower class\" homeowners, the lower the price of the houses in the neighbourhood. \n* PTRATIO: High student-to-teacher ratios mean that there are only a few schools compared to the number of people living in the neighborhood. Therefore, this educational situation indicates an underdeveloped and less expensive neighborhood.", "_____no_output_____" ], [ "----\n\n## Developing a Model\nIn this second section of the project, you will develop the tools and techniques necessary for a model to make a prediction. Being able to make accurate evaluations of each model's performance through the use of these tools and techniques helps to greatly reinforce the confidence in your predictions.", "_____no_output_____" ], [ "### Implementation: Define a Performance Metric\nIt is difficult to measure the quality of a given model without quantifying its performance over training and testing. This is typically done using some type of performance metric, whether it is through calculating some type of error, the goodness of fit, or some other useful measurement. For this project, you will be calculating the [*coefficient of determination*](http://stattrek.com/statistics/dictionary.aspx?definition=coefficient_of_determination), R<sup>2</sup>, to quantify your model's performance. The coefficient of determination for a model is a useful statistic in regression analysis, as it often describes how \"good\" that model is at making predictions. \n\nThe values for R<sup>2</sup> range from 0 to 1, which captures the percentage of squared correlation between the predicted and actual values of the **target variable**. A model with an R<sup>2</sup> of 0 is no better than a model that always predicts the *mean* of the target variable, whereas a model with an R<sup>2</sup> of 1 perfectly predicts the target variable. Any value between 0 and 1 indicates what percentage of the target variable, using this model, can be explained by the **features**. 
_A model can be given a negative R<sup>2</sup> as well, which indicates that the model is **arbitrarily worse** than one that always predicts the mean of the target variable._\n\nFor the `performance_metric` function in the code cell below, you will need to implement the following:\n- Use `r2_score` from `sklearn.metrics` to perform a performance calculation between `y_true` and `y_predict`.\n- Assign the performance score to the `score` variable.", "_____no_output_____" ] ], [ [ "# TODO: Import 'r2_score'\nfrom sklearn.metrics import r2_score\n\ndef performance_metric(y_true, y_predict):\n \"\"\" Calculates and returns the performance score between \n true and predicted values based on the metric chosen. \"\"\"\n \n # TODO: Calculate the performance score between 'y_true' and 'y_predict'\n score = r2_score(y_true, y_predict)\n \n # Return the score\n return score", "_____no_output_____" ] ], [ [ "### Question 2 - Goodness of Fit\nAssume that a dataset contains five data points and a model made the following predictions for the target variable:\n\n| True Value | Prediction |\n| :-------------: | :--------: |\n| 3.0 | 2.5 |\n| -0.5 | 0.0 |\n| 2.0 | 2.1 |\n| 7.0 | 7.8 |\n| 4.2 | 5.3 |\n\nRun the code cell below to use the `performance_metric` function and calculate this model's coefficient of determination.", "_____no_output_____" ] ], [ [ "# Calculate the performance of this model\nscore = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])\nprint \"Model has a coefficient of determination, R^2, of {:.3f}.\".format(score)", "Model has a coefficient of determination, R^2, of 0.923.\n" ] ], [ [ "* Would you consider this model to have successfully captured the variation of the target variable? \n* Why or why not?\n\n** Hint: ** The R2 score is the proportion of the variance in the dependent variable that is predictable from the independent variable. In other words:\n* R2 score of 0 means that the dependent variable cannot be predicted from the independent variable.\n* R2 score of 1 means the dependent variable can be predicted from the independent variable.\n* R2 score between 0 and 1 indicates the extent to which the dependent variable is predictable. \n* R2 score of 0.40 means that 40 percent of the variance in Y is predictable from X.", "_____no_output_____" ], [ "**Answer:**\n\nThe model has a coefficient of determination, R^2, of 0.923. Therefore, I would consider the model to have successfully captured the variation of the target variable. With that score, 92.3% of the variance in Y is predictable from X.", "_____no_output_____" ], [ "### Implementation: Shuffle and Split Data\nYour next implementation requires that you take the Boston housing dataset and split the data into training and testing subsets. Typically, the data is also shuffled into a random order when creating the training and testing subsets to remove any bias in the ordering of the dataset.\n\nFor the code cell below, you will need to implement the following:\n- Use `train_test_split` from `sklearn.cross_validation` to shuffle and split the `features` and `prices` data into training and testing sets.\n - Split the data into 80% training and 20% testing.\n - Set the `random_state` for `train_test_split` to a value of your choice. 
This ensures results are consistent.\n- Assign the train and testing splits to `X_train`, `X_test`, `y_train`, and `y_test`.", "_____no_output_____" ] ], [ [ "# TODO: Import 'train_test_split'\nfrom sklearn.cross_validation import train_test_split\n\n# TODO: Shuffle and split the data into training and testing subsets\nX = features\ny = prices\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 1)\n\n# Success\nprint \"Training and testing split was successful.\"", "Training and testing split was successful.\n" ] ], [ [ "### Question 3 - Training and Testing\n\n* What is the benefit to splitting a dataset into some ratio of training and testing subsets for a learning algorithm?\n\n**Hint:** Think about how overfitting or underfitting is contingent upon how splits on data is done.", "_____no_output_____" ], [ "**Answer: **\n\nThe benefit of splitting of data into training and testing sets is that we can measure the accuracy of our model i.e how well it performs on unseen data. The testing data set works as a set of unseen data. The objective is to have the model perform on any data with the highest accuracy. Only when the model has been trained well and tested thoroughly, it is used for actual prediction applications. We should not use the same data for testing and training because that would make the model biased towards the test set. It wouldn't then show if overfitting occurred or not and how well generalized our model is.", "_____no_output_____" ], [ "----\n\n## Analyzing Model Performance\nIn this third section of the project, you'll take a look at several models' learning and testing performances on various subsets of training data. Additionally, you'll investigate one particular algorithm with an increasing `'max_depth'` parameter on the full training set to observe how model complexity affects performance. Graphing your model's performance based on varying criteria can be beneficial in the analysis process, such as visualizing behavior that may not have been apparent from the results alone.", "_____no_output_____" ], [ "### Learning Curves\nThe following code cell produces four graphs for a decision tree model with different maximum depths. Each graph visualizes the learning curves of the model for both training and testing as the size of the training set is increased. Note that the shaded region of a learning curve denotes the uncertainty of that curve (measured as the standard deviation). The model is scored on both the training and testing sets using R<sup>2</sup>, the coefficient of determination. \n\nRun the code cell below and use these graphs to answer the following question.", "_____no_output_____" ] ], [ [ "# Produce learning curves for varying training set sizes and maximum depths\nvs.ModelLearning(features, prices)", "_____no_output_____" ] ], [ [ "### Question 4 - Learning the Data\n* Choose one of the graphs above and state the maximum depth for the model. \n* What happens to the score of the training curve as more training points are added? What about the testing curve? \n* Would having more training points benefit the model? \n\n**Hint:** Are the learning curves converging to particular scores? Generally speaking, the more data you have, the better. 
But if your training and testing curves are converging with a score above your benchmark threshold, would this be necessary?\nThink about the pros and cons of adding more training points based on if the training and testing curves are converging.", "_____no_output_____" ], [ "**Answer: **\n\nI have chosen the first graph, with a max_depth = 1.\n- The training score decreases significantly between 0 and 50 training points, then gradually continues to decrease by small increments until about 300, where it levels off.\n\n- The testing score increases significantly between 0 and 50 training points, then gradually continues to increase (with a couple small increase points). It levels off around 300 points as well, which is the closest point to the training score.\n\n- More training points do not appear to benefit the model after 300. Adding more points shows stabilization of the data with slight fluctuations around 0.42.", "_____no_output_____" ], [ "### Complexity Curves\nThe following code cell produces a graph for a decision tree model that has been trained and validated on the training data using different maximum depths. The graph produces two complexity curves — one for training and one for validation. Similar to the **learning curves**, the shaded regions of both the complexity curves denote the uncertainty in those curves, and the model is scored on both the training and validation sets using the `performance_metric` function. \n\n** Run the code cell below and use this graph to answer the following two questions Q5 and Q6. **", "_____no_output_____" ] ], [ [ "vs.ModelComplexity(X_train, y_train)", "_____no_output_____" ] ], [ [ "### Question 5 - Bias-Variance Tradeoff\n* When the model is trained with a maximum depth of 1, does the model suffer from high bias or from high variance? \n* How about when the model is trained with a maximum depth of 10? What visual cues in the graph justify your conclusions?\n\n**Hint:** High bias is a sign of underfitting(model is not complex enough to pick up the nuances in the data) and high variance is a sign of overfitting(model is by-hearting the data and cannot generalize well). Think about which model(depth 1 or 10) aligns with which part of the tradeoff.", "_____no_output_____" ], [ "**Answer: **\n\nWhen the model is trained with a maximum depth of 1, it suffers from high bias. Both training and validation scores are low, and the model is not complex enough.\n\nWhen the model is trained with a maximum depth of 10, it suffers from high variance. There is a significant gap between the training score and valudation score.", "_____no_output_____" ], [ "### Question 6 - Best-Guess Optimal Model\n* Which maximum depth do you think results in a model that best generalizes to unseen data? \n* What intuition lead you to this answer?\n\n** Hint: ** Look at the graph above Question 5 and see where the validation scores lie for the various depths that have been assigned to the model. Does it get better with increased depth? At what point do we get our best validation score without overcomplicating our model? And remember, Occams Razor states \"Among competing hypotheses, the one with the fewest assumptions should be selected.\"", "_____no_output_____" ], [ "**Answer: **\n\nThe model that best generalizes to unseen data appears to be one with max_depth = 4, since the training and validation scores are both high and close together. 
After this point, they diverge significantly and validation score goes down.", "_____no_output_____" ], [ "-----\n\n## Evaluating Model Performance\nIn this final section of the project, you will construct a model and make a prediction on the client's feature set using an optimized model from `fit_model`.", "_____no_output_____" ], [ "### Question 7 - Grid Search\n* What is the grid search technique?\n* How it can be applied to optimize a learning algorithm?\n\n** Hint: ** When explaining the Grid Search technique, be sure to touch upon why it is used, what the 'grid' entails and what the end goal of this method is. To solidify your answer, you can also give an example of a parameter in a model that can be optimized using this approach.", "_____no_output_____" ], [ "**Answer: **\n\nGrid search is a hyperparameter optimization technique that is scored against a performance metric. It is an algorithm with the help of which we can find the best hyper-parameter combinations of a ML model. The grid consists of the hyper-parameters to tune along one axis and the values for those hyper-parameters along the opposite axis. Grid Search will exhaustively search for all the possible parameter combinations and weigh them against a specified performance metric.\n\nIt can be used to better classify parameters which will yield a better score and eleminate the parameters which have less or no effect thus, reducing the time taken for guessing the good parameters.", "_____no_output_____" ], [ "### Question 8 - Cross-Validation\n\n* What is the k-fold cross-validation training technique? \n\n* What benefit does this technique provide for grid search when optimizing a model?\n\n**Hint:** When explaining the k-fold cross validation technique, be sure to touch upon what 'k' is, how the dataset is split into different parts for training and testing and the number of times it is run based on the 'k' value.\n\nWhen thinking about how k-fold cross validation helps grid search, think about the main drawbacks of grid search which are hinged upon **using a particular subset of data for training or testing** and how k-fold cv could help alleviate that. You can refer to the [docs](http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation) for your answer.", "_____no_output_____" ], [ "**Answer: **\n\nK-fold cross-validation helps prevent overfitting and helps maximize data usage if the dataset is small. It splits the training set into k smaller subsets of data, then trains a model (this is the training data) using k-1 of the folds, or subsets.The process eliminates the need for a validation set of data, since the training set it also used for validation. Then, the result can be validated against the remaining data, which is used as a test set.", "_____no_output_____" ], [ "### Implementation: Fitting a Model\nYour final implementation requires that you bring everything together and train a model using the **decision tree algorithm**. To ensure that you are producing an optimized model, you will train the model using the grid search technique to optimize the `'max_depth'` parameter for the decision tree. The `'max_depth'` parameter can be thought of as how many questions the decision tree algorithm is allowed to ask about the data before making a prediction. Decision trees are part of a class of algorithms called *supervised learning algorithms*.\n\nIn addition, you will find your implementation is using `ShuffleSplit()` for an alternative form of cross-validation (see the `'cv_sets'` variable). 
While it is not the K-Fold cross-validation technique you describe in **Question 8**, this type of cross-validation technique is just as useful!. The `ShuffleSplit()` implementation below will create 10 (`'n_splits'`) shuffled sets, and for each shuffle, 20% (`'test_size'`) of the data will be used as the *validation set*. While you're working on your implementation, think about the contrasts and similarities it has to the K-fold cross-validation technique.\n\nPlease note that ShuffleSplit has different parameters in scikit-learn versions 0.17 and 0.18.\nFor the `fit_model` function in the code cell below, you will need to implement the following:\n- Use [`DecisionTreeRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html) from `sklearn.tree` to create a decision tree regressor object.\n - Assign this object to the `'regressor'` variable.\n- Create a dictionary for `'max_depth'` with the values from 1 to 10, and assign this to the `'params'` variable.\n- Use [`make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html) from `sklearn.metrics` to create a scoring function object.\n - Pass the `performance_metric` function as a parameter to the object.\n - Assign this scoring function to the `'scoring_fnc'` variable.\n- Use [`GridSearchCV`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) from `sklearn.grid_search` to create a grid search object.\n - Pass the variables `'regressor'`, `'params'`, `'scoring_fnc'`, and `'cv_sets'` as parameters to the object. \n - Assign the `GridSearchCV` object to the `'grid'` variable.", "_____no_output_____" ] ], [ [ "# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import make_scorer \nfrom sklearn.grid_search import GridSearchCV\n\ndef fit_model(X, y):\n \"\"\" Performs grid search over the 'max_depth' parameter for a \n decision tree regressor trained on the input data [X, y]. 
\"\"\"\n \n # Create cross-validation sets from the training data\n # sklearn version 0.18: ShuffleSplit(n_splits=10, test_size=0.1, train_size=None, random_state=None)\n # sklearn versiin 0.17: ShuffleSplit(n, n_iter=10, test_size=0.1, train_size=None, random_state=None)\n cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)\n\n # TODO: Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {'max_depth': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search cv object --> GridSearchCV()\n # Make sure to include the right parameters in the object:\n # (estimator, param_grid, scoring, cv) which have values 'regressor', 'params', 'scoring_fnc', and 'cv_sets' respectively.\n grid = GridSearchCV(estimator = regressor, param_grid = params, scoring = scoring_fnc, cv = cv_sets)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "/home/shreygupta/anaconda3/envs/ipykernel_py2/lib/python2.7/site-packages/sklearn/grid_search.py:42: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. This module will be removed in 0.20.\n DeprecationWarning)\n" ] ], [ [ "### Making Predictions\nOnce a model has been trained on a given set of data, it can now be used to make predictions on new sets of input data. In the case of a *decision tree regressor*, the model has learned *what the best questions to ask about the input data are*, and can respond with a prediction for the **target variable**. You can use these predictions to gain information about data where the value of the target variable is unknown — such as data the model was not trained on.", "_____no_output_____" ], [ "### Question 9 - Optimal Model\n\n* What maximum depth does the optimal model have? How does this result compare to your guess in **Question 6**? \n\nRun the code block below to fit the decision tree regressor to the training data and produce an optimal model.", "_____no_output_____" ] ], [ [ "# Fit the training data to the model using grid search\nreg = fit_model(X_train, y_train)\n\n# Produce the value for 'max_depth'\nprint \"Parameter 'max_depth' is {} for the optimal model.\".format(reg.get_params()['max_depth'])", "Parameter 'max_depth' is 5 for the optimal model.\n" ] ], [ [ "** Hint: ** The answer comes from the output of the code snipped above.\n\n**Answer: **\n\nThe optimal model has a maximum depth of 5.\n\nIn question 6, I guessed a max_depth of 4 where as the optimal value comes out to be 5. \nThis is completely okay as according to the graph in question 6, considering the value as 5, we get a good score and the training & testing scores tend to converge there too.", "_____no_output_____" ], [ "### Question 10 - Predicting Selling Prices\nImagine that you were a real estate agent in the Boston area looking to use this model to help price homes owned by your clients that they wish to sell. 
You have collected the following information from three of your clients:\n\n| Feature | Client 1 | Client 2 | Client 3 |\n| :---: | :---: | :---: | :---: |\n| Total number of rooms in home | 5 rooms | 4 rooms | 8 rooms |\n| Neighborhood poverty level (as %) | 17% | 32% | 3% |\n| Student-teacher ratio of nearby schools | 15-to-1 | 22-to-1 | 12-to-1 |\n\n* What price would you recommend each client sell his/her home at? \n* Do these prices seem reasonable given the values for the respective features? \n\n**Hint:** Use the statistics you calculated in the **Data Exploration** section to help justify your response. Of the three clients, client 3 has has the biggest house, in the best public school neighborhood with the lowest poverty level; while client 2 has the smallest house, in a neighborhood with a relatively high poverty rate and not the best public schools.\n\nRun the code block below to have your optimized model make predictions for each client's home.", "_____no_output_____" ] ], [ [ "# Produce a matrix for client data\nclient_data = [[5, 17, 15], # Client 1\n [4, 32, 22], # Client 2\n [8, 3, 12]] # Client 3\n\n# Show predictions\nfor i, price in enumerate(reg.predict(client_data)):\n print \"Predicted selling price for Client {}'s home: ${:,.2f}\".format(i+1, price)", "Predicted selling price for Client 1's home: $419,700.00\nPredicted selling price for Client 2's home: $287,100.00\nPredicted selling price for Client 3's home: $927,500.00\n" ] ], [ [ "**Answer: **\n\nThe recommended price for Client 1 is $419,700. It has the middle number of rooms (5), neighborhood poverty level of 17%, and student-teacher ratio of 15-to-1. It has average parameters in all categories, so it makes sense for it to be of average price.\n\nThe recommended price for Client 2 is $287,100. This is reasonable considering they have high poverty (32%) and student-to-teacher ratios (22-to-1), and the least number of rooms (4).\n\nThe recommended price for Client 3 is $927,500. This is reasonable since it has the greatest number of rooms (8), a very low poverty level (3%), and the lowest student-teacher ratio (12-to-1).", "_____no_output_____" ], [ "### Sensitivity\nAn optimal model is not necessarily a robust model. Sometimes, a model is either too complex or too simple to sufficiently generalize to new data. Sometimes, a model could use a learning algorithm that is not appropriate for the structure of the data given. Other times, the data itself could be too noisy or contain too few samples to allow a model to adequately capture the target variable — i.e., the model is underfitted. \n\n**Run the code cell below to run the `fit_model` function ten times with different training and testing sets to see how the prediction for a specific client changes with respect to the data it's trained on.**", "_____no_output_____" ] ], [ [ "vs.PredictTrials(features, prices, fit_model, client_data)", "Trial 1: $391,183.33\nTrial 2: $419,700.00\nTrial 3: $415,800.00\nTrial 4: $420,622.22\nTrial 5: $413,334.78\nTrial 6: $411,931.58\nTrial 7: $399,663.16\nTrial 8: $407,232.00\nTrial 9: $351,577.61\nTrial 10: $413,700.00\n\nRange in prices: $69,044.61\n" ] ], [ [ "### Question 11 - Applicability\n\n* In a few sentences, discuss whether the constructed model should or should not be used in a real-world setting. \n\n**Hint:** Take a look at the range in prices as calculated in the code snippet above. Some questions to answering:\n- How relevant today is data that was collected from 1978? 
How important is inflation?\n- Are the features present in the data sufficient to describe a home? Do you think factors like quality of apppliances in the home, square feet of the plot area, presence of pool or not etc should factor in?\n- Is the model robust enough to make consistent predictions?\n- Would data collected in an urban city like Boston be applicable in a rural city?\n- Is it fair to judge the price of an individual home based on the characteristics of the entire neighborhood?", "_____no_output_____" ], [ "**Answer: **\n\nThe range in prices is $69,044.61.\n\nThe constructed model should not be used in a real-world setting because it is 2018 now and the data we have worked upon is from 1978. The data collected from 1978 is still somewhat relevant, but doesn't account for several other factors that should be considered in today's time.\n\nThe model is not robust enough to make consistent predictions, but it can be used to generalize. For example, if we know that most homes in the target area are approximately 2,000 square feet and were built in 1950, we can rely more heavily on the model.\n\nData collected in an urban city like Boston would not be applicable in a rural city. Homes in rural cities often cost less due to their distance from jobs and city resources like shopping centers, and they also are often larger than city homes in general.\n\nIt is not fair to judge the price of an individual home based on the characteristics of the entire neighborhood. A home could have been renovated or could have extra amenities that the typical neighborhood house does not have.", "_____no_output_____" ], [ "> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
cb648e869941f1a3368d0d7f34fbdb3d1ae085ea
6,534
ipynb
Jupyter Notebook
sklearn/sklearn learning/demonstration/auto_examples_jupyter/ensemble/plot_gradient_boosting_early_stopping.ipynb
wangyendt/deeplearning_models
47883b6c65b8d05a0d1c5737f1552df6476ded34
[ "MIT" ]
1
2020-06-04T11:10:27.000Z
2020-06-04T11:10:27.000Z
sklearn/sklearn learning/demonstration/auto_examples_jupyter/ensemble/plot_gradient_boosting_early_stopping.ipynb
wangyendt/deeplearning_models
47883b6c65b8d05a0d1c5737f1552df6476ded34
[ "MIT" ]
null
null
null
sklearn/sklearn learning/demonstration/auto_examples_jupyter/ensemble/plot_gradient_boosting_early_stopping.ipynb
wangyendt/deeplearning_models
47883b6c65b8d05a0d1c5737f1552df6476ded34
[ "MIT" ]
null
null
null
72.6
1,881
0.618151
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Early stopping of Gradient Boosting\n\n\nGradient boosting is an ensembling technique where several weak learners\n(regression trees) are combined to yield a powerful single model, in an\niterative fashion.\n\nEarly stopping support in Gradient Boosting enables us to find the least number\nof iterations which is sufficient to build a model that generalizes well to\nunseen data.\n\nThe concept of early stopping is simple. We specify a ``validation_fraction``\nwhich denotes the fraction of the whole dataset that will be kept aside from\ntraining to assess the validation loss of the model. The gradient boosting\nmodel is trained using the training set and evaluated using the validation set.\nWhen each additional stage of regression tree is added, the validation set is\nused to score the model. This is continued until the scores of the model in\nthe last ``n_iter_no_change`` stages do not improve by atleast `tol`. After\nthat the model is considered to have converged and further addition of stages\nis \"stopped early\".\n\nThe number of stages of the final model is available at the attribute\n``n_estimators_``.\n\nThis example illustrates how the early stopping can used in the\n:class:`sklearn.ensemble.GradientBoostingClassifier` model to achieve\nalmost the same accuracy as compared to a model built without early stopping\nusing many fewer estimators. This can significantly reduce training time,\nmemory usage and prediction latency.\n", "_____no_output_____" ] ], [ [ "# Authors: Vighnesh Birodkar <[email protected]>\n# Raghav RV <[email protected]>\n# License: BSD 3 clause\n\nimport time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import ensemble\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\nprint(__doc__)\n\ndata_list = [datasets.load_iris(), datasets.load_digits()]\ndata_list = [(d.data, d.target) for d in data_list]\ndata_list += [datasets.make_hastie_10_2()]\nnames = ['Iris Data', 'Digits Data', 'Hastie Data']\n\nn_gb = []\nscore_gb = []\ntime_gb = []\nn_gbes = []\nscore_gbes = []\ntime_gbes = []\n\nn_estimators = 500\n\nfor X, y in data_list:\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=0)\n\n # We specify that if the scores don't improve by atleast 0.01 for the last\n # 10 stages, stop fitting additional stages\n gbes = ensemble.GradientBoostingClassifier(n_estimators=n_estimators,\n validation_fraction=0.2,\n n_iter_no_change=5, tol=0.01,\n random_state=0)\n gb = ensemble.GradientBoostingClassifier(n_estimators=n_estimators,\n random_state=0)\n start = time.time()\n gb.fit(X_train, y_train)\n time_gb.append(time.time() - start)\n\n start = time.time()\n gbes.fit(X_train, y_train)\n time_gbes.append(time.time() - start)\n\n score_gb.append(gb.score(X_test, y_test))\n score_gbes.append(gbes.score(X_test, y_test))\n\n n_gb.append(gb.n_estimators_)\n n_gbes.append(gbes.n_estimators_)\n\nbar_width = 0.2\nn = len(data_list)\nindex = np.arange(0, n * bar_width, bar_width) * 2.5\nindex = index[0:n]", "_____no_output_____" ] ], [ [ "Compare scores with and without early stopping\n----------------------------------------------\n\n", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(9, 5))\n\nbar1 = plt.bar(index, score_gb, bar_width, label='Without early stopping',\n color='crimson')\nbar2 = plt.bar(index + bar_width, score_gbes, bar_width,\n label='With early stopping', color='coral')\n\nplt.xticks(index + bar_width, 
names)\nplt.yticks(np.arange(0, 1.3, 0.1))\n\n\ndef autolabel(rects, n_estimators):\n \"\"\"\n Attach a text label above each bar displaying n_estimators of each model\n \"\"\"\n for i, rect in enumerate(rects):\n plt.text(rect.get_x() + rect.get_width() / 2.,\n 1.05 * rect.get_height(), 'n_est=%d' % n_estimators[i],\n ha='center', va='bottom')\n\n\nautolabel(bar1, n_gb)\nautolabel(bar2, n_gbes)\n\nplt.ylim([0, 1.3])\nplt.legend(loc='best')\nplt.grid(True)\n\nplt.xlabel('Datasets')\nplt.ylabel('Test score')\n\nplt.show()", "_____no_output_____" ] ], [ [ "Compare fit times with and without early stopping\n-------------------------------------------------\n\n", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(9, 5))\n\nbar1 = plt.bar(index, time_gb, bar_width, label='Without early stopping',\n color='crimson')\nbar2 = plt.bar(index + bar_width, time_gbes, bar_width,\n label='With early stopping', color='coral')\n\nmax_y = np.amax(np.maximum(time_gb, time_gbes))\n\nplt.xticks(index + bar_width, names)\nplt.yticks(np.linspace(0, 1.3 * max_y, 13))\n\nautolabel(bar1, n_gb)\nautolabel(bar2, n_gbes)\n\nplt.ylim([0, 1.3 * max_y])\nplt.legend(loc='best')\nplt.grid(True)\n\nplt.xlabel('Datasets')\nplt.ylabel('Fit Time')\n\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb6490cbf6a66081c2913db3db55fa39ae452f7d
22,637
ipynb
Jupyter Notebook
python-plot/slice-restoration/compare-heuristic-failure-nc-cc.ipynb
qiaolunzhang/SciencePlots
a13cc4cc64554d91827dc5aed51de9c27800f04a
[ "MIT" ]
null
null
null
python-plot/slice-restoration/compare-heuristic-failure-nc-cc.ipynb
qiaolunzhang/SciencePlots
a13cc4cc64554d91827dc5aed51de9c27800f04a
[ "MIT" ]
null
null
null
python-plot/slice-restoration/compare-heuristic-failure-nc-cc.ipynb
qiaolunzhang/SciencePlots
a13cc4cc64554d91827dc5aed51de9c27800f04a
[ "MIT" ]
null
null
null
57.308861
8,744
0.637099
[ [ [ "import numpy as np\nimport matplotlib as mpl\n#mpl.use('pdf')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.rc('font', family='serif', serif='Times')\nplt.rc('text', usetex=True)\nplt.rc('xtick', labelsize=6)\nplt.rc('ytick', labelsize=6)\nplt.rc('axes', labelsize=6)\n#axes.linewidth : 0.5\nplt.rc('axes', linewidth=0.5)\n#ytick.major.width : 0.5\nplt.rc('ytick.major', width=0.5)\nplt.rcParams['xtick.direction'] = 'in'\nplt.rcParams['ytick.direction'] = 'in'\nplt.rc('ytick.minor', visible=True)\n\n#plt.style.use(r\"..\\..\\styles\\infocom.mplstyle\") # Insert your save location here\n\n# width as measured in inkscape\nfig_width = 3.487\n#height = width / 1.618 / 2\nfig_height = fig_width / 1.3 / 2", "_____no_output_____" ], [ "cc_folder_list = [\"SF_new_results/\", \"capacity_results/\", \"BF_new_results/\"]\nnc_folder_list = [\"SF_new_results_NC/\", \"capacity_resultsNC/\", \"BF_new_results_NC/\"]\ncc_folder_list = [\"failure20stages-new-rounding/\" + e for e in cc_folder_list]\nnc_folder_list = [\"failure20stages-new-rounding/\" + e for e in nc_folder_list]\nfile_list = [\"no-reconfig120.csv\", \"Link-reconfig120.csv\", \"LimitedReconfig120.csv\", \"Any-reconfig120.csv\"]\nprint(cc_folder_list)\nprint(nc_folder_list)", "['failure20stages-new-rounding/SF_new_results/', 'failure20stages-new-rounding/capacity_results/', 'failure20stages-new-rounding/BF_new_results/']\n['failure20stages-new-rounding/SF_new_results_NC/', 'failure20stages-new-rounding/capacity_resultsNC/', 'failure20stages-new-rounding/BF_new_results_NC/']\n" ], [ "nc_objective_data = np.full((5, 3), 0)\nmax_stage = 20\nselected_stage = 10\nfor i in range(3):\n for j in range(4):\n with open(nc_folder_list[i]+file_list[j], \"r\") as f:\n if j != 2:\n f1 = f.readlines()\n start_line = 0\n for line in f1:\n if line.find(\"%Stage\") >= 0:\n break\n else:\n start_line = start_line + 1\n #print(start_line)\n #print(len(f1))\n line = f1[selected_stage+start_line]\n line = line.split(\",\")\n if j == 0:\n nc_objective_data[0, i] = float(line[2])\n if j == 1:\n nc_objective_data[1, i] = float(line[2])\n if j == 3:\n nc_objective_data[4, i] = float(line[2])\n else:\n f1 = f.readlines()\n start_line = 0\n start_line1 = 0\n for line in f1:\n if line.find(\"%Stage\") >= 0:\n break\n else:\n start_line = start_line + 1\n for index in range(start_line+max_stage+1, len(f1)):\n start_line1 = index\n if f1[index].find(\"%Stage\") >= 0:\n break\n else:\n start_line1 = start_line1 + 1\n\n line = f1[selected_stage+start_line]\n line = line.split(\",\")\n nc_objective_data[2, i] = float(line[2])\n #mesh3data[2, index] = int(line[1])\n\n line = f1[selected_stage+start_line1]\n line = line.split(\",\")\n nc_objective_data[3, i] = float(line[2])\nprint(nc_objective_data)", "[[3612 2447 1360]\n [4293 3338 2383]\n [4959 4581 3617]\n [5079 4762 3743]\n [5798 5703 4398]]\n" ], [ "cc_objective_data = np.full((5, 3), 0)\nmax_stage = 10\nselected_stage = 10\nfor i in range(1):\n for j in range(4):\n with open(cc_folder_list[i]+file_list[j], \"r\") as f:\n if j != 2:\n f1 = f.readlines()\n start_line = 0\n for line in f1:\n if line.find(\"%Stage\") >= 0:\n break\n else:\n start_line = start_line + 1\n #print(start_line)\n #print(len(f1))\n line = f1[selected_stage+start_line]\n line = line.split(\",\")\n if j == 0:\n cc_objective_data[0, i] = float(line[2])\n if j == 1:\n cc_objective_data[1, i] = float(line[2])\n if j == 3:\n cc_objective_data[4, i] = float(line[2])\n else:\n f1 = f.readlines()\n start_line = 0\n start_line1 = 0\n for 
line in f1:\n if line.find(\"%Stage\") >= 0:\n break\n else:\n start_line = start_line + 1\n for index in range(start_line+max_stage+1, len(f1)):\n if f1[index].find(\"%Stage\") >= 0:\n start_line1 = index\n break\n else:\n start_line1 = start_line1 + 1\n\n line = f1[selected_stage+start_line]\n line = line.split(\",\")\n cc_objective_data[2, i] = float(line[2])\n #mesh3data[2, index] = int(line[1])\n\n line = f1[selected_stage+start_line1]\n line = line.split(\",\")\n cc_objective_data[3, i] = float(line[2])\nprint(cc_objective_data)", "[[3678 0 0]\n [4424 0 0]\n [5119 0 0]\n [5148 0 0]\n [5960 0 0]]\n" ], [ "max_stage = 20\nselected_stage = 10\nfor i in range(1, 3):\n for j in range(4):\n with open(cc_folder_list[i]+file_list[j], \"r\") as f:\n if j != 2:\n f1 = f.readlines()\n start_line = 0\n for line in f1:\n if line.find(\"%Stage\") >= 0:\n break\n else:\n start_line = start_line + 1\n #print(start_line)\n #print(len(f1))\n line = f1[selected_stage+start_line]\n line = line.split(\",\")\n if j == 0:\n cc_objective_data[0, i] = float(line[2])\n if j == 1:\n cc_objective_data[1, i] = float(line[2])\n if j == 3:\n cc_objective_data[4, i] = float(line[2])\n else:\n f1 = f.readlines()\n start_line = 0\n start_line1 = 0\n for line in f1:\n if line.find(\"%Stage\") >= 0:\n break\n else:\n start_line = start_line + 1\n for index in range(start_line+max_stage+1, len(f1)):\n if f1[index].find(\"%Stage\") >= 0:\n start_line1 = index\n break\n else:\n start_line1 = start_line1 + 1\n\n line = f1[selected_stage+start_line]\n line = line.split(\",\")\n cc_objective_data[2, i] = float(line[2])\n #mesh3data[2, index] = int(line[1])\n\n line = f1[selected_stage+start_line1]\n line = line.split(\",\")\n cc_objective_data[3, i] = float(line[2])\nprint(cc_objective_data)", "[[3678 2507 1378]\n [4424 3424 2454]\n [5119 4721 3754]\n [5148 4744 3736]\n [5960 5915 4273]]\n" ], [ "(cc_objective_data - nc_objective_data) / nc_objective_data", "_____no_output_____" ], [ "import numpy as np\nN = 3\nind = np.arange(N) \nwidth = 1 / 6\n\nx = [0, '20', '30', '40']\nx_tick_label_list = ['20', '30', '40']\n\nfig, (ax1, ax2) = plt.subplots(1, 2)\n#ax1.bar(x, objective)\n#ax1.bar(x, objective[0])\nlabel_list = ['No-rec', 'Link-rec', 'Lim-rec(3, 0)', 'Lim-rec(3, 1)', 'Any-rec']\npatterns = ('//////','\\\\\\\\\\\\','---', 'ooo', 'xxx', '\\\\', '\\\\\\\\','++', '*', 'O', '.')\n\nplt.rcParams['hatch.linewidth'] = 0.25 # previous pdf hatch linewidth\n#plt.rcParams['hatch.linewidth'] = 1.0 # previous svg hatch linewidth\n#plt.rcParams['hatch.color'] = 'r'\n\nfor i in range(5):\n ax1.bar(ind + width * (i-2), nc_objective_data[i], width, label=label_list[i],\n #alpha=0.7)\n hatch=patterns[i], alpha=0.7)\n #yerr=error[i], ecolor='black', capsize=1)\nax1.grid(lw = 0.25)\nax2.grid(lw = 0.25)\n\nax1.set_xticklabels(x)\nax1.set_ylabel('Objective value for NC')\nax1.set_xlabel('Percentage of substrate failures (\\%)')\n#ax1.set_ylabel('Objective value')\n#ax1.set_xlabel('Recovery Scenarios')\nax1.xaxis.set_label_coords(0.5,-0.17)\nax1.yaxis.set_label_coords(-0.17,0.5)\n\n#ax1.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),\n# ncol=3, fancybox=True, shadow=True, fontsize='small')\n\nfor i in range(5):\n ax2.bar(ind + width * (i-2), cc_objective_data[i], width, label=label_list[i],\n #alpha=0.7)\n hatch=patterns[i], alpha=0.7)\n\nax2.set_xticklabels(x)\nax2.set_ylabel('Objective value for CC')\nax2.set_xlabel('Percentage of substrate failures 
(\\%)')\nax2.xaxis.set_label_coords(0.5,-0.17)\nax2.yaxis.set_label_coords(-0.17,0.5)\n\nax1.legend(loc='upper center', bbox_to_anchor=(1.16, 1.2),\n ncol=5, prop={'size': 5})\n\nfig.set_size_inches(fig_width, fig_height)\nmpl.pyplot.subplots_adjust(wspace = 0.3)\n\nfig.subplots_adjust(left=.10, bottom=.20, right=.97, top=.85)\n#ax1.grid(color='b', ls = '-.', lw = 0.25)\n\n\n\nplt.show()\nfig.savefig('test-heuristic-failure-cc-nc.pdf')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb649258227daf52fc82606e80e0095824191840
4,050
ipynb
Jupyter Notebook
1. Learn and Practice CNNs - Tensorflow/Other-concepts.ipynb
iVibudh/Convolutional-Neural-Network
6a47cb035c321da322f69aab0da24d14f40934c1
[ "MIT" ]
null
null
null
1. Learn and Practice CNNs - Tensorflow/Other-concepts.ipynb
iVibudh/Convolutional-Neural-Network
6a47cb035c321da322f69aab0da24d14f40934c1
[ "MIT" ]
null
null
null
1. Learn and Practice CNNs - Tensorflow/Other-concepts.ipynb
iVibudh/Convolutional-Neural-Network
6a47cb035c321da322f69aab0da24d14f40934c1
[ "MIT" ]
null
null
null
27
346
0.592099
[ [ [ "# WELCOME TO **\"Tensorflow for Convolutional Neural Networks\" Series** 😁\n\nTensorFlow makes it easy for beginners and experts to create machine learning models for desktop, mobile, web, and cloud. TensorFlow provides a collection of workflows to develop and train models using Python, JavaScript, or Swift, and to easily deploy in the cloud, on-prem, in the browser, or on-device no matter what language you use.\n\nIn this series of 6 project courses, you will understand how Convolutional Neural Networks work and learn how to create amazing models and build, train, and test Convolutional Neural Networks with Tensorflow! 😎 <br/>", "_____no_output_____" ], [ "<br/>\n\n\n## 👉🏻Learn and Practice CNNs", "_____no_output_____" ], [ "In this project, you will learn fundamentals of convolutional neural networks, and you will build an image classifier using a convolutional neural network with tensorflow... ✨ <br/>\n\nLearn and Practice CNNs\n\nhttps://github.com/iVibudh/Convolutional-Neural-Network/blob/main/CNN_series/1.%20Learn%20and%20Practice%20CNNs/Learn%20and%20Practice%20CNNs.ipynb", "_____no_output_____" ], [ "<br/>\n\n## 👉🏻Data Augmentation\n", "_____no_output_____" ], [ "In this project, you will learn how to apply data augmentation with images and learn how to create artificially new training data from existing training data!✨<br/>\n <a href=\"Load, train and prepare data.ipynb\">this project</a>", "_____no_output_____" ], [ "<br/>\n\n## 👉🏻Transfer Learning", "_____no_output_____" ], [ "In this project, you will apply transfer learning to fine-tune a pretrained model for your own image classes!✨<br/>\n <a href=\"Load, train and prepare data.ipynb\">this project</a>", "_____no_output_____" ], [ "<br/>\n\n## 👉🏻Multi-Class Classification", "_____no_output_____" ], [ "In this project, you will create and train a mul-class classifier on a real world data to be applied on your own data!✨<br/>\n <a href=\"Load, train and prepare data.ipynb\">this project</a>", "_____no_output_____" ], [ "<br/>\n\n## 👉🏻Image Segmentation", "_____no_output_____" ], [ "In this project, you will learn image segmentation which is a key topic in image processing and computer vision with real world applications!✨<br/>\n <a href=\"Load, train and prepare data.ipynb\">this project</a>", "_____no_output_____" ], [ "<br/>\n\n## 👉🏻Object Recognition", "_____no_output_____" ], [ "In this project, you will build an object recognition alorithm in a real world dataset and use it for your own projects!✨<br/>\n <a href=\"Load, train and prepare data.ipynb\">this project</a>", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb649b87a820e66962cdfb22d28b43e986c70797
1,609
ipynb
Jupyter Notebook
scripts/test.ipynb
DefCon-007/rateMyProfessor
78a4a336136f3c5675a13b80b95963dbfdf2e88d
[ "MIT" ]
10
2018-12-30T15:44:59.000Z
2021-01-19T20:24:11.000Z
scripts/test.ipynb
DefCon-007/rateMyProfessor
78a4a336136f3c5675a13b80b95963dbfdf2e88d
[ "MIT" ]
6
2020-02-11T23:40:43.000Z
2021-06-10T21:13:42.000Z
scripts/test.ipynb
DefCon-007/rateMyProfessor
78a4a336136f3c5675a13b80b95963dbfdf2e88d
[ "MIT" ]
2
2018-12-31T15:47:43.000Z
2019-10-04T11:51:26.000Z
22.041096
97
0.546302
[ [ [ "from webview.src import utility", "_____no_output_____" ], [ "utility.sendMail(\"[email protected]\")", "202\nb''\nServer: nginx\nDate: Sat, 29 Dec 2018 12:31:59 GMT\nContent-Type: text/plain; charset=utf-8\nContent-Length: 0\nConnection: close\nX-Message-Id: vKzi6Y45TuaIh7Fn1zBCjQ\nAccess-Control-Allow-Origin: https://sendgrid.api-docs.io\nAccess-Control-Allow-Methods: POST\nAccess-Control-Allow-Headers: Authorization, Content-Type, On-behalf-of, x-sg-elas-acl\nAccess-Control-Max-Age: 600\nX-No-CORS-Reason: https://sendgrid.com/docs/Classroom/Basics/API/cors.html\n\n\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cb64b377bfcbf816362ca2b2f886efca9cc65633
15,644
ipynb
Jupyter Notebook
Course2week3-bias&variance/week-3-polynomial-regression-assignment-blank.ipynb
bluove/Machine-Learning-Specialization
2be733fe2b5a0b2848001d851632f413c30ab93d
[ "Apache-2.0" ]
null
null
null
Course2week3-bias&variance/week-3-polynomial-regression-assignment-blank.ipynb
bluove/Machine-Learning-Specialization
2be733fe2b5a0b2848001d851632f413c30ab93d
[ "Apache-2.0" ]
null
null
null
Course2week3-bias&variance/week-3-polynomial-regression-assignment-blank.ipynb
bluove/Machine-Learning-Specialization
2be733fe2b5a0b2848001d851632f413c30ab93d
[ "Apache-2.0" ]
null
null
null
27.737589
363
0.593902
[ [ [ "# Regression Week 3: Assessing Fit (polynomial regression)", "_____no_output_____" ], [ "In this notebook you will compare different regression models in order to assess which model fits best. We will be using polynomial regression as a means to examine this topic. In particular you will:\n* Write a function to take an SArray and a degree and return an SFrame where each column is the SArray to a polynomial value up to the total degree e.g. degree = 3 then column 1 is the SArray column 2 is the SArray squared and column 3 is the SArray cubed\n* Use matplotlib to visualize polynomial regressions\n* Use matplotlib to visualize the same polynomial degree on different subsets of the data\n* Use a validation set to select a polynomial degree\n* Assess the final fit using test data\n\nWe will continue to use the House data from previous notebooks.", "_____no_output_____" ], [ "# Fire up graphlab create", "_____no_output_____" ] ], [ [ "import graphlab", "_____no_output_____" ] ], [ [ "Next we're going to write a polynomial function that takes an SArray and a maximal degree and returns an SFrame with columns containing the SArray to all the powers up to the maximal degree.\n\nThe easiest way to apply a power to an SArray is to use the .apply() and lambda x: functions. \nFor example to take the example array and compute the third power we can do as follows: (note running this cell the first time may take longer than expected since it loads graphlab)", "_____no_output_____" ] ], [ [ "tmp = graphlab.SArray([1., 2., 3.])\ntmp_cubed = tmp.apply(lambda x: x**3)\nprint tmp\nprint tmp_cubed", "_____no_output_____" ] ], [ [ "We can create an empty SFrame using graphlab.SFrame() and then add any columns to it with ex_sframe['column_name'] = value. For example we create an empty SFrame and make the column 'power_1' to be the first power of tmp (i.e. tmp itself).", "_____no_output_____" ] ], [ [ "ex_sframe = graphlab.SFrame()\nex_sframe['power_1'] = tmp\nprint ex_sframe", "_____no_output_____" ] ], [ [ "# Polynomial_sframe function", "_____no_output_____" ], [ "Using the hints above complete the following function to create an SFrame consisting of the powers of an SArray up to a specific degree:", "_____no_output_____" ] ], [ [ "def polynomial_sframe(feature, degree):\n # assume that degree >= 1\n # initialize the SFrame:\n poly_sframe = graphlab.SFrame()\n # and set poly_sframe['power_1'] equal to the passed feature\n\n # first check if degree > 1\n if degree > 1:\n # then loop over the remaining degrees:\n # range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree\n for power in range(2, degree+1): \n # first we'll give the column a name:\n name = 'power_' + str(power)\n # then assign poly_sframe[name] to the appropriate power of feature\n\n return poly_sframe", "_____no_output_____" ] ], [ [ "To test your function consider the smaller tmp variable and what you would expect the outcome of the following call:", "_____no_output_____" ] ], [ [ "print polynomial_sframe(tmp, 3)", "_____no_output_____" ] ], [ [ "# Visualizing polynomial regression", "_____no_output_____" ], [ "Let's use matplotlib to visualize what a polynomial regression looks like on some real data.", "_____no_output_____" ] ], [ [ "sales = graphlab.SFrame('kc_house_data.gl/')", "_____no_output_____" ] ], [ [ "As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. 
For houses with identical square footage, we break the tie by their prices.", "_____no_output_____" ] ], [ [ "sales = sales.sort(['sqft_living', 'price'])", "_____no_output_____" ] ], [ [ "Let's start with a degree 1 polynomial using 'sqft_living' (i.e. a line) to predict 'price' and plot what it looks like.", "_____no_output_____" ] ], [ [ "poly1_data = polynomial_sframe(sales['sqft_living'], 1)\npoly1_data['price'] = sales['price'] # add price to the data since it's the target", "_____no_output_____" ] ], [ [ "NOTE: for all the models in this notebook use validation_set = None to ensure that all results are consistent across users.", "_____no_output_____" ] ], [ [ "model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_1'], validation_set = None)", "_____no_output_____" ], [ "#let's take a look at the weights before we plot\nmodel1.get(\"coefficients\")", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "plt.plot(poly1_data['power_1'],poly1_data['price'],'.',\n poly1_data['power_1'], model1.predict(poly1_data),'-')", "_____no_output_____" ] ], [ [ "Let's unpack that plt.plot() command. The first pair of SArrays we passed are the 1st power of sqft and the actual price we then ask it to print these as dots '.'. The next pair we pass is the 1st power of sqft and the predicted values from the linear model. We ask these to be plotted as a line '-'. \n\nWe can see, not surprisingly, that the predicted values all fall on a line, specifically the one with slope 280 and intercept -43579. What if we wanted to plot a second degree polynomial?", "_____no_output_____" ] ], [ [ "poly2_data = polynomial_sframe(sales['sqft_living'], 2)\nmy_features = poly2_data.column_names() # get the name of the features\npoly2_data['price'] = sales['price'] # add price to the data since it's the target\nmodel2 = graphlab.linear_regression.create(poly2_data, target = 'price', features = my_features, validation_set = None)", "_____no_output_____" ], [ "model2.get(\"coefficients\")", "_____no_output_____" ], [ "plt.plot(poly2_data['power_1'],poly2_data['price'],'.',\n poly2_data['power_1'], model2.predict(poly2_data),'-')", "_____no_output_____" ] ], [ [ "The resulting model looks like half a parabola. Try on your own to see what the cubic looks like:", "_____no_output_____" ], [ "Now try a 15th degree polynomial:", "_____no_output_____" ], [ "What do you think of the 15th degree polynomial? Do you think this is appropriate? If we were to change the data do you think you'd get pretty much the same curve? Let's take a look.", "_____no_output_____" ], [ "# Changing the data and re-learning", "_____no_output_____" ], [ "We're going to split the sales data into four subsets of roughly equal size. Then you will estimate a 15th degree polynomial model on all four subsets of the data. Print the coefficients (you should use .print_rows(num_rows = 16) to view all of them) and plot the resulting fit (as we did above). The quiz will ask you some questions about these results.\n\nTo split the sales data into four subsets, we perform the following steps:\n* First split sales into 2 subsets with `.random_split(0.5, seed=0)`. \n* Next split the resulting subsets into 2 more subsets each. Use `.random_split(0.5, seed=0)`.\n\nWe set `seed=0` in these steps so that different users get consistent results.\nYou should end up with 4 subsets (`set_1`, `set_2`, `set_3`, `set_4`) of approximately equal size. 
", "_____no_output_____" ], [ "Fit a 15th degree polynomial on set_1, set_2, set_3, and set_4 using sqft_living to predict prices. Print the coefficients and make a plot of the resulting model.", "_____no_output_____" ], [ "Some questions you will be asked on your quiz:\n\n**Quiz Question: Is the sign (positive or negative) for power_15 the same in all four models?**\n\n**Quiz Question: (True/False) the plotted fitted lines look the same in all four plots**", "_____no_output_____" ], [ "# Selecting a Polynomial Degree", "_____no_output_____" ], [ "Whenever we have a \"magic\" parameter like the degree of the polynomial there is one well-known way to select these parameters: validation set. (We will explore another approach in week 4).\n\nWe split the sales dataset 3-way into training set, test set, and validation set as follows:\n\n* Split our sales data into 2 sets: `training_and_validation` and `testing`. Use `random_split(0.9, seed=1)`.\n* Further split our training data into two sets: `training` and `validation`. Use `random_split(0.5, seed=1)`.\n\nAgain, we set `seed=1` to obtain consistent results for different users.", "_____no_output_____" ], [ "Next you should write a loop that does the following:\n* For degree in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] (to get this in python type range(1, 15+1))\n * Build an SFrame of polynomial data of train_data['sqft_living'] at the current degree\n * hint: my_features = poly_data.column_names() gives you a list e.g. ['power_1', 'power_2', 'power_3'] which you might find useful for graphlab.linear_regression.create( features = my_features)\n * Add train_data['price'] to the polynomial SFrame\n * Learn a polynomial regression model to sqft vs price with that degree on TRAIN data\n * Compute the RSS on VALIDATION data (here you will want to use .predict()) for that degree and you will need to make a polynmial SFrame using validation data.\n* Report which degree had the lowest RSS on validation data (remember python indexes from 0)\n\n(Note you can turn off the print out of linear_regression.create() with verbose = False)", "_____no_output_____" ], [ "**Quiz Question: Which degree (1, 2, …, 15) had the lowest RSS on Validation data?**", "_____no_output_____" ], [ "Now that you have chosen the degree of your polynomial using validation data, compute the RSS of this model on TEST data. Report the RSS on your quiz.", "_____no_output_____" ], [ "**Quiz Question: what is the RSS on TEST data for the model with the degree selected from Validation data?**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb64bbf05a46c2fb3b2ba3f249fc0a1145908b4a
64,459
ipynb
Jupyter Notebook
problems/chapter07_k_nearest_neighbors.ipynb
jeantardelli/data-mining-for-business-analytics
d1102e722290b0bbf9159de9236028f44b95fd93
[ "MIT" ]
1
2021-06-21T20:12:25.000Z
2021-06-21T20:12:25.000Z
problems/chapter07_k_nearest_neighbors.ipynb
jeantardelli/data-mining-for-business-analytics
d1102e722290b0bbf9159de9236028f44b95fd93
[ "MIT" ]
null
null
null
problems/chapter07_k_nearest_neighbors.ipynb
jeantardelli/data-mining-for-business-analytics
d1102e722290b0bbf9159de9236028f44b95fd93
[ "MIT" ]
null
null
null
34.880411
683
0.467429
[ [ [ "# Problems", "_____no_output_____" ] ], [ [ "import math\nimport pandas as pd\n\nfrom sklearn import preprocessing\nfrom sklearn.neighbors import NearestNeighbors, KNeighborsClassifier, KNeighborsRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.metrics import accuracy_score, mean_squared_error\n\nfrom dmutils import classification_summary\nfrom dmutils import regression_summary", "_____no_output_____" ] ], [ [ "**1. Calculating Distance with Categorical Predictors.**\n\nThis exercise with a tiny dataset illustrates the calculation of Euclidean distance, and the creation of binary\ndummies. The online education company Statistics.com segments its customers and prospects into three main categories: IT professionals (IT), statisticians (Stat), and other (Other). It also tracks, for each customer, the number of years since first contact (years). Consider the following customers; information about whether they have taken a course or not (the outcome to be predicted) is included:\n\n Customer 1: Stat, 1 year, did not take course\n Customer 2: Other, 1.1 year, took course\n\n**a.** Consider now the following new prospect:\n\n Prospect 1: IT, 1 year\n\nUsing the above information on the two customers and one prospect, create one dataset for all three with the categorical predictor variable transformed into 2 binaries, and a similar dataset with the categorical predictor variable transformed into 3 binaries.", "_____no_output_____" ] ], [ [ "# dataset for all three customers with the categorical predictor (category)\n# transformed into 2 binaries\ntiny_two_cat_dummies_df = pd.DataFrame({\"IT\": [0, 0, 1], \"Stat\": [1, 0, 0],\n \"years_since_first_contact\": [1, 1.1, 1],\n \"course\": [0, 1, None]})\ntiny_two_cat_dummies_df", "_____no_output_____" ], [ "# dataset for all three customers with the categorical predictor (category)\n# transformed into 3 binaries\ntiny_all_cat_dummies_df = pd.DataFrame({\"IT\": [0, 0, 1], \"Stat\": [1, 0, 0], \n \"Other\": [0, 1, 0], \"years_since_first_contact\": [1, 1.1, 1],\n \"course\": [0, 1, None]})\ntiny_all_cat_dummies_df", "_____no_output_____" ] ], [ [ "**b.** For each derived dataset, calculate the Euclidean distance between the prospect and each of the other two customers. (Note: While it is typical to normalize data for k-NN, this is not an iron-clad rule and you may proceed here without normalization.)\n\n- Two categorical dummies (IT/Stat):", "_____no_output_____" ] ], [ [ "predictors = [\"IT\", \"Stat\", \"years_since_first_contact\"]\npd.DataFrame(euclidean_distances(tiny_two_cat_dummies_df[predictors],\n tiny_two_cat_dummies_df[predictors]),\n columns=[\"customer_1\", \"customer_2\", \"customer_3\"],\n index=[\"customer_1\", \"customer_2\", \"customer_3\"])", "_____no_output_____" ] ], [ [ "- Three categorical dummies (IT/Stat/Other):", "_____no_output_____" ] ], [ [ "predictors = [\"IT\", \"Stat\", \"Other\", \"years_since_first_contact\"]\n\npd.DataFrame(euclidean_distances(tiny_all_cat_dummies_df[predictors],\n tiny_all_cat_dummies_df[predictors]),\n columns=[\"customer_1\", \"customer_2\", \"customer_3\"],\n index=[\"customer_1\", \"customer_2\", \"customer_3\"])", "_____no_output_____" ] ], [ [ "We can already see the effect of using two/three dummy variables. For the two dummy variables dataset, the `customer_3` is nearer to `customer_2` than to `customer_1`. 
This is because, with only two dummies, the `Other` category is encoded as all zeros, so it differs from the `IT` pattern in just one dummy coordinate, whereas `Stat` differs in two; the small 0.1-year gap in `years_since_first_contact` barely matters. With three dummies, every pair of categories differs in exactly two dummy coordinates, so the tie is broken by `years_since_first_contact`: `customer_3` is now (marginally) nearer to `customer_1`, whose value is identical, than to `customer_2`. The extra `Other` dummy thus changes which customer is the nearest neighbor.\n\nIn contrast to the situation with statistical models such as regression, all *m* binaries should be created and used with *k*-NN. While mathematically this is redundant, since *m* - 1 dummies contain the same information as *m* dummies, this redundant information does not create the multicollinearity problems that it does for linear models. Moreover, in *k*-NN the use of *m* - 1 dummies can yield different classifications than the use of *m* dummies, and lead to an imbalance in the contribution of the different categories to the model.", "_____no_output_____" ], [ "**c.** Using k-NN with k = 1, classify the prospect as taking or not taking a course using each of the two derived datasets. Does it make a difference whether you use two or three dummies?\n\n- Two dummy variables (IT/Stat)", "_____no_output_____" ] ], [ [ "predictors = [\"IT\", \"Stat\", \"years_since_first_contact\"]\n\n# use NearestNeighbors from scikit-learn to compute knn\nknn = NearestNeighbors(n_neighbors=1)\nknn.fit(tiny_two_cat_dummies_df.loc[:1, predictors])\n\nnew_customer = pd.DataFrame({\"IT\": [1], \"Stat\": [0],\n                             \"years_since_first_contact\": [1]})\n\ndistances, indices = knn.kneighbors(new_customer)\n\n# indices is a list of lists, we are only interested in the first element\ntiny_two_cat_dummies_df.iloc[indices[0], :]", "_____no_output_____" ] ], [ [ "- Three dummy variables (IT/Stat/Other)", "_____no_output_____" ] ], [ [ "predictors = [\"IT\", \"Stat\", \"Other\", \"years_since_first_contact\"]\n\n# use NearestNeighbors from scikit-learn to compute knn\nknn = NearestNeighbors(n_neighbors=1)\nknn.fit(tiny_all_cat_dummies_df.loc[:1, predictors])\n\n# the prospect is an IT professional, so Other must be 0\nnew_customer = pd.DataFrame({\"IT\": [1], \"Stat\": [0], \"Other\": [0],\n                             \"years_since_first_contact\": [1]})\n\ndistances, indices = knn.kneighbors(new_customer)\n\n# indices is a list of lists, we are only interested in the first element\ntiny_all_cat_dummies_df.iloc[indices[0], :]", "_____no_output_____" ] ], [ [ "With *k* = 1 the two representations disagree: using two dummies, the nearest customer is customer 2, who took the course, so the prospect is classified as taking the course; using three dummies, the nearest customer (by a small margin, 1.414 vs. 1.418) is customer 1, who did not take the course. So in this example the choice between two and three dummies does change the classification. This illustrates the point made in item (**b**): although the redundant dummy creates no multicollinearity problems in *k*-NN, the use of *m* - 1 dummies can yield different classifications than the use of *m* dummies, and lead to an imbalance in the contribution of the different categories to the model.", "_____no_output_____" ], [ "**2. Personal Loan Acceptance.** Universal Bank is a relatively young bank growing rapidly in terms of overall customer acquisition. The majority of these customers are liability customers (depositors) with varying sizes of relationship with the bank. The customer base of asset customers (borrowers) is quite small, and the bank is interested in expanding this base rapidly to bring in more loan business. 
In particular, it wants to explore ways of converting its liability customers to personal loan customers (while retaining them as depositors).\n\nA campaign that the bank ran last year for liability customers showed a healthy conversion rate of over 9% success. This has encouraged the retail marketing department to devise smarter campaigns with better target marketing. The goal is to use *k*-NN to predict whether a new customer will accept a loan offer. This will serve as the basis for the design of a new campaign.\n\nThe file `UniversalBank.csv` contains data on 5000 customers. The data include customer demographic information (age, income, etc.), the customer's relationship with the bank (mortgage, securities account, etc.), and the customer response to the last personal loan campaign (Personal Loan). Among these 5000 customers, only 480 (=9.6%) accepted the personal loan that was offered to them in the earlier campaign.\n\nPartition the data into training (60%) and validation (40%) sets.\n\n**a.** Consider the following customer:\n\n    Age = 40, Experience = 10, Income = 84, Family = 2, CCAvg = 2, Education_1 = 0,\n    Education_2 = 1, Education_3 = 0, Mortgage = 0, Securities Account = 0, CD Account = 0,\n    Online = 1, and Credit Card = 1.\n\nPerform a *k*-NN classification with all predictors except ID and ZIP code using k = 1. Remember to transform categorical predictors with more than two categories into dummy variables first. Specify the success class as 1 (loan acceptance), and use the default cutoff value of 0.5. How would this customer be classified?", "_____no_output_____" ] ], [ [ "customer_df = pd.read_csv(\"../datasets/UniversalBank.csv\")\ncustomer_df.head()", "_____no_output_____" ], [ "# define predictors and the outcome for this problem\npredictors = [\"Age\", \"Experience\", \"Income\", \"Family\", \"CCAvg\", \"Education\", \"Mortgage\",\n              \"Securities Account\", \"CD Account\", \"Online\", \"CreditCard\"]\noutcome = \"Personal Loan\"\n\n# before k-NN, we will convert 'Education' to binary dummies.\n# 'Family' remains unchanged\ncustomer_df = pd.get_dummies(customer_df, columns=[\"Education\"], prefix_sep=\"_\")\n\n# update predictors to include the new dummy variables\npredictors = [\"Age\", \"Experience\", \"Income\", \"Family\", \"CCAvg\", \"Education_1\",\n              \"Education_2\", \"Education_3\", \"Mortgage\",\n              \"Securities Account\", \"CD Account\", \"Online\", \"CreditCard\"]\n\n# partition the data into training 60% and validation 40% sets\ntrain_data, valid_data = train_test_split(customer_df, test_size=0.4,\n                                          random_state=26)\n\n# equalize the scales that the various predictors have (standardization)\nscaler = preprocessing.StandardScaler()\nscaler.fit(train_data[predictors])\n\n# transform the full dataset\ncustomer_norm = pd.concat([pd.DataFrame(scaler.transform(customer_df[predictors]),\n                                        columns=[\"z\"+col for col in predictors]),\n                           customer_df[outcome]], axis=1)\n\ntrain_norm = customer_norm.iloc[train_data.index]\nvalid_norm = customer_norm.iloc[valid_data.index]\n\n# new customer; the column names must match the predictors used to fit the scaler\nnew_customer = pd.DataFrame({\"Age\": [40], \"Experience\": [10], \"Income\": [84], \"Family\": [2],\n                             \"CCAvg\": [2], \"Education_1\": [0], \"Education_2\": [1],\n                             \"Education_3\": [0], \"Mortgage\": [0], \"Securities Account\": [0],\n                             \"CD Account\": [0], \"Online\": [1], \"CreditCard\": [1]})\nnew_customer_norm = pd.DataFrame(scaler.transform(new_customer),\n                                 columns=[\"z\"+col for col in predictors])\n\n# use NearestNeighbors from scikit-learn to compute knn\n# using all the dataset 
(training + validation sets) here!\nknn = NearestNeighbors(n_neighbors=1)\nknn.fit(customer_norm.iloc[:, 0:-1])\n\ndistances, indices = knn.kneighbors(new_customer_norm)\n\n# indices is a list of lists, we are only interested in the first element\ncustomer_norm.iloc[indices[0], :]", "_____no_output_____" ] ], [ [ "Since the closest customer did not accept the loan (=0), we can estimate for the new customer a probability of 1 of being a non-borrower (and 0 of being a borrower). Using a simple majority rule is equivalent to setting the cutoff value to 0.5. In the above results, we see that the software assigned the class non-borrower to this record.", "_____no_output_____" ], [ "**b.** What is a choice of *k* that balances between overfitting and ignoring the predictor information?\n\nFirst, we need to remember that a balanced choice greatly depends on the nature of the data. The more complex and irregular the structure of the data, the lower the optimum value of *k*. Typically, values of *k* fall in the range of 1-20, and odd values of *k* avoid ties in binary classification.\n\nIf we choose *k* = 1, we will classify in a way that is very sensitive to the local characteristics of the training data. On the other hand, if we choose a large value of *k*, such as *k* = 14, we would simply predict the most frequent class in the dataset in all cases.\n\nTo find a balance, we examine the accuracy (of predictions in the validation set) that results from different choices of *k* between 1 and 14.", "_____no_output_____" ] ], [ [ "train_X = train_norm[[\"z\"+col for col in predictors]]\ntrain_y = train_norm[outcome]\nvalid_X = valid_norm[[\"z\"+col for col in predictors]]\nvalid_y = valid_norm[outcome]\n\n# Train a classifier for different values of k\nresults = []\nfor k in range(1, 15):\n    knn = KNeighborsClassifier(n_neighbors=k).fit(train_X, train_y)\n    results.append({\"k\": k,\n                    \"accuracy\": accuracy_score(valid_y, knn.predict(valid_X))})\n\n# Convert results to a pandas data frame\nresults = pd.DataFrame(results)\nresults", "_____no_output_____" ] ], [ [ "Based on the above table, we would choose **k = 3** (though **k = 5** appears to be another option too), which maximizes our accuracy in the validation set. Note, however, that now the validation set is used as part of the training process (to set *k*) and does not reflect a true holdout set as before.
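\n\nOne way to reduce the dependence on a single validation split is to choose *k* by cross-validation on the training data. The following is a minimal sketch; the use of scikit-learn's `GridSearchCV` here is our own illustration, not part of the original solution:\n\n```python\nfrom sklearn.model_selection import GridSearchCV\n\n# search k = 1..14 with 5-fold cross-validation on the training data only\nparam_grid = {\"n_neighbors\": list(range(1, 15))}\ngs = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5, scoring=\"accuracy\")\ngs.fit(train_X, train_y)\ngs.best_params_, gs.best_score_\n```\n\n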
Ideally, we would want a third test set to evaluate the performance of the method on data that it did not see.", "_____no_output_____" ], [ "**c.** Show the confusion matrix for the validation data that results from using the best *k*.", "_____no_output_____" ], [ "- k = 3", "_____no_output_____" ] ], [ [ "knn = KNeighborsClassifier(n_neighbors=3).fit(train_X, train_y)\nclassification_summary(y_true=valid_y, y_pred=knn.predict(valid_X))", "Confusion Matrix (Accuracy 0.9555)\n\n Prediction\nActual 0 1\n 0 1779 6\n 1 83 132\n" ] ], [ [ "- k = 5", "_____no_output_____" ] ], [ [ "knn = KNeighborsClassifier(n_neighbors=5).fit(train_X, train_y)\nclassification_summary(y_true=valid_y, y_pred=knn.predict(valid_X))", "Confusion Matrix (Accuracy 0.9525)\n\n Prediction\nActual 0 1\n 0 1781 4\n 1 91 124\n" ] ], [ [ "**d.** Consider the following customer:\n\n    Age = 40, Experience = 10, Income = 84, Family = 2, CCAvg = 2, Education_1 = 0,\n    Education_2 = 1, Education_3 = 0, Mortgage = 0, Securities Account = 0, CD Account = 0,\n    Online = 1 and Credit Card = 1.\n\nClassify the customer using the best *k*.\n\nNote: once *k* is chosen, we rerun the algorithm on the combined training and validation sets in order to generate classifications of new records.", "_____no_output_____" ] ], [ [ "# using the same customer (new_customer_norm) created before\nknn = KNeighborsClassifier(n_neighbors=3).fit(customer_norm.iloc[:, 0:-1],\n                                              customer_norm.loc[:, \"Personal Loan\"])\nknn.predict(new_customer_norm), knn.predict_proba(new_customer_norm)", "_____no_output_____" ], [ "knn = KNeighborsClassifier(n_neighbors=5).fit(customer_norm.iloc[:, 0:-1],\n                                              customer_norm.loc[:, \"Personal Loan\"])\nknn.predict(new_customer_norm), knn.predict_proba(new_customer_norm)", "_____no_output_____" ] ], [ [ "Using the best *k* (=3), the customer is classified as a **non-borrower**; *k* = 5 yields the same classification.", "_____no_output_____" ], [ "**e.** Repartition the data, this time into training, validation, and test sets (50%:30%:20%). Apply the *k*-NN method with the *k* chosen above. Compare the confusion matrix of the test set with that of the training and validation sets. 
Comment on the differences and their reason.", "_____no_output_____" ] ], [ [ "# using the customer_norm computed earlier\n# training: 50%\n# validation: 30% (0.5 * 0.6)\n# test: 20% (0.5 * 0.4)\ntrain_data, temp = train_test_split(customer_df, test_size=0.50, random_state=1)\nvalid_data, test_data = train_test_split(temp, test_size=0.40, random_state=1)\n\ntrain_norm = customer_norm.iloc[train_data.index]\nvalid_norm = customer_norm.iloc[valid_data.index]\ntest_norm = customer_norm.iloc[test_data.index]\n\ntrain_X = train_norm[[\"z\"+col for col in predictors]]\ntrain_y = train_norm[outcome]\nvalid_X = valid_norm[[\"z\"+col for col in predictors]]\nvalid_y = valid_norm[outcome]\ntest_X = test_norm[[\"z\"+col for col in predictors]]\ntest_y = test_norm[outcome]\n\nknn = KNeighborsClassifier(n_neighbors=3).fit(train_X, train_y)\n\nprint(\"Training set\\n\" + \"*\" * 12)\nclassification_summary(y_true=train_y, y_pred=knn.predict(train_X))\nprint(\"\\nValidation set\\n\" + \"*\" * 14)\nclassification_summary(y_true=valid_y, y_pred=knn.predict(valid_X))\nprint(\"\\nTest set\\n\" + \"*\" * 8)\nclassification_summary(y_true=test_y, y_pred=knn.predict(test_X))", "Training set\n************\nConfusion Matrix (Accuracy 0.9760)\n\n Prediction\nActual 0 1\n 0 2258 1\n 1 59 182\n\nValidation set\n**************\nConfusion Matrix (Accuracy 0.9553)\n\n Prediction\nActual 0 1\n 0 1343 6\n 1 61 90\n\nTest set\n********\nConfusion Matrix (Accuracy 0.9600)\n\n Prediction\nActual 0 1\n 0 906 6\n 1 34 54\n" ] ], [ [ "Based on the training, validation, and test matrices, we can see an increase in the error rate from the training set to the validation and test sets. As the model is fit on the training data, it makes intuitive sense that its classifications are most accurate there and somewhat less accurate on the validation/test datasets.\n\nWe can also see that there does not appear to be serious overfitting, given the small error discrepancies among all three matrices, and especially between the validation and test sets.", "_____no_output_____" ], [ "**3. Predicting Housing Median Prices.** The file `BostonHousing.csv` contains information on over 500 census tracts in Boston, where for each tract multiple variables are recorded. The last column (`CAT.MEDV`) was derived from `MEDV`, such that it obtains the value 1 if `MEDV` > 30 and 0 otherwise. Consider the goal of predicting the median value (`MEDV`) of a tract, given the information in the first 12 columns.\n\nPartition the data into training (60%) and validation (40%) sets.\n\n**a.** Perform a *k*-NN prediction with all 12 predictors (ignore the `CAT.MEDV` column), trying values of *k* from 1 to 5. Make sure to normalize the data. What is the best *k*? What does it mean?\n\nThe idea of *k*-NN can readily be extended to predicting a continuous value (as is our aim with multiple linear regression models). The first step of determining neighbors by computing distances remains unchanged. The second step, where a majority vote of the neighbors is used to determine class, is modified such that we take the average outcome value of the *k*-nearest neighbors to determine the prediction. Often, this average is a weighted average, with the weight decreasing with increasing distance from the point at which the prediction is required. In `scikit-learn`, we can use `KNeighborsRegressor` to compute *k*-NN numerical predictions for the validation set.\n\nAnother modification is in the error metric used for determining the \"best k\". Rather than the overall error rate used in classification, RMSE (root-mean-squared error) or another prediction error metric should be used in prediction.
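For reference (written in our own notation, which the exercise itself does not prescribe), the distance-weighted prediction from the *k* nearest neighbors is $\\hat{y} = \\sum_{i=1}^{k} w_i y_{(i)} \\big/ \\sum_{i=1}^{k} w_i$ with $w_i = 1/d_i$, and $\\mathrm{RMSE} = \\sqrt{\\tfrac{1}{n} \\sum_{i=1}^{n} (y_i - \\hat{y}_i)^2}$.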
", "_____no_output_____" ] ], [ [ "housing_df = pd.read_csv(\"../datasets/BostonHousing.csv\")\nhousing_df.head()", "_____no_output_____" ], [ "# define predictors and the outcome for this problem\npredictors = [\"CRIM\", \"ZN\", \"INDUS\", \"CHAS\", \"NOX\", \"RM\", \"AGE\",\n              \"DIS\", \"RAD\", \"TAX\", \"PTRATIO\", \"LSTAT\"]\noutcome = \"MEDV\"\n\n# partition the data into training 60% and validation 40% sets\ntrain_data, valid_data = train_test_split(housing_df, test_size=0.4,\n                                          random_state=26)\n\n# equalize the scales that the various predictors have (standardization)\nscaler = preprocessing.StandardScaler()\nscaler.fit(train_data[predictors])\n\n# transform the full dataset\nhousing_norm = pd.concat([pd.DataFrame(scaler.transform(housing_df[predictors]),\n                                       columns=[\"z\"+col for col in predictors]),\n                          housing_df[outcome]], axis=1)\n\ntrain_norm = housing_norm.iloc[train_data.index]\nvalid_norm = housing_norm.iloc[valid_data.index]\n\n# Perform a k-NN prediction with all 12 predictors,\n# trying values of k from 1 to 5\ntrain_X = train_norm[[\"z\"+col for col in predictors]]\ntrain_y = train_norm[outcome]\nvalid_X = valid_norm[[\"z\"+col for col in predictors]]\nvalid_y = valid_norm[outcome]\n\n# Train a regressor for different values of k,\n# using a distance-weighted average of the neighbors\nresults = []\nfor k in range(1, 6):\n    knn = KNeighborsRegressor(n_neighbors=k, weights=\"distance\").fit(train_X, train_y)\n    y_pred = knn.predict(valid_X)\n    y_res = valid_y - y_pred\n\n    results.append({\"k\": k,\n                    \"mean_error\": sum(y_res) / len(y_res),\n                    \"rmse\": math.sqrt(mean_squared_error(valid_y, y_pred)),\n                    \"mae\": sum(abs(y_res)) / len(y_res)})\n\n# Convert results to a pandas data frame\nresults = pd.DataFrame(results)\nresults", "_____no_output_____" ] ], [ [ "Using the RMSE (root-mean-squared error) as the *k* decision driver, the best *k* is 4. We choose 4 as a way to minimize the errors found in the validation set. Note, however, that now the validation set is used as part of the training process (to set *k*) and does not reflect a true holdout set as before.\n\nNote also that performance on validation data may be overly optimistic when it comes to predicting performance on data that have not been exposed to the model at all. This is because when the validation data are used to select a final model among a set of models, we are selecting based on how well each model performs with those data and therefore may be incorporating some of the random idiosyncrasies (bias) of the validation data into the judgment about the best model.\n\nThe model may still be the best for the validation data among those considered, but it will probably not do as well with unseen data. Therefore, it is useful to evaluate the chosen model on a new test set to get a sense of how well it will perform on new data. 
In addition, one must consider practical issues such as costs of collecting variables, error-proneness, and model complexity in the selection of the final model.", "_____no_output_____" ], [ "**b.** Predict the `MEDV` for a tract with the following information, using the best *k*:\n\n    CRIM: 0.2\n    ZN: 0\n    INDUS: 7\n    CHAS: 0\n    NOX: 0.538\n    RM: 6\n    AGE: 62\n    DIS: 4.7\n    RAD: 4\n    TAX: 307\n    PTRATIO: 21\n    LSTAT: 10\n\nOnce *k* is chosen, we rerun the algorithm on the combined training and validation sets in order to generate predictions for new records.", "_____no_output_____" ] ], [ [ "# new house to be predicted; before predicting the MEDV we normalize its values\nnew_house = pd.DataFrame({\"CRIM\": [0.2], \"ZN\": [0], \"INDUS\": [7], \"CHAS\": [0],\n                          \"NOX\": [0.538], \"RM\": [6], \"AGE\": [62], \"DIS\": [4.7],\n                          \"RAD\": [4], \"TAX\": [307], \"PTRATIO\": [21], \"LSTAT\": [10]})\nnew_house_norm = pd.DataFrame(scaler.transform(new_house),\n                              columns=[\"z\"+col for col in predictors])\n\n# retrain the knn using the best k and all data\nknn = KNeighborsRegressor(n_neighbors=4, weights=\"distance\").fit(housing_norm[[\"z\"+col for col in predictors]],\n                                                                 housing_norm[outcome])\nknn.predict(new_house_norm)", "_____no_output_____" ] ], [ [ "The new house has a predicted value of 19.6 (in \\\\$1000s).", "_____no_output_____" ], [ "**c.** If we used the above *k*-NN algorithm to score the training data, what would be the error of the training set?", "_____no_output_____" ], [ "It would be zero (or near zero). With `weights=\"distance\"`, each training record is its own nearest neighbor at distance zero, so the model reproduces the training values exactly; more generally, we are using the same data both for fitting the model and for estimating its error.", "_____no_output_____" ] ], [ [ "# Using the previously trained model (all data, k=4)\ny_pred = knn.predict(train_X)\ny_res = train_y - y_pred\n\nresults = {\"k\": 4,\n           \"mean_error\": sum(y_res) / len(y_res),\n           \"rmse\": math.sqrt(mean_squared_error(train_y, y_pred)),\n           \"mae\": sum(abs(y_res)) / len(y_res)}\n\n# Convert results to a pandas data frame\nresults = pd.DataFrame(results, index=[0])\nresults", "_____no_output_____" ] ], [ [ "**d.** Why is the validation data error overly optimistic compared to the error rate when applying this *k*-NN predictor to new data?", "_____no_output_____" ], [ "When we use the validation data to assess multiple models and then choose the model that performs best with the validation data, we again encounter another (lesser) facet of the overfitting problem: chance aspects of the validation data happen to match the chosen model better than they match other models. In other words, by using the validation data to choose one of several models, we make the performance of the chosen model on the validation data look overly optimistic.\n\nPut differently, a single training/validation split can be biased, so cross-validation would give a better approximation of the error on new data in this scenario.", "_____no_output_____" ], [ "**e.** If the purpose is to predict `MEDV` for several thousands of new tracts, what would be the disadvantage of using *k*-NN prediction? List the operations that the algorithm goes through in order to produce each prediction.", "_____no_output_____" ], [ "The disadvantage of *k*-NN in this case is its \"lazy\" nature: no model is built up front, so all the work happens at prediction time, which makes scoring many new cases slow. 
\n\nBasically, the algorithm would need to perform the following operations, spelled out in the sketch below, for every single case whose `MEDV` value is to be predicted:\n\n- Normalize the new case's variables using the means and standard deviations of the training data;\n- Calculate the distance of this case from all the training records;\n- Sort the training records by the calculated distances;\n- Average the outcome values of the *k* nearest neighbors (here, a distance-weighted average) to obtain the prediction.\n\nAs mentioned, this whole process would be repeated for each of the thousands of new tracts, which is computationally expensive and time consuming.
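\n\nA minimal sketch of one such prediction, written with `numpy` as our own illustration (it is not part of the original exercise), reusing the normalized Boston training data and the `new_house_norm` record from parts (a) and (b):\n\n```python\nimport numpy as np\n\ndef knn_predict_one(new_case, X_train, y_train, k=4):\n    # the new case is assumed to be already normalized with the training scaler\n    x = np.asarray(new_case, dtype=float)\n    X = np.asarray(X_train, dtype=float)\n    # distance of this case from all training records\n    dists = np.sqrt(((X - x) ** 2).sum(axis=1))\n    # sort by distance and keep the k nearest neighbors\n    nearest = np.argsort(dists)[:k]\n    # distance-weighted average of the neighbors' outcomes\n    w = 1 / np.maximum(dists[nearest], 1e-12)\n    return (w * np.asarray(y_train)[nearest]).sum() / w.sum()\n\nknn_predict_one(new_house_norm.iloc[0], train_X, train_y)\n```", "_____no_output_____" ] ] ]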
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]