Column schema — name: dtype (observed range)

hexsha: stringlengths (40 to 40)
size: int64 (6 to 14.9M)
ext: stringclasses (1 value)
lang: stringclasses (1 value)
max_stars_repo_path: stringlengths (6 to 260)
max_stars_repo_name: stringlengths (6 to 119)
max_stars_repo_head_hexsha: stringlengths (40 to 41)
max_stars_repo_licenses: list
max_stars_count: int64 (1 to 191k)
max_stars_repo_stars_event_min_datetime: stringlengths (24 to 24)
max_stars_repo_stars_event_max_datetime: stringlengths (24 to 24)
max_issues_repo_path: stringlengths (6 to 260)
max_issues_repo_name: stringlengths (6 to 119)
max_issues_repo_head_hexsha: stringlengths (40 to 41)
max_issues_repo_licenses: list
max_issues_count: int64 (1 to 67k)
max_issues_repo_issues_event_min_datetime: stringlengths (24 to 24)
max_issues_repo_issues_event_max_datetime: stringlengths (24 to 24)
max_forks_repo_path: stringlengths (6 to 260)
max_forks_repo_name: stringlengths (6 to 119)
max_forks_repo_head_hexsha: stringlengths (40 to 41)
max_forks_repo_licenses: list
max_forks_count: int64 (1 to 105k)
max_forks_repo_forks_event_min_datetime: stringlengths (24 to 24)
max_forks_repo_forks_event_max_datetime: stringlengths (24 to 24)
avg_line_length: float64 (2 to 1.04M)
max_line_length: int64 (2 to 11.2M)
alphanum_fraction: float64 (0 to 1)
cells: list
cell_types: list
cell_type_groups: list
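The listing above is the per-file column schema for this dump: each entry gives a column name, its dtype, and the observed value range; the records that follow carry the field values in the same order. A minimal sketch of loading and filtering such a dump is shown below; the Parquet file name, the use of pandas, and the filter thresholds are assumptions made for illustration, not part of the dump itself.

```python
# Hedged sketch: load a dump with this schema and inspect a readable subset of columns.
# "notebooks.parquet" is a hypothetical file name; the real dump may ship as JSONL or Parquet shards.
import pandas as pd

df = pd.read_parquet("notebooks.parquet")

# Example filter: notebooks with at least one star and mostly alphanumeric content.
subset = df[(df["max_stars_count"].fillna(0) >= 1) & (df["alphanum_fraction"] > 0.5)]

# Each row also carries the notebook body in `cells`, with per-cell labels in
# `cell_types` and `cell_type_groups`.
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size",
              "avg_line_length", "max_line_length"]].head())
```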
cb35bd52a2347145cb2a4ef4f41695486ced781c
5,051
ipynb
Jupyter Notebook
graphics/population_progress_R_ladder.ipynb
crscardellino/dnnvsd
2de14f05b71199be1b0ee601287243ea25f92cba
[ "BSD-3-Clause" ]
3
2016-03-10T21:03:28.000Z
2018-04-09T03:53:58.000Z
graphics/population_progress_R_ladder.ipynb
crscardellino/dnnwsd
2de14f05b71199be1b0ee601287243ea25f92cba
[ "BSD-3-Clause" ]
null
null
null
graphics/population_progress_R_ladder.ipynb
crscardellino/dnnwsd
2de14f05b71199be1b0ee601287243ea25f92cba
[ "BSD-3-Clause" ]
null
null
null
32.171975
130
0.535142
[ [ [ "# Require the packages\nrequire(ggplot2)\nlibrary(repr)\n\noptions(repr.plot.width=15, repr.plot.height=4.5)", "_____no_output_____" ], [ "ladder_results_dir <- \"../resources/results/ladder_results_sensem/140\"\nbootstrap_results_dir <- \"../resources/results/results_semisupervised_sensem_7k/140\"\nlemma_data <- data.frame(iteration=integer(), sense=character(), count=integer(), experiment=character())\n\nfor(exp in c(\"bow_logreg\", \"wordvec_mlp_2_0\", \"wordvecpos_mlp_2_0\")) {\n data <- read.csv(paste(bootstrap_results_dir, exp, \"targets_distribution\", sep=\"/\"), header = F)\n names(data) <- c(\"iteration\", \"sense\", \"count\")\n data$experiment <- exp\n lemma_data <- rbind(lemma_data, data)\n}\n\nfor(exp in c(\"vec\", \"vecpos\")) {\n data <- read.csv(paste(ladder_results_dir, exp, \"population_growth\", sep=\"/\"), header = F)\n names(data) <- c(\"iteration\", \"sense\", \"count\")\n data$experiment <- exp\n lemma_data <- rbind(lemma_data, data)\n}\n\nlemma_data$experiment <- factor(lemma_data$experiment, levels=c(\"bow_logreg\", \"wordvec_mlp_2_0\",\n \"wordvecpos_mlp_2_0\", \"vec\", \"vecpos\"))\nlevels(lemma_data$experiment) <- c(\"Naive Bootstrap\\nBag-of-Words\\n& Logistic Regression\",\n \"Naive Bootstrap\\nWord Embeddings\\n& Multilayer Perceptron\",\n \"Naive Bootstrap\\nWord Embeddings\\nand PoS\\n& Multilayer Perceptron\",\n \"Ladder Networks\\nWord Embeddings\\n& Multilayer Perceptron\",\n \"Ladder Networks\\nWord Embeddings\\nand PoS\\n& Multilayer Perceptron\")", "_____no_output_____" ], [ "p <- ggplot(lemma_data, aes(x=iteration, y=count, fill=sense))\np <- p + facet_wrap(~ experiment, scales = 'free', ncol=5)\np <- p + geom_area(position=\"fill\")\np <- p + scale_x_continuous(breaks=seq(0, 20, 2))\np <- p + scale_y_continuous(breaks=seq(0, 1, 0.1), labels=seq(0, 100, 10))\np <- p + labs(title=\"Population percentage per sense for lemma \\\"limitar\\\"\", y=\"Percent\", x=\"Iteration Number\")\np <- p + scale_fill_brewer(name=\"Sense\", palette = \"Accent\", direction = 1,\n breaks=c(\"limitar-04\", \"limitar-03\", \"limitar-02\", \"limitar-01\"))\np <- p + theme(\n plot.title=element_text(size=15, face=\"bold\", margin=margin(10, 0, 10, 0), vjust=1, lineheight=0.6),\n strip.text.x=element_text(size=10),\n axis.title.x=element_text(size=12, margin=margin(10, 0, 0, 0)),\n axis.title.y=element_text(size=12, margin=margin(0, 10, 0, 0)),\n legend.title=element_text(face=\"bold\", size=13),\n legend.text=element_text(size=11),\n legend.key.height=unit(1.5,\"line\")\n )\np", "_____no_output_____" ], [ "# Save the plot\nggsave(\"~/Google Drive/Posgrado/WSD with WE/papers/esslli/plots/limitar.png\", plot=p, width=15, height=4.5)", "_____no_output_____" ], [ "library(grid)\nlibrary(gridExtra)", "_____no_output_____" ], [ "options(repr.plot.width=10.5, repr.plot.height=18)\nggsave(\"plots/population_progres.png\", plot=grid.arrange(p1, p2, p3, p4, ncol = 1), width=10.5, height=18)", "_____no_output_____" ], [ "levels(lemma_data$experiment)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb35d3748b04322bc6b747a35d8e44040dfc626c
34,748
ipynb
Jupyter Notebook
study_roadmaps/4_image_classification_zoo/Classifier - Weather type classification.ipynb
take2rohit/monk_v1
9c567bf2c8b571021b120d879ba9edf7751b9f92
[ "Apache-2.0" ]
542
2019-11-10T12:09:31.000Z
2022-03-28T11:39:07.000Z
study_roadmaps/4_image_classification_zoo/Classifier - Weather type classification.ipynb
take2rohit/monk_v1
9c567bf2c8b571021b120d879ba9edf7751b9f92
[ "Apache-2.0" ]
117
2019-11-12T09:39:24.000Z
2022-03-12T00:20:41.000Z
study_roadmaps/4_image_classification_zoo/Classifier - Weather type classification.ipynb
take2rohit/monk_v1
9c567bf2c8b571021b120d879ba9edf7751b9f92
[ "Apache-2.0" ]
246
2019-11-09T21:53:24.000Z
2022-03-29T00:57:07.000Z
60.221837
14,501
0.779728
[ [ [ "<a href=\"https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/4_image_classification_zoo/Classifier%20-%20Weed%20Species%20Classification%20-%20Hyperparameter%20Tuning%20using%20Monk.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Table of contents\n\n\n## Install Monk\n\n\n## Using pretrained model for classifying weather type based on images\n\n\n## Training a classifier from scratch", "_____no_output_____" ], [ "<a id='0'></a>\n# Install Monk", "_____no_output_____" ], [ "## Using pip (Recommended)\n\n - colab (gpu) \n - All bakcends: `pip install -U monk-colab`\n \n\n - kaggle (gpu) \n - All backends: `pip install -U monk-kaggle`\n \n\n - cuda 10.2\t\n - All backends: `pip install -U monk-cuda102`\n - Gluon bakcned: `pip install -U monk-gluon-cuda102`\n\t - Pytorch backend: `pip install -U monk-pytorch-cuda102`\n - Keras backend: `pip install -U monk-keras-cuda102`\n \n\n - cuda 10.1\t\n - All backend: `pip install -U monk-cuda101`\n\t - Gluon bakcned: `pip install -U monk-gluon-cuda101`\n\t - Pytorch backend: `pip install -U monk-pytorch-cuda101`\n\t - Keras backend: `pip install -U monk-keras-cuda101`\n \n\n - cuda 10.0\t\n - All backend: `pip install -U monk-cuda100`\n\t - Gluon bakcned: `pip install -U monk-gluon-cuda100`\n\t - Pytorch backend: `pip install -U monk-pytorch-cuda100`\n\t - Keras backend: `pip install -U monk-keras-cuda100`\n \n\n - cuda 9.2\t\n - All backend: `pip install -U monk-cuda92`\n\t - Gluon bakcned: `pip install -U monk-gluon-cuda92`\n\t - Pytorch backend: `pip install -U monk-pytorch-cuda92`\n\t - Keras backend: `pip install -U monk-keras-cuda92`\n \n\n - cuda 9.0\t\n - All backend: `pip install -U monk-cuda90`\n\t - Gluon bakcned: `pip install -U monk-gluon-cuda90`\n\t - Pytorch backend: `pip install -U monk-pytorch-cuda90`\n\t - Keras backend: `pip install -U monk-keras-cuda90`\n \n\n - cpu \t\t\n - All backend: `pip install -U monk-cpu`\n\t - Gluon bakcned: `pip install -U monk-gluon-cpu`\n\t - Pytorch backend: `pip install -U monk-pytorch-cpu`\n\t - Keras backend: `pip install -U monk-keras-cpu`", "_____no_output_____" ], [ "## Install Monk Manually (Not recommended)\n \n### Step 1: Clone the library\n - git clone https://github.com/Tessellate-Imaging/monk_v1.git\n \n \n \n \n### Step 2: Install requirements \n - Linux\n - Cuda 9.0\n - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt`\n - Cuda 9.2\n - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt`\n - Cuda 10.0\n - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt`\n - Cuda 10.1\n - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt`\n - Cuda 10.2\n - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt`\n - CPU (Non gpu system)\n - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt`\n \n \n - Windows\n - Cuda 9.0 (Experimental support)\n - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt`\n - Cuda 9.2 (Experimental support)\n - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt`\n - Cuda 10.0 (Experimental support)\n - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt`\n - Cuda 10.1 (Experimental support)\n - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt`\n - Cuda 10.2 (Experimental support)\n - `cd 
monk_v1/installation/Windows && pip install -r requirements_cu102.txt`\n - CPU (Non gpu system)\n - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt`\n \n \n - Mac\n - CPU (Non gpu system)\n - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt`\n \n \n - Misc\n - Colab (GPU)\n - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt`\n - Kaggle (GPU)\n - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt`\n \n \n \n### Step 3: Add to system path (Required for every terminal or kernel run)\n - `import sys`\n - `sys.path.append(\"monk_v1/\");`", "_____no_output_____" ], [ "# Used trained classifier for demo", "_____no_output_____" ] ], [ [ "#Using pytorch backend \n\n# When installed using pip\nfrom monk.pytorch_prototype import prototype\n\n\n# When installed manually (Uncomment the following)\n#import os\n#import sys\n#sys.path.append(\"monk_v1/\");\n#sys.path.append(\"monk_v1/monk/\");\n#from monk.pytorch_prototype import prototype", "_____no_output_____" ], [ "# Download trained weights", "_____no_output_____" ], [ "! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MkDsHcgqtnt3ZzfwYTuEsCd4buDSe9-g' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1MkDsHcgqtnt3ZzfwYTuEsCd4buDSe9-g\" -O cls_weather_trained.zip && rm -rf /tmp/cookies.txt", "_____no_output_____" ], [ "! unzip -qq cls_weather_trained.zip ", "_____no_output_____" ], [ "ls workspace/Project-Weather", "\u001b[0m\u001b[01;34mPytorch-Wide-Resnet101\u001b[0m/ \u001b[01;34mPytorch-Wide-Resnet50\u001b[0m/\r\n" ], [ "# Load project in inference mode\n\ngtf = prototype(verbose=1);\ngtf.Prototype(\"Project-Weather\", \"Pytorch-Wide-Resnet50\", eval_infer=True);\n\n#Other trained models - uncomment \n#gtf.Prototype(\"Project-Weather\", \"Pytorch-Wide-Resnet101\", eval_infer=True);", "Pytorch Version: 1.4.0\n\nModel Details\n Loading model - workspace/Project-Weather/Pytorch-Wide-Resnet50/output/models/final\n Model loaded!\n\nExperiment Details\n Project: Project-Weather\n Experiment: Pytorch-Wide-Resnet50\n Dir: /home/ubuntu/Desktop/cls/workspace/Project-Weather/Pytorch-Wide-Resnet50/\n\n" ], [ "# Infer", "_____no_output_____" ], [ "img_name = \"workspace/test/test1.jpg\"\npredictions = gtf.Infer(img_name=img_name);\nfrom IPython.display import Image\nImage(filename=img_name) ", "Prediction\n Image name: workspace/test/test1.jpg\n Predicted class: cloudy\n Predicted score: 0.9999961853027344\n\n" ], [ "img_name = \"workspace/test/test2.jpg\"\npredictions = gtf.Infer(img_name=img_name);\nfrom IPython.display import Image\nImage(filename=img_name)", "Prediction\n Image name: workspace/test/test2.jpg\n Predicted class: shiny\n Predicted score: 0.9999122619628906\n\n" ] ], [ [ "# Training custom classifier from scratch", "_____no_output_____" ], [ "## Dataset\n - Credits: https://data.mendeley.com/datasets/4drtyfjtfy/1", "_____no_output_____" ], [ "## Download", "_____no_output_____" ] ], [ [ "! 
wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1pxe_AmHYXwpTMRkMVwGeFgHS8ZpkzwMJ' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1pxe_AmHYXwpTMRkMVwGeFgHS8ZpkzwMJ\" -O weather.zip && rm -rf /tmp/cookies.txt", "_____no_output_____" ], [ "! unzip -qq weather.zip", "_____no_output_____" ] ], [ [ "## Training", "_____no_output_____" ] ], [ [ "# Using mxnet-gluon backend \n#from monk.gluon_prototype import prototype\n\n# For pytorch backend\nfrom monk.pytorch_prototype import prototype\n\n# For Keras backend\n#from monk.keras_prototype import prototype", "_____no_output_____" ], [ "# Create Project and Experiment\n\ngtf = prototype(verbose=1);\ngtf.Prototype(\"Project-Weather\", \"Pytorch-Wide-Resnet50\");", "_____no_output_____" ], [ "gtf.Default(dataset_path=\"weather/train\",\n model_name=\"wide_resnet50_2\", \n freeze_base_network=False,\n num_epochs=2);", "_____no_output_____" ] ], [ [ "### How to change hyper parameters and models \n - Docs - https://github.com/Tessellate-Imaging/monk_v1#4\n - Examples - https://github.com/Tessellate-Imaging/monk_v1/tree/master/study_roadmaps/1_getting_started_roadmap", "_____no_output_____" ] ], [ [ "gtf.update_batch_size(8);\n\n# Very important to reload post updates\ngtf.Reload();", "_____no_output_____" ], [ "#Start Training\ngtf.Train();\n\n#Read the training summary generated once you run the cell and training is completed", "_____no_output_____" ] ], [ [ "## Validating on the same dataset", "_____no_output_____" ] ], [ [ "# Using mxnet-gluon backend \n#from monk.gluon_prototype import prototype\n\n# For pytorch backend\nfrom monk.pytorch_prototype import prototype\n\n# For Keras backend\n#from monk.keras_prototype import prototype", "_____no_output_____" ], [ "# Create Project and Experiment\n\ngtf = prototype(verbose=1);\ngtf.Prototype(\"Project-Weather\", \"Pytorch-Wide-Resnet50\", eval_infer=True);", "_____no_output_____" ], [ "# Load dataset for validaion\ngtf.Dataset_Params(dataset_path=\"weather/train\");\ngtf.Dataset();", "_____no_output_____" ], [ "# Run validation\naccuracy, class_based_accuracy = gtf.Evaluate();", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb35e3f695933bdb8503dd56f8cdcab71eb1bb46
154,345
ipynb
Jupyter Notebook
k-means clustering.ipynb
jonchar/ml-python
3fc041d9eda5829588e3e1b83a392912dbac0a0a
[ "MIT" ]
13
2017-01-07T19:30:54.000Z
2022-01-13T01:26:06.000Z
k-means clustering.ipynb
jonchar/ml-python
3fc041d9eda5829588e3e1b83a392912dbac0a0a
[ "MIT" ]
1
2019-02-13T10:20:17.000Z
2019-03-23T04:13:53.000Z
k-means clustering.ipynb
jonchar/ml-python
3fc041d9eda5829588e3e1b83a392912dbac0a0a
[ "MIT" ]
9
2017-01-19T08:20:18.000Z
2021-08-01T18:14:48.000Z
635.164609
76,784
0.937316
[ [ [ "## $k$-means clustering: An example implementation in Python 3 with numpy and matplotlib.", "_____no_output_____" ], [ "The [$k$-means](https://en.wikipedia.org/wiki/K-means_clustering) algorithm is an unsupervised learning method for identifying clusters within a dataset. The $k$ represents the number of clusters to be identified, which is specified by the user before starting the algorithm.\n\nThe algorithm goes like this:\n\n* Initialize the $k$ cluster centroids.\n* Repeat:\n 1. Cluster assignment: Assign each data point to the nearest cluster centroid.\n 2. Cluster updating: For each cluster centroid, average the locations of it's corresponding points and re-assign the centroid to that location.\n \nThe last two steps are repeated until stopping criteria are met such as a maximum number of iterations or the centroid velocity drops below a threshold. The results of the algorithm can be highly dependent on the cluster initialization step, especially when there are a large number of clusters and data points. Performance be improved in a few different ways such as running it multiple times and averaging the results or using different initalization methods such as [$k$-means plus plus](https://en.wikipedia.org/wiki/K-means%2B%2B). Here, we will initialize the $k$ cluster centroids by selecting $k$ random data points.\n\nMathematically, the cluster assignment step can be written as:\n\n$c^{(i)} = argmin_{k} \\left\\lVert x^{(i)} - \\mu_k\\right\\rVert^2$\n\nwhere $c^{(i)}$ is the centroid closest to sample $x^{(i)}$ and $\\mu_k$ represents the $k$-th centroid.\n\nSimilarly, the cluster update step can be written as:\n\n$\\mu_k = \\frac{1}{n}[x^{(k_1)}+x^{(k_2)}+...+x^{(k_n)}]$\n\nwhere, again $\\mu_k$ represents the $k$-th centroid and $x^{(k_n)}$ are the training examples assigned to that centroid.", "_____no_output_____" ], [ "First, some imports.", "_____no_output_____" ] ], [ [ "import numpy as np\nnp.random.seed(0)\nimport matplotlib.pyplot as plt\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nfrom sklearn.datasets import make_blobs", "_____no_output_____" ] ], [ [ "Next we'll define some functions based on steps in the K-means algorithm.", "_____no_output_____" ] ], [ [ "def initialize_clusters(points, k):\n \"\"\"Initializes clusters as k randomly selected points from points.\"\"\"\n return points[np.random.randint(points.shape[0], size=k)]\n \n# Function for calculating the distance between centroids\ndef get_distances(centroid, points):\n \"\"\"Returns the distance the centroid is from each data point in points.\"\"\"\n return np.linalg.norm(points - centroid, axis=1)", "_____no_output_____" ] ], [ [ "Here we'll generate some data using [scikit-learn](http://scikit-learn.org)'s [`make_blobs`](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html#sklearn.datasets.make_blobs) function. For this example we'll generate a dataset with three clusters. Using this function will give us access to the actual class labels for each group so we can assess accuracy later if we would like to. 
Normally when using K-means, you won't know the cluster assignments or the number of clusters in the dataset!", "_____no_output_____" ] ], [ [ "# Generate dataset\nX, y = make_blobs(centers=3, n_samples=500, random_state=1)\n\n# Visualize\nfig, ax = plt.subplots(figsize=(4,4))\nax.scatter(X[:,0], X[:,1], alpha=0.5)\nax.set_xlabel('$x_1$')\nax.set_ylabel('$x_2$');", "_____no_output_____" ] ], [ [ "Now let's implement K-means using k = 3.", "_____no_output_____" ] ], [ [ "k = 3\nmaxiter = 50\n\n# Initialize our centroids by picking random data points\ncentroids = initialize_clusters(X, k)\n\n# Initialize the vectors in which we will store the\n# assigned classes of each data point and the\n# calculated distances from each centroid\nclasses = np.zeros(X.shape[0], dtype=np.float64)\ndistances = np.zeros([X.shape[0], k], dtype=np.float64)\n\n# Loop for the maximum number of iterations\nfor i in range(maxiter):\n \n # Assign all points to the nearest centroid\n for i, c in enumerate(centroids):\n distances[:, i] = get_distances(c, X)\n \n # Determine class membership of each point\n # by picking the closest centroid\n classes = np.argmin(distances, axis=1)\n \n # Update centroid location using the newly\n # assigned data point classes\n for c in range(k):\n centroids[c] = np.mean(X[classes == c], 0)", "_____no_output_____" ] ], [ [ "Once we've finished running the algorithm, we can visualize the classified data and our calculated centroids locations.", "_____no_output_____" ] ], [ [ "group_colors = ['skyblue', 'coral', 'lightgreen']\ncolors = [group_colors[j] for j in classes]\n\nfig, ax = plt.subplots(figsize=(4,4))\nax.scatter(X[:,0], X[:,1], color=colors, alpha=0.5)\nax.scatter(centroids[:,0], centroids[:,1], color=['blue', 'darkred', 'green'], marker='o', lw=2)\nax.set_xlabel('$x_0$')\nax.set_ylabel('$x_1$');", "_____no_output_____" ] ], [ [ "Look's pretty good! In another post I'll discuss some limitations of the $k$-means algorithm and assess what happens when $k$ is chosen to be greater than or less than the actual number of clusters in your dataset.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb35eec6c5c9cc9579482826aaf86515c6fbb2c1
122,476
ipynb
Jupyter Notebook
.ipynb_checkpoints/veronica_le_all_cols-checkpoint.ipynb
vleong1/modeling_earthquake_damage
a76ffaacde8232c36854c45341104c6ef8fb17a5
[ "CC0-1.0" ]
null
null
null
.ipynb_checkpoints/veronica_le_all_cols-checkpoint.ipynb
vleong1/modeling_earthquake_damage
a76ffaacde8232c36854c45341104c6ef8fb17a5
[ "CC0-1.0" ]
null
null
null
.ipynb_checkpoints/veronica_le_all_cols-checkpoint.ipynb
vleong1/modeling_earthquake_damage
a76ffaacde8232c36854c45341104c6ef8fb17a5
[ "CC0-1.0" ]
1
2021-06-02T15:53:56.000Z
2021-06-02T15:53:56.000Z
84.175945
50,208
0.694667
[ [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder, OrdinalEncoder\nfrom sklearn.pipeline import make_pipeline\nfrom category_encoders import OneHotEncoder\nfrom sklearn.metrics import f1_score, precision_score, recall_score\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.experimental import enable_hist_gradient_boosting\nfrom sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, HistGradientBoostingClassifier, RandomForestClassifier, BaggingClassifier, ExtraTreesClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.feature_selection import RFE\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)", "_____no_output_____" ], [ "# read in data\n\ntrain_values = pd.read_csv('data/Proj5_train_values.csv')\ntrain_labels = pd.read_csv('data/Proj5_train_labels.csv')", "_____no_output_____" ] ], [ [ "#### Label Encode", "_____no_output_____" ] ], [ [ "# Label Encode categorical features\n\nle = LabelEncoder()\ntrain_enc = train_values.apply(le.fit_transform)\ntrain_enc", "_____no_output_____" ] ], [ [ "## Modeling with 10% of data\n- For faster processing", "_____no_output_____" ] ], [ [ "# grab first 10% of rows\n\ntrain_enc_10pct = train_enc.head(int(len(train_values) * 0.1))\ntrain_labels_10pct = train_labels.head(int(len(train_labels) * 0.1))", "_____no_output_____" ] ], [ [ "#### Baseline + TTS", "_____no_output_____" ] ], [ [ "# baseline model\n\ntrain_labels_10pct['damage_grade'].value_counts(normalize = True)", "_____no_output_____" ], [ "# establish X + y\n\nX = train_enc_10pct.drop(columns = ['building_id'])\ny = train_labels_10pct['damage_grade']", "_____no_output_____" ], [ "# tts\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y, random_state = 123)", "_____no_output_____" ] ], [ [ "#### Modeling", "_____no_output_____" ] ], [ [ "# Random Forest\n\npipe_forest = make_pipeline(StandardScaler(), RandomForestClassifier(n_jobs = -1, random_state = 123))\n\nparams = {'randomforestclassifier__max_depth' : [6, 7, 8, 9, 10, 11],\n 'randomforestclassifier__max_features' : [15, 20, 30, 35]}\n\ngrid_forest = GridSearchCV(pipe_forest, param_grid = params)\n\ngrid_forest.fit(X_train, y_train)\n\nprint(f'Train Score: {grid_forest.score(X_train, y_train)}')\nprint(f'Test Score: {grid_forest.score(X_test, y_test)}')\n\ngrid_forest.best_params_", "Train Score: 0.7793809158352519\nTest Score: 0.688871834228703\n" ], [ "# grab feature importances\n\npipe_forest_fi = make_pipeline(StandardScaler(), RandomForestClassifier(n_jobs = -1, random_state = 123, max_depth = 11, max_features = 15))\npipe_forest_fi.fit(X_train, y_train)", "_____no_output_____" ], [ "forest_fi_df = pd.DataFrame({'importances': pipe_forest_fi.named_steps['randomforestclassifier'].feature_importances_, \n 'name': X_train.columns}).sort_values('importances', ascending = False)\nforest_fi_df[:5]", "_____no_output_____" ], [ "# test to ensure X_train.columns + feature_importances are same length\n\nprint(len(X_train.columns))\nprint(len(pipe_forest_fi.named_steps['randomforestclassifier'].feature_importances_))", "38\n38\n" ], [ "# Extra Trees\n\npipe_trees = make_pipeline(StandardScaler(), ExtraTreesClassifier(n_jobs = -1, random_state = 123))\n\nparams = 
{'extratreesclassifier__max_depth' : [6, 7, 8, 9, 10, 11],\n 'extratreesclassifier__max_features' : [15, 20, 30, 35]}\n\ngrid_trees = GridSearchCV(pipe_trees, param_grid = params)\n\ngrid_trees.fit(X_train, y_train)\n\nprint(f'Train Score: {grid_trees.score(X_train, y_train)}')\nprint(f'Test Score: {grid_trees.score(X_test, y_test)}')\n\ngrid_trees.best_params_", "Train Score: 0.7532872857508314\nTest Score: 0.6785878741366078\n" ], [ "# grab feature importances\n\npipe_trees_fi = make_pipeline(StandardScaler(), ExtraTreesClassifier(n_jobs = -1, random_state = 123, max_depth = 6, max_features = 35))\npipe_trees_fi.fit(X_train, y_train)", "_____no_output_____" ], [ "trees_fi_df = pd.DataFrame({'importances': pipe_trees_fi.named_steps['extratreesclassifier'].feature_importances_, \n 'name': X_train.columns}).sort_values('importances', ascending = False)\ntrees_fi_df[:5]", "_____no_output_____" ], [ "# test to ensure X_train.columns + feature_importances are same length\n\nprint(len(X_train.columns))\nprint(len(pipe_trees_fi.named_steps['extratreesclassifier'].feature_importances_))", "38\n38\n" ], [ "earthquake = pd.merge(train_values.head(int(len(train_values) * 0.1)), train_labels.head(int(len(train_labels) * 0.1)), on = 'building_id')\nearthquake", "_____no_output_____" ], [ "sns.scatterplot(x = 'area_percentage', y = 'height_percentage', data = earthquake, hue = 'damage_grade');", "_____no_output_____" ], [ "sns.boxplot(x = 'damage_grade', y = 'height_percentage', data = earthquake);", "_____no_output_____" ], [ "# referenced https://seaborn.pydata.org/generated/seaborn.catplot.html\nsns.catplot(x = \"foundation_type\", data = earthquake, kind = \"count\", hue = 'damage_grade').set(xlabel = 'Roof Type', ylabel = 'Frequency');\nplt.suptitle('Damage Grade by Roof Type', y = 1.05);", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb35f44ab1ceecd6779d8bb3cefd81a93502ae67
691
ipynb
Jupyter Notebook
PDA/jupyter/jupyterNotebooks/otherpath/test_import_nb_Copy1.ipynb
EMbeDS-education/StatsAndComputing20212022
971e418882b206a1b5606d15d222cef1a5a04834
[ "MIT" ]
2
2022-02-24T09:35:15.000Z
2022-03-14T20:34:33.000Z
PDA/jupyter/jupyterNotebooks/otherpath/test_import_nb_Copy1.ipynb
GeorgiosArg/StatsAndComputing20212022
798d39af6aa5ef5eef49d5d6f43191351e8a49f3
[ "MIT" ]
null
null
null
PDA/jupyter/jupyterNotebooks/otherpath/test_import_nb_Copy1.ipynb
GeorgiosArg/StatsAndComputing20212022
798d39af6aa5ef5eef49d5d6f43191351e8a49f3
[ "MIT" ]
2
2022-03-15T21:40:35.000Z
2022-03-26T14:51:31.000Z
19.194444
68
0.565847
[ [ [ "def my_function_nb_otherpath():\n print('Successfully imported from notebook in otherpath!')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
cb35f5709d8e4d6555b6d9272dd8e13049d007e4
20,092
ipynb
Jupyter Notebook
ibook/notebooks/references/CorrelationExamples.ipynb
dli-invest/dli-invest
ced871a9908477ad02beebd44600bf3a0478f8f5
[ "Unlicense" ]
1
2021-05-08T10:48:14.000Z
2021-05-08T10:48:14.000Z
ibook/notebooks/references/CorrelationExamples.ipynb
dli-invest/dli-invest
ced871a9908477ad02beebd44600bf3a0478f8f5
[ "Unlicense" ]
6
2020-09-20T04:59:21.000Z
2022-01-17T03:57:55.000Z
ibook/notebooks/references/CorrelationExamples.ipynb
dli-invest/dli-invest
ced871a9908477ad02beebd44600bf3a0478f8f5
[ "Unlicense" ]
1
2020-11-19T08:33:13.000Z
2020-11-19T08:33:13.000Z
85.863248
6,298
0.506122
[ [ [ "## Correlation Examples", "_____no_output_____" ], [ "<a href=\"https://colab.research.google.com/github/dli-invest/iref-book/blob/ibook/master/notebooks%5Creferences%5CCorrelationExamples.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport yfinance as yf\nfrom mlfinlab.codependence import distance_correlation, angular_distance, absolute_angular_distance, squared_angular_distance\n\ndef get_prices(stocks, start_date=\"2020-03-01\", end_date=\"2020-05-30\"):\n stocks_string = \" \".join(stocks)\n data = yf.download(stocks_string, start=start_date, end=end_date,\n group_by=\"ticker\")\n data = data.fillna(method='ffill')\n # Drop columns with no entries\n data = data.dropna(axis='columns', how='all')\n\n prices_df = pd.concat([data[ticker][\"Close\"] for ticker in stocks], axis=1)\n prices_df.columns = stocks\n return prices_df\n \nasset_returns = get_prices(['IP.CN', 'NTAR.CN'])\nprint(asset_returns)\n# Calculate distance correlation between chosen assets\ndistance_corr = distance_correlation(asset_returns['IP.CN'], asset_returns['NTAR.CN'])\nprint(distance_corr)\n# Calculate angular distance between chosen assets\nangular_dist = angular_distance(asset_returns['IP.CN'], asset_returns['NTAR.CN'])\nprint(angular_dist)\n# Calculate absolute angular distance between chosen assets\nangular_dist = absolute_angular_distance(asset_returns['IP.CN'], asset_returns['NTAR.CN'])\nprint(angular_dist)\n# Calculate squared angular distance between chosen assets\nangular_dist = squared_angular_distance(asset_returns['IP.CN'], asset_returns['NTAR.CN'])\nprint(angular_dist)", "[*********************100%***********************] 2 of 2 completed\n IP.CN NTAR.CN\nDate \n2020-03-02 0.055 1.62\n2020-03-03 0.050 1.59\n2020-03-04 0.040 1.57\n2020-03-05 0.045 1.48\n2020-03-06 0.040 1.32\n... ... 
...\n2020-05-25 0.065 2.07\n2020-05-26 0.065 2.20\n2020-05-27 0.060 2.18\n2020-05-28 0.065 2.07\n2020-05-29 0.065 2.11\n\n[63 rows x 2 columns]\n0.8200543899391678\n0.2742084582009295\n0.2742084582009295\n0.3729254632246639\n" ], [ "!pip3 install mlfinlab yfinance", "Collecting mlfinlab\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/48/76/00bf9715ca24b772ea0f3233403719390a8cf49e7564adae4713d68a025a/mlfinlab-0.12.3-py3-none-any.whl (851kB)\n\r\u001b[K |▍ | 10kB 16.7MB/s eta 0:00:01\r\u001b[K |▊ | 20kB 1.7MB/s eta 0:00:01\r\u001b[K |█▏ | 30kB 2.2MB/s eta 0:00:01\r\u001b[K |█▌ | 40kB 2.5MB/s eta 0:00:01\r\u001b[K |██ | 51kB 2.0MB/s eta 0:00:01\r\u001b[K |██▎ | 61kB 2.2MB/s eta 0:00:01\r\u001b[K |██▊ | 71kB 2.5MB/s eta 0:00:01\r\u001b[K |███ | 81kB 2.7MB/s eta 0:00:01\r\u001b[K |███▌ | 92kB 2.9MB/s eta 0:00:01\r\u001b[K |███▉ | 102kB 2.8MB/s eta 0:00:01\r\u001b[K |████▎ | 112kB 2.8MB/s eta 0:00:01\r\u001b[K |████▋ | 122kB 2.8MB/s eta 0:00:01\r\u001b[K |█████ | 133kB 2.8MB/s eta 0:00:01\r\u001b[K |█████▍ | 143kB 2.8MB/s eta 0:00:01\r\u001b[K |█████▊ | 153kB 2.8MB/s eta 0:00:01\r\u001b[K |██████▏ | 163kB 2.8MB/s eta 0:00:01\r\u001b[K |██████▌ | 174kB 2.8MB/s eta 0:00:01\r\u001b[K |███████ | 184kB 2.8MB/s eta 0:00:01\r\u001b[K |███████▎ | 194kB 2.8MB/s eta 0:00:01\r\u001b[K |███████▊ | 204kB 2.8MB/s eta 0:00:01\r\u001b[K |████████ | 215kB 2.8MB/s eta 0:00:01\r\u001b[K |████████▌ | 225kB 2.8MB/s eta 0:00:01\r\u001b[K |████████▉ | 235kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████▎ | 245kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████▋ | 256kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████ | 266kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████▍ | 276kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████▊ | 286kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████▏ | 296kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████▌ | 307kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████ | 317kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████▎ | 327kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████▊ | 337kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████ | 348kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████▌ | 358kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████▉ | 368kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████▎ | 378kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████▋ | 389kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████ | 399kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████▍ | 409kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████▊ | 419kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████▏ | 430kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████▌ | 440kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████ | 450kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████▎ | 460kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████▊ | 471kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████ | 481kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████▌ | 491kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████▉ | 501kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████▎ | 512kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████▋ | 522kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████ | 532kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████▍ | 542kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████▊ | 552kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████████▏ | 563kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████████▌ | 573kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 583kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████████▎ | 593kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████████▊ | 604kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████ 
| 614kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████▌ | 624kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████▉ | 634kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████▎ | 645kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████▋ | 655kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████████████ | 665kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████████████▍ | 675kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████████████▊ | 686kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████████████▏ | 696kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████████████▌ | 706kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████████ | 716kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████████▎ | 727kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████████▊ | 737kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████████ | 747kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████████▌ | 757kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████████▉ | 768kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▎ | 778kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▋ | 788kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 798kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▍ | 808kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▊ | 819kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 829kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▌| 839kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 849kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 860kB 2.8MB/s \n\u001b[?25hCollecting yfinance\n Downloading https://files.pythonhosted.org/packages/c2/31/8b374a12b90def92a4e27d0fc595fc43635f395984e36a075244d98bd265/yfinance-0.1.54.tar.gz\nRequirement already satisfied: numpy==1.18.5 in /usr/local/lib/python3.6/dist-packages (from mlfinlab) (1.18.5)\nCollecting matplotlib==3.2.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/93/4b/52da6b1523d5139d04e02d9e26ceda6146b48f2a4e5d2abfdf1c7bac8c40/matplotlib-3.2.1-cp36-cp36m-manylinux1_x86_64.whl (12.4MB)\n\u001b[K |████████████████████████████████| 12.4MB 309kB/s \n\u001b[?25hCollecting pandas==1.0.4\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/8e/86/c14387d6813ebadb7bf61b9ad270ffff111c8b587e4d266e07de774e385e/pandas-1.0.4-cp36-cp36m-manylinux1_x86_64.whl (10.1MB)\n\u001b[K |████████████████████████████████| 10.1MB 47.7MB/s \n\u001b[?25hRequirement already satisfied: scipy==1.4.1 in /usr/local/lib/python3.6/dist-packages (from mlfinlab) (1.4.1)\nCollecting statsmodels==0.11.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/cb/83/540fd83238a18abe6c2d280fa8e489ac5fcefa1f370f0ca1acd16ae1b860/statsmodels-0.11.1-cp36-cp36m-manylinux1_x86_64.whl (8.7MB)\n\u001b[K |████████████████████████████████| 8.7MB 42.7MB/s \n\u001b[?25hCollecting cvxpy==1.1.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/fa/89/6e4f99b36ce2d002f2792529b130fd8ed5d7004c92ce8ae7d56496f51426/cvxpy-1.1.1.tar.gz (990kB)\n\u001b[K |████████████████████████████████| 993kB 42.0MB/s \n\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n Preparing wheel metadata ... 
\u001b[?25l\u001b[?25hdone\nCollecting scikit-learn==0.23.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d9/3a/eb8d7bbe28f4787d140bb9df685b7d5bf6115c0e2a969def4027144e98b6/scikit_learn-0.23.1-cp36-cp36m-manylinux1_x86_64.whl (6.8MB)\n\u001b[K |████████████████████████████████| 6.9MB 32.7MB/s \n\u001b[?25hCollecting numba==0.49.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/56/dc/0e3d3732fd62c73fbb3317fc7bba22574832ab7a8e075620557bd4311641/numba-0.49.1-cp36-cp36m-manylinux2014_x86_64.whl (3.6MB)\n\u001b[K |████████████████████████████████| 3.6MB 33.9MB/s \n\u001b[?25hRequirement already satisfied: requests>=2.20 in /usr/local/lib/python3.6/dist-packages (from yfinance) (2.23.0)\nRequirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.6/dist-packages (from yfinance) (0.0.9)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.2.1->mlfinlab) (2.8.1)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.2.1->mlfinlab) (2.4.7)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.2.1->mlfinlab) (1.2.0)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.2.1->mlfinlab) (0.10.0)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas==1.0.4->mlfinlab) (2018.9)\nRequirement already satisfied: patsy>=0.5 in /usr/local/lib/python3.6/dist-packages (from statsmodels==0.11.1->mlfinlab) (0.5.1)\nRequirement already satisfied: osqp>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from cvxpy==1.1.1->mlfinlab) (0.6.1)\nRequirement already satisfied: ecos>=2 in /usr/local/lib/python3.6/dist-packages (from cvxpy==1.1.1->mlfinlab) (2.0.7.post1)\nRequirement already satisfied: scs>=1.1.3 in /usr/local/lib/python3.6/dist-packages (from cvxpy==1.1.1->mlfinlab) (2.1.2)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn==0.23.1->mlfinlab) (0.16.0)\nCollecting threadpoolctl>=2.0.0\n Downloading https://files.pythonhosted.org/packages/f7/12/ec3f2e203afa394a149911729357aa48affc59c20e2c1c8297a60f33f133/threadpoolctl-2.1.0-py3-none-any.whl\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from numba==0.49.1->mlfinlab) (49.2.0)\nRequirement already satisfied: llvmlite<=0.33.0.dev0,>=0.31.0.dev0 in /usr/local/lib/python3.6/dist-packages (from numba==0.49.1->mlfinlab) (0.31.0)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (2020.6.20)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (2.10)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (1.24.3)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.1->matplotlib==3.2.1->mlfinlab) (1.15.0)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from osqp>=0.4.1->cvxpy==1.1.1->mlfinlab) (0.16.0)\nBuilding wheels for collected packages: cvxpy\n Building wheel for cvxpy (PEP 517) 
... \u001b[?25l\u001b[?25hdone\n Created wheel for cvxpy: filename=cvxpy-1.1.1-cp36-cp36m-linux_x86_64.whl size=2654188 sha256=a26a6bc4f5d1fd6c94fb0f8e6d7dc682cb1a0b40165cb1ce6e92e89c9d4f1d9c\n Stored in directory: /root/.cache/pip/wheels/06/db/59/b5af93d86703e0903b9b94ccc300ac70daf9d273f13e6c0350\nSuccessfully built cvxpy\nBuilding wheels for collected packages: yfinance\n Building wheel for yfinance (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for yfinance: filename=yfinance-0.1.54-py2.py3-none-any.whl size=22409 sha256=c03517340ca865ab65213b423dde5625b362067eb62c54f5dcca90c92c2dbeae\n Stored in directory: /root/.cache/pip/wheels/f9/e3/5b/ec24dd2984b12d61e0abf26289746c2436a0e7844f26f2515c\nSuccessfully built yfinance\n\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\nInstalling collected packages: matplotlib, pandas, statsmodels, cvxpy, threadpoolctl, scikit-learn, numba, mlfinlab, yfinance\n Found existing installation: matplotlib 3.2.2\n Uninstalling matplotlib-3.2.2:\n Successfully uninstalled matplotlib-3.2.2\n Found existing installation: pandas 1.0.5\n Uninstalling pandas-1.0.5:\n Successfully uninstalled pandas-1.0.5\n Found existing installation: statsmodels 0.10.2\n Uninstalling statsmodels-0.10.2:\n Successfully uninstalled statsmodels-0.10.2\n Found existing installation: cvxpy 1.0.31\n Uninstalling cvxpy-1.0.31:\n Successfully uninstalled cvxpy-1.0.31\n Found existing installation: scikit-learn 0.22.2.post1\n Uninstalling scikit-learn-0.22.2.post1:\n Successfully uninstalled scikit-learn-0.22.2.post1\n Found existing installation: numba 0.48.0\n Uninstalling numba-0.48.0:\n Successfully uninstalled numba-0.48.0\nSuccessfully installed cvxpy-1.1.1 matplotlib-3.2.1 mlfinlab-0.12.3 numba-0.49.1 pandas-1.0.4 scikit-learn-0.23.1 statsmodels-0.11.1 threadpoolctl-2.1.0 yfinance-0.1.54\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ] ]
cb35fe22c50d56025367bd7cff76a0bb26063337
2,883
ipynb
Jupyter Notebook
Notebooks/exception_handling.ipynb
erfan226/Julia_tutorial_for_ML
3519f7e264d18c9cf204824e586576d72daa6733
[ "MIT" ]
null
null
null
Notebooks/exception_handling.ipynb
erfan226/Julia_tutorial_for_ML
3519f7e264d18c9cf204824e586576d72daa6733
[ "MIT" ]
null
null
null
Notebooks/exception_handling.ipynb
erfan226/Julia_tutorial_for_ML
3519f7e264d18c9cf204824e586576d72daa6733
[ "MIT" ]
null
null
null
19.47973
126
0.481443
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb36144ea23af18f4067d7457eb614780c3931c2
282,749
ipynb
Jupyter Notebook
wp6/analyse/sl_final_analysis_differenceindistanceofsameTFpair_part2.ipynb
loosolab/Datenanalyse-2021
2a94f6153a504bd6f1ee205eeeab279b20fb847d
[ "MIT" ]
2
2022-02-25T16:48:56.000Z
2022-03-29T16:43:38.000Z
wp6/analyse/sl_final_analysis_differenceindistanceofsameTFpair_part2.ipynb
loosolab/Datenanalyse-2021
2a94f6153a504bd6f1ee205eeeab279b20fb847d
[ "MIT" ]
5
2022-02-22T14:57:20.000Z
2022-03-29T12:33:30.000Z
wp6/analyse/sl_final_analysis_differenceindistanceofsameTFpair_part2.ipynb
loosolab/Datenanalyse-2021
2a94f6153a504bd6f1ee205eeeab279b20fb847d
[ "MIT" ]
null
null
null
111.891175
92,544
0.748031
[ [ [ "# Biological question: Are there differences in the binding distance of the same TF-pair in different clusters? - PART2\n\nThis notebook can be used to analyse if there are differences in the binding distance of the same TF-pair in two different clusters.\n\nIn \"Outline of this notebook\" the general steps in the notebook are explained. The details for each general step are described directly in the notebook for the general step. \n**Needed input for notebook:** .pkl file with performed market basket analysis for the second of two chosen clusters for comparison + -csv file of the results of the distance analysis of the first cluster\n\n(you can also have a look at TF-COMB docs)\n\n#### Exemplary Data: WP2 - A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts vs. A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts\n\n### Outline of this notebook:\nThis notbook presents the second part of the analysis to find out if there is a difference in the binding distance of the same TF-pair in different clusters. If you **have not done the PART 1** yet, **do the PART 1 first**. Here the second cluster is analysed. The **results of the first cluster are then imported**, merged together and the comparison of the binding distance are done. \n\n 1. Implementation of distance analysis for second cluster and transfering in dataframe\n 2. Importing the results of the first cluster\n 3. Merging the results of first and second cluster\n 4. Looking at the Distribution of the difference in binding distance between the same TF-pairs in the two clusters\n 5. Comparing the binding distances between the same TF-pairs in the two clusters\n 6. Possibility to have a closer look and to compare results of different clusters ", "_____no_output_____" ], [ "## 1. Implementation of distance analysis for cluster\n0. Creation of folders for the structure if necessary, so the needed path for the output are existing\n1. Read in **path of the .pkl file of the first chosen cluster from already performed market basket analysis** (alternative: perform normal market basket analysis) **(-> adjust for cluster)**\n2. Using .simplify_rules(), so the duplicates of a TF-pair (because of the two orientations TF1-TF2 or TF2-TF1) are not displayed\n3. Selection of TF-pairs by cosine and zscore\n4. Implementation of distance analysis with considering the noisiness (lower noise, \"clearer/better\" analysis)\n5. Creation of dataframe df_distance_clusterPART2 with the results of the distance analysis of the second cluster so it can be easily merged with the dataframe of the first cluster\n6. Reducing the TF co-occurrences by selecting the TF co-occurrences by peak hight above 2.8 (good proven boundary from other applications of distance analysis) ", "_____no_output_____" ] ], [ [ "# The following lines, initally check if all file/paths are available. 
\n#If a result folder does not exist it is created automatically\nimport os\nimport pathlib\nif not os.path.exists(\"./results/distanceresultsfordifference/\"):\n pathlib.Path(\"./results/distanceresultsfordifference/\").mkdir(parents=True, exist_ok=True)\n \nif not os.path.exists(\"./results/differencedistance_distributionplot/\"):\n pathlib.Path(\"./results/differencedistance_distributionplot/\").mkdir(parents=True, exist_ok=True)\n\nif not os.path.exists(\"./results/differencedistance_plot/\"):\n pathlib.Path(\"./results/differencedistance_plot/\").mkdir(parents=True, exist_ok=True)\n\nif not os.path.exists(\"./results/differencedistance_table/\"):\n pathlib.Path(\"./results/differencedistance_table/\").mkdir(parents=True, exist_ok=True)\n", "_____no_output_____" ], [ "import tfcomb.objects\nclusterPART2_object = tfcomb.objects.CombObj().from_pickle(\"/mnt/workspace_stud/stud6/repositories/Datenanalyse-2021/wp6/analyse/results/wp2/main/A8CPH_esophagus_muscularis_mucosa/A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.pkl\")\nclusterPART2_object", "_____no_output_____" ], [ "clusterPART2_object.simplify_rules()", "_____no_output_____" ], [ "clusterPART2_object_selected = clusterPART2_object.select_significant_rules()", "INFO: x_threshold is None; trying to calculate optimal threshold\nINFO: y_threshold is None; trying to calculate optimal threshold\nINFO: Creating subset of TFBS and rules using thresholds\n" ], [ "clusterPART2_object_selected.analyze_distances(threads=6)", "INFO: DistObject successfully created! It can be accessed via combobj.distObj\nINFO: Calculating distances\nINFO: Normalizing data.\nINFO: Done finding distances! Results are found in .distances\nINFO: Run .linregress_all() to fit linear regression\nINFO: Fitting linear regression. With number of threads: 6\nINFO: Linear regression finished! Results can be found in .linres\nINFO: Correcting background with 6 threads.\nINFO: Background correction finished! Results can be found in .corrected\nINFO: Smoothing signals with window size 3\nINFO: Shifting signals above zero\nINFO: Analyzing Signal with threads 6\nINFO: Done analyzing signal. Results are found in .peaks\n" ], [ "clusterPART2_object_selected.distObj.evaluate_noise(threads=6)\nclusterPART2_object_selected.distObj.rank_rules()", "INFO: Evaluating noisiness of the signals with 6 threads\n" ], [ "df_distance_clusterPART2=clusterPART2_object_selected.distObj.peaks", "_____no_output_____" ], [ "df_distance_clusterPART2=df_distance_clusterPART2[(df_distance_clusterPART2[\"Peak Heights\"]>2.8)]\ndf_distance_clusterPART2", "_____no_output_____" ] ], [ [ "## 2. Importing the results of the first cluster\n1. Import of results of the first cluster and saving them in df_distance_clusterPART1_csv dataframe", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "df_distance_clusterPART1_csv=pd.read_csv(\"./results/distanceresultsfordifference/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts.csv\")\ndf_distance_clusterPART1_csv", "_____no_output_____" ] ], [ [ " ## 3. Merging the results of first and second cluster\n 1. Merging the results of the distance analysis for the TF-pairs that are in first and second cluster in new dataframe df_distancedifference_2clusters so the binding distance between the two clusters can be compared. The columns with the suffix CPART1 have the information of the first cluster and columns with CPART2 have the information of the second cluster. 
", "_____no_output_____" ] ], [ [ "df_distancedifference_2clusters=df_distance_clusterPART1_csv.merge(df_distance_clusterPART2,suffixes=('_CPART1', '_CPART2'), left_on =[\"TF1\",\"TF2\"], right_on = [\"TF1\",\"TF2\"])\ndf_distancedifference_2clusters", "_____no_output_____" ], [ "pd.set_option('max_columns', None)\npd.set_option('max_rows', 50)", "_____no_output_____" ] ], [ [ "## 4. Looking at the Distribution of the difference in binding distance between the same TF-pairs in the two clusters\n1. Calculation of the difference between the binding distance per TF-pair\n2. Calcultation of the average peak Height of a TF-pair as an additional assessment factor\n3. Selection of TF-pairs above 100 counts as TF-pairs of interest so they so probability is higher that they are important for cluster\n4. Sorting the dataframe df_distancedifference_2clusters by the difference in the binding distance for plotting them\n5. Calculating the Distribution in the difference in binding distance. This can be used for comparing different two clusters (2 clusters same celltype vs 2 clusters different celltype) concering their distribution in difference in binding distance\n6. Plotting the Distribution of the difference in binding distance and saving the **distribution plot .png file (-> adjust for cluster)**\n", "_____no_output_____" ] ], [ [ "df_distancedifference_2clusters['Difference between Distance'] = abs(df_distancedifference_2clusters['Distance_CPART1'] - df_distancedifference_2clusters['Distance_CPART2'])\ndf_distancedifference_2clusters['Average Peak Height'] = ((df_distancedifference_2clusters['Peak Heights_CPART1'] + df_distancedifference_2clusters['Peak Heights_CPART2'])/2)\ndf_distancedifference_2clusters['TF-pair'] = df_distancedifference_2clusters['TF1'] + \" + \" + df_distancedifference_2clusters['TF2']\ndf_distancedifference_2clusters = df_distancedifference_2clusters[(df_distancedifference_2clusters[\"TF1_TF2_count_CPART1\"]>100) &(df_distancedifference_2clusters[\"TF1_TF2_count_CPART2\"]>100)]\ndf_distancedifference_2clusters_sorted=df_distancedifference_2clusters.sort_values(by=['Difference between Distance'])\ndf_differencedistance_distribution=df_distancedifference_2clusters_sorted['Difference between Distance'].value_counts()\n", "_____no_output_____" ], [ "df_differencedistance_distribution", "_____no_output_____" ], [ "df_distancedifference_2clusters_sorted", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\ndifferencedistance = df_differencedistance_distribution.keys()\noccurrence = df_differencedistance_distribution\n\nplt.figure(figsize=(15, 5))\nplt.scatter(differencedistance,occurrence)\nplt.xticks\nplt.grid(True)\nplt.xlabel('Difference in Distance')\nplt.ylabel('Occurrence')\nplt.title('Distribution of the Difference in binding Distance')\n\nplt.savefig(\"./results/differencedistance_distributionplot/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.png\") \nplt.show()", "_____no_output_____" ] ], [ [ "## 5. Comparing the binding distances between the same TF-pairs in the two clusters\n1. Selecting the difference in binding distance over 10 since now we only want to look at the TF-pairs with a difference in binding distance\n2. Sorting the TF-Pairs by noisiness of the first cluster and selecting the first 100. 
The noisiness is selected as an assessment factor since it distinguishes a clear signal from noisy signal in the distance analysis what was ranged as important factor for the qualitiy of the TF-pair binding distance. The reason for the selection of 100 TF-pairs is for the readability of the figure and can also be adjusted reasonable (as well as the other factors)\n3. Sorting the dataframe df_distancedifference_2clusters_withoutlowdifference_noisinesstop100 by the difference in the binding distance for plotting them and **saving finished distance difference table in .csv file (-> adjust for cluster)**\n10. Plotting the difference in distance over the TF-pairs and saving the **difference in distnace plot in a .png file (-> adjust for cluster)**", "_____no_output_____" ] ], [ [ "df_distancedifference_2clusters_withoutlowdifference = df_distancedifference_2clusters[(df_distancedifference_2clusters[\"Difference between Distance\"]>10)]\ndf_distancedifference_2clusters_withoutlowdifference_noisinesstop100=df_distancedifference_2clusters_withoutlowdifference.sort_values(by=['Noisiness_CPART1']).head(100)\ndf_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted=df_distancedifference_2clusters_withoutlowdifference_noisinesstop100.sort_values(by=['Difference between Distance'])\ndf_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted\n", "_____no_output_____" ], [ "df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted.to_csv(\"./results/differencedistance_table/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.csv\")", "_____no_output_____" ], [ "differencedistance = df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted['Difference between Distance']\nTFpairs = df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted['TF-pair']\naveragepeakheight = df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted['Average Peak Height']\n\nplt.figure(figsize=(20, 5))\nplt.scatter(TFpairs,differencedistance, c=averagepeakheight, cmap = 'cividis_r')\nplt.xticks(rotation='vertical')\nplt.grid(True, axis = 'y')\nplt.xlabel('TF-pairs')\nplt.ylabel('Difference in Distance')\ncbar = plt.colorbar()\ncbar.set_label(\"average Peak Height\")\nplt.title('Difference in Distance over the TF-pairs')\n\n\nplt.savefig(\"./results/differencedistance_plot/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.png\") \nplt.show()", "_____no_output_____" ] ], [ [ "## 5. Possibility to have a closer look and to compare results of different clusters ", "_____no_output_____" ], [ "#### Possibility to import other plots from different two clusters for comparisons (-> adjust for cluster)", "_____no_output_____" ] ], [ [ "from PIL import Image\n\nimage = Image.open(\"/mnt/workspace_stud/stud7/Datenanalyse-2021/wp6/analyse/results/differencedistance_plot/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__ACCQ1_colon_transverse_c3_Enterocytes.png\")\nimage.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb362669501ec8c27828e76e7a0d16a0071c8946
34,326
ipynb
Jupyter Notebook
data_preprocessing_1023_with_negation.ipynb
nakamura41/EB5204_Sentiment_Mining_CA
5f8dc31747d460e12af4faa771dc7e3afb81c022
[ "Unlicense" ]
2
2018-11-04T00:15:25.000Z
2019-07-18T06:09:31.000Z
data_preprocessing_1023_with_negation.ipynb
nakamura41/EB5204_Sentiment_Mining_CA
5f8dc31747d460e12af4faa771dc7e3afb81c022
[ "Unlicense" ]
null
null
null
data_preprocessing_1023_with_negation.ipynb
nakamura41/EB5204_Sentiment_Mining_CA
5f8dc31747d460e12af4faa771dc7e3afb81c022
[ "Unlicense" ]
1
2018-11-03T03:00:34.000Z
2018-11-03T03:00:34.000Z
35.830898
402
0.405028
[ [ [ "#!pwd\nimport pandas as pd\nimport os\nimport string\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize, WordNetLemmatizer\nfrom nltk import stem, pos_tag\nfrom nltk.corpus import wordnet as wn\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nimport os\nimport re\ncwd = os.getcwd()\nfilepath = cwd+'/data/df_test_indian.csv'\noutputfilepath = cwd+'/data/df_test_indian_with_negation.csv'", "_____no_output_____" ], [ "df = pd.read_csv(filepath)\ndf.head()\n# ffile1 = open(filepath,\"r\", encoding = \"ISO-8859-1\")\n# df_standard = pd.read_csv(ffile1, encoding = \"utf-8\")\n# df_standard.drop(['restaurant_id', 'date', 'review_id'], inplace=True, axis=1)\n# df_standard.rename(columns={'text': 'review', 'Sentiment': 'sentiment'}, inplace=True)\n# df_standard.head()\n\n# mask = df_standard.sentiment == 'negative'\n# column_name = 'sentiment'\n# df_standard.loc[mask, column_name] = -1\n# mask = df_standard.sentiment == 'positive'\n# column_name = 'sentiment'\n# df_standard.loc[mask, column_name] = 1\n# df_standard = df_standard.rename(columns = {'stars':'rating'})\n# df_standard.head()\n\n# df = pd.concat([df, df_standard], axis=0)\n# df = df.reset_index().drop('index', axis=1)", "_____no_output_____" ], [ "ratings = df['rating']\nsentiments = df['sentiment']\nreviews = df['review']", "_____no_output_____" ], [ "sentiment_trans = []\nfor r in ratings.keys():\n rating = ratings.loc[r]\n sentiment = sentiments.loc[r]\n sentiment = int(sentiment)\n if rating <= 3:\n sentiment = -1\n else:\n sentiment = sentiment\n sentiment_trans.insert(r, sentiment)\nsentiments = pd.Series(sentiment_trans, index=ratings.keys())\n\ndf = pd.concat([ratings, reviews, sentiments], axis = 1)\ndf.columns = ['rating', 'review', 'sentiment']\ndf.head()", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "stop = stopwords.words('english')\nsnowball = stem.snowball.EnglishStemmer()\nwnl = WordNetLemmatizer()\nreviews = df['review']", "_____no_output_____" ], [ "def negation_Processing(reviews):\n def neg_tag(text):\n transformed = re.sub(r\"\\b(?:never|nothing|nowhere|noone|none|not|haven't|hasn't|hasnt|hadn't|hadnt|can't|cant|couldn't|couldnt|shouldn't|shouldnt|won't|wont|wouldn't|wouldnt|don't|dont|doesn't|doesnt|didn't|didnt|isnt|isn't|aren't|arent|aint|ain't|hardly|seldom)\\b[\\w\\s]+[^\\w\\s]\", lambda match: re.sub(r'(\\s+)(\\w+)', r'\\1NEG_\\2', match.group(0)), text, flags=re.IGNORECASE)\n return(transformed)\n \n negation_reviews = []\n\n # Append elements to the list\n for doc in reviews:\n trans = neg_tag(doc)\n negation_reviews.append(trans)\n return negation_reviews", "_____no_output_____" ], [ "# Remove all the punctuations and numbers\ndef removePunc(reviews):\n comwoPunc = str.maketrans({key: None for key in string.punctuation\n + string.digits})\n for i in reviews.keys():\n comment = reviews.loc[i]\n reviewswoPunc = reviews.replace(comment, comment.translate(comwoPunc))\n return reviewswoPunc", "_____no_output_____" ], [ "# Convert all characters to Lower case\ndef convToLow(words):\n reviewsToLow = words.apply(str.lower)\n return reviewsToLow", "_____no_output_____" ], [ "# Stopwords removal\ndef removeStop(text, stop):\n for i in text.keys():\n comment = text.loc[i]\n comment_nostop = \" \".join(filter(lambda word: word not in stop,\n comment.split()))\n reviewswoStop = text.replace(comment, comment_nostop)\n return reviewswoStop", "_____no_output_____" ], [ "# Tokenization\ndef token(text):\n reviewsToken = text.apply(word_tokenize)\n return 
reviewsToken", "_____no_output_____" ], [ "# pos tagging\ndef posTag(words):\n reviews_pos = words.apply(pos_tag)\n reviews_wnpos = []\n for i in reviews_pos.keys():\n comment = reviews_pos.loc[i]\n comment_wnpos = []\n for t in comment:\n t = list(t)\n tag = t[1]\n if t[1].startswith('J'):\n t[1] = wn.ADJ\n elif t[1].startswith('V'):\n t[1] = wn.VERB\n elif t[1].startswith('N'):\n t[1] = wn.NOUN\n elif t[1].startswith('R'):\n t[1] = wn.ADV\n else:\n del t\n t = None\n if t is not None:\n comment_wnpos.append(t)\n else:\n pass\n reviews_wnpos.append(comment_wnpos)\n reviews_wnpos = pd.Series(reviews_wnpos, index=reviews_pos.keys())\n return reviews_wnpos", "_____no_output_____" ], [ "# Lemmatization\ndef lemma(text, wnl):\n reviews_lem = []\n for i in text.keys():\n comment = text.loc[i]\n comment_lem = []\n for t in comment:\n word = t[0]\n tag = t[1]\n t = wnl.lemmatize(word, pos=tag)\n comment_lem.append(t)\n reviews_lem.append(comment_lem)\n allReviews = []\n for j in reviews_lem:\n reviews = ' '.join(j)\n allReviews.append(reviews)\n reviewsLemma = pd.Series(allReviews, index=text.keys())\n return reviewsLemma", "_____no_output_____" ], [ "if __name__ == '__main__':\n \n reviews = negation_Processing(reviews)\n reviews = pd.Series(reviews)\n\n reviews = removePunc(reviews)\n reviews = convToLow(reviews)\n reviews = removeStop(reviews, stop)\n reviews = token(reviews)\n reviews = posTag(reviews)\n final_reviews = lemma(reviews,wnl)\n ratings = df['rating']\n sentiments = df['sentiment']\n df = pd.concat([ratings, final_reviews, sentiments], axis = 1)\n df.columns = ['rating', 'review', 'sentiment']\n df.to_csv(outputfilepath, encoding='utf-8')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb362afa330f508ef29d9dd6baeb33c5c8557e2b
34,776
ipynb
Jupyter Notebook
project-tv-script-generation/dlnd_tv_script_generation.ipynb
subham73/deep-learning-v2-pytorch
a200ee8d644fa176fc6f2b663cac6cc0207c1b40
[ "MIT" ]
4,850
2018-09-04T19:40:22.000Z
2022-03-31T10:21:49.000Z
project-tv-script-generation/dlnd_tv_script_generation.ipynb
subham73/deep-learning-v2-pytorch
a200ee8d644fa176fc6f2b663cac6cc0207c1b40
[ "MIT" ]
220
2018-09-15T20:30:55.000Z
2022-03-30T04:45:30.000Z
project-tv-script-generation/dlnd_tv_script_generation.ipynb
subham73/deep-learning-v2-pytorch
a200ee8d644fa176fc6f2b663cac6cc0207c1b40
[ "MIT" ]
5,729
2018-09-04T22:07:30.000Z
2022-03-31T11:52:07.000Z
36.995745
421
0.580343
[ [ [ "# TV Script Generation\n\nIn this project, you'll generate your own [Seinfeld](https://en.wikipedia.org/wiki/Seinfeld) TV scripts using RNNs. You'll be using part of the [Seinfeld dataset](https://www.kaggle.com/thec03u5/seinfeld-chronicles#scripts.csv) of scripts from 9 seasons. The Neural Network you'll build will generate a new ,\"fake\" TV script, based on patterns it recognizes in this training data.\n\n## Get the Data\n\nThe data is already provided for you in `./data/Seinfeld_Scripts.txt` and you're encouraged to open that file and look at the text. \n>* As a first step, we'll load in this data and look at some samples. \n* Then, you'll be tasked with defining and training an RNN to generate a new script!", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# load in data\nimport helper\ndata_dir = './data/Seinfeld_Scripts.txt'\ntext = helper.load_data(data_dir)", "_____no_output_____" ] ], [ [ "## Explore the Data\nPlay around with `view_line_range` to view different parts of the data. This will give you a sense of the data you'll be working with. You can see, for example, that it is all lowercase text, and each new line of dialogue is separated by a newline character `\\n`.", "_____no_output_____" ] ], [ [ "view_line_range = (0, 10)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nimport numpy as np\n\nprint('Dataset Stats')\nprint('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))\n\nlines = text.split('\\n')\nprint('Number of lines: {}'.format(len(lines)))\nword_count_line = [len(line.split()) for line in lines]\nprint('Average number of words in each line: {}'.format(np.average(word_count_line)))\n\nprint('The lines {} to {}:'.format(*view_line_range))\nprint('\\n'.join(text.split('\\n')[view_line_range[0]:view_line_range[1]]))", "_____no_output_____" ] ], [ [ "---\n## Implement Pre-processing Functions\nThe first thing to do to any dataset is pre-processing. Implement the following pre-processing functions below:\n- Lookup Table\n- Tokenize Punctuation\n\n### Lookup Table\nTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:\n- Dictionary to go from the words to an id, we'll call `vocab_to_int`\n- Dictionary to go from the id to word, we'll call `int_to_vocab`\n\nReturn these dictionaries in the following **tuple** `(vocab_to_int, int_to_vocab)`", "_____no_output_____" ] ], [ [ "import problem_unittests as tests\n\ndef create_lookup_tables(text):\n \"\"\"\n Create lookup tables for vocabulary\n :param text: The text of tv scripts split into words\n :return: A tuple of dicts (vocab_to_int, int_to_vocab)\n \"\"\"\n # TODO: Implement Function\n \n # return tuple\n return (None, None)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_create_lookup_tables(create_lookup_tables)", "_____no_output_____" ] ], [ [ "### Tokenize Punctuation\nWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks can create multiple ids for the same word. For example, \"bye\" and \"bye!\" would generate two different word ids.\n\nImplement the function `token_lookup` to return a dict that will be used to tokenize symbols like \"!\" into \"||Exclamation_Mark||\". 
Create a dictionary for the following symbols where the symbol is the key and value is the token:\n- Period ( **.** )\n- Comma ( **,** )\n- Quotation Mark ( **\"** )\n- Semicolon ( **;** )\n- Exclamation mark ( **!** )\n- Question mark ( **?** )\n- Left Parentheses ( **(** )\n- Right Parentheses ( **)** )\n- Dash ( **-** )\n- Return ( **\\n** )\n\nThis dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbols as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a value that could be confused as a word; for example, instead of using the value \"dash\", try using something like \"||dash||\".", "_____no_output_____" ] ], [ [ "def token_lookup():\n \"\"\"\n Generate a dict to turn punctuation into a token.\n :return: Tokenized dictionary where the key is the punctuation and the value is the token\n \"\"\"\n # TODO: Implement Function\n \n return None\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_tokenize(token_lookup)", "_____no_output_____" ] ], [ [ "## Pre-process all the data and save it\n\nRunning the code cell below will pre-process all the data and save it to file. You're encouraged to look at the code for `preprocess_and_save_data` in the `helpers.py` file to see what it's doing in detail, but you do not need to change this code.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# pre-process training data\nhelper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)", "_____no_output_____" ] ], [ [ "# Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\nimport problem_unittests as tests\n\nint_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()", "_____no_output_____" ] ], [ [ "## Build the Neural Network\nIn this section, you'll build the components necessary to build an RNN by implementing the RNN Module and forward and backpropagation functions.\n\n### Check Access to GPU", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport torch\n\n# Check for a GPU\ntrain_on_gpu = torch.cuda.is_available()\nif not train_on_gpu:\n print('No GPU found. Please use a GPU to train your neural network.')", "_____no_output_____" ] ], [ [ "## Input\nLet's start with the preprocessed input data. We'll use [TensorDataset](http://pytorch.org/docs/master/data.html#torch.utils.data.TensorDataset) to provide a known format to our dataset; in combination with [DataLoader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader), it will handle batching, shuffling, and other dataset iteration functions.\n\nYou can create data with TensorDataset by passing in feature and target tensors. 
Then create a DataLoader as usual.\n```\ndata = TensorDataset(feature_tensors, target_tensors)\ndata_loader = torch.utils.data.DataLoader(data, \n batch_size=batch_size)\n```\n\n### Batching\nImplement the `batch_data` function to batch `words` data into chunks of size `batch_size` using the `TensorDataset` and `DataLoader` classes.\n\n>You can batch words using the DataLoader, but it will be up to you to create `feature_tensors` and `target_tensors` of the correct size and content for a given `sequence_length`.\n\nFor example, say we have these as input:\n```\nwords = [1, 2, 3, 4, 5, 6, 7]\nsequence_length = 4\n```\n\nYour first `feature_tensor` should contain the values:\n```\n[1, 2, 3, 4]\n```\nAnd the corresponding `target_tensor` should just be the next \"word\"/tokenized word value:\n```\n5\n```\nThis should continue with the second `feature_tensor`, `target_tensor` being:\n```\n[2, 3, 4, 5] # features\n6 # target\n```", "_____no_output_____" ] ], [ [ "from torch.utils.data import TensorDataset, DataLoader\n\n\ndef batch_data(words, sequence_length, batch_size):\n \"\"\"\n Batch the neural network data using DataLoader\n :param words: The word ids of the TV scripts\n :param sequence_length: The sequence length of each batch\n :param batch_size: The size of each batch; the number of sequences in a batch\n :return: DataLoader with batched data\n \"\"\"\n # TODO: Implement function\n \n # return a dataloader\n return None\n\n# there is no test for this function, but you are encouraged to create\n# print statements and tests of your own\n", "_____no_output_____" ] ], [ [ "### Test your dataloader \n\nYou'll have to modify this code to test a batching function, but it should look fairly similar.\n\nBelow, we're generating some test text data and defining a dataloader using the function you defined, above. Then, we are getting some sample batch of inputs `sample_x` and targets `sample_y` from our dataloader.\n\nYour code should return something like the following (likely in a different order, if you shuffled your data):\n\n```\ntorch.Size([10, 5])\ntensor([[ 28, 29, 30, 31, 32],\n [ 21, 22, 23, 24, 25],\n [ 17, 18, 19, 20, 21],\n [ 34, 35, 36, 37, 38],\n [ 11, 12, 13, 14, 15],\n [ 23, 24, 25, 26, 27],\n [ 6, 7, 8, 9, 10],\n [ 38, 39, 40, 41, 42],\n [ 25, 26, 27, 28, 29],\n [ 7, 8, 9, 10, 11]])\n\ntorch.Size([10])\ntensor([ 33, 26, 22, 39, 16, 28, 11, 43, 30, 12])\n```\n\n### Sizes\nYour sample_x should be of size `(batch_size, sequence_length)` or (10, 5) in this case and sample_y should just have one dimension: batch_size (10). \n\n### Values\n\nYou should also notice that the targets, sample_y, are the *next* value in the ordered test_text data. So, for an input sequence `[ 28, 29, 30, 31, 32]` that ends with the value `32`, the corresponding output should be `33`.", "_____no_output_____" ] ], [ [ "# test dataloader\n\ntest_text = range(50)\nt_loader = batch_data(test_text, sequence_length=5, batch_size=10)\n\ndata_iter = iter(t_loader)\nsample_x, sample_y = data_iter.next()\n\nprint(sample_x.shape)\nprint(sample_x)\nprint()\nprint(sample_y.shape)\nprint(sample_y)", "_____no_output_____" ] ], [ [ "---\n## Build the Neural Network\nImplement an RNN using PyTorch's [Module class](http://pytorch.org/docs/master/nn.html#torch.nn.Module). You may choose to use a GRU or an LSTM. To complete the RNN, you'll have to implement the following functions for the class:\n - `__init__` - The initialize function. 
\n - `init_hidden` - The initialization function for an LSTM/GRU hidden state\n - `forward` - Forward propagation function.\n \nThe initialize function should create the layers of the neural network and save them to the class. The forward propagation function will use these layers to run forward propagation and generate an output and a hidden state.\n\n**The output of this model should be the *last* batch of word scores** after a complete sequence has been processed. That is, for each input sequence of words, we only want to output the word scores for a single, most likely, next word.\n\n### Hints\n\n1. Make sure to stack the outputs of the lstm to pass to your fully-connected layer, you can do this with `lstm_output = lstm_output.contiguous().view(-1, self.hidden_dim)`\n2. You can get the last batch of word scores by shaping the output of the final, fully-connected layer like so:\n\n```\n# reshape into (batch_size, seq_length, output_size)\noutput = output.view(batch_size, -1, self.output_size)\n# get last batch\nout = output[:, -1]\n```", "_____no_output_____" ] ], [ [ "import torch.nn as nn\n\nclass RNN(nn.Module):\n \n def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):\n \"\"\"\n Initialize the PyTorch RNN Module\n :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary)\n :param output_size: The number of output dimensions of the neural network\n :param embedding_dim: The size of embeddings, should you choose to use them \n :param hidden_dim: The size of the hidden layer outputs\n :param dropout: dropout to add in between LSTM/GRU layers\n \"\"\"\n super(RNN, self).__init__()\n # TODO: Implement function\n \n # set class variables\n \n # define model layers\n \n \n def forward(self, nn_input, hidden):\n \"\"\"\n Forward propagation of the neural network\n :param nn_input: The input to the neural network\n :param hidden: The hidden state \n :return: Two Tensors, the output of the neural network and the latest hidden state\n \"\"\"\n # TODO: Implement function \n\n # return one batch of output word scores and the hidden state\n return None, None\n \n \n def init_hidden(self, batch_size):\n '''\n Initialize the hidden state of an LSTM/GRU\n :param batch_size: The batch_size of the hidden state\n :return: hidden state of dims (n_layers, batch_size, hidden_dim)\n '''\n # Implement function\n \n # initialize hidden state with zero weights, and move to GPU if available\n \n return None\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_rnn(RNN, train_on_gpu)", "_____no_output_____" ] ], [ [ "### Define forward and backpropagation\n\nUse the RNN class you implemented to apply forward and back propagation. This function will be called, iteratively, in the training loop as follows:\n```\nloss = forward_back_prop(decoder, decoder_optimizer, criterion, inp, target)\n```\n\nAnd it should return the average loss over a batch and the hidden state returned by a call to `RNN(inp, hidden)`. 
Recall that you can get this loss by computing it, as usual, and calling `loss.item()`.\n\n**If a GPU is available, you should move your data to that GPU device, here.**", "_____no_output_____" ] ], [ [ "def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden):\n \"\"\"\n Forward and backward propagation on the neural network\n :param rnn: The PyTorch Module that holds the neural network\n :param optimizer: The PyTorch optimizer for the neural network\n :param criterion: The PyTorch loss function\n :param inp: A batch of input to the neural network\n :param target: The target output for the batch of input\n :return: The loss and the latest hidden state Tensor\n \"\"\"\n \n # TODO: Implement Function\n \n # move data to GPU, if available\n \n # perform backpropagation and optimization\n\n # return the loss over a batch and the hidden state produced by our model\n return None, None\n\n# Note that these tests aren't completely extensive.\n# they are here to act as general checks on the expected outputs of your functions\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_forward_back_prop(RNN, forward_back_prop, train_on_gpu)", "_____no_output_____" ] ], [ [ "## Neural Network Training\n\nWith the structure of the network complete and data ready to be fed in the neural network, it's time to train it.\n\n### Train Loop\n\nThe training loop is implemented for you in the `train_decoder` function. This function will train the network over all the batches for the number of epochs given. The model progress will be shown every number of batches. This number is set with the `show_every_n_batches` parameter. You'll set this parameter along with other parameters in the next section.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n\ndef train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):\n batch_losses = []\n \n rnn.train()\n\n print(\"Training for %d epoch(s)...\" % n_epochs)\n for epoch_i in range(1, n_epochs + 1):\n \n # initialize hidden state\n hidden = rnn.init_hidden(batch_size)\n \n for batch_i, (inputs, labels) in enumerate(train_loader, 1):\n \n # make sure you iterate over completely full batches, only\n n_batches = len(train_loader.dataset)//batch_size\n if(batch_i > n_batches):\n break\n \n # forward, back prop\n loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden) \n # record loss\n batch_losses.append(loss)\n\n # printing loss stats\n if batch_i % show_every_n_batches == 0:\n print('Epoch: {:>4}/{:<4} Loss: {}\\n'.format(\n epoch_i, n_epochs, np.average(batch_losses)))\n batch_losses = []\n\n # returns a trained rnn\n return rnn", "_____no_output_____" ] ], [ [ "### Hyperparameters\n\nSet and train the neural network with the following parameters:\n- Set `sequence_length` to the length of a sequence.\n- Set `batch_size` to the batch size.\n- Set `num_epochs` to the number of epochs to train for.\n- Set `learning_rate` to the learning rate for an Adam optimizer.\n- Set `vocab_size` to the number of unique tokens in our vocabulary.\n- Set `output_size` to the desired size of the output.\n- Set `embedding_dim` to the embedding dimension; smaller than the vocab_size.\n- Set `hidden_dim` to the hidden dimension of your RNN.\n- Set `n_layers` to the number of layers/cells in your RNN.\n- Set `show_every_n_batches` to the number of batches at which the neural network should print progress.\n\nIf the network isn't getting the desired results, 
tweak these parameters and/or the layers in the `RNN` class.", "_____no_output_____" ] ], [ [ "# Data params\n# Sequence Length\nsequence_length = # of words in a sequence\n# Batch Size\nbatch_size = \n\n# data loader - do not change\ntrain_loader = batch_data(int_text, sequence_length, batch_size)", "_____no_output_____" ], [ "# Training parameters\n# Number of Epochs\nnum_epochs = \n# Learning Rate\nlearning_rate = \n\n# Model parameters\n# Vocab size\nvocab_size = \n# Output size\noutput_size = \n# Embedding Dimension\nembedding_dim = \n# Hidden Dimension\nhidden_dim = \n# Number of RNN Layers\nn_layers = \n\n# Show stats for every n number of batches\nshow_every_n_batches = 500", "_____no_output_____" ] ], [ [ "### Train\nIn the next cell, you'll train the neural network on the pre-processed data. If you have a hard time getting a good loss, you may consider changing your hyperparameters. In general, you may get better results with larger hidden and n_layer dimensions, but larger models take a longer time to train. \n> **You should aim for a loss less than 3.5.** \n\nYou should also experiment with different sequence lengths, which determine the size of the long range dependencies that a model can learn.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n\n# create model and move to gpu if available\nrnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)\nif train_on_gpu:\n rnn.cuda()\n\n# defining loss and optimization functions for training\noptimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)\ncriterion = nn.CrossEntropyLoss()\n\n# training the model\ntrained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)\n\n# saving the trained model\nhelper.save_model('./save/trained_rnn', trained_rnn)\nprint('Model Trained and Saved')", "_____no_output_____" ] ], [ [ "### Question: How did you decide on your model hyperparameters? \nFor example, did you try different sequence_lengths and find that one size made the model converge faster? What about your hidden_dim and n_layers; how did you decide on those?", "_____no_output_____" ], [ "**Answer:** (Write answer, here)", "_____no_output_____" ], [ "---\n# Checkpoint\n\nAfter running the above training cell, your model will be saved by name, `trained_rnn`, and if you save your notebook progress, **you can pause here and come back to this code at another time**. You can resume your progress by running the next cell, which will load in our word:id dictionaries _and_ load in your saved model by name!", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport torch\nimport helper\nimport problem_unittests as tests\n\n_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()\ntrained_rnn = helper.load_model('./save/trained_rnn')", "_____no_output_____" ] ], [ [ "## Generate TV Script\nWith the network trained and saved, you'll use it to generate a new, \"fake\" Seinfeld TV script in this section.\n\n### Generate Text\nTo generate the text, the network needs to start with a single word and repeat its predictions until it reaches a set length. You'll be using the `generate` function to do this. It takes a word id to start with, `prime_id`, and generates a set length of text, `predict_len`. 
Also note that it uses topk sampling to introduce some randomness in choosing the most likely next word, given an output set of word scores!", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nimport torch.nn.functional as F\n\ndef generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100):\n \"\"\"\n Generate text using the neural network\n :param decoder: The PyTorch Module that holds the trained neural network\n :param prime_id: The word id to start the first prediction\n :param int_to_vocab: Dict of word id keys to word values\n :param token_dict: Dict of puncuation tokens keys to puncuation values\n :param pad_value: The value used to pad a sequence\n :param predict_len: The length of text to generate\n :return: The generated text\n \"\"\"\n rnn.eval()\n \n # create a sequence (batch_size=1) with the prime_id\n current_seq = np.full((1, sequence_length), pad_value)\n current_seq[-1][-1] = prime_id\n predicted = [int_to_vocab[prime_id]]\n \n for _ in range(predict_len):\n if train_on_gpu:\n current_seq = torch.LongTensor(current_seq).cuda()\n else:\n current_seq = torch.LongTensor(current_seq)\n \n # initialize the hidden state\n hidden = rnn.init_hidden(current_seq.size(0))\n \n # get the output of the rnn\n output, _ = rnn(current_seq, hidden)\n \n # get the next word probabilities\n p = F.softmax(output, dim=1).data\n if(train_on_gpu):\n p = p.cpu() # move to cpu\n \n # use top_k sampling to get the index of the next word\n top_k = 5\n p, top_i = p.topk(top_k)\n top_i = top_i.numpy().squeeze()\n \n # select the likely next word index with some element of randomness\n p = p.numpy().squeeze()\n word_i = np.random.choice(top_i, p=p/p.sum())\n \n # retrieve that word from the dictionary\n word = int_to_vocab[word_i]\n predicted.append(word) \n \n if(train_on_gpu):\n current_seq = current_seq.cpu() # move to cpu\n # the generated word becomes the next \"current sequence\" and the cycle can continue\n if train_on_gpu:\n current_seq = current_seq.cpu()\n current_seq = np.roll(current_seq, -1, 1)\n current_seq[-1][-1] = word_i\n \n gen_sentences = ' '.join(predicted)\n \n # Replace punctuation tokens\n for key, token in token_dict.items():\n ending = ' ' if key in ['\\n', '(', '\"'] else ''\n gen_sentences = gen_sentences.replace(' ' + token.lower(), key)\n gen_sentences = gen_sentences.replace('\\n ', '\\n')\n gen_sentences = gen_sentences.replace('( ', '(')\n \n # return all the sentences\n return gen_sentences", "_____no_output_____" ] ], [ [ "### Generate a New Script\nIt's time to generate the text. Set `gen_length` to the length of TV script you want to generate and set `prime_word` to one of the following to start the prediction:\n- \"jerry\"\n- \"elaine\"\n- \"george\"\n- \"kramer\"\n\nYou can set the prime word to _any word_ in our dictionary, but it's best to start with a name for generating a TV script. 
(You can also start with any other names you find in the original text file!)", "_____no_output_____" ] ], [ [ "# run the cell multiple times to get different results!\ngen_length = 400 # modify the length to your preference\nprime_word = 'jerry' # name for starting the script\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\npad_word = helper.SPECIAL_WORDS['PADDING']\ngenerated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length)\nprint(generated_script)", "_____no_output_____" ] ], [ [ "#### Save your favorite scripts\n\nOnce you have a script that you like (or find interesting), save it to a text file!", "_____no_output_____" ] ], [ [ "# save script to a text file\nf = open(\"generated_script_1.txt\",\"w\")\nf.write(generated_script)\nf.close()", "_____no_output_____" ] ], [ [ "# The TV Script is Not Perfect\nIt's ok if the TV script doesn't make perfect sense. It should look like alternating lines of dialogue, here is one such example of a few generated lines.\n\n### Example generated script\n\n>jerry: what about me?\n>\n>jerry: i don't have to wait.\n>\n>kramer:(to the sales table)\n>\n>elaine:(to jerry) hey, look at this, i'm a good doctor.\n>\n>newman:(to elaine) you think i have no idea of this...\n>\n>elaine: oh, you better take the phone, and he was a little nervous.\n>\n>kramer:(to the phone) hey, hey, jerry, i don't want to be a little bit.(to kramer and jerry) you can't.\n>\n>jerry: oh, yeah. i don't even know, i know.\n>\n>jerry:(to the phone) oh, i know.\n>\n>kramer:(laughing) you know...(to jerry) you don't know.\n\nYou can see that there are multiple characters that say (somewhat) complete sentences, but it doesn't have to be perfect! It takes quite a while to get good results, and often, you'll have to use a smaller vocabulary (and discard uncommon words), or get more data. The Seinfeld dataset is about 3.4 MB, which is big enough for our purposes; for script generation you'll want more than 1 MB of text, generally. \n\n# Submitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_tv_script_generation.ipynb\" and save another copy as an HTML file by clicking \"File\" -> \"Download as..\"->\"html\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission. Once you download these files, compress them into one zip file for submission.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb363c6d6bdba5dfe4db11c4706c0495cd9bd903
7,611
ipynb
Jupyter Notebook
NN/toy_example.ipynb
ybdesire/machinelearning
0224746332e1085336e0b02e0ca3b11d74bd9a91
[ "MIT" ]
30
2017-02-28T13:52:58.000Z
2022-03-24T10:28:43.000Z
NN/toy_example.ipynb
ybdesire/machinelearning
0224746332e1085336e0b02e0ca3b11d74bd9a91
[ "MIT" ]
null
null
null
NN/toy_example.ipynb
ybdesire/machinelearning
0224746332e1085336e0b02e0ca3b11d74bd9a91
[ "MIT" ]
17
2017-03-03T12:38:04.000Z
2022-03-11T01:53:20.000Z
35.900943
189
0.514781
[ [ [ "import numpy as np\nimport pandas as pd\n\nimport lasagne\nfrom lasagne import layers\nfrom lasagne.updates import nesterov_momentum\nfrom nolearn.lasagne import NeuralNet\nfrom nolearn.lasagne import visualize\n\n\ndataset = pd.read_csv(\"toy_train.csv\")\ntarget = dataset[[0]].values.ravel()\ntrain = dataset.iloc[:,1:].values\ntest = pd.read_csv(\"toy_test.csv\").values\n\n# convert to array, specify data type, and reshape\ntarget = target.astype(np.uint8)\ntrain = np.array(train).reshape((-1, 1, 28, 28)).astype(np.uint8)\ntest = np.array(test).reshape((-1, 1, 28, 28)).astype(np.uint8)\n\n\nnet1 = NeuralNet(\n layers=[('input', layers.InputLayer),\n ('hidden', layers.DenseLayer),\n ('output', layers.DenseLayer),\n ],\n # layer parameters:\n input_shape=(None,1,28,28),\n hidden_num_units=1000, # number of units in 'hidden' layer\n output_nonlinearity=lasagne.nonlinearities.softmax,\n output_num_units=10, # 10 target values for the digits 0, 1, 2, ..., 9\n\n # optimization method:\n update=nesterov_momentum,\n update_learning_rate=0.0001,\n update_momentum=0.9,\n\n max_epochs=15,\n verbose=1,\n )\n\n# Train the network\nnet1.fit(train, target)\n", "# Neural Network with 795010 learnable parameters\n\n## Layer information\n\n # name size\n--- ------ -------\n 0 input 1x28x28\n 1 hidden 1000\n 2 output 10\n\n epoch train loss valid loss train/val valid acc dur\n------- ------------ ------------ ----------- ----------- ------\n 1 \u001b[36m108.92857\u001b[0m \u001b[32m84.75658\u001b[0m 1.28519 0.19231 31.94s\n 2 \u001b[36m64.84754\u001b[0m 95.53094 0.67881 0.15385 31.69s\n 3 \u001b[36m63.53240\u001b[0m \u001b[32m75.64333\u001b[0m 0.83989 0.42308 32.24s\n 4 \u001b[36m30.47790\u001b[0m \u001b[32m59.12283\u001b[0m 0.51550 0.42308 32.57s\n 5 \u001b[36m14.28162\u001b[0m \u001b[32m43.22949\u001b[0m 0.33037 0.50000 33.51s\n 6 \u001b[36m3.31916\u001b[0m \u001b[32m38.59443\u001b[0m 0.08600 0.53846 32.24s\n 7 \u001b[36m1.20370\u001b[0m \u001b[32m34.74778\u001b[0m 0.03464 0.53846 32.45s\n 8 \u001b[36m0.73275\u001b[0m \u001b[32m32.31742\u001b[0m 0.02267 0.50000 31.60s\n 9 \u001b[36m0.15696\u001b[0m \u001b[32m30.85520\u001b[0m 0.00509 0.57692 32.70s\n 10 \u001b[36m0.14614\u001b[0m \u001b[32m30.81158\u001b[0m 0.00474 0.57692 32.53s\n 11 \u001b[36m0.02039\u001b[0m \u001b[32m30.27885\u001b[0m 0.00067 0.57692 32.20s\n 12 0.04545 31.00821 0.00147 0.57692 32.09s\n 13 \u001b[36m0.00000\u001b[0m 31.76320 0.00000 0.53846 31.64s\n 14 \u001b[36m0.00000\u001b[0m 32.96451 0.00000 0.61538 31.64s\n 15 0.00000 34.20787 0.00000 0.61538 32.22s\n" ], [ "import sklearn\nprint(sklearn.__version__)", "0.17\n" ], [ "test = pd.read_csv(\"toy_test.csv\").values\ntest = np.array(test).reshape((-1, 1, 28, 28)).astype(np.uint8)\n\np = net1.predict(test)", "_____no_output_____" ], [ "p", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb364d3838f3ff74200dad686c598054d626ee48
52,181
ipynb
Jupyter Notebook
notebooks/NER.ipynb
vantuan5644/ReceiptOCR
5e167b55a73c935622ecdfb05f502855d8d44004
[ "MIT" ]
null
null
null
notebooks/NER.ipynb
vantuan5644/ReceiptOCR
5e167b55a73c935622ecdfb05f502855d8d44004
[ "MIT" ]
null
null
null
notebooks/NER.ipynb
vantuan5644/ReceiptOCR
5e167b55a73c935622ecdfb05f502855d8d44004
[ "MIT" ]
null
null
null
44.409362
1,658
0.553094
[ [ [ "# Custom NER", "_____no_output_____" ] ], [ [ "# !pip install spacy", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "import plac\nimport random\nimport warnings\nfrom pathlib import Path\nimport spacy\nfrom spacy.util import minibatch, compounding\n", "_____no_output_____" ], [ "data = pd.read_csv('./label_text_product_attrs.csv', dtype=str)", "_____no_output_____" ], [ "data.rename(columns={'sale_price': 'ppu', 'final_price': 'total_price'}, inplace=True)", "_____no_output_____" ], [ "row = data.iloc[0]\nrow", "_____no_output_____" ], [ "print(row['text'])", "04902430779746 NX DOWNY doahoa ngotngao\\nVAT10% 1 88,000.00 88,000.00 \\nGia goc: 103,000.00 \n" ], [ "attributes = data.columns.difference(['image_name', 'text'])", "_____no_output_____" ], [ "attributes", "_____no_output_____" ], [ "a = 'ABC12345ABC12AAABC'\ndef find_2nd(string, substring):\n return string.find(substring, string.find(substring) + 1)\nfind_2nd(a, '1')\n\nsub = 'ABC'\nimport re\n[i for i in range(len(a)) if a.startswith(sub, i)]\n", "_____no_output_____" ], [ "a[14:]", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "def get_row_entities(row, debug=False):\n row_entities = []\n cache = []\n text = row['text']\n for attr in ['sku', 'product_name', 'quantity', 'ppu', 'total_price', 'discounted_part', 'original_price']:\n if debug: print(attr)\n if not pd.isnull(row[attr]):\n value = str(row[attr])\n \n value = value.strip()\n \n if not value in text:\n print(attr, value, 'not in text')\n \n else:\n try:\n indices = [i for i in range(len(text)) if text.startswith(value, i)]\n if debug: print(f'org indices = {indices}')\n \n indices_ = indices.copy()\n for item in indices_:\n if any(item in cache_ for cache_ in cache):\n indices.remove(item)\n if debug: print(f'indices = {indices}')\n start_index = indices[0] \n \n end_index = start_index + len(value)\n cache.append(range(start_index, end_index))\n if debug: print(f'cache = {cache}')\n if start_index < 0:\n print(start_index)\n if end_index < 0:\n print(end_index)\n row_entities.append((start_index, end_index, attr))\n except Exception as e:\n print(row)\n print(text)\n print('Error', row_entities, value)\n raise e\n \n for i, item in enumerate(row_entities):\n if item[-1] == 'product_name':\n product_name_range = item[0:-1]\n product_name_id = i\n\n if debug: print('Product name ranges', product_name_range)\n\n splits = row.text[product_name_range[0]: product_name_range[1]].split(' ')\n\n ranges = []\n for item in splits:\n ranges.append((row.text.find(item), row.text.find(item) + len(item), 'product_name'))\n\n row_entities.pop(product_name_id)\n row_entities += ranges\n \n def is_overlapped(entities):\n ranges = [range(item[0], item[1]) for item in entities]\n if debug: print('Total ranges', ranges)\n return len(reduce(lambda x, y: set(x).intersection(y), ranges)) > 0\n \n from functools import reduce\n \n assert not is_overlapped(row_entities)\n return row_entities", "_____no_output_____" ], [ "row = data.iloc[56]", "_____no_output_____" ], [ "entities = get_row_entities(row, debug=True)\nentities", "sku\nsku 4.90243E+12 not in text\nproduct_name\norg indices = [15]\nindices = [15]\ncache = [range(15, 39)]\nquantity\norg indices = [9, 44, 50, 55, 69, 93]\nindices = [9, 44, 50, 55, 69, 93]\ncache = [range(15, 39), range(9, 10)]\nppu\norg indices = [55, 69]\nindices = [55, 69]\ncache = [range(15, 39), range(9, 10), range(55, 65)]\ntotal_price\norg indices = [55, 69]\nindices = [69]\ncache = [range(15, 
39), range(9, 10), range(55, 65), range(69, 79)]\ndiscounted_part\noriginal_price\norg indices = [93]\nindices = [93]\ncache = [range(15, 39), range(9, 10), range(55, 65), range(69, 79), range(93, 103)]\nProduct name ranges (15, 39)\nTotal ranges [range(9, 10), range(55, 65), range(69, 79), range(93, 103), range(15, 17), range(18, 23), range(24, 30), range(31, 39)]\n" ], [ "print(row)\nprint()\nfor i, j, name in entities:\n print(f\"{name}: {row['text'][i:j]}\")", "image_name img_16_padded_1.jpg\ntext 04902430418287 NX DOWNY doahoa ngotngao\\nVAT10...\nsku 4.90243E+12\nproduct_name NX DOWNY doahoa ngotngao \nquantity 1\nppu 129,000.00\ntotal_price 129,000.00\ndiscounted_part NaN\noriginal_price 159,000.00\nName: 56, dtype: object\n\nquantity: 1\nppu: 129,000.00\ntotal_price: 129,000.00\noriginal_price: 159,000.00\nproduct_name: NX\nproduct_name: DOWNY\nproduct_name: doahoa\nproduct_name: ngotngao\n" ], [ "from sklearn.model_selection import train_test_split\n\ntrain, test = train_test_split(data, test_size=0.2, random_state=42)\n", "_____no_output_____" ], [ "TRAIN_DATA = []\nfor index, row in train.iterrows():\n TRAIN_DATA.append((row['text'], {\"entities\": get_row_entities(row)}))", "sku 4.90243E+12 not in text\nsku 8.9385E+12 not in text\nsku 8.9351E+12 not in text\nsku 8.93471E+12 not in text\nsku 8.93851E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93605E+12 not in text\nsku 8.9361E+12 not in text\nsku 8.93473E+12 not in text\nsku 8.93601E+12 not in text\nsku 8.93487E+12 not in text\nsku 8.93601E+12 not in text\nsku 4.90243E+12 not in text\nsku 2.00013E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.85113E+12 not in text\nsku 8.85205E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.69122E+12 not in text\nsku 8.9348E+12 not in text\nsku 8.93482E+12 not in text\nsku 8.93602E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93511E+12 not in text\nsku 8.93604E+12 not in text\nsku 8.93507E+12 not in text\nsku 8.93468E+12 not in text\nsku 8.93487E+12 not in text\nsku 8.93522E+12 not in text\nsku 8.93607E+12 not in text\nsku 8.93501E+12 not in text\nsku 8.93603E+12 not in text\nsku 8.9385E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.85113E+12 not in text\nsku 8.93506E+12 not in text\nsku 8.93466E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.9385E+12 not in text\nsku 9.41501E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93467E+12 not in text\nsku 8.93506E+12 not in text\nsku 8.93458E+12 not in text\nsku 8.93524E+12 not in text\nsku 8.93476E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93604E+12 not in text\nsku 8.93467E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93471E+12 not in text\nsku 8.9385E+12 not in text\nsku 8.93467E+12 not in text\nsku 8.9361E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.9385E+12 not in text\nsku 8.936E+12 not in text\nsku 8.93467E+12 not in text\nsku 8.9385E+12 not in text\nsku 8.9385E+12 not in text\nsku 8.9348E+12 not in text\nsku 8.93467E+12 not in text\nsku 8.93459E+12 not in text\nsku 4.90243E+12 not in text\nsku 5.99952E+12 not in text\nsku 8.9385E+12 not in text\nsku 8.93601E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93607E+12 not in text\nsku 8.93482E+12 not in text\nsku 8.93607E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.80135E+12 not in text\nsku 8.93456E+12 not in text\nsku 8.93521E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.85205E+12 not in text\nsku 8.93468E+12 not in text\nsku 
4.90243E+12 not in text\nsku 8.93487E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93851E+12 not in text\nsku 8.93467E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93487E+12 not in text\nsku 8.93505E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93505E+12 not in text\nsku 8.93506E+12 not in text\nsku 2.00013E+12 not in text\nsku 8.85182E+12 not in text\nsku 8.9362E+12 not in text\nsku 8.93473E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.9348E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93487E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93471E+12 not in text\nsku 8.93506E+12 not in text\nsku 8.93515E+12 not in text\nsku 8.85009E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93601E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.9348E+12 not in text\nsku 8.80257E+12 not in text\nsku 8.93476E+12 not in text\nsku 8.93613E+12 not in text\nsku 8.93487E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93603E+12 not in text\nsku 8.9385E+12 not in text\nsku 8.88834E+12 not in text\nsku 8.93457E+12 not in text\nsku 8.85001E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93604E+12 not in text\nsku 8.93459E+12 not in text\nsku 8.93489E+12 not in text\nsku 8.85205E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93467E+12 not in text\nsku 8.93604E+12 not in text\nsku 8.93605E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93603E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.85958E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.69128E+12 not in text\nsku 8.93467E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.9348E+12 not in text\nsku 8.93457E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93524E+12 not in text\nsku 8.93513E+12 not in text\nsku 8.93471E+12 not in text\nsku 8.93467E+12 not in text\nsku 8.93604E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93605E+12 not in text\nsku 8.93487E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93605E+12 not in text\nsku 8.93602E+12 not in text\nsku 8.93612E+12 not in text\nsku 8.93603E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.99889E+12 not in text\nsku 8.85182E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93525E+12 not in text\nsku 8.93459E+12 not in text\nsku 9.55609E+12 not in text\nsku 8.93604E+12 not in text\nsku 8.93487E+12 not in text\nsku 8.93501E+12 not in text\nsku 8.93458E+12 not in text\nsku 8.93468E+12 not in text\nsku 8.93514E+12 not in text\nsku 8.93468E+12 not in text\nsku 8.93476E+12 not in text\nsku 8.93457E+12 not in text\nsku 8.85182E+12 not in text\nsku 8.93456E+12 not in text\nsku 8.93467E+12 not in text\nsku 8.93507E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.936E+12 not in text\nsku 8.93468E+12 not in text\nsku 2.0035E+12 not in text\nsku 8.88834E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.9385E+12 not in text\nsku 8.93457E+12 not in text\nsku 8.85001E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93487E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93496E+12 not in text\nsku 8.9385E+12 not in text\nsku 8.93487E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93499E+12 not in text\nsku 8.93457E+12 not in text\nsku 2.00013E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93613E+12 not in 
text\nsku 8.93461E+12 not in text\nsku 8.93487E+12 not in text\nsku 8.93464E+12 not in text\nsku 8.93605E+12 not in text\nsku 8.93608E+12 not in text\nsku 8.93851E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93522E+12 not in text\nsku 8.80906E+12 not in text\nsku 8.93604E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93487E+12 not in text\nsku 8.93603E+12 not in text\nsku 8.93502E+12 not in text\nsku 8.93531E+12 not in text\nsku 8.93614E+12 not in text\nsku 8.996E+12 not in text\nsku 8.93502E+12 not in text\nsku 8.93607E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93852E+12 not in text\nsku 8.93468E+12 not in text\nsku 8.9385E+12 not in text\nsku 8.93502E+12 not in text\nsku 8.93467E+12 not in text\nsku 4.90243E+12 not in text\nsku 5.09986E+12 not in text\nsku 8.93499E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93468E+12 not in text\nsku 8.9348E+12 not in text\nsku 8.9348E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93604E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.88834E+12 not in text\nsku 8.93505E+12 not in text\n" ], [ "def trim_entity_spans(data: list) -> list:\n\n \"\"\"Removes leading and trailing white spaces from entity spans.\n\n Args:\n data (list): The data to be cleaned in spaCy JSON format.\n|\n Returns:\n list: The cleaned data.\n \"\"\"\n invalid_span_tokens = re.compile(r'\\s')\n\n cleaned_data = []\n for text, annotations in data:\n entities = annotations['entities']\n valid_entities = []\n for start, end, label in entities:\n valid_start = start\n valid_end = end\n # if there's preceding spaces, move the start position to nearest character\n while valid_start < len(text) and invalid_span_tokens.match(\n text[valid_start]):\n valid_start += 1\n while valid_end > 1 and invalid_span_tokens.match(\n text[valid_end - 1]):\n valid_end -= 1\n valid_entities.append([valid_start, valid_end, label])\n cleaned_data.append([text, {'entities': valid_entities}])\n return cleaned_data\n", "_____no_output_____" ], [ "trim_entity_spans(TRAIN_DATA[:1])", "_____no_output_____" ], [ "def train_spacy(TRAIN_DATA):\n nlp = spacy.blank('en') # create blank Language class\n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, last=True)\n\n\n # add labels\n for _, annotations in TRAIN_DATA:\n for ent in annotations.get('entities'):\n ner.add_label(ent[2])\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n with nlp.disable_pipes(*other_pipes): # only train NER\n optimizer = nlp.begin_training()\n for itn in range(10):\n print(\"Statring iteration \" + str(itn))\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in TRAIN_DATA:\n nlp.update(\n [text], # batch of texts\n [annotations], # batch of annotations\n drop=0.2, # dropout - make it harder to memorise data\n sgd=optimizer, # callable to update weights\n losses=losses)\n print(losses)\n return nlp\n", "_____no_output_____" ], [ "def main(model=None, output_dir='.', n_iter=100):\n \"\"\"Load the model, set up the pipeline and train the entity recognizer.\"\"\"\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank(\"en\") # create blank Language class\n print(\"Created blank 'en' model\")\n\n # create the built-in pipeline components and add them to 
the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if \"ner\" not in nlp.pipe_names:\n ner = nlp.create_pipe(\"ner\")\n nlp.add_pipe(ner, last=True)\n # otherwise, get it so we can add labels\n else:\n ner = nlp.get_pipe(\"ner\")\n\n # add labels\n for _, annotations in TRAIN_DATA:\n for ent in annotations.get(\"entities\"):\n ner.add_label(ent[2])\n\n # get names of other pipes to disable them during training\n pipe_exceptions = [\"ner\", \"trf_wordpiecer\", \"trf_tok2vec\"]\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n # only train NER\n with nlp.disable_pipes(*other_pipes), warnings.catch_warnings():\n try:\n # show warnings for misaligned entity spans once\n warnings.filterwarnings(\"once\", category=UserWarning, module='spacy')\n\n # reset and initialize the weights randomly – but only if we're\n # training a new model\n if model is None:\n nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n # batch up the examples using spaCy's minibatch\n batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(\n texts, # batch of texts\n annotations, # batch of annotations\n drop=0.5, # dropout - make it harder to memorise data\n losses=losses,\n )\n print(\"Losses\", losses)\n except Exception as e:\n print(texts, annotations)\n raise e\n# # test the trained model\n# for text, _ in TRAIN_DATA:\n# doc = nlp(text)\n# print(\"Entities\", [(ent.text, ent.label_) for ent in doc.ents])\n# print(\"Tokens\", [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n\n # save model to output directory\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n", "_____no_output_____" ], [ "nlp = spacy.load('en_core_web_sm')\n\ndocs = []\nfor text, annot in TRAIN_DATA:\n\n doc = nlp(text)\n\n tags = spacy.gold.biluo_tags_from_offsets(doc, annot['entities'])\n \n print(np.array(doc))\n print(np.array(tags))\n break", "_____no_output_____" ], [ "print(TRAIN_DATA[0][0][46:47])\nTRAIN_DATA[0]", "0\n" ], [ "# nlp = train_spacy(trim_entity_spans(TRAIN_DATA))", "_____no_output_____" ], [ "for i, row in enumerate(TRAIN_DATA):\n if row[0].startswith('08936034200116'):\n print(i)", "239\n" ], [ "main()", "Created blank 'en' model\n" ], [ "TEST_DATA = []\nfor index, row in test.iterrows():\n TEST_DATA.append((row['text'], {\"entities\": get_row_entities(row)}))", "sku 4.90243E+12 not in text\nsku 8.93467E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93604E+12 not in text\nsku 8.9351E+12 not in text\nsku 8.93607E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93614E+12 not in text\nsku 8.9348E+12 not in text\nsku 8.93601E+12 not in text\nsku 8.93466E+12 not in text\nsku 8.93851E+12 not in text\nsku 8.93604E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93511E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93511E+12 not in text\nsku 8.9385E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93604E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93603E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93487E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93458E+12 not in text\nsku 8.93487E+12 not in text\nsku 8.93505E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93606E+12 not in text\nsku 8.9348E+12 not in text\nsku 4.90243E+12 not in text\nsku 
8.93601E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93502E+12 not in text\nsku 8.93504E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.936E+12 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93487E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93604E+12 not in text\nsku 8.93504E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93501E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93487E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93504E+12 not in text\nsku 8.93487E+12 not in text\nsku 8.93603E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93487E+12 not in text\nsku 4.90243E+12 not in text\nsku 6.93144E+12 not in text\nsku 1.93335E+13 not in text\nsku 4.90243E+12 not in text\nsku 4.90243E+12 not in text\nsku 8.93511E+12 not in text\nsku 8.93501E+12 not in text\nsku 8.93471E+12 not in text\n" ], [ "text = test.iloc[0].text", "_____no_output_____" ], [ "text", "_____no_output_____" ], [ "output_dir = '../pretrained_models/NER'\n\nprint(\"Loading from\", output_dir)\nnlp2 = spacy.load(output_dir)\n# for text, _ in TEST_DATA:\ndoc = nlp2(text)\nprint(\"Entities\", [(ent.text, ent.label_) for ent in doc.ents])\nprint(\"Tokens\", [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n", "Loading from ../pretrained_models/NER\nEntities [('04902430418287', 'sku'), ('NX', 'product_name'), ('DOWNY', 'product_name'), ('doahoa', 'product_name'), ('2', 'quantity'), ('129,900.00', 'ppu'), ('259,800.00', 'total_price'), ('159,000.00', 'original_price')]\nTokens [('04902430418287', 'sku', 3), ('NX', 'product_name', 3), ('DOWNY', 'product_name', 3), ('doahoa', 'product_name', 3), ('ngotngao\\\\nVAT10', '', 2), ('%', '', 2), (' ', '', 2), ('2', 'quantity', 3), (' ', '', 2), ('129,900.00', 'ppu', 3), (' ', '', 2), ('259,800.00', 'total_price', 3), ('\\\\nGia', '', 2), ('goc', '', 2), (':', '', 2), (' ', '', 2), ('159,000.00', 'original_price', 3)]\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3653510401300c9a67bf8b587b4288ab94de11
24,893
ipynb
Jupyter Notebook
docs/font-identifier.ipynb
Unbinilium/font-identifier
20717d956f85a892d3580d0974aafd755db11b7e
[ "MIT" ]
2
2020-09-26T00:58:09.000Z
2021-09-26T17:44:41.000Z
docs/font-identifier.ipynb
Unbinilium/Fontid
e59ccee9629589afe93def0f1b1abc00c8cdac85
[ "MIT" ]
null
null
null
docs/font-identifier.ipynb
Unbinilium/Fontid
e59ccee9629589afe93def0f1b1abc00c8cdac85
[ "MIT" ]
null
null
null
28.711649
435
0.532439
[ [ [ "### Requirement\n\n```\naliyun-python-sdk-core==2.13.25\naliyun-python-sdk-ocr==1.0.8\nFlask==1.1.2\nimutils==0.5.3\njson5==0.9.5\nKeras==2.4.3\nKeras-Preprocessing==1.1.2\nmatplotlib==3.3.0\nnumpy==1.18.5\nopencv-python==4.4.0.40\noss2==2.12.1\nPillow==7.0.0\nsklearn==0.0\ntensorflow==2.3.0\ntrdg==1.6.0\n```", "_____no_output_____" ], [ "### Import Aliyun python SDK modules\n\n- `aliyun-python-sdk-core`\n- `aliyun-python-sdk-ocr`\n- `oss2`", "_____no_output_____" ] ], [ [ "#Aliyun SDK Core\nfrom aliyunsdkcore.client import AcsClient\nfrom aliyunsdkcore.acs_exception.exceptions import ClientException\nfrom aliyunsdkcore.acs_exception.exceptions import ServerException\n\n#Aliyun SDK OSS\nimport oss2\n\n#Aliyun SDK OCR\nfrom aliyunsdkocr.request.v20191230.RecognizeCharacterRequest import RecognizeCharacterRequest", "_____no_output_____" ] ], [ [ "### Configure Aliyun python SDK", "_____no_output_____" ] ], [ [ "#Access_key\naccess_key_id = ''\naccess_key_secret = ''\n\n#OSS\nendpoint = ''\nbucket_name = ''\n\nauth = oss2.Auth(access_key_id, access_key_secret)\nbucket = oss2.Bucket(auth, endpoint, bucket_name)\n\n#OCR\nlocation = ''\n\nclient = AcsClient(access_key_id, access_key_secret, location)", "_____no_output_____" ] ], [ [ "### Instruct OCR request body, set return format to `json`", "_____no_output_____" ] ], [ [ "request = RecognizeCharacterRequest()\nrequest.set_accept_format('json')", "_____no_output_____" ] ], [ [ "### Upload local image with SHA1 hashed name to OSS\n\n- `image_path` is pointed to the local image\n- image format should be `.png`\n- image size should less than 3MB\n", "_____no_output_____" ] ], [ [ "import os\nfrom hashlib import sha1\n\nimage_path = ''\n\n#Upload with SHA1 hashed name\nfilename, file_extension = os.path.splitext(image_path)\nkey = sha1(open(image_path, 'rb').read()).hexdigest() + file_extension\nbucket.put_object_from_file(key, image_path)\n\nimport json\n\n#Get image info from OSS\ninfo = bucket.get_object(key, process = 'image/info')\ninfo_content = info.read()\ndecoded_info = json.loads(oss2.to_unicode(info_content))\n\nprint('Image Info ->')\nprint(json.dumps(decoded_info, indent = 4, sort_keys = True))\n\n#Struct image URL\nimage_url = 'https://' + bucket_name + '.' 
+ endpoint.replace(\"https://\",\"\") + '/' + key\n\nprint('Image URL -> ' + image_url)\n\n#Set OCR image_url\nrequest.set_ImageURL(image_url)", "_____no_output_____" ] ], [ [ "### Send request and show OCR result\n\n- `MinHeight` is set to $\\frac{1}{20}$ of the image width\n- `OutputProbability` is set to `true`", "_____no_output_____" ] ], [ [ "#Pre-config request\nmin_height = int(int(decoded_info['ImageHeight']['value']) / 20)\nrequest.set_MinHeight(int(min_height))\nrequest.set_OutputProbability(True)\n\n#Send request to OCR server and get response\nresponse = client.do_action_with_exception(request)\n\n#Delete OSS image\nbucket.delete_object(key)\n\nimport json\n\n#Parse json response\nparsed = json.loads(response)\n\nprint('Response ->')\nprint(json.dumps(parsed, indent = 4, sort_keys = True))", "_____no_output_____" ] ], [ [ "### Parsed all `TextRectangle` and calculate the distance between image center and rect center", "_____no_output_____" ] ], [ [ "distances = []\nobjects = parsed['Data']['Results']\n\n#Cal image center O(o_x0, o_y0)\no_x0, o_y0 = int(decoded_info['ImageWidth']['value']) / 2.0, int(decoded_info['ImageHeight']['value']) / 2.0\n\nimport math\n\nfor object in objects:\n \n #Cal TextRectangle angle A, start point A(x0, y0) and endpoint B(x1, y1)\n A = object['TextRectangles']['Angle'] / 180.0\n x0, y0 = object['TextRectangles']['Left'], object['TextRectangles']['Top']\n x1, y1 = x0 + object['TextRectangles']['Width'], y0 + object['TextRectangles']['Height']\n \n #Cal vector AB = (v_x0, v_y0)\n v_x0, v_y0 = x1 - x0, y1 - y0\n \n #Cal angle A rotated and 1/2 lenthed vector AB' = (v_x1, v_y1)\n v_x1, v_y1 = (v_x0 * math.cos(A) - v_y0 * math.sin(A)) / 2.0, (v_y0 * math.cos(A) + v_x0 * math.sin(A)) / 2.0\n \n #Cal TextRectangle center point B'(x2, y2)\n x2, y2 = x0 + v_x1, y0 + v_y1\n \n print('TextRectangleCtr -> ', (x2, y2))\n \n #Cal distance between point B and O\n d = math.pow(x2 - o_x0, 2) + math.pow(y2 - o_y0, 2)\n distances.append(d)", "_____no_output_____" ] ], [ [ "### Find the nearest `TextRectangle` index to the image center", "_____no_output_____" ] ], [ [ "index_min = distances.index(min(distances))\n\nprint('Min_Index -> ', index_min)", "_____no_output_____" ] ], [ [ "### Draw all `TextRectangle`\n\n- ROI is **green** and others is **red**", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\nfrom matplotlib import patches as patches\n\n%matplotlib inline\n\nimg = plt.imread(image_path)\n\nfig, ax = plt.subplots(1)\nax.imshow(img)\n\nindex = 0\n\nfor object in objects:\n if (index == index_min):\n c = 'g'\n else:\n c = 'r'\n \n index += 1\n \n ret = patches.Rectangle(\n (object['TextRectangles']['Left'], object['TextRectangles']['Top']),\n object['TextRectangles']['Width'],\n object['TextRectangles']['Height'],\n object['TextRectangles']['Angle'] / 180.0,\n linewidth = 2,\n edgecolor = c,\n facecolor = 'none'\n )\n \n ax.add_patch(ret)\n\nplt.show()", "_____no_output_____" ] ], [ [ "### ROI", "_____no_output_____" ] ], [ [ "import PIL\nfrom matplotlib import pyplot as plt\n\nA = - objects[index_min]['TextRectangles']['Angle'] / 180.0\n\nroi = PIL.Image.open(image_path)\nroi = roi.rotate(A)\n\ndef rotate(x, y, o_x, o_y, theta):\n x_r = math.cos(theta) * (x - o_x) - math.sin(theta) * (y - o_y) + o_x\n y_r = math.sin(theta) * (x - o_x) + math.cos(theta) * (y - o_y) + o_y\n return [x_r, y_r]\n\n#Cal start point A(x0, y0)\nx0, y0 = objects[index_min]['TextRectangles']['Left'], objects[index_min]['TextRectangles']['Top']\n\n#Cal angle A 
rotated A'(x1, y1)\nx1, y1 = rotate(x0, y0, o_x0, o_y0, A)\n\nroi = roi.crop((x1, y1, (x1 + objects[index_min]['TextRectangles']['Width']), (y1 + objects[index_min]['TextRectangles']['Height'])))\n\nfig, ax = plt.subplots(1)\nax.imshow(roi)\n\nplt.show()", "_____no_output_____" ] ], [ [ "### Load image function for DeepFont\n\n- color to gray\n- resize to (105, 105)", "_____no_output_____" ] ], [ [ "import PIL\nimport numpy as np\n\ndef pil_image(img_path):\n pil_img = PIL.Image.open(img_path).convert('L')\n pil_img = pil_img.resize((105, 105))\n return pil_img", "_____no_output_____" ] ], [ [ "### Preprocessing function\n\n- Noise a small Gaussian noise with 0 mean and standard deviation 3 is added to input.\n- Blur a random Gaussian blur with standard deviation from 2.5 to 3.5 is added to input.\n- Perspective Rotation a randomly-parameterized affine transformation is added to input.\n- Shading the input background is filled with a gradient in illumination.", "_____no_output_____" ] ], [ [ "import PIL\nimport cv2\nimport numpy as np\n\ndef noise_image(img):\n img_array = np.asarray(img)\n mean = 0.0\n std = 3\n noisy_img = img_array + np.random.normal(mean, std, img_array.shape)\n noisy_img_clipped = np.clip(noisy_img, 0, 255)\n noise_img = PIL.Image.fromarray(np.uint8(noisy_img_clipped))\n noise_img = noise_img.resize((105, 105))\n return noise_img\n\ndef blur_image(img):\n blur_img = img.filter(PIL.ImageFilter.GaussianBlur(radius = 3))\n blur_img = blur_img.resize((105, 105))\n return blur_img\n\ndef affine_rotation(img):\n rows, columns = img.shape\n point1 = np.float32([[10, 10], [30, 10], [10, 30]])\n point2 = np.float32([[20, 15], [40, 10], [20, 40]])\n anchor = cv2.getAffineTransform(point1, point2)\n output = cv2.warpAffine(img, anchor, (columns, rows))\n affine_img = PIL.Image.fromarray(np.uint8(output))\n affine_img = affine_img.resize((105, 105))\n return affine_img\n\ndef gradient_fill(img):\n output = cv2.Laplacian(img, cv2.CV_64F)\n laplacian_img = PIL.Image.fromarray(np.uint8(output))\n laplacian_img = laplacian_img.resize((105, 105))\n return laplacian_img", "_____no_output_____" ] ], [ [ "### Generate Datasets\n\n- `ttf_path` is a folder contains all the font file with correct font name and `.ttf` extension\n- `data_path` is a folder stores or contains generated datasets\n\nUses `TextRecognitionDataGenerator`", "_____no_output_____" ] ], [ [ "import os\n\nttf_path = ''\ndata_path = ''\n\nfor file in os.listdir(ttf_path):\n if file.endswith('.ttf'):\n path = os.path.join(ttf_path, file)\n name, ext = os.path.splitext(os.path.basename(path))\n out_path = data_path + '/' + name\n command = 'trdg -l en -c 30 -rs -let -num -r --length 1 -b 1 -e .png -fi -f 105 -ft ' + path + ' --output_dir ' + out_path \n os.system(command)", "_____no_output_____" ] ], [ [ "### Import Datasets\n\n- `label_path` should be defined", "_____no_output_____" ] ], [ [ "import os\nimport json\nfrom imutils import paths\nfrom random import seed, shuffle\n\nlabel_path = ''\n\n#Random image path from data_path\nimage_paths = sorted(list(paths.list_images(data_path)))\nseed(10)\nshuffle(image_paths)\n\n#Use folder name in data_path as font name\nfont_names = []\n\nfor f in os.listdir(data_path):\n if not f.startswith('.'):\n font_names.append(f)\n \nfont_names.sort()\n\nwith open(label_path, 'w') as outfile:\n json.dump(font_names, outfile)\n\nprint('Font Names -> ', font_names)", "_____no_output_____" ] ], [ [ "### Labeling font by the index of font name in `font_names`", "_____no_output_____" ] ], [ [ 
"def conv_label(label):\n return font_names.index(label)", "_____no_output_____" ] ], [ [ "### Preprocessing Datasets", "_____no_output_____" ] ], [ [ "import os\nimport itertools\nimport numpy as np\nfrom keras.preprocessing.image import img_to_array\n\ndata = []\nlabels = []\nauguments = [\"blur\", \"noise\", \"affine\", \"gradient\"]\n\nfor path in image_paths:\n \n #Labeling images\n label = path.split(os.path.sep)[-2]\n \n if not label.startswith('.'):\n label = conv_label(label)\n else:\n continue\n \n pil_img = pil_image(path)\n org_img = img_to_array(pil_img)\n \n data.append(org_img)\n labels.append(label)\n \n #Random auguments combinations\n for i in range(0, len(auguments)):\n for augument in list(itertools.combinations(auguments, i + 1)):\n \n temp_img = pil_img\n combinations = list(augument)\n \n for method in combinations:\n if method == 'noise':\n temp_img = noise_image(temp_img)\n \n elif method == 'blur':\n temp_img = blur_image(temp_img)\n \n elif method == 'affine':\n open_cv_affine = np.array(pil_img)\n temp_img = affine_rotation(open_cv_affine)\n\n elif method == 'gradient':\n open_cv_gradient = np.array(pil_img)\n temp_img = gradient_fill(open_cv_gradient)\n \n temp_img = img_to_array(temp_img)\n \n data.append(temp_img)\n labels.append(label)", "_____no_output_____" ] ], [ [ "### Partition Datasets and transform\n\n- $\\frac{3}{4}$ for training\n- $\\frac{1}{4}$ for testing", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import to_categorical\n\n#Partition\ndata = np.asarray(data, dtype = \"float\") / 255.0\nlabels = np.array(labels)\n\n(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size = 0.25, random_state = 10)\n\n#Converting labels from integers to vectors\ntrainY = to_categorical(trainY, num_classes = len(font_names))\ntestY = to_categorical(testY, num_classes = len(font_names))", "_____no_output_____" ] ], [ [ "### Additional Datasets process\n\n- **Variable Character Spacing** when rendering each synthetic image, set the character spacing (by pixel) to be a Gaussian random variable of mean 10 and standard deviation 40, bounded by [0, 50].\n- **Variable Aspect Ratio** Before cropping each image into a input patch, the image, with heigh fixed, is squeezed in width by a random ratio, drawn from a uniform distribution between $\\frac{5}{6}$ and $\\frac{7}{6}$.", "_____no_output_____" ] ], [ [ "from keras.preprocessing.image import ImageDataGenerator\n\naugmented_images = ImageDataGenerator(\n rotation_range = 30,\n width_shift_range = 0.1,\n height_shift_range = 0.1,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True\n)", "_____no_output_____" ] ], [ [ "### Re-arrange Datasets channels", "_____no_output_____" ] ], [ [ "from keras import backend as K\n\nK.set_image_data_format('channels_last')", "_____no_output_____" ] ], [ [ "### Create model\n\n- **Unsupervised cross-domain sub-network ${C_u}$**, which consists of the first *K* layers of *CNN*. It accounts for extracting low-level visual features shared by both syn- thetic and real-world data domains. ${C_u}$ will be trained in a unsupervised way, using unlabeled data from both domains. It constitutes the crucial step that further minimizes the low-level feature gap, beyond the previous data augmentation efforts.\n\n- **Supervised domain-specific sub-network ${C_s}$**, which consists of the remaining *N − K* layers. 
It accounts for learning higher-level discriminative features for classi- fication, based on the shared features from ${C_u}$. ${C_s}$ will be trained in a supervised way, using labeled data from the synthetic domain only.", "_____no_output_____" ] ], [ [ "from keras.models import Sequential\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D , UpSampling2D ,Conv2DTranspose\n\ndef create_model():\n model = Sequential()\n\n #Cu Layers \n model.add(Conv2D(64, kernel_size = (48, 48), activation = 'relu', input_shape = (105, 105, 1)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size = (2, 2)))\n\n model.add(Conv2D(128, kernel_size = (24, 24), activation = 'relu'))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size = (2, 2)))\n \n \n model.add(Conv2DTranspose(128, (24, 24), strides = (2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'uniform'))\n model.add(UpSampling2D(size = (2, 2)))\n\n model.add(Conv2DTranspose(64, (12, 12), strides = (2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'uniform'))\n model.add(UpSampling2D(size = (2, 2)))\n\n #Cs Layers\n model.add(Conv2D(256, kernel_size=(12, 12), activation = 'relu'))\n model.add(Conv2D(256, kernel_size=(12, 12), activation = 'relu'))\n model.add(Conv2D(256, kernel_size=(12, 12), activation = 'relu'))\n\n model.add(Flatten())\n model.add(Dense(4096, activation = 'relu'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation = 'relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2383, activation = 'relu'))\n model.add(Dense(len(font_names), activation = 'softmax'))\n \n return model", "_____no_output_____" ] ], [ [ "### Compile Model", "_____no_output_____" ] ], [ [ "from keras import optimizers\n\nbatch_size = 128\nepochs = 50\nmodel= create_model()\nopt = optimizers.SGD(lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True)\nmodel.compile(loss = 'mean_squared_error', optimizer = opt, metrics = ['accuracy'])", "_____no_output_____" ] ], [ [ "### Fit and store Model\n\n- `model_path` should be defined", "_____no_output_____" ] ], [ [ "from keras import callbacks\n\nmodel_path = ''\n\nmy_callbacks = [\n callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, patience = 10, verbose = 0, mode = 'min'),\n callbacks.ModelCheckpoint(model_path, monitor = 'val_loss', verbose = 1, save_best_only = True, mode = 'min')\n]\n\nmodel.fit(\n trainX,\n trainY,\n shuffle = True,\n batch_size = batch_size,\n epochs = epochs,\n verbose = 1,\n validation_data = (testX, testY),\n callbacks = my_callbacks\n)", "_____no_output_____" ] ], [ [ "### Evaluate", "_____no_output_____" ] ], [ [ "from keras.models import load_model\n\nmodel_path = ''\n\nmodel = load_model(model_path)\nscore = model.evaluate(testX, testY, verbose = 0)\n\nprint('Test loss ->', score[0])\nprint('Test accuracy ->', score[1])", "_____no_output_____" ] ], [ [ "### Revert font name from labels", "_____no_output_____" ] ], [ [ "def rev_conv_label(label):\n return font_names[label]", "_____no_output_____" ] ], [ [ "### Verify", "_____no_output_____" ] ], [ [ "import PIL\nimport numpy as np\nimport matplotlib.cm as cm\nimport matplotlib.pylab as plt\nfrom keras.preprocessing.image import img_to_array\n\n#Load image and de-noisy\ntmp_img = roi.copy().convert('L')\ntmp_img = blur_image(tmp_img)\narr_img = img_to_array(tmp_img)\n\n#Predict using trained model\ndata = []\ndata.append(arr_img)\ndata = np.asarray(data, dtype = \"float\") / 
255.0\ny = np.argmax(model.predict(data), axis = -1)\n\n#Display result\nlabel = rev_conv_label(int(y[0]))\nfig, ax = plt.subplots(1)\nax.imshow(roi, interpolation = 'nearest', cmap = cm.gray)\nax.text(5, 5, label, bbox = {'facecolor': 'white', 'pad': 8})\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb365da63d01affb29225dbe7261ccc76425328f
65,699
ipynb
Jupyter Notebook
Project.ipynb
anurag1paul/TimeseriesClassification
10abff14cf73f2d82e831b91f768c91d810607b0
[ "MIT" ]
null
null
null
Project.ipynb
anurag1paul/TimeseriesClassification
10abff14cf73f2d82e831b91f768c91d810607b0
[ "MIT" ]
11
2020-01-28T22:33:28.000Z
2022-03-11T23:37:56.000Z
Project.ipynb
anurag1paul/TimeseriesClassification
10abff14cf73f2d82e831b91f768c91d810607b0
[ "MIT" ]
null
null
null
173.348285
25,204
0.913271
[ [ [ "# Multivariate Timeseries Classification\n\nIn this project we have 205 samples of 89 rows each with 14 different features. The problem is similar to activity recognition and is solved using both traditional machine learning using feature engineering and also, using deep learning using LSTM, CNN1D-LSTM and CNN2D-LSTM. ", "_____no_output_____" ], [ "### Import Necessary Files", "_____no_output_____" ] ], [ [ "from data_loader import DataLoader\nfrom preprocessing import prepare_data\nfrom models import FeatureEngineeredModel, DeepLearningModel\nfrom utils import plot_confusion_matrix, plot_class_distribution, plot_best_accuracies", "Using TensorFlow backend.\n" ] ], [ [ "### Data File", "_____no_output_____" ] ], [ [ "data_file = \"challenge_dataset.xlsx\"", "_____no_output_____" ] ], [ [ "### Train and Test files", "_____no_output_____" ] ], [ [ "train_file = \"train.csv\"\ntest_file = \"test.csv\"", "_____no_output_____" ] ], [ [ "### Test size of 20%", "_____no_output_____" ] ], [ [ "test_size = 0.2", "_____no_output_____" ] ], [ [ "### Prepare train and test datasets\nprepare_data() renames \"ID_TestSet\" to \"id\" and splits the data into train and test set and creates two new csv files which are used for training and testing models", "_____no_output_____" ] ], [ [ "data = prepare_data(data_file, train_file, test_file, test_size, gen=False)", "_____no_output_____" ] ], [ [ "### Visualize the distribution of different classes", "_____no_output_____" ] ], [ [ "classes = data.groupby(\"goal\").id.count()\nplot_class_distribution(classes)", "_____no_output_____" ] ], [ [ "### Define dataset loader", "_____no_output_____" ] ], [ [ "data_loader = DataLoader(train_file, test_file)", "_____no_output_____" ] ], [ [ "### Feature Engineered Model\n\nFeatureEngineeredModel in ints constructor extracts and selects the relevant features", "_____no_output_____" ] ], [ [ "X_train, y_train, X_test, y_test, train_ids, test_ids = data_loader.get_train_test_data()\ntraditional_model = FeatureEngineeredModel(X_train, y_train, X_test, y_test, train_ids, test_ids)", "Feature Extraction: 100%|██████████| 30/30 [01:15<00:00, 1.42s/it]\nWARNING:tsfresh.utilities.dataframe_functions:The columns ['bl__fft_coefficient__coeff_45__attr_\"abs\"'\n 'bl__fft_coefficient__coeff_45__attr_\"angle\"'\n 'bl__fft_coefficient__coeff_45__attr_\"imag\"' ...\n 'vz__fft_coefficient__coeff_99__attr_\"angle\"'\n 'vz__fft_coefficient__coeff_99__attr_\"imag\"'\n 'vz__fft_coefficient__coeff_99__attr_\"real\"'] did not have any finite values. 
Filling with zeros.\n" ] ], [ [ "### Evaluate RandomForest and XGBoost for various num estimators and determine the best model", "_____no_output_____" ] ], [ [ "tm_best_model, tm_best_score = traditional_model.evaluate()", "Training Random Forest\nAccuracy: 63.41463414634146%\nTraining XGBoost\nAccuracy: 63.41463414634146%\n" ] ], [ [ "### Deep Learning Models - LSTM, CNN1D-LSTM, CNN2D-LSTM", "_____no_output_____" ] ], [ [ "X_train, y_train, X_test, y_test, train_ids, test_ids = data_loader.get_train_test_data_norm()\ndeep_learning_model = DeepLearningModel(X_train, y_train, X_test, y_test, train_ids, test_ids)", "_____no_output_____" ] ], [ [ "### Training the three deep learning architectures 10 times to get average and best accuracy", "_____no_output_____" ] ], [ [ "dl_best_model, dl_best_score = deep_learning_model.evaluate()", "Training LSTM\nAccuracy: Max:63.414634582473006% Avg:57.073% (+/-3.963)\nTraining CNN1D-LSTM\nAccuracy: Max:68.29268336296082% Avg:56.098% (+/-4.999)\nTraining CNN2D-LSTM\nAccuracy: Max:60.97561033760629% Avg:54.39% (+/-4.232)\n" ] ], [ [ "### Plot Confusion Matrices", "_____no_output_____" ] ], [ [ "plot_confusion_matrix(traditional_model.get_confusion_matrix(), classes = [0,1,2], \n title = traditional_model.best_model_name + \" Confusion Matrix\")", "_____no_output_____" ], [ "plot_confusion_matrix(deep_learning_model.get_confusion_matrix(), classes = [0,1,2], \n title = deep_learning_model.best_model_name + \" Confusion Matrix\")", "_____no_output_____" ] ], [ [ "### Comparison of the Accuracy of the best models of the five algorithms or architectures", "_____no_output_____" ] ], [ [ "model_names = traditional_model.model_names.copy()\nmodel_names.extend(deep_learning_model.model_names)\naccuracies = traditional_model.scores.copy()\naccuracies.extend(deep_learning_model.scores)\n\nplot_best_accuracies(model_names, accuracies)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb36667dffe526347657d6e5151061f50ca4a2d9
15,772
ipynb
Jupyter Notebook
1B_process_and_clean_data.ipynb
rnfinnegan/prostate-biological-model
71476080fd5f2741924dd2ba422cf5c4f0d594f8
[ "Apache-2.0" ]
null
null
null
1B_process_and_clean_data.ipynb
rnfinnegan/prostate-biological-model
71476080fd5f2741924dd2ba422cf5c4f0d594f8
[ "Apache-2.0" ]
null
null
null
1B_process_and_clean_data.ipynb
rnfinnegan/prostate-biological-model
71476080fd5f2741924dd2ba422cf5c4f0d594f8
[ "Apache-2.0" ]
null
null
null
42.058667
221
0.586799
[ [ [ "### This notebook contains some code for processing the atlas data:\n1. Hole filling, masking to prostate, extract individual histology labels (Gleason grade)\n2. Interpolate histology-derived data (5mm spacing vs. 2.5mm MRI axial slices)\n3. Interpolate to isotropic voxel sizes (0.8 x 0.8 x 0.8 mm^3)\n4. Write data to disk and save images for manual review", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import pathlib\n\nimport SimpleITK as sitk \n\nfrom platipy.imaging.label.utils import get_com\n\nfrom platipy.imaging.utils.vessel import vessel_spline_generation\n\nfrom platipy.imaging import ImageVisualiser\n\nfrom platipy.imaging.registration.utils import smooth_and_resample\n\n# import colorcet as cc\n\n%matplotlib notebook", "_____no_output_____" ], [ "from birt_utils import (\n interpolate_image,\n interpolate_histology_lesion_probability,\n generate_sampling_label\n)", "_____no_output_____" ], [ "# Set parameters\n\ncontour_fill_hole_mm = 5\n\ninput_dir = pathlib.Path(\"../../1_data/atlas_data/\")\ncase_id_list = sorted([i.name[6:] for i in input_dir.glob(\"*MRHIST*\")])\nprint(len(case_id_list), case_id_list)", "_____no_output_____" ], [ "\"\"\"\nSimplify the images/labels that we propagate\n\"\"\"\n\nlabels_linear = [\n \"TUMOUR_PROBABILITY_GRADE_2+2\",\n \"TUMOUR_PROBABILITY_GRADE_3+2\",\n \"TUMOUR_PROBABILITY_GRADE_3+3\",\n \"TUMOUR_PROBABILITY_GRADE_3+4\",\n \"TUMOUR_PROBABILITY_GRADE_4+3\",\n \"TUMOUR_PROBABILITY_GRADE_4+4\",\n \"TUMOUR_PROBABILITY_GRADE_4+5\",\n \"TUMOUR_PROBABILITY_GRADE_5+4\",\n \"TUMOUR_PROBABILITY_GRADE_5+5\",\n]\n\nlabels_nn = [\n \"CONTOUR_PROSTATE\",\n \"CONTOUR_PZ\",\n \"CONTOUR_URETHRA\",\n \"LABEL_HISTOLOGY\",\n \"LABEL_SAMPLING\"\n]\n\nimages_bspline = [\n \"MRI_T2W_2D\",\n]\n\nimages_linear = [\n \"CELL_DENSITY_MAP\",\n]\n\nimages_nn = [\n \"HISTOLOGY\"\n]\n\ndata_names = labels_linear + labels_nn + images_linear + images_nn", "_____no_output_____" ], [ "vals = []\n\nfor atlas_id in case_id_list:\n \n im = sitk.ReadImage( (input_dir / f\"MRHIST{atlas_id}\" / \"LABELS\" / f\"MRHIST{atlas_id}_LABEL_HISTOLOGY.nii.gz\").as_posix() )\n new_vals = np.unique(sitk.GetArrayViewFromImage(im))\n \n print(atlas_id, new_vals)\n \n vals += list(new_vals)", "_____no_output_____" ], [ "np.unique(vals)", "_____no_output_____" ], [ "\"\"\"\nRead in data\n\"\"\"\nhist_value_2p2 = 64\nhist_value_3p2 = 96\nhist_value_3p3 = 128\nhist_value_3p4 = 160\nhist_value_4p3 = 192\nhist_value_4p4 = 224\nhist_value_4p5 = 234\nhist_value_5p4 = 244\nhist_value_5p5 = 255\n\natlas_set = {}\n\nfor atlas_id in case_id_list:\n print(atlas_id, end=\" | \")\n atlas_set[atlas_id] = {}\n atlas_set[atlas_id][\"ORIGINAL\"] = {} \n \n # Read MRI\n atlas_set[atlas_id][\"ORIGINAL\"]['MRI_T2W_2D'] = sitk.ReadImage( (input_dir / f\"MRHIST{atlas_id}\" / \"IMAGES\" / f\"MRHIST{atlas_id}_MRI_T2W_2D.nii.gz\").as_posix() )\n \n # Resampling functions\n g_nn = lambda x: sitk.Resample(x, atlas_set[atlas_id][\"ORIGINAL\"]['MRI_T2W_2D'], sitk.Transform(), sitk.sitkNearestNeighbor)\n g_linear = lambda x: sitk.Resample(x, atlas_set[atlas_id][\"ORIGINAL\"]['MRI_T2W_2D'], sitk.Transform(), sitk.sitkLinear)\n \n # Read cell density and histology\n atlas_set[atlas_id][\"ORIGINAL\"]['CELL_DENSITY_MAP'] = g_linear( sitk.ReadImage( (input_dir / f\"MRHIST{atlas_id}\" / \"IMAGES\" / f\"MRHIST{atlas_id}_CELL_DENSITY_MAP.nii.gz\").as_posix() ) )\n atlas_set[atlas_id][\"ORIGINAL\"]['HISTOLOGY'] = g_nn( sitk.ReadImage( (input_dir / f\"MRHIST{atlas_id}\" / 
\"IMAGES\" / f\"MRHIST{atlas_id}_HISTOLOGY.nii.gz\").as_posix() ) )\n \n # Read whole prostate contour\n atlas_set[atlas_id][\"ORIGINAL\"]['CONTOUR_PROSTATE'] = g_nn( sitk.ReadImage( (input_dir / f\"MRHIST{atlas_id}\" / \"LABELS\" / f\"MRHIST{atlas_id}_CONTOUR_PROSTATE.nii.gz\").as_posix() ) )>0\n \n # Fill holes\n contour_fillhole_img = [int(contour_fill_hole_mm/i) for i in atlas_set[atlas_id][\"ORIGINAL\"]['MRI_T2W_2D'].GetSpacing()]\n atlas_set[atlas_id][\"ORIGINAL\"]['CONTOUR_PROSTATE'] = sitk.BinaryMorphologicalClosing(atlas_set[atlas_id][\"ORIGINAL\"]['CONTOUR_PROSTATE'], contour_fillhole_img)\n \n # Masking function\n mask_to_prostate = lambda x: sitk.Mask(x, atlas_set[atlas_id][\"ORIGINAL\"]['CONTOUR_PROSTATE'])\n \n # Read in PZ and urethtra contours (and mask)\n atlas_set[atlas_id][\"ORIGINAL\"]['CONTOUR_PZ'] = mask_to_prostate (g_nn( sitk.ReadImage( (input_dir / f\"MRHIST{atlas_id}\" / \"LABELS\" / f\"MRHIST{atlas_id}_CONTOUR_PZ_INTERP.nii.gz\").as_posix() ) ) )\n atlas_set[atlas_id][\"ORIGINAL\"]['CONTOUR_URETHRA'] = mask_to_prostate (g_nn( sitk.ReadImage( (input_dir / f\"MRHIST{atlas_id}\" / \"LABELS\" / f\"MRHIST{atlas_id}_CONTOUR_URETHRA.nii.gz\").as_posix() ) ) )\n \n # Read in histology labels (tumour annotation)\n atlas_set[atlas_id][\"ORIGINAL\"]['LABEL_HISTOLOGY'] = g_nn( sitk.ReadImage( (input_dir / f\"MRHIST{atlas_id}\" / \"LABELS\" / f\"MRHIST{atlas_id}_LABEL_HISTOLOGY.nii.gz\").as_posix() ) ) \n\n # Extract out individual labels\n atlas_set[atlas_id][\"ORIGINAL\"][\"TUMOUR_PROBABILITY_GRADE_2+2\"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id][\"ORIGINAL\"]['LABEL_HISTOLOGY'] == hist_value_2p2))\n atlas_set[atlas_id][\"ORIGINAL\"][\"TUMOUR_PROBABILITY_GRADE_3+2\"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id][\"ORIGINAL\"]['LABEL_HISTOLOGY'] == hist_value_3p2))\n atlas_set[atlas_id][\"ORIGINAL\"][\"TUMOUR_PROBABILITY_GRADE_3+3\"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id][\"ORIGINAL\"]['LABEL_HISTOLOGY'] == hist_value_3p3))\n atlas_set[atlas_id][\"ORIGINAL\"][\"TUMOUR_PROBABILITY_GRADE_3+4\"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id][\"ORIGINAL\"]['LABEL_HISTOLOGY'] == hist_value_3p4))\n atlas_set[atlas_id][\"ORIGINAL\"][\"TUMOUR_PROBABILITY_GRADE_4+3\"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id][\"ORIGINAL\"]['LABEL_HISTOLOGY'] == hist_value_4p3))\n atlas_set[atlas_id][\"ORIGINAL\"][\"TUMOUR_PROBABILITY_GRADE_4+4\"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id][\"ORIGINAL\"]['LABEL_HISTOLOGY'] == hist_value_4p4))\n atlas_set[atlas_id][\"ORIGINAL\"][\"TUMOUR_PROBABILITY_GRADE_4+5\"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id][\"ORIGINAL\"]['LABEL_HISTOLOGY'] == hist_value_4p5))\n atlas_set[atlas_id][\"ORIGINAL\"][\"TUMOUR_PROBABILITY_GRADE_5+4\"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id][\"ORIGINAL\"]['LABEL_HISTOLOGY'] == hist_value_5p4))\n atlas_set[atlas_id][\"ORIGINAL\"][\"TUMOUR_PROBABILITY_GRADE_5+5\"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id][\"ORIGINAL\"]['LABEL_HISTOLOGY'] == hist_value_5p5))\n \n # Generate sampling label\n atlas_set[atlas_id][\"ORIGINAL\"][\"LABEL_SAMPLING\"] = mask_to_prostate( generate_sampling_label(atlas_set[atlas_id][\"ORIGINAL\"]['HISTOLOGY']) )", "_____no_output_____" ], [ "\"\"\"\nInterpolate missing cell 
density/histology images\n\"\"\"\n\nfor atlas_id in atlas_set:\n atlas_set[atlas_id][\"ORIGINAL\"]['CELL_DENSITY_MAP'] = interpolate_image(\n sitk.GrayscaleFillhole(\n sitk.Cast(\n atlas_set[atlas_id][\"ORIGINAL\"]['CELL_DENSITY_MAP'],\n sitk.sitkFloat32\n )\n )\n )\n \n atlas_set[atlas_id][\"ORIGINAL\"]['HISTOLOGY'] = interpolate_image(\n sitk.Cast(\n atlas_set[atlas_id][\"ORIGINAL\"]['HISTOLOGY'],\n sitk.sitkVectorFloat32\n )\n )", "_____no_output_____" ], [ "\"\"\"\nResample to 0.8mm (isotropic) voxel size\n\"\"\"\n\nf_nn = lambda x: smooth_and_resample(x, isotropic_voxel_size_mm = 0.8, interpolator=sitk.sitkNearestNeighbor)\nf_linear = lambda x: smooth_and_resample(x, isotropic_voxel_size_mm = 0.8, interpolator=sitk.sitkLinear)\nf_bspline = lambda x: smooth_and_resample(x, isotropic_voxel_size_mm = 0.8, interpolator=sitk.sitkBSpline)\n\nfor atlas_id in atlas_set:\n \n atlas_set[atlas_id]['RESAMPLED'] = {}\n \n for label_name in labels_linear + images_linear:\n atlas_set[atlas_id]['RESAMPLED'][label_name] = f_linear( atlas_set[atlas_id]['ORIGINAL'][label_name])\n \n for label_name in images_bspline:\n atlas_set[atlas_id]['RESAMPLED'][label_name] = f_bspline( atlas_set[atlas_id]['ORIGINAL'][label_name])\n \n for label_name in labels_nn + images_nn:\n atlas_set[atlas_id]['RESAMPLED'][label_name] = f_nn( atlas_set[atlas_id]['ORIGINAL'][label_name])\n \n # Memory saver\n atlas_set[atlas_id][\"ORIGINAL\"] = None", "_____no_output_____" ], [ "\"\"\"\nWrite atlas data\n\"\"\"\n\nfor atlas_id in list(atlas_set.keys()):\n \n print(atlas_id, end=\" | \")\n \n output_dir = pathlib.Path(f\"../1_processing/ATLAS_DATA_PROCESSED/MRHIST{atlas_id}\")\n (output_dir / \"IMAGES\").mkdir(exist_ok=True, parents=True)\n (output_dir / \"LABELS\").mkdir(exist_ok=True, parents=True)\n \n for label_name in labels_linear + labels_nn:\n sitk.WriteImage(atlas_set[atlas_id][\"RESAMPLED\"][label_name], str(output_dir / \"LABELS\" / f\"MRHIST{atlas_id}_{label_name}.nii.gz\"))\n \n for img_name in images_bspline + images_linear + images_nn:\n if \"CELL_DENSITY\" in img_name:\n sitk.WriteImage((8000/255 * atlas_set[atlas_id][\"RESAMPLED\"]['CELL_DENSITY_MAP'])**1.5, str(output_dir / \"IMAGES\" / f\"MRHIST{atlas_id}_{img_name}.nii.gz\"))\n else:\n sitk.WriteImage(atlas_set[atlas_id][\"RESAMPLED\"][img_name], str(output_dir / \"IMAGES\" / f\"MRHIST{atlas_id}_{img_name}.nii.gz\"))\n \n \"\"\"\n Generate some figures to check data integrity\n \"\"\"\n \n figure_dir = pathlib.Path(f\"../1_processing/FIGURES_PROCESSING\")\n figure_dir.mkdir(exist_ok=True, parents=True)\n \n # 1. Contour check\n vis = ImageVisualiser(atlas_set[atlas_id][\"RESAMPLED\"]['MRI_T2W_2D'], cut=get_com(atlas_set[atlas_id][\"RESAMPLED\"]['CONTOUR_PZ']), figure_size_in=6, window=[0,1200])\n vis.add_contour({\n 'WG':atlas_set[atlas_id][\"RESAMPLED\"]['CONTOUR_PROSTATE'],\n 'PZ':atlas_set[atlas_id][\"RESAMPLED\"]['CONTOUR_PZ'],\n 'U':atlas_set[atlas_id][\"RESAMPLED\"]['CONTOUR_URETHRA'],\n }, colormap=plt.cm.cool)\n fig = vis.show()\n fig.savefig(figure_dir / f\"MRHIST{atlas_id}_0_CONTOURS.jpeg\", dpi = 300)\n \n # 2. 
CD check\n vis = ImageVisualiser(atlas_set[atlas_id][\"RESAMPLED\"]['MRI_T2W_2D'], cut=get_com(atlas_set[atlas_id][\"RESAMPLED\"]['CONTOUR_PZ']), figure_size_in=6, window=[0,1200])\n vis.add_scalar_overlay((8000/255 * atlas_set[atlas_id][\"RESAMPLED\"]['CELL_DENSITY_MAP'])**1.5, min_value=0, max_value=200000, name='Cell density [mm'+r'$^{-3}$'+']', colormap=plt.cm.gnuplot2, alpha=1)\n fig = vis.show()\n fig.savefig(figure_dir / f\"MRHIST{atlas_id}_1_CELLDENSITY.jpeg\", dpi = 300)\n \n # 3. Histology\n vis = ImageVisualiser(atlas_set[atlas_id][\"RESAMPLED\"]['HISTOLOGY'], cut=get_com(atlas_set[atlas_id][\"RESAMPLED\"]['CONTOUR_PZ']), figure_size_in=6)\n vis.add_contour({\n 'SAMPLE (HALF)':atlas_set[atlas_id][\"RESAMPLED\"]['LABEL_SAMPLING']>=0.5,\n 'SAMPLE (FULL)':atlas_set[atlas_id][\"RESAMPLED\"]['LABEL_SAMPLING']<=0.5,\n }, colormap=plt.cm.cool)\n fig = vis.show()\n fig.savefig(figure_dir / f\"MRHIST{atlas_id}_2_HISTOLOGY.jpeg\", dpi = 300)\n \n # 4. Histology annotations\n vis = ImageVisualiser(atlas_set[atlas_id][\"RESAMPLED\"]['MRI_T2W_2D'], figure_size_in=6, window=[0,1], projection=\"median\")\n ctr_dict = {\n label[-3:]:atlas_set[atlas_id][\"RESAMPLED\"][label]\n for label in labels_linear\n }\n vis.add_contour(ctr_dict, colormap=plt.cm.jet)\n fig = vis.show()\n fig.savefig(figure_dir / f\"MRHIST{atlas_id}_3_ANNOTATIONS.jpeg\", dpi = 300)\n\n # Close\n plt.close(\"all\")", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb367f493b09eac53f72a41f1bd3dd17dd6ef991
811,161
ipynb
Jupyter Notebook
hyades/1.0-hyades-data-summary.ipynb
smoh/kinesis
452940768f1cb7a93f97302867ae4f02d772229a
[ "MIT" ]
6
2019-06-07T15:03:06.000Z
2020-12-19T21:57:15.000Z
hyades/1.0-hyades-data-summary.ipynb
smoh/kinesis
452940768f1cb7a93f97302867ae4f02d772229a
[ "MIT" ]
3
2019-08-08T21:19:58.000Z
2019-09-02T16:28:57.000Z
hyades/1.0-hyades-data-summary.ipynb
smoh/kinesis
452940768f1cb7a93f97302867ae4f02d772229a
[ "MIT" ]
2
2019-07-25T20:50:04.000Z
2019-08-08T16:59:05.000Z
630.272727
89,996
0.944168
[ [ [ "%matplotlib inline\n%run utils.ipynb", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nfrom matplotlib import colors, ticker\n# import cartopy.crs as ccrs\n\nimport pandas as pd\nimport numpy as np\nimport scipy as sp\nfrom astropy.table import Table\nimport astropy.units as u\nimport astropy.coordinates as coord\nimport arviz as az\nimport seaborn as sns\n\nimport kinesis as kn\nimport gapipes as gp\n\nplt.style.use(mystyledict)", "_____no_output_____" ], [ "%store -r out_full\ndf = out_full#.loc[out_full['Member_r19']!='other']", "_____no_output_____" ], [ "print(f\"{len(df)} rows, {len(df.columns)} columns\")", "1389 rows, 142 columns\n" ], [ "# slices of data\ngdr2 = df.groupby('in_dr2').get_group(True)", "_____no_output_____" ], [ "df[[\"in_dr2\", \"in_leao\", \"in_meingast\", \"in_roser\"]].fillna(False).groupby([\"in_dr2\"]).sum()", "_____no_output_____" ], [ "df[[\"in_dr2\", \"in_leao\", \"in_meingast\", \"in_roser\"]].fillna(False).groupby('in_dr2').get_group(False).groupby('in_meingast').sum()", "_____no_output_____" ], [ "df[[\"in_dr2\", \"in_leao\", \"in_meingast\", \"in_roser\"]].fillna(False).groupby('in_dr2').get_group(False).groupby('in_roser').sum()", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.hist(df['radial_velocity_error'].dropna(), np.logspace(-1,1.2,32));\nmedian_rv_error = df['radial_velocity_error'].median()\nprint(median_rv_error)\nax.axvline(median_rv_error, c='k',lw=1);\nax.set_xscale('log');", "0.4289178378742824\n" ], [ "fig, ax = plt.subplots(1, 1, figsize=(4, 2.5), subplot_kw=dict(projection=ccrs.Mollweide()))\nax.gridlines(\n crs=ccrs.Geodetic(),\n xlocs=[-180, -90, 0, 90, 180],\n ylocs=[0, 45, 90, -45, -90],\n linewidth=0.5,\n zorder=0,\n)\nax.scatter(df[\"ra\"], df[\"dec\"], s=1, c='k', transform=ccrs.Geodetic())\nax.scatter(gdr2[\"ra\"], gdr2[\"dec\"], s=1, transform=ccrs.Geodetic())\nax.set_global()\nax.set_title(\"Sky distribution\")\nfig.tight_layout()\nfig.savefig('../plots/hyades-sky.pdf')", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 1, figsize=(4, 2.5),subplot_kw=dict(projection=ccrs.Mollweide(central_longitude=180)))\nax.gridlines(\n crs=ccrs.Geodetic(),\n xlocs=[-180, -90, 0, 90, 180],\n ylocs=[0, 45, 90, -45, -90],\n linewidth=0.5,\n zorder=0,\n)\nax.scatter(df[\"l\"], df[\"b\"], s=1, c='k', transform=ccrs.Geodetic())\nax.scatter(gdr2[\"l\"], gdr2[\"b\"], s=1, transform=ccrs.Geodetic())\nax.set_global()\nax.set_title(\"Galactic (centered on $l=180$)\")\nfig.tight_layout()\nfig.savefig('../plots/hyades-galactic-distribution.pdf')", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 2, figsize=(8, 4))\nfor cax in ax:\n cax.set_aspect(\"equal\")\nfor dset, color in zip([df, gdr2], [\"k\", None]):\n cartx, cartv = dset.g.icrs.cartesian, dset.g.icrs.velocity\n ax[0].scatter(cartx.x, cartx.y, s=1, c=color)\n ax[1].scatter(cartx.x, cartx.z, s=1, c=color)\nfor cax in ax:\n cax.set_xlabel(\"$X_\\mathrm{ICRS}$\")\nax[0].set_ylabel(\"$Y_\\mathrm{ICRS}$\")\nax[1].set_ylabel(\"$Z_\\mathrm{ICRS}$\")\nfig.tight_layout()\nfig.savefig('../plots/hyades-xyz-icrs.pdf')", "_____no_output_____" ], [ "xlims = (-115, 42)\nylims = (-145, 186)\nzlims = (-50, 30)\n\ntotheight = ((zlims[1]-zlims[0]) + (ylims[1]-ylims[0]))/(xlims[1]-xlims[0])\nheight_ratio = (ylims[1]-ylims[0])/(zlims[1]-zlims[0])\nfig_xsize = 3\nfig_ysize = totheight * fig_xsize\n\n\nfig, ax = plt.subplots(2, 1, figsize=(fig_xsize+1., fig_ysize),\n sharex=True, gridspec_kw={'height_ratios':[height_ratio, .8]})\nfor cax in ax: cax.set_aspect('equal');\nlabels = 
['cl+tails ({})'.format(len(df)), 'cl ({})'.format(len(gdr2))]\nfor dset, color, label in zip([df, gdr2], ['k', None], labels):\n cartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity\n ax[0].scatter(cartx.x, cartx.y, s=1, c=color, label=label);\n ax[1].scatter(cartx.x, cartx.z, s=1, c=color);\nax[1].set_xlabel('$X$ [pc]')\nax[0].set_ylabel('$Y$ [pc]')\nax[1].set_ylabel('$Z$ [pc]');\nax[0].legend(loc='lower right', fontsize=12, markerscale=3, fancybox=False)\nfig.subplots_adjust(left=0.22, bottom=0.08,top=0.99, right=0.98, hspace=0.01)\nfig.savefig('../report/plots/hyades-data-dist.pdf')", "_____no_output_____" ], [ "gdr2_rv = df.loc[df['radial_velocity'].notnull()]\nharps_rv = df.loc[df['RV_HARPS_leao'].notnull()]\nxlims = (-115, 42)\nylims = (-145, 186)\nzlims = (-50, 30)\n\ntotheight = ((zlims[1]-zlims[0]) + (ylims[1]-ylims[0]))/(xlims[1]-xlims[0])\nheight_ratio = (ylims[1]-ylims[0])/(zlims[1]-zlims[0])\nfig_xsize = 3\nfig_ysize = totheight * fig_xsize\n\n\nfig, ax = plt.subplots(2, 1, figsize=(fig_xsize+1., fig_ysize),\n sharex=True, gridspec_kw={'height_ratios':[height_ratio, .8]})\nfor cax in ax: cax.set_aspect('equal');\nlabels = ['', 'has RV'.format(len(gdr2_rv)), 'has HARPS RV']\nfor dset, color, label, s in zip([df, gdr2_rv,harps_rv], ['k', None,'tab:red'], labels, [1, 4,1]):\n cartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity\n ax[0].scatter(cartx.x, cartx.y, s=s, c=color, label=label);\n ax[1].scatter(cartx.x, cartx.z, s=s, c=color);\nax[1].set_xlabel('$X$ [pc]')\nax[0].set_ylabel('$Y$ [pc]')\nax[1].set_ylabel('$Z$ [pc]');\nax[0].legend(loc='lower right', fontsize=12, markerscale=3, fancybox=False)\nfig.subplots_adjust(left=0.22, bottom=0.08,top=0.99, right=0.98, hspace=0.01)\n# fig.savefig('../report/plots/hyades-data-dist-rv.pdf')", "_____no_output_____" ], [ "df[[\"radial_velocity\", \"RV_HARPS_leao\", \"source_id\"]].notnull().groupby(\n [\"radial_velocity\", \"RV_HARPS_leao\"]\n).agg(\"count\")", "_____no_output_____" ], [ "delta_rv = df[\"radial_velocity\"] - df[\"RV_HARPS_leao\"]\ndelta_rv_sigma = delta_rv / np.hypot(df[\"radial_velocity_error\"], df[\"eRV_HARPS_leao\"])\n\nmean_delta_rv = np.nanmean(delta_rv)\nmean_delta_rv_sigma = np.nanmean(delta_rv_sigma)\nprint(f\"mean delta RV (DR2-HARPS) = {mean_delta_rv:-8.4f}\")\nprint(f\"mean delta RV (DR2-HARPS) / error = {mean_delta_rv_sigma:-8.4f}\")\n\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))\nax1 = sns.distplot(\n delta_rv[~np.isnan(delta_rv)],\n ax=ax1,\n color=\"k\",\n hist_kws={\"lw\":0},\n kde_kws={\"lw\": 1},\n)\nax1.axvline(0, c=\"k\", lw=1)\nax1.set_xlabel(r\"$\\mathrm{RV}_\\mathrm{DR2} - \\mathrm{RV}_\\mathrm{HARPS}$\")\nax1.set_ylabel(\"Density\")\nax1.text(\n 0.05,\n 0.95,\n f\"mean={mean_delta_rv:-.3f} km/s\",\n ha=\"left\",\n va=\"top\",\n size=12,\n transform=ax1.transAxes,\n)\nax1.set_yticks([0, .5, 1, 1.5, 2.])\nax1.set_ylim(0, 2.2)\n\nsns.distplot(\n delta_rv_sigma[~np.isnan(delta_rv_sigma)],\n ax=ax2,\n color=\"k\",\n hist_kws={\"lw\":0},\n kde_kws={\"lw\": 1},\n)\nax2.axvline(0, c=\"k\", lw=1)\nax2.set_xlabel(\n r\"$\\mathrm{RV}_\\mathrm{DR2} - \\mathrm{RV}_\\mathrm{HARPS}\"\n r\"/ \\sqrt{\\sigma_\\mathrm{RV, DR2}^2+\\sigma_\\mathrm{RV, HARPS}^2}$\"\n)\nax2.set_ylabel(\"Density\")\nfig.tight_layout()\nfig.savefig(\"../plots/compare-gaia-harps-rv.pdf\")", "mean delta RV (DR2-HARPS) = -0.1519\nmean delta RV (DR2-HARPS) / error = -0.6405\n" ], [ "mean_cartv_icrs = [-6.03, 45.56, 5.57]\nvx, vy, vz = mean_cartv_icrs\n\nfig, ax = plt.subplots(1, 2, figsize=(8, 4))\nfor 
cax in ax:\n cax.set_aspect(\"equal\")\nfor dset, color in zip([df, gdr2], [\"k\", None]):\n cartx, cartv = dset.g.icrs.cartesian, dset.g.icrs.velocity\n dvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_icrs)[:,None]\n cond = (np.abs(dvx)<5) & (np.abs(dvy)<5) & (np.abs(dvz)<5)\n# ax[0].scatter(cartx.x, cartx.y, s=1, c=color)\n ax[0].quiver(cartx.x[cond], cartx.y[cond], dvx[cond], dvy[cond], color=color)\n ax[1].quiver(cartx.x[cond], cartx.z[cond], dvx[cond], dvz[cond], color=color)\nfor cax in ax:\n cax.set_xlabel(\"$X_\\mathrm{ICRS}$\")\nax[0].set_ylabel(\"$Y_\\mathrm{ICRS}$\")\nax[1].set_ylabel(\"$Z_\\mathrm{ICRS}$\")\nfig.tight_layout()\n# fig.savefig('../plots/hyades-xyz-vector-icrs.pdf')", "/home/soh/.conda/envs/nitro/lib/python3.7/site-packages/ipykernel_launcher.py:10: RuntimeWarning: invalid value encountered in less\n # Remove the CWD from sys.path while we load stuff.\n" ], [ "mean_cartv_galactic = [-42.24, -19.00, -1.48]\nfig, ax = plt.subplots(1, 2, figsize=(8, 4))\nfor cax in ax:\n cax.set_aspect(\"equal\")\nfor dset, color in zip([df, gdr2], [\"k\", None]):\n cartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity\n dvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_galactic)[:, None]\n cond = (np.abs(dvx) < 3) & (np.abs(dvy) < 3) & (np.abs(dvz) < 3)\n # ax[0].scatter(cartx.x, cartx.y, s=1, c=color)\n ax[0].quiver(cartx.x[cond], cartx.y[cond], dvx[cond], dvy[cond], color=color)\n ax[1].quiver(cartx.x[cond], cartx.z[cond], dvx[cond], dvz[cond], color=color)\nfor cax in ax:\n cax.set_xlabel(\"$X_\\mathrm{Galactic}$\")\nax[0].set_ylabel(\"$Y_\\mathrm{Galactic}$\")\nax[1].set_ylabel(\"$Z_\\mathrm{Galactic}$\")\nfig.tight_layout()\nfig.savefig('../plots/hyades-xyz-vector-galactic.pdf')", "/home/soh/.conda/envs/matrix/lib/python3.7/site-packages/ipykernel_launcher.py:8: RuntimeWarning: invalid value encountered in less\n \n" ], [ "mean_cartv_galactic = [-42.24, -19.00, -1.48]\nfig, ax = plt.subplots(\n 3, 3, figsize=(6.5, 6.5), sharex=\"col\", sharey=\"all\"\n)\n\ndset = df\ncartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity\ndvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_galactic)[:, None]\n\nxyz = cartx.xyz.value\ndvxyz = [dvx, dvy, dvz]\n\nfor icol in range(3):\n for irow in range(3):\n ax[irow, icol].scatter(xyz[icol], dvxyz[irow], s=1)\n\nax[0, 0].set_ylim(-5, 5)\nfor cax in ax.ravel():\n cax.set_yticks([-4, -2, 0, 2, 4])\n cax.tick_params(width=1, length=6)\nfig.subplots_adjust(wspace=0.04, hspace=0.04, left=0.15, bottom=0.15, top=0.94)\nfor cax, label in zip(ax[:, 0], [\"x\", \"y\", \"z\"]):\n cax.set_ylabel(\n r\"$\\Delta v_{0}$\".format(label) + r\" [$\\mathrm{km}\\,\\mathrm{s}^{-1}$]\"\n )\nax[2, 0].set_xlabel(\"$X$ [pc]\")\nax[2, 1].set_xlabel(\"$Y$ [pc]\")\nax[2, 2].set_xlabel(\"$Z$ [pc]\")\nfig.suptitle(\n \"Residual velocities vs. 
position (Galactic) $N$={}/{}\".format(\n (~np.isnan(dvx)).sum(), len(df)\n ), size=15\n)\nfig.subplots_adjust(right=0.98, left=0.1, bottom=0.1)\n# fig.savefig(\"../plots/residual-velocity-vs-position-galactic.pdf\")", "_____no_output_____" ], [ "error_summary = pd.DataFrame(\n dict(\n pmra_error_frac=np.abs(df[\"pmra_error\"] / df[\"pmra\"]),\n pmdec_error_frac=np.abs(df[\"pmdec_error\"] / df[\"pmdec\"]),\n parallax_error_frac=np.abs(df[\"parallax_error\"] / df[\"parallax\"]),\n )\n).describe()\nerror_summary", "_____no_output_____" ], [ "\npmdelta = np.hypot( *(df_gfr[['pmra', 'pmdec']].values - df[['pmra', 'pmdec']].values).T)\nplt.scatter(df['phot_g_mean_mag'], pmdelta, s=4);\nplt.xlabel('$G$ [mag]')\nplt.ylabel(r'$\\Delta \\mu$');", "_____no_output_____" ], [ "deltav = np.hypot((df_gfr.g.vra-df.g.vra).values, (df_gfr.g.vdec-df.g.vdec).values)\nplt.scatter(df['phot_g_mean_mag'], deltav, s=4);\nplt.xlabel('$G$ [mag]')\nplt.ylabel(r'$\\Delta v_{\\mathrm{tan}}$');", "_____no_output_____" ], [ "mean_cartv_icrs = [-6.03, 45.56, 5.57]\nfig, ax = plt.subplots(\n 3, 3, figsize=(6.5, 6.5), sharex=\"col\", sharey=\"all\"\n)\n\ndset = df\ncartx, cartv = dset.g.icrs.cartesian, dset.g.icrs.velocity\ndvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_icrs)[:, None]\n\nxyz = cartx.xyz.value\ndvxyz = [dvx, dvy, dvz]\n\nfor icol in range(3):\n for irow in range(3):\n ax[irow, icol].scatter(xyz[icol], dvxyz[irow], s=1)\n\nax[0, 0].set_ylim(-5, 5)\nfor cax in ax.ravel():\n cax.set_yticks([-4, -2, 0, 2, 4])\n cax.tick_params(width=1, length=6)\nfig.subplots_adjust(wspace=0.04, hspace=0.04, left=0.15, bottom=0.15, top=0.85)\nfor cax, label in zip(ax[:, 0], [\"x\", \"y\", \"z\"]):\n cax.set_ylabel(r\"$\\Delta v_{0}$\".format(label)+r\" [$\\mathrm{km}\\,\\mathrm{s}^{-1}$]\")\nax[2,0].set_xlabel(\"$X$ [pc]\")\nax[2,1].set_xlabel(\"$Y$ [pc]\")\nax[2,2].set_xlabel(\"$Z$ [pc]\")\nfig.suptitle(\n \"Residual velocities vs. 
position (ICRS) $N$={}/{}\".format(\n (~np.isnan(dvx)).sum(), len(df)\n ), size=15\n)\nfig.subplots_adjust(right=0.98, left=0.1, bottom=0.1, top=0.94)\n# fig.savefig(\"../plots/residual-velocity-vs-position-icrs.pdf\")", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 1)\nax.set_xlabel(\"$G$ [mag]\")\nn_bright_sources = (df[\"phot_g_mean_mag\"] < 12).sum()\nprint(n_bright_sources)\n\nax.hist(\n df[\"phot_g_mean_mag\"],\n bins=np.linspace(0, 20, 21),\n histtype=\"step\",\n color=\"k\",\n label=\"all (N={})\".format(len(df)),\n)\nax.hist(\n df.dropna(subset=[\"radial_velocity\"])[\"phot_g_mean_mag\"],\n bins=np.linspace(0, 20, 21),\n histtype=\"step\",\n label=\"has Gaia RV (N={})\".format(df[\"radial_velocity\"].notna().sum()),\n)\nax.hist(\n df.dropna(subset=[\"RV_HARPS_leao\"])[\"phot_g_mean_mag\"],\n bins=np.linspace(0, 20, 21),\n histtype=\"step\",\n label=\"has HARPS RV (N={})\".format(df[\"RV_HARPS_leao\"].notna().sum()),\n)\nax.legend(loc=\"upper left\", fontsize=10, frameon=False);\nax.set_ylabel('Count');", "316\n" ], [ "df = out_full.loc[out_full[\"Member_r19\"] != \"other\"]\nfig, ax = plt.subplots()\nax.scatter(\n df[\"bp_rp\"],\n df[\"phot_g_mean_mag\"] + df.g.distmod,\n s=1, c='k'\n)\n\n\nax.invert_yaxis()\nax.set_xlabel(\"BP-RP [mag]\")\nax.set_ylabel(\"$M_G$ [mag]\");", "_____no_output_____" ], [ "# get tgas data for velocity uncertainty comparison\nhy_tgas = pd.read_csv(\"../data/reino_tgas_full.csv\", index_col=0)\nprint(f\"number of sources in Reino selection: {len(hy_tgas)} rows\")\n\ntmp = pd.concat(\n [\n hy_tgas.g.vra_error.rename(\"v\").to_frame().assign(label=r\"TGAS $v_\\alpha$\"),\n hy_tgas.g.vdec_error.rename(\"v\").to_frame().assign(label=r\"TGAS $v_\\delta$\"),\n df.g.vra_error.rename(\"v\").to_frame().assign(label=r\"DR2 $v_\\alpha$\"),\n df.g.vdec_error.rename(\"v\").to_frame().assign(label=r\"DR2 $v_\\delta$\"),\n # df.g.vra_error.rename('v').to_frame().assign(label='HG vra'),\n # df.g.vdec_error.rename('v').to_frame().assign(label='HG vdec'),\n df[\"radial_velocity_error\"].rename(\"v\").to_frame().assign(label=\"DR2 RV\"),\n df[\"eRV_HARPS_leao\"].rename(\"v\").to_frame().assign(label=\"HARPS RV\"),\n ]\n)\ntmp[\"v\"] = np.log10(tmp[\"v\"])\ntmp.groupby('label').describe()\n\ng = sns.FacetGrid(tmp, row=\"label\", aspect=5, height=0.8)\ng.map(sns.kdeplot, \"v\", clip_on=False, shade=True, alpha=1, lw=1.5, bw=0.2)\ng.set_titles(\"\")\ng.fig.subplots_adjust(hspace=0.1, top=0.95, right=0.95, left=0.05, bottom=0.12)\n\ng.set(xticklabels=[\"0.001\", \"0.01\", \"0.1\", \"1\", \"10\"], xticks=[-3, -2, -1, 0, 1])\ng.set(yticks=[])\nfor cax, label in zip(g.fig.axes, g.row_names):\n cax.spines[\"left\"].set_visible(False)\n cax.tick_params(length=5, labelsize=12)\n cax.text(0.95, 0.95, label, ha='right', va='top', transform=cax.transAxes,\n bbox=dict(facecolor='w'), size=12)\n cax.axvline(np.log10(0.3), c='k', lw=1, linestyle=':', zorder=-1);\ng.fig.axes[-1].set_xlabel(r'$\\log \\sigma_v\\,/\\,[\\mathrm{km}\\,\\mathrm{s}^{-1}$]');\ng.fig.savefig(\"../plots/hyades-velocity-uncertainty-distribution.pdf\")", "number of sources in Reino selection: 173 rows\n" ], [ "cl_center_icrs_cart = []", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb36be8925735b88a1180a454394e54df182f06d
4,044
ipynb
Jupyter Notebook
Day-1/Day1_assignment.ipynb
ChinmayJog89/LetsUpgrade_DataScience_Essentials
67aca30c62a46e70b6e09a70997db49334112d85
[ "Apache-2.0" ]
null
null
null
Day-1/Day1_assignment.ipynb
ChinmayJog89/LetsUpgrade_DataScience_Essentials
67aca30c62a46e70b6e09a70997db49334112d85
[ "Apache-2.0" ]
null
null
null
Day-1/Day1_assignment.ipynb
ChinmayJog89/LetsUpgrade_DataScience_Essentials
67aca30c62a46e70b6e09a70997db49334112d85
[ "Apache-2.0" ]
null
null
null
22.342541
115
0.510386
[ [ [ "# Question 1\n\nGiven the following jumbled word, OBANWRI guess the correct English word.\n\nA. RANIBOW\n\nB. RAINBOW\n\nC. BOWRANI\n\nD. ROBWANI", "_____no_output_____" ] ], [ [ "import random\n\ndef shuffling(given):\n given = str(given)\n words = ['RAINBOW','RANIBOW','BOWRANI','ROBWANI']\n shuffled = ''.join(random.sample(given,len(given)))\n if shuffled=='RAINBOW':\n return shuffled\n print(\"The correct option is: RAINBOW\")\n else:\n #shuffling(given)\n print(shuffled,\"is incorrect\")\n print(\"The correct option is: RAINBOW\")\nshuffling('OBANWRI')", "WRBNOAI is incorrect\nThe correct option is: RAINBOW\n" ] ], [ [ "# Question 2\n\nWrite a program which prints “LETS UPGRADE”. (Please note that you have to\nprint in ALL CAPS as given)", "_____no_output_____" ] ], [ [ "string = \"Lets upgrade\"\nprint(string.upper())", "LETS UPGRADE\n" ] ], [ [ "# Question 3\n\nWrite a program that takes Cost Price and Selling Price as input and displays whether the transaction is a\nProfit or a Loss or neither.\n\nINPUT FORMAT:\n1. The first line contains the cost price.\n2. The second line contains the selling price.\n\nOUTPUT FORMAT:\n1. Print \"Profit\" if the transaction is a profit or \"Loss\" if it is a loss. \n2. If it is neither profit nor loss, print \"Neither\". (You must not have quotes in your output)", "_____no_output_____" ] ], [ [ "CP = float(input())\nSP = float(input())\n\nif CP<SP:\n print(\"Profit\")\nelif CP>SP:\n print(\"Loss\")\nelse:\n print(\"Neither\")", "60\n80\nProfit\n" ] ], [ [ "# Question 4\n\nWrite a program that takes an amount in Euros as input. You need to find its equivalent in\nRupees and display it. Assume 1 Euro equals Rs. 80.\nPlease note that you are expected to stick to the given input and output\nformat as in sample test cases. Please don't add any extra lines such as\n'Enter a number', etc.\nYour program should take only one number as input and display the output.", "_____no_output_____" ] ], [ [ "Euro = float(input())\nRupees = Euro * 80\nprint(Rupees)", "96\n7680.0\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb36d3c3001fec1c39bf57da1392359eb28669d0
5,274
ipynb
Jupyter Notebook
black_box_test.ipynb
Jeevi10/AICR
53f16cf05a4f0ac315de81c5c94c5717356e8ac9
[ "MIT" ]
null
null
null
black_box_test.ipynb
Jeevi10/AICR
53f16cf05a4f0ac315de81c5c94c5717356e8ac9
[ "MIT" ]
null
null
null
black_box_test.ipynb
Jeevi10/AICR
53f16cf05a4f0ac315de81c5c94c5717356e8ac9
[ "MIT" ]
null
null
null
33.592357
263
0.578119
[ [ [ "import torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nimport torchvision\nfrom networks import *\nfrom advertorch.attacks import LinfPGDAttack, GradientSignAttack, LinfBasicIterativeAttack, CarliniWagnerL2Attack, MomentumIterativeAttack, SpatialTransformAttack, LinfSPSAAttack, JacobianSaliencyMapAttack\nfrom blackbox.CNN3 import CNN", "_____no_output_____" ], [ "num_classes=10\n#torch.cuda.set_device(0)\n#model = resnet(num_classes=num_classes,depth=110)\nmodel = sixNet()\n#mdoel = model.cuda()\nBBox = CNN()\nif True:\n model = nn.DataParallel(model,device_ids=[0,2,1,3]).cuda()\n BBox = nn.DataParallel(BBox, device_ids=[0,2,1,3]).cuda()\n \n#Loading Trained Model\nBb_file = './saved_model/model_Blackbox_model_mnist'\nsoftmax_filename= './saved_model/model_pretrain_model_mnist'\n#filename= 'Models_PCL/CIFAR10_PCL.pth.tar' \nrobust_model= './saved_model/model_posttrain_model_mnist_prox'\ncheckpoint = torch.load(robust_model)\nBb = torch.load(Bb_file)\nmodel.load_state_dict(checkpoint)\nBBox.load_state_dict(Bb)\nmodel.eval()\nBBox.eval()\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n# Loading Test Data (Un-normalized)\ntransform_test = transforms.Compose([transforms.ToTensor()])\n \ntestset = torchvision.datasets.MNIST(root='./file', train=False,\n download=True, transform=transform_test)\ntest_loader = torch.utils.data.DataLoader(testset, batch_size=1000, pin_memory=True,\n \n shuffle=False, num_workers=4)\n\neps = 0.4", "_____no_output_____" ], [ "pgd02 = LinfPGDAttack(\n BBox,loss_fn=nn.CrossEntropyLoss(reduction=\"sum\"), eps=eps,\n nb_iter=10, eps_iter=eps/10, rand_init=True, clip_min=0, clip_max=1,\n targeted=False)\n\nfsgm = GradientSignAttack(BBox,loss_fn=nn.CrossEntropyLoss(reduction=\"sum\"), eps=eps, clip_min=0, clip_max=1)\n\nMIM1 = MomentumIterativeAttack(BBox,loss_fn=nn.CrossEntropyLoss(reduction=\"sum\"), eps=eps, nb_iter=10, eps_iter=eps/10, clip_min=0, clip_max=1, targeted=False)\nBIM = LinfBasicIterativeAttack(BBox, loss_fn=nn.CrossEntropyLoss(reduction=\"sum\"),eps=eps, nb_iter=10, eps_iter=eps/10, clip_min=0, clip_max=1, targeted=False)\nCW = CarliniWagnerL2Attack(BBox,loss_fn=nn.CrossEntropyLoss(reduction=\"sum\"), num_classes=10, learning_rate=0.01, max_iterations=1000, initial_const=10, clip_min=0, clip_max=1)\nattacks={'pgd':pgd02,\n \"mim\":MIM1,\n \"fsgm\":fsgm,\n \"BIM\":BIM,\n \"CW\":CW}", "/home/C00289092/anaconda3/envs/advattack/lib/python3.8/site-packages/advertorch/attacks/carlini_wagner.py:68: UserWarning: This Attack currently do not support a different loss function other than the default. Setting loss_fn manually is not effective.\n warnings.warn(\n" ], [ "model.eval()\nfor attack_name in attacks:\n correct = 0\n size = 0 \n n = 0\n for tedata, tetarget in test_loader:\n n += 1\n size += tedata.shape[0]\n tedata, tetarget = tedata.to(device), tetarget.to(device)\n\n tedata = attacks[attack_name].perturb(tedata, tetarget).to(device)\n output = model(tedata)\n pred = output.argmax(dim=1, keepdim=True) \n correct += pred.eq(tetarget.view_as(pred)).sum().item()\n\n print(\"{:s} acc: {:.2f}\".format(attack_name, 100. * correct / size))", "pgd acc: 43.13\nmim acc: 26.99\nfsgm acc: 47.27\nBIM acc: 46.20\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb36e31a5e179c9587615883b42b77ab30f3f5ae
905
ipynb
Jupyter Notebook
Week4/Lec36-Exercise-Logical-XOR.ipynb
gymk/ANLP
3052b12d307e4b2c8e192f3ac3b6e393932a0d77
[ "MIT" ]
7
2020-08-25T23:10:34.000Z
2022-01-11T16:43:37.000Z
Week4/Lec36-Exercise-Logical-XOR.ipynb
gymk/ANLP
3052b12d307e4b2c8e192f3ac3b6e393932a0d77
[ "MIT" ]
null
null
null
Week4/Lec36-Exercise-Logical-XOR.ipynb
gymk/ANLP
3052b12d307e4b2c8e192f3ac3b6e393932a0d77
[ "MIT" ]
11
2020-09-28T09:08:01.000Z
2022-03-29T18:06:22.000Z
18.854167
83
0.542541
[ [ [ "# Exercise 6 - Logical XOR", "_____no_output_____" ], [ "- Add one more neuron in the hidden layer and compute the output matrix\n - See how values are reshaped in hidden layer and how it impacts the output", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown" ] ]
cb36e6de32087ea3a1526e8b26897d98ec70c303
193,509
ipynb
Jupyter Notebook
DrifterAnalysis/InterpLocation.ipynb
shaunwbell/ipythonnb
c2f35b1524dc14fb0f12a8846a794af1bd3b3d3a
[ "MIT" ]
3
2017-03-23T16:52:44.000Z
2022-03-08T16:53:29.000Z
DrifterAnalysis/InterpLocation.ipynb
shaunwbell/ipythonnb
c2f35b1524dc14fb0f12a8846a794af1bd3b3d3a
[ "MIT" ]
null
null
null
DrifterAnalysis/InterpLocation.ipynb
shaunwbell/ipythonnb
c2f35b1524dc14fb0f12a8846a794af1bd3b3d3a
[ "MIT" ]
2
2017-03-30T22:01:25.000Z
2019-10-17T17:30:29.000Z
341.888693
174,268
0.907167
[ [ [ "# Interpolate between profiles (spatial or temporal)", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "df = pd.read_csv('/Users/bell/Downloads/ALAMO SN9119.txt','\\t',\n parse_dates=['yyyy-mm-ddThh:mm:ss.sss'])", "_____no_output_____" ], [ "ddata = df.groupby('Station').first()\nddata", "_____no_output_____" ], [ "ddata[np.isnan(ddata['Longitude [degrees_east]'])]", "_____no_output_____" ], [ "fdata = ddata.set_index(pd.DatetimeIndex(ddata['yyyy-mm-ddThh:mm:ss.sss']))\nfdata['Longitude [degrees_east]'] = fdata['Longitude [degrees_east]'].interpolate()\nfdata['Latitude [degrees_north]'] = fdata['Latitude [degrees_north]'].interpolate()", "_____no_output_____" ], [ "import pygmt", "_____no_output_____" ], [ "fig = pygmt.Figure()\n\nfig.coast(region=[-168, -160, 66.5, 74.5], projection='B-164/54/52/56/6i', land='lightgray', water='gray',\n frame=True,timestamp=True)\n\nfig.plot(x=fdata['Longitude [degrees_east]'], y=fdata['Latitude [degrees_north]'], style='c0.05c',color='red')\n\nfig.plot(x=ddata['Longitude [degrees_east]'], y=ddata['Latitude [degrees_north]'], style='c0.05c')\n\nfig.show()\n# fig.savefig(\"~/Desktop/ALAMO_SN9119-interp-loc.eps\")", "_____no_output_____" ], [ "fdata.to_csv('/Users/bell/Downloads/ALAMO SN9119-interp.txt')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb36e7a2b51d9f4b4f6c1f2cae722363a19112d9
343,904
ipynb
Jupyter Notebook
rr_gauge_wax.ipynb
rockandsalt/rr_gage_binder_jetting
bc038acb6d10e0b3a10271103495678ef27f33af
[ "MIT" ]
null
null
null
rr_gauge_wax.ipynb
rockandsalt/rr_gage_binder_jetting
bc038acb6d10e0b3a10271103495678ef27f33af
[ "MIT" ]
null
null
null
rr_gauge_wax.ipynb
rockandsalt/rr_gage_binder_jetting
bc038acb6d10e0b3a10271103495678ef27f33af
[ "MIT" ]
null
null
null
216.020101
214,540
0.874974
[ [ [ "%matplotlib inline\n\nimport numpy as np \nimport pandas as pd\n\nimport pymc3 as pm\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport arviz as az\n\nimport warnings\nwarnings.filterwarnings('ignore')", "WARNING (theano.tensor.blas): Using NumPy C-API based implementation for BLAS functions.\n" ] ], [ [ "## Archimedes procedure for porous material density determination\n\nExperimental procedure is as follow, for further detail refer to standard ASTM D7263. \n\n1. Measure temperature of liquid to determine its ($\\rho_{\\text{water}}$). Find object theoretical full density ( $\\rho_{\\text{solid}}$ ) and wax density ($\\rho_{wax}$) from datasheet\n2. Measure mass of sample(s) ($M_t$)\n3. Apply two coats of wax to the sample(s)\n4. Measure mass of Coated sample(s) ($M_c$)\n5. Measure mass of Coated Sample(s) submerged ($M_{sub}$)\n\n\n\\begin{equation}\n \\text{density}, \\rho_b = \\frac{M_t}{\\frac{(M_c - M_{sub})}{\\rho_{water}} - \\frac{(M_c - M_t)}{\\rho_{wax}}}\n\\end{equation}\n\n\n\\begin{equation}\n \\text{true density}, \\pi_d = \\frac{ \\rho_b}{\\rho_{\\text{solid}}} * 100\n\\end{equation}\n\nParts using two powder type are made. Parts labeled bimodal should have a higher density then parts labelled unimodal.", "_____no_output_____" ], [ "## Display first few row of excel sheet", "_____no_output_____" ] ], [ [ "data = pd.read_excel('./data/green_sample_benchmark.xlsx', sheet_name = 'wax', index_col= [0,1])\noil_density = pd.read_excel('./data/green_sample_benchmark.xlsx', sheet_name = 'wax_param', index_col= 0)", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ] ], [ [ "## Display first few row of excel sheet", "_____no_output_____" ] ], [ [ "oil_density.head()", "_____no_output_____" ] ], [ [ "## Carrier fluid density\n\nDensity of the carrier fluid ($\\rho_{\\text{water}}$) is necessary for density calculation. Since the density of the fluid fluctuate depending on temperature and temperature varies from experiment to experiment, it is recorded for each experiment. For experiment 2 it was recorded at the beginning and at the end.", "_____no_output_____" ] ], [ [ "def plot_hist(data, b):\n fig, axes = plt.subplots(1,3)\n axes[0].hist(data['dry weight'], bins = b, density = True)\n axes[0].set_title('dry weight')\n axes[0].set_xlabel('weight (g)')\n axes[0].set_ylabel('%')\n\n axes[1].hist(data['wet coated'], bins = b, density = True)\n axes[1].set_title('wet weight')\n axes[1].set_xlabel('weight (g)')\n axes[1].set_ylabel('%')\n\n axes[2].hist(data['coated weight'], bins = b, density = True)\n axes[2].set_title('coated weight')\n axes[2].set_xlabel('weight (g)')\n axes[2].set_ylabel('%')\n\n plt.tight_layout()\n plt.show()\n\nplot_hist(data, 20)", "_____no_output_____" ], [ "data = data.dropna()", "_____no_output_____" ] ], [ [ "## Compute density\nEach sample was measured 3 times at each stage. 
Since we are only interested in the uncertainty in the true density ($\\pi_d$), a cartesian product of each measured weights (dry, wet and coated) and liquid density is done for each sample in order to compute true density.", "_____no_output_____" ] ], [ [ "from itertools import product\nfrom sklearn import preprocessing\n\ndensity_df = pd.DataFrame(columns = ['batch', 'id', 'density', 'operator', 'powder'])\n\n# density of stainless steel 316L (g/cc) taken from data sheet\nden_ss316 = 8.0\n\nunique_id = 0\n\n# iterate over each sample to do cartesian product\nfor idx in data.index.unique():\n sample = data.loc[idx]\n\n op = int(np.unique(sample['operator']).squeeze())\n powder = str(np.unique(sample['powder type']).squeeze())\n batch = idx[0]\n \n d_weight = sample['dry weight'].to_numpy()\n wc_weight = sample['wet coated'].to_numpy()\n c_weight = sample['coated weight'].to_numpy()\n\n exp_num = sample['exp']\n \n # density of liquid and wax taken from datasheet\n water_den = np.unique(oil_density.loc[exp_num]['water density'].to_numpy())\n wax_den = np.unique(oil_density.loc[exp_num]['wax density'].to_numpy())\n \n # cartesian product\n prod = product(d_weight, wc_weight, c_weight, water_den, wax_den)\n\n for d_w, wc_w, c_w, water_d, wax_d in prod:\n #compute true density refer to equation on top\n den = (d_w/((c_w - wc_w)/water_d - (c_w - d_w)/wax_d))/den_ss316*100\n new_entry = {\n 'batch' : batch,\n 'id' : unique_id,\n 'density' : float(den.squeeze()),\n 'operator' : op,\n 'powder' : powder\n }\n\n density_df = density_df.append(new_entry, ignore_index = True)\n \n unique_id += 1", "_____no_output_____" ], [ "ax = sns.histplot(density_df['density'])", "_____no_output_____" ], [ "density_df.to_csv('./data/density_wax.csv', index=False)", "_____no_output_____" ], [ "basic_model = pm.Model()\n\nn_part = unique_id\n\npowder_mean = density_df.groupby('powder').mean()['density']\n\npart_id = density_df['id'].to_numpy().astype(np.int32)\nobs_den = density_df['density'].to_numpy().astype(np.int32)\n\n# Relabel data to integers\npowder_enc = preprocessing.LabelEncoder()\npowder_enc.fit(['Unimodal','Bimodal'])\npowder_data = powder_enc.transform(density_df['powder'])\n\n#compute starting point for mean inference\ntest_val_mu = [powder_mean[c] for c in powder_enc.classes_]\n\nA = 10000\nwith basic_model:\n \n # setup independent mu_i\n mu_p = pm.Uniform('muP', 20.0, 60.0, shape = 2, testval = test_val_mu)\n mu_real = pm.math.switch(powder_data, mu_p[1], mu_p[0])\n \n # setup prior for part variance and error variance\n sig_repeat = pm.HalfCauchy('sig_repeat', 25)\n sig_part = pm.HalfCauchy('sig_part', 25)\n \n # each part will vary independently (nesting relationship)\n P_t = pm.Normal('P_t', mu = 0, sigma = 1, shape = n_part)\n \n # setup mu\n mu = pm.Deterministic('mu', mu_real + P_t[part_id]*sig_part)\n \n # add error and data to model\n density = pm.Normal('density', mu = mu, sigma = sig_repeat, observed = obs_den)\n", "_____no_output_____" ], [ "with basic_model:\n trace = pm.sample(A, chains = 4, tune = 2000, target_accept=0.9)\n prior = pm.sample_prior_predictive()\n posterior_predictive = pm.sample_posterior_predictive(trace)", "Auto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nMultiprocess sampling (4 chains in 4 jobs)\nNUTS: [P_t, sig_part, sig_repeat, muP]\nSampling 4 chains, 0 divergences: 100%|█████████████████████████████████████| 48000/48000 [00:37<00:00, 1276.94draws/s]\nThe number of effective samples is smaller than 25% for some 
parameters.\n100%|██████████████████████████████████████████████████████████████████████████| 40000/40000 [00:35<00:00, 1141.96it/s]\n" ] ], [ [ "Convert trace to arviz data format for diagnostic", "_____no_output_____" ] ], [ [ "trace_data_az = az.from_pymc3(\n trace=trace,\n prior=prior,\n posterior_predictive=posterior_predictive,\n model=basic_model\n)", "_____no_output_____" ], [ "trace_data_az.to_netcdf('./data/wax_trace.nc')", "_____no_output_____" ] ], [ [ "## Trace plot\nThe sampler is exploring the probability space by sampling different values of the model input parameters ($\\mu_i, \\sigma_{\\text{part}}, \\sigma_{\\text{gage}}$). The left graph shows the probability density of the sampled values. The right graph shows the sampled with respect to sample number. The left graph is computed by summing up the right graph. When the left graph looks skewed towards zero or the right graph does not resemble a thick caterpillar, this indicates a biased sampling and a lack of convergence. ", "_____no_output_____" ] ], [ [ "pm.traceplot(trace, var_names=['muP','sig_part','sig_repeat'])\nplt.show()", "_____no_output_____" ] ], [ [ "## Predictive check plot\n\nPredictive posterior plot overlays data generated by the model's posterior over the real data. This is useful to check the fitness of the model. ", "_____no_output_____" ] ], [ [ "az.plot_ppc(trace_data_az, data_pairs={\"density\":\"density\"}, kind = 'scatter')\nplt.show()", "_____no_output_____" ] ], [ [ "## Forest plot\n\nForest plot on estimated real mean $\\mu_{i}$ to check if the values make sense", "_____no_output_____" ] ], [ [ "pm.plots.forestplot(trace, var_names=['muP'])\nplt.show()", "_____no_output_____" ] ], [ [ "## Compute split $\\hat{R}$ \n\nIn the Markov-Chain Monte Carlo (MCMC) sampler, the sampling is done simultaneously in multiple processes (chain). The rank normalized $\\hat{R}$ checks for convergence by comparing the variance between chain with the variance within chain. If convergence has been achieved then the variance should be the same ($\\hat{R}=1$) (Vehtari et al). [Vehtari et al.](https://arxiv.org/pdf/1903.08008v1.pdf \"vehtari\") recommends a $\\hat{R} < 1.01$.\n", "_____no_output_____" ] ], [ [ "az.rhat(trace_data_az)", "_____no_output_____" ] ], [ [ "## Calculate highest density interval (HDI)\n\nTaken from [arviz doc](https://arviz-devs.github.io/arviz/api/generated/arviz.hdi.html#arviz.hdi \"arviz\") : The HDI is the minimum width Bayesian credible interval (BCI).", "_____no_output_____" ] ], [ [ "hdi_data = az.hdi(trace_data_az)\nhdi_data.to_netcdf('./data/wax_inference.nc')\nhdi_data", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb36f0a8f423322762dd09df3494dfc897165ecc
3,054
ipynb
Jupyter Notebook
Chapter6_ObjectOriented/static_method.ipynb
tomex74/UdemyPythonPro
b4b83483fa2d3337a2860d53ff38e68eb38b3ac4
[ "MIT" ]
null
null
null
Chapter6_ObjectOriented/static_method.ipynb
tomex74/UdemyPythonPro
b4b83483fa2d3337a2860d53ff38e68eb38b3ac4
[ "MIT" ]
null
null
null
Chapter6_ObjectOriented/static_method.ipynb
tomex74/UdemyPythonPro
b4b83483fa2d3337a2860d53ff38e68eb38b3ac4
[ "MIT" ]
null
null
null
22.791045
92
0.499018
[ [ [ "from time import localtime\n\nclass Date:\n def __init__(self, year, month, day):\n self.year = year\n self.month = month\n self.day = day\n\n # member/instance method\n def print_date(self):\n print('{0} {1} {2}'.format(self.year, self.month, self.day))\n\n # can only modify the state of the class, not a single instance\n @classmethod\n def get_todays_date(cls):\n date = cls.__new__(cls)\n time = localtime()\n date.year = time.tm_year\n date.month = time.tm_mon\n date.day = time.tm_mday\n return date\n\n # can neither modify the object state of an instance nor the class state itself\n # it is rather a way to namespace your methods\n @staticmethod\n def is_todays_date(year, month, day):\n time = localtime()\n if year == time.tm_year and month == time.tm_mon and day == time.tm_mday:\n return True\n else:\n return False", "_____no_output_____" ], [ "date1 = Date(year=2008, month=10, day=12)\ndate1.print_date()", "2008 10 12\n" ], [ "date2 = Date.get_todays_date()\ndate2.print_date()", "2020 10 12\n" ], [ "Date.is_todays_date(date2.year, date2.month, date2.day)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb370a58e3dcca592835c08627003260e00a4e0e
17,794
ipynb
Jupyter Notebook
courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb
KayvanShah1/training-data-analyst
3f778a57b8e6d2446af40ca6063b2fd9c1b4bc88
[ "Apache-2.0" ]
6,140
2016-05-23T16:09:35.000Z
2022-03-30T19:00:46.000Z
courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb
KayvanShah1/training-data-analyst
3f778a57b8e6d2446af40ca6063b2fd9c1b4bc88
[ "Apache-2.0" ]
1,384
2016-07-08T22:26:41.000Z
2022-03-24T16:39:43.000Z
courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb
KayvanShah1/training-data-analyst
3f778a57b8e6d2446af40ca6063b2fd9c1b4bc88
[ "Apache-2.0" ]
5,110
2016-05-27T13:45:18.000Z
2022-03-31T18:40:42.000Z
31.438163
553
0.590817
[ [ [ "# Scaling up ML using Cloud AI Platform\n\nIn this notebook, we take a previously developed TensorFlow model to predict taxifare rides and package it up so that it can be run in Cloud AI Platform. For now, we'll run this on a small dataset. The model that was developed is rather simplistic, and therefore, the accuracy of the model is not great either. However, this notebook illustrates *how* to package up a TensorFlow model to run it within Cloud AI Platform. \n\nLater in the course, we will look at ways to make a more effective machine learning model.", "_____no_output_____" ], [ "## Environment variables for project and bucket\n\nNote that:\n<ol>\n<li> Your project id is the *unique* string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: <b>Project ID:</b> cloud-training-demos </li>\n<li> Cloud training often involves saving and restoring model files. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available). A common pattern is to prefix the bucket name by the project id, so that it is unique. Also, for cost reasons, you might want to use a single region bucket. </li>\n</ol>\n<b>Change the cell below</b> to reflect your Project ID and bucket name.\n", "_____no_output_____" ] ], [ [ "!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst", "_____no_output_____" ], [ "import os\nPROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID\nBUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME\nREGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1", "_____no_output_____" ], [ "# For Python Code\n# Model Info\nMODEL_NAME = 'taxifare'\n# Model Version\nMODEL_VERSION = 'v1'\n# Training Directory name\nTRAINING_DIR = 'taxi_trained'", "_____no_output_____" ], [ "# for bash\nos.environ['PROJECT'] = PROJECT\nos.environ['BUCKET'] = BUCKET\nos.environ['REGION'] = REGION\nos.environ['MODEL_NAME'] = MODEL_NAME\nos.environ['MODEL_VERSION'] = MODEL_VERSION\nos.environ['TRAINING_DIR'] = TRAINING_DIR \nos.environ['TFVERSION'] = '2.5' # Tensorflow version", "_____no_output_____" ], [ "%%bash\ngcloud config set project $PROJECT\ngcloud config set compute/region $REGION", "_____no_output_____" ] ], [ [ "## Packaging up the code\n\nTake your code and put into a standard Python package structure. <a href=\"taxifare/trainer/model.py\">model.py</a> and <a href=\"taxifare/trainer/task.py\">task.py</a> containing the Tensorflow code from earlier (explore the <a href=\"taxifare/trainer/\">directory structure</a>).", "_____no_output_____" ] ], [ [ "%%bash\nfind ${MODEL_NAME}", "_____no_output_____" ], [ "%%bash\ncat ${MODEL_NAME}/trainer/model.py", "_____no_output_____" ] ], [ [ "## Find absolute paths to your data", "_____no_output_____" ], [ "Note the absolute paths below. /content is mapped in Datalab to where the home icon takes you", "_____no_output_____" ] ], [ [ "%%bash\necho \"Working Directory: ${PWD}\"\necho \"Head of taxi-train.csv\"\nhead -1 $PWD/taxi-train.csv\necho \"Head of taxi-valid.csv\"\nhead -1 $PWD/taxi-valid.csv", "_____no_output_____" ] ], [ [ "## Running the Python module from the command-line", "_____no_output_____" ], [ "#### Clean model training dir/output dir", "_____no_output_____" ] ], [ [ "%%bash\n# This is so that the trained model is started fresh each time. 
However, this needs to be done before \nrm -rf $PWD/${TRAINING_DIR}", "_____no_output_____" ], [ "%%bash\n# Setup python so it sees the task module which controls the model.py\nexport PYTHONPATH=${PYTHONPATH}:${PWD}/${MODEL_NAME}\n# Currently set for python 2. To run with python 3 \n# 1. Replace 'python' with 'python3' in the following command\n# 2. Edit trainer/task.py to reflect proper module import method \npython -m trainer.task \\\n --train_data_paths=\"${PWD}/taxi-train*\" \\\n --eval_data_paths=${PWD}/taxi-valid.csv \\\n --output_dir=${PWD}/${TRAINING_DIR} \\\n --train_steps=1000 --job-dir=./tmp", "_____no_output_____" ], [ "%%bash\nls $PWD/${TRAINING_DIR}/export/exporter/", "_____no_output_____" ], [ "%%writefile ./test.json\n{\"pickuplon\": -73.885262,\"pickuplat\": 40.773008,\"dropofflon\": -73.987232,\"dropofflat\": 40.732403,\"passengers\": 2}", "_____no_output_____" ], [ "%%bash\nsudo find \"/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine\" -name '*.pyc' -delete", "_____no_output_____" ], [ "%%bash\n# This model dir is the model exported after training and is used for prediction\n#\nmodel_dir=$(ls ${PWD}/${TRAINING_DIR}/export/exporter | tail -1)\n# predict using the trained model\ngcloud ai-platform local predict \\\n --model-dir=${PWD}/${TRAINING_DIR}/export/exporter/${model_dir} \\\n --json-instances=./test.json", "_____no_output_____" ] ], [ [ "#### Clean model training dir/output dir", "_____no_output_____" ] ], [ [ "%%bash\n# This is so that the trained model is started fresh each time. However, this needs to be done before \nrm -rf $PWD/${TRAINING_DIR}", "_____no_output_____" ] ], [ [ "## Running locally using gcloud", "_____no_output_____" ] ], [ [ "%%bash\n# Use Cloud Machine Learning Engine to train the model in local file system\ngcloud ai-platform local train \\\n --module-name=trainer.task \\\n --package-path=${PWD}/${MODEL_NAME}/trainer \\\n -- \\\n --train_data_paths=${PWD}/taxi-train.csv \\\n --eval_data_paths=${PWD}/taxi-valid.csv \\\n --train_steps=1000 \\\n --output_dir=${PWD}/${TRAINING_DIR} ", "_____no_output_____" ], [ "%%bash\nls $PWD/${TRAINING_DIR}", "_____no_output_____" ] ], [ [ "## Submit training job using gcloud\n\nFirst copy the training data to the cloud. Then, launch a training job.\n\nAfter you submit the job, go to the cloud console (http://console.cloud.google.com) and select <b>AI Platform | Jobs</b> to monitor progress. \n\n<b>Note:</b> Don't be concerned if the notebook stalls (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. 
Use the Cloud Console link (above) to monitor the job.", "_____no_output_____" ] ], [ [ "%%bash\n# Clear Cloud Storage bucket and copy the CSV files to Cloud Storage bucket\necho $BUCKET\ngsutil -m rm -rf gs://${BUCKET}/${MODEL_NAME}/smallinput/\ngsutil -m cp ${PWD}/*.csv gs://${BUCKET}/${MODEL_NAME}/smallinput/", "_____no_output_____" ], [ "%%bash\nOUTDIR=gs://${BUCKET}/${MODEL_NAME}/smallinput/${TRAINING_DIR}\nJOBNAME=${MODEL_NAME}_$(date -u +%y%m%d_%H%M%S)\necho $OUTDIR $REGION $JOBNAME\n# Clear the Cloud Storage Bucket used for the training job\ngsutil -m rm -rf $OUTDIR\ngcloud ai-platform jobs submit training $JOBNAME \\\n --region=$REGION \\\n --module-name=trainer.task \\\n --package-path=${PWD}/${MODEL_NAME}/trainer \\\n --job-dir=$OUTDIR \\\n --staging-bucket=gs://$BUCKET \\\n --scale-tier=BASIC \\\n --runtime-version 2.3 \\\n --python-version 3.5 \\\n -- \\\n --train_data_paths=\"gs://${BUCKET}/${MODEL_NAME}/smallinput/taxi-train*\" \\\n --eval_data_paths=\"gs://${BUCKET}/${MODEL_NAME}/smallinput/taxi-valid*\" \\\n --output_dir=$OUTDIR \\\n --train_steps=10000", "_____no_output_____" ] ], [ [ "Don't be concerned if the notebook appears stalled (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. \n\n<b>Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.</b>", "_____no_output_____" ] ], [ [ "%%bash\ngsutil ls gs://${BUCKET}/${MODEL_NAME}/smallinput", "_____no_output_____" ] ], [ [ "## Train on larger dataset\n\nI have already followed the steps below and the files are already available. <b> You don't need to do the steps in this comment. </b> In the next chapter (on feature engineering), we will avoid all this manual processing by using Cloud Dataflow.\n\nGo to http://bigquery.cloud.google.com/ and type the query:\n<pre>\nSELECT\n (tolls_amount + fare_amount) AS fare_amount,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count*1.0 AS passengers,\n 'nokeyindata' AS key\nFROM\n [nyc-tlc:yellow.trips]\nWHERE\n trip_distance > 0\n AND fare_amount >= 2.5\n AND pickup_longitude > -78\n AND pickup_longitude < -70\n AND dropoff_longitude > -78\n AND dropoff_longitude < -70\n AND pickup_latitude > 37\n AND pickup_latitude < 45\n AND dropoff_latitude > 37\n AND dropoff_latitude < 45\n AND passenger_count > 0\n AND ABS(HASH(pickup_datetime)) % 1000 == 1\n</pre>\n\nNote that this is now 1,000,000 rows (i.e. 100x the original dataset). Export this to CSV using the following steps (Note that <b>I have already done this and made the resulting GCS data publicly available</b>, so you don't need to do it.):\n<ol>\n<li> Click on the \"Save As Table\" button and note down the name of the dataset and table.\n<li> On the BigQuery console, find the newly exported table in the left-hand-side menu, and click on the name.\n<li> Click on \"Export Table\"\n<li> Supply your bucket name and give it the name train.csv (for example: gs://cloud-training-demos-ml/taxifare/ch3/train.csv). Note down what this is. Wait for the job to finish (look at the \"Job History\" on the left-hand-side menu)\n<li> In the query above, change the final \"== 1\" to \"== 2\" and export this to Cloud Storage as valid.csv (e.g. 
gs://cloud-training-demos-ml/taxifare/ch3/valid.csv)\n<li> Download the two files, remove the header line and upload it back to GCS.\n</ol>\n\n<p/>\n<p/>\n\n<h2> Run Cloud training on 1-million row dataset </h2>\n\nThis took 60 minutes and uses as input 1-million rows. The model is exactly the same as above. The only changes are to the input (to use the larger dataset) and to the Cloud MLE tier (to use STANDARD_1 instead of BASIC -- STANDARD_1 is approximately 10x more powerful than BASIC). At the end of the training the loss was 32, but the RMSE (calculated on the validation dataset) was stubbornly at 9.03. So, simply adding more data doesn't help.", "_____no_output_____" ] ], [ [ "%%bash\n\nOUTDIR=gs://${BUCKET}/${MODEL_NAME}/${TRAINING_DIR}\nJOBNAME=${MODEL_NAME}_$(date -u +%y%m%d_%H%M%S)\nCRS_BUCKET=cloud-training-demos # use the already exported data\necho $OUTDIR $REGION $JOBNAME\ngsutil -m rm -rf $OUTDIR\ngcloud ai-platform jobs submit training $JOBNAME \\\n --region=$REGION \\\n --module-name=trainer.task \\\n --package-path=${PWD}/${MODEL_NAME}/trainer \\\n --job-dir=$OUTDIR \\\n --staging-bucket=gs://$BUCKET \\\n --scale-tier=STANDARD_1 \\\n --runtime-version 2.3 \\\n --python-version 3.5 \\\n -- \\\n --train_data_paths=\"gs://${CRS_BUCKET}/${MODEL_NAME}/ch3/train.csv\" \\\n --eval_data_paths=\"gs://${CRS_BUCKET}/${MODEL_NAME}/ch3/valid.csv\" \\\n --output_dir=$OUTDIR \\\n --train_steps=100000", "_____no_output_____" ] ], [ [ "## Challenge Exercise\n\nModify your solution to the challenge exercise in d_trainandevaluate.ipynb appropriately. Make sure that you implement training and deployment. Increase the size of your dataset by 10x since you are running on the cloud. Does your accuracy improve?", "_____no_output_____" ], [ "Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb370bc9ff38861891ad64b4b227365a100d4913
363,567
ipynb
Jupyter Notebook
examples/Tutorial 14.ipynb
RoloVoid/Riskfolio-Lib
3dbe4c3cd83a12cc4b933f255848d8040a2ef9a0
[ "BSD-3-Clause" ]
1,110
2020-03-07T09:12:19.000Z
2022-03-30T16:56:44.000Z
examples/Tutorial 14.ipynb
QingyunSun/Riskfolio-Lib
eea05f913e8e9f25fd69460a302a75b850c56977
[ "BSD-3-Clause" ]
56
2020-12-23T22:37:06.000Z
2022-03-28T20:45:58.000Z
examples/Tutorial 14.ipynb
QingyunSun/Riskfolio-Lib
eea05f913e8e9f25fd69460a302a75b850c56977
[ "BSD-3-Clause" ]
203
2020-03-07T09:12:25.000Z
2022-03-29T07:12:15.000Z
390.932258
104,672
0.918752
[ [ [ "# Riskfolio-Lib Tutorial: \n<br>__[Financionerioncios](https://financioneroncios.wordpress.com)__\n<br>__[Orenji](https://www.orenj-i.net)__\n<br>__[Riskfolio-Lib](https://riskfolio-lib.readthedocs.io/en/latest/)__\n<br>__[Dany Cajas](https://www.linkedin.com/in/dany-cajas/)__\n<a href='https://ko-fi.com/B0B833SXD' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://cdn.ko-fi.com/cdn/kofi1.png?v=2' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a> \n\n## Tutorial 14: Mean [Ulcer Index](https://en.wikipedia.org/wiki/Ulcer_index) Portfolio Optimization\n\n## 1. Downloading the data:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport yfinance as yf\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\npd.options.display.float_format = '{:.4%}'.format\n\n# Date range\nstart = '2016-01-01'\nend = '2019-12-30'\n\n# Tickers of assets\nassets = ['JCI', 'TGT', 'CMCSA', 'CPB', 'MO', 'APA', 'MMC', 'JPM',\n 'ZION', 'PSA', 'BAX', 'BMY', 'LUV', 'PCAR', 'TXT', 'TMO',\n 'DE', 'MSFT', 'HPQ', 'SEE', 'VZ', 'CNP', 'NI', 'T', 'BA']\nassets.sort()\n\n# Downloading data\ndata = yf.download(assets, start = start, end = end)\ndata = data.loc[:,('Adj Close', slice(None))]\ndata.columns = assets", "[*********************100%***********************] 25 of 25 completed\n" ], [ "# Calculating returns\n\nY = data[assets].pct_change().dropna()\n\ndisplay(Y.head())", "_____no_output_____" ] ], [ [ "## 2. Estimating Mean Ulcer Index Portfolios\n\n### 2.1 Calculating the portfolio that maximizes Ulcer Performance Index (UPI) ratio.", "_____no_output_____" ] ], [ [ "import riskfolio as rp\n\n# Building the portfolio object\nport = rp.Portfolio(returns=Y)\n\n# Calculating optimal portfolio\n\n# Select method and estimate input parameters:\n\nmethod_mu='hist' # Method to estimate expected returns based on historical data.\nmethod_cov='hist' # Method to estimate covariance matrix based on historical data.\n\nport.assets_stats(method_mu=method_mu, method_cov=method_cov, d=0.94)\n\n# Estimate optimal portfolio:\n\nmodel='Classic' # Could be Classic (historical), BL (Black Litterman) or FM (Factor Model)\nrm = 'UCI' # Risk measure used, this time will be variance\nobj = 'Sharpe' # Objective function, could be MinRisk, MaxRet, Utility or Sharpe\nhist = True # Use historical scenarios for risk measures that depend on scenarios\nrf = 0 # Risk free rate\nl = 0 # Risk aversion factor, only useful when obj is 'Utility'\n\nw = port.optimization(model=model, rm=rm, obj=obj, rf=rf, l=l, hist=hist)\n\ndisplay(w.T)", "_____no_output_____" ] ], [ [ "### 2.2 Plotting portfolio composition", "_____no_output_____" ] ], [ [ "# Plotting the composition of the portfolio\n\nax = rp.plot_pie(w=w, title='Sharpe Mean Ulcer Index', others=0.05, nrow=25, cmap = \"tab20\",\n height=6, width=10, ax=None)", "_____no_output_____" ] ], [ [ "### 2.3 Calculate efficient frontier", "_____no_output_____" ] ], [ [ "points = 40 # Number of points of the frontier\n\nfrontier = port.efficient_frontier(model=model, rm=rm, points=points, rf=rf, hist=hist)\n\ndisplay(frontier.T.head())", "_____no_output_____" ], [ "# Plotting the efficient frontier\n\nlabel = 'Max Risk Adjusted Return Portfolio' # Title of point\nmu = port.mu # Expected returns\ncov = port.cov # Covariance matrix\nreturns = port.returns # Returns of the assets\n\nax = rp.plot_frontier(w_frontier=frontier, mu=mu, cov=cov, returns=returns, rm=rm,\n rf=rf, alpha=0.05, cmap='viridis', w=w, label=label,\n marker='*', s=16, c='r', height=6, 
width=10, ax=None)", "_____no_output_____" ], [ "# Plotting efficient frontier composition\n\nax = rp.plot_frontier_area(w_frontier=frontier, cmap=\"tab20\", height=6, width=10, ax=None)", "_____no_output_____" ] ], [ [ "## 3. Estimating Risk Parity Portfolios for Ulcer Index\n\n### 3.1 Calculating the risk parity portfolio for Ulcer Index.", "_____no_output_____" ] ], [ [ "b = None # Risk contribution constraints vector\n\nw_rp = port.rp_optimization(model=model, rm=rm, rf=rf, b=b, hist=hist)\n\ndisplay(w.T)", "_____no_output_____" ] ], [ [ "### 3.2 Plotting portfolio composition", "_____no_output_____" ] ], [ [ "ax = rp.plot_pie(w=w_rp, title='Risk Parity Ulcer Index', others=0.05, nrow=25, cmap = \"tab20\",\n height=6, width=10, ax=None)", "_____no_output_____" ] ], [ [ "### 3.3 Plotting Risk Composition", "_____no_output_____" ] ], [ [ "ax = rp.plot_risk_con(w_rp, cov=port.cov, returns=port.returns, rm=rm, rf=0, alpha=0.01,\n color=\"tab:blue\", height=6, width=10, ax=None)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb37175a5e46c1abe4d0c6741ea4fa91278ff580
38,035
ipynb
Jupyter Notebook
SUN/SUN Correlation Analysis.ipynb
mvp18/gAL-MELEX
58fd26b41d40acac3ab98f37c34e4de2d757ac01
[ "MIT" ]
null
null
null
SUN/SUN Correlation Analysis.ipynb
mvp18/gAL-MELEX
58fd26b41d40acac3ab98f37c34e4de2d757ac01
[ "MIT" ]
null
null
null
SUN/SUN Correlation Analysis.ipynb
mvp18/gAL-MELEX
58fd26b41d40acac3ab98f37c34e4de2d757ac01
[ "MIT" ]
null
null
null
25.138797
144
0.425897
[ [ [ "import numpy as np\nfrom scipy import io", "_____no_output_____" ], [ "img_list=[]\nimage_names = io.loadmat('images.mat')['images']\nimage_attributes = io.loadmat('attributeLabels_continuous.mat')['labels_cv']\nfor i in range(image_names.shape[0]):\n img_list.append(image_names[i][0][0])\natt_dict = dict(zip(img_list, image_attributes))", "_____no_output_____" ], [ "res101 = io.loadmat('../../resnet-feats/SUN/res101.mat')\natt_splits = io.loadmat('att_splits.mat')\ntrain_loc = 'train_loc'\nval_loc = 'val_loc'\ntest_loc = 'test_unseen_loc'\ntrain_images = res101['image_files'][np.squeeze(att_splits[train_loc]-1)]\nval_images = res101['image_files'][np.squeeze(att_splits[val_loc]-1)]\ntest_images = res101['image_files'][np.squeeze(att_splits[test_loc]-1)]\nclass_labels = res101['labels']\nprior_matrix = att_splits['att']\ntest_classes = class_labels[np.squeeze(att_splits[test_loc]-1)]\nprior_matrix_ts = prior_matrix[:,(np.unique(test_classes)-1)]", "_____no_output_____" ], [ "print 'Train images = ' + str(len(train_images)) + '\\nVal images = ' + str(len(val_images)) + '\\nTest images = ' + str(len(test_images))", "Train images = 11600\nVal images = 1300\nTest images = 1440\n" ], [ "train_att=np.zeros([train_images.shape[0], image_attributes.shape[1]])\nfor i in range(train_images.shape[0]):\n img_name=train_images[i][0][0].split('images/')[1]\n train_att[i] = np.round(att_dict[img_name])\nval_att=np.zeros([val_images.shape[0], image_attributes.shape[1]])\nfor i in range(val_images.shape[0]):\n img_name=val_images[i][0][0].split('images/')[1]\n val_att[i] = np.round(att_dict[img_name])\ntest_att=np.zeros([test_images.shape[0], image_attributes.shape[1]])\nfor i in range(test_images.shape[0]):\n img_name=test_images[i][0][0].split('images/')[1]\n test_att[i] = np.round(att_dict[img_name])", "_____no_output_____" ], [ "corr_train = np.corrcoef(train_att.transpose())\nnans = np.isnan(corr_train)\ncorr_train[nans] = 0\ncorr_test = np.corrcoef(prior_matrix_ts)\nnans = np.isnan(corr_test)\ncorr_test[nans] = 0\ndef diff_corr(corr_train, corr_test):\n dis_corr = (corr_train - corr_test)\n dis_corr = np.sign(corr_train)*dis_corr\n return dis_corr.clip(0,np.inf)\ndis_corr = diff_corr(corr_train, corr_test)", "_____no_output_____" ], [ "loc = np.unravel_index(np.argsort(-dis_corr, axis=None)[:100], dis_corr.shape)\ndis_corr[loc]", "_____no_output_____" ], [ "dis_corr.mean()", "_____no_output_____" ] ], [ [ "<br>\n<br>\n<br>\n### New split for SUN dataset", "_____no_output_____" ] ], [ [ "res101['image_files']", "_____no_output_____" ], [ "all_img = att_dict.keys()\ncls = [img.split('/')[1] for img in all_img]\nlen(np.unique(cls))", "_____no_output_____" ], [ "res101['labels']", "_____no_output_____" ], [ "att_splits = io.loadmat('att_splits.mat') #ZSL_GBU data\nimage_names = np.array([x[0][0].split('images/')[1] for x in res101['image_files']])\nclass_labels = res101['labels']\nname2class = dict(zip(image_names, np.squeeze(class_labels)))", "_____no_output_____" ], [ "class2name = {}\nfor n in name2class:\n c = name2class[n]\n if c not in class2name:\n class2name[c] = [n]\n else:\n class2name[c].append(n)", "_____no_output_____" ], [ "def diff_corr(corr_train, corr_test):\n dis_corr = (corr_train - corr_test)\n dis_corr = np.sign(corr_train)*dis_corr\n return dis_corr.clip(0,np.inf)", "_____no_output_____" ], [ "def get_corr_shift(att_dict, class2name, train_class, test_class):\n test_att_mat = []\n train_att_mat = []\n for cls in train_class:\n for img in class2name[cls]:\n 
train_att_mat.append(att_dict[img])\n for cls in test_class:\n for img in class2name[cls]:\n test_att_mat.append(att_dict[img])\n# print '#Train instances = ' + str(len(train_att_mat)) + '\\n#Test instances = ' + str(len(test_att_mat))\n \n train_att_mat = np.array(train_att_mat).transpose()\n test_att_mat = np.array(test_att_mat).transpose()\n \n corr_train = np.corrcoef(train_att_mat)\n corr_train[np.isnan(corr_train)] = 0.\n corr_test = np.corrcoef(test_att_mat)\n corr_test[np.isnan(corr_test)] = 0.\n \n dis_corr = diff_corr(corr_train, corr_test)\n \n # correlation shift score: \n # 1) mean\n # corr_shift_score = np.mean(dis_corr)\n \n # 2) average of top n%\n dis_corr_array = dis_corr.flatten()\n top_percentage = 100\n num_elements = int((top_percentage/100.)*len(dis_corr_array))\n corr_shift_score = np.mean(dis_corr_array[np.argsort(dis_corr_array)[-num_elements:]])\n \n return corr_shift_score", "_____no_output_____" ], [ "train_classes = np.unique([name2class['/'.join(tr[0][0].split('/')[8:])] for tr in train_images])\nval_classes = np.unique([name2class['/'.join(val[0][0].split('/')[8:])] for val in val_images])\ntest_classes = np.unique([name2class['/'.join(te[0][0].split('/')[8:])] for te in test_images])", "_____no_output_____" ], [ "get_corr_shift(att_dict, class2name, train_classes, test_classes)", "_____no_output_____" ], [ "import time\n\nselected_classes = []\nremaining_classes = range(718)\nremaining_classes.remove(0)\n\nstart_time = time.time()\nfor i in range(72):\n best_cls = ''\n best_score = -1.\n for cls in remaining_classes:\n new_sel = selected_classes[:]\n new_sel.append(cls)\n new_rem = remaining_classes[:]\n new_rem.remove(cls)\n shift_score = get_corr_shift(att_dict, class2name, new_rem, new_sel)\n if shift_score > best_score:\n best_score = shift_score\n best_cls = cls\n selected_classes.append(best_cls)\n remaining_classes.remove(best_cls)\n print str(i+1) + ') ' + str(selected_classes[-1]) + ' -> ' + str(best_score) + ' :' + str(time.time() - start_time)+ 's' \n\ntest_set = selected_classes[:]\ntrain_val = remaining_classes[:]", "1) 20 -> 0.20462419926437994 :18.3212909698s\n2) 141 -> 0.2093474935863399 :36.9060668945s\n3) 583 -> 0.20047480836126033 :56.3604898453s\n4) 662 -> 0.19322437318073618 :73.2223098278s\n5) 663 -> 0.18833242156442223 :92.0463619232s\n6) 660 -> 0.1837130609114906 :110.226444006s\n7) 388 -> 0.1802782199504697 :129.345793009s\n8) 664 -> 0.17829927410810625 :149.48506999s\n9) 661 -> 0.17622179441352848 :169.875679016s\n10) 666 -> 0.17429209823472316 :188.391891003s\n11) 609 -> 0.17174128692156115 :207.412191868s\n12) 145 -> 0.16907402098510294 :225.539800882s\n13) 156 -> 0.16604607730706417 :243.031993866s\n14) 455 -> 0.1634614413357417 :261.67262888s\n15) 460 -> 0.16136732161467804 :279.927438021s\n16) 486 -> 0.15907406963691845 :299.908885002s\n17) 94 -> 0.15716594599212874 :318.51018095s\n18) 193 -> 0.15553520096515455 :335.962906837s\n19) 414 -> 0.15411966550812403 :353.242909908s\n20) 615 -> 0.15254917703328633 :372.411615849s\n21) 535 -> 0.1519518071301455 :389.708205938s\n22) 552 -> 0.15070018122676382 :408.858501911s\n23) 379 -> 0.1499865977227534 :431.25242281s\n24) 52 -> 0.1487588123409769 :449.518332958s\n25) 355 -> 0.14765011237903833 :468.637346983s\n26) 673 -> 0.1466172561072345 :487.223641872s\n27) 112 -> 0.14582633372000822 :505.92610693s\n28) 36 -> 0.1449008596791561 :523.749419928s\n29) 674 -> 0.1440836628613993 :541.407087803s\n30) 438 -> 0.14314702393280285 :559.822089911s\n31) 318 -> 0.14220538039755481 
:578.394204855s\n32) 119 -> 0.14125285482280225 :598.054668903s\n33) 307 -> 0.14035755854353357 :618.177596807s\n34) 154 -> 0.1395376377135495 :635.326699018s\n35) 258 -> 0.13863817730848743 :654.228767872s\n36) 348 -> 0.13777158571537737 :672.673973799s\n37) 614 -> 0.13698369464610588 :689.955814838s\n38) 665 -> 0.13647729098544611 :709.815603018s\n39) 292 -> 0.1357944381184984 :729.724856853s\n40) 14 -> 0.13514243527107667 :747.78211689s\n41) 580 -> 0.13456586062261497 :766.140393019s\n42) 581 -> 0.13412771906961654 :784.596393824s\n43) 347 -> 0.13370059384756794 :803.64388299s\n44) 195 -> 0.13315630781179005 :822.753786802s\n45) 520 -> 0.1326543908558324 :842.413699865s\n46) 415 -> 0.13222954752971755 :863.686564922s\n47) 585 -> 0.13183141445327656 :883.897001982s\n48) 476 -> 0.13139389110252536 :903.232925892s\n49) 23 -> 0.13107763918912566 :921.900249958s\n50) 362 -> 0.1306890687803099 :941.713186979s\n51) 363 -> 0.13035473861912977 :961.278983831s\n52) 582 -> 0.12999275345731323 :978.680159807s\n53) 579 -> 0.1297255576739692 :996.631824017s\n54) 227 -> 0.12939030550596411 :1016.867028s\n55) 163 -> 0.12906350475859077 :1033.83391881s\n56) 559 -> 0.12877875349309406 :1053.198452s\n57) 505 -> 0.12846322374820285 :1074.71728301s\n58) 179 -> 0.12818681766083642 :1093.56810999s\n59) 532 -> 0.12806082647163994 :1111.47461891s\n60) 572 -> 0.1278442591382658 :1130.70899892s\n61) 548 -> 0.12760881122022352 :1149.50367093s\n62) 246 -> 0.1274188045588027 :1169.81371999s\n63) 152 -> 0.1272539264205753 :1186.43982387s\n64) 702 -> 0.12714700339798019 :1203.05455589s\n65) 454 -> 0.12699453426864457 :1219.582793s\n66) 599 -> 0.12681107994653082 :1238.78178501s\n67) 401 -> 0.12668202550059554 :1259.60398388s\n68) 380 -> 0.126757299074712 :1278.51816082s\n69) 330 -> 0.12671514882854937 :1297.24143481s\n70) 108 -> 0.12663638107398384 :1315.92929196s\n71) 249 -> 0.12660661050339625 :1333.99572992s\n72) 443 -> 0.12657631504353678 :1352.69470286s\n" ], [ "all_classes = range(718)\nall_classes.remove(0)\ntrain_val = all_classes[:]\nfor cls in all_classes:\n if cls in test_set:\n train_val.remove(cls) \nlen(train_val)", "_____no_output_____" ], [ "selected_classes = []\nremaining_classes = train_val[:]\nfor i in range(65):\n best_cls = ''\n best_score = -1.\n for cls in remaining_classes:\n new_sel = selected_classes[:]\n new_sel.append(cls)\n new_rem = remaining_classes[:]\n new_rem.remove(cls)\n shift_score = get_corr_shift(att_dict, class2name, new_rem, test_set)\n if shift_score > best_score:\n best_score = shift_score\n best_cls = cls\n selected_classes.append(best_cls)\n remaining_classes.remove(best_cls)\n print str(i+1) + ') ' + str(selected_classes[-1]) + ' -> ' + str(best_score)\ntrain_set = remaining_classes[:]\nval_set = selected_classes[:]", "1) 354 -> 0.1269732853018949\n2) 345 -> 0.12722343007394799\n3) 538 -> 0.12740130037072006\n4) 283 -> 0.1276036353217557\n5) 271 -> 0.12783243105137734\n6) 477 -> 0.12802720410429658\n7) 219 -> 0.12822483603226514\n8) 655 -> 0.12841089436826603\n9) 82 -> 0.12859504961985427\n10) 508 -> 0.1287134120009919\n11) 270 -> 0.12883888644085156\n12) 334 -> 0.12899817623008233\n13) 333 -> 0.12913922529884012\n14) 416 -> 0.12926684676178427\n15) 494 -> 0.1294062882700574\n16) 53 -> 0.1295847677916621\n17) 261 -> 0.1297270759993281\n18) 516 -> 0.12987119027626\n19) 233 -> 0.13003370154504618\n20) 604 -> 0.13028866986827206\n21) 697 -> 0.13044779282426364\n22) 253 -> 0.13059373657837528\n23) 254 -> 0.13073933173013058\n24) 690 -> 0.13087716503395916\n25) 308 -> 
0.13099731578571006\n26) 296 -> 0.13111697699816074\n27) 502 -> 0.13124521639087916\n28) 596 -> 0.1313744891650859\n29) 554 -> 0.13150024274430447\n30) 120 -> 0.1316546848234275\n31) 162 -> 0.1317680650012808\n32) 349 -> 0.13192344779564108\n33) 524 -> 0.13202762130807577\n34) 323 -> 0.1321317944865634\n35) 370 -> 0.13224370651078637\n36) 87 -> 0.132346231760096\n37) 42 -> 0.1324562846175756\n38) 628 -> 0.1325775755582808\n39) 540 -> 0.13268002833427747\n40) 17 -> 0.13277936059308756\n41) 468 -> 0.13287812076830469\n42) 111 -> 0.1329926917295272\n43) 336 -> 0.13311943025751624\n44) 76 -> 0.1332389603028809\n45) 286 -> 0.13348090258133127\n46) 570 -> 0.1335959081386135\n47) 289 -> 0.13371001453194428\n48) 64 -> 0.1337977555403638\n49) 393 -> 0.13388244194902166\n50) 705 -> 0.13398145914380363\n51) 294 -> 0.1340928520093043\n52) 703 -> 0.13420289692165277\n53) 105 -> 0.1342983750788611\n54) 235 -> 0.13441103334505378\n55) 688 -> 0.13453926153318066\n56) 123 -> 0.13465557231357517\n57) 51 -> 0.13478584497626522\n58) 106 -> 0.1348889300047058\n59) 29 -> 0.13501171370840612\n60) 543 -> 0.13511169059776268\n61) 3 -> 0.13522414003097077\n62) 462 -> 0.13532425152327388\n63) 155 -> 0.135416859846578\n64) 115 -> 0.13550991096956375\n65) 519 -> 0.13562266256141875\n" ], [ "print get_corr_shift(att_dict, class2name, train_classes, test_classes)\nprint get_corr_shift(att_dict, class2name, train_set, test_set)", "0.03247590150450682\n0.13562266256141875\n" ], [ "print get_corr_shift(att_dict, class2name, train_classes, test_classes)\nprint get_corr_shift(att_dict, class2name, train_set, test_set)", "0.016239680487793468\n0.07378322213034062\n" ], [ "test_count = 0\nfor cls in test_set:\n test_count += len(class2name[cls])", "_____no_output_____" ], [ "test_count", "_____no_output_____" ], [ "split_dict = {}\nsplit_dict['train_cls'] = train_set\nsplit_dict['val_cls'] = val_set\nsplit_dict['test_cls'] = test_set\n\nimport pickle\nwith open('sun_cs_split.npy', 'wb') as fp:\n np.save(fp, split_dict)", "_____no_output_____" ], [ "sd = np.load('sun_cs_split.npy', allow_pickle=True).item()\nsd", "_____no_output_____" ], [ "min(train_set+val_set+test_set)", "_____no_output_____" ], [ "train_loc = []\nval_loc = []\ntest_loc = []\n\nfor i, label in enumerate(class_labels):\n if label in sd['train_cls']:\n train_loc.append(i)\n elif label in sd['val_cls']:\n val_loc.append(i)\n elif label in sd['test_cls']:\n test_loc.append(i)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb372f0349e207381abc2c901acad7c28d01b636
6,358
ipynb
Jupyter Notebook
materials/third_day/Q65R_Phase_Kickback.ipynb
QRussia/bronze-qiskit-russian-edition
4f4ba1cff7b99161371d16d91b525107ed29174e
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
materials/third_day/Q65R_Phase_Kickback.ipynb
QRussia/bronze-qiskit-russian-edition
4f4ba1cff7b99161371d16d91b525107ed29174e
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
materials/third_day/Q65R_Phase_Kickback.ipynb
QRussia/bronze-qiskit-russian-edition
4f4ba1cff7b99161371d16d91b525107ed29174e
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
38.533333
414
0.577697
[ [ [ "# Phase Kickback (фазовый откат?)\n\nВ деталях об этом явлении можно почитать в [qiskit texbook](https://qiskit.org/textbook/ch-algorithms/grover.html). Для нас оно будет нужно, чтобы сконветировать \"классическую функцию\" (оракул) $f$ в функцию определённого вида.\n\n## Фаза\n\nДля начала, напомним, что такое **фаза**. Существуют состояния-суперпозиции, которые при измерении дают нам одинаковые вероятности наблюдения, например\n\n$|\\psi_+\\rangle=|+\\rangle=\\frac{1}{\\sqrt{2}}(|0\\rangle+|1\\rangle)$\n\nи\n\n$|\\psi_-\\rangle=|-\\rangle=\\frac{1}{\\sqrt{2}}(|0\\rangle-|1\\rangle)$\n\nЕсли вы попытаетесь из измерить, то различить их не удастся. Тем не менее мы знаем, что если применить к каждому из них вентиль Адамара, то мы получим уже разные результаты! \n\nА вообще их бесконечно много. В общем виде все такие равноверятные состояния можно записать как\n\n$|\\psi\\rangle=\\frac{1}{\\sqrt{2}}(|0\\rangle+e^{i\\phi}|1\\rangle)$,\n\nгде $\\phi$ -- и есть фаза. Приведённые выше примеры -- частные случаи для $\\phi=0, \\pi$. Но именно они нам и будут интересны.\n\nNB! Непосредственно фазу наблюдать мы не можем, но она может влиять не результаты последующих вычислений.\n\n## Собственные векторы (состояния) и значения (фазы)\n\nСобственным (для матрицы) называется так вектор, который она не в состоянии \"испортить\". Самое страшное что матрица-оператор может сделать с вектором -- умножить его на **какую-нибудь константу** (эту константу и называют собственным значением). Сколько вы не применяйте $X$ к состоянию $|+\\rangle$, оно останется неизменным.\n\nУ матрицы 2x2 не больше чем 2 собственных вектора. Для оператора $NOT$ ими являются векторы-состояния $|+\\rangle$ и $|-\\rangle$ с собственными значениями 1 и -1 соответственно.\n\n$X|-\\rangle = X\\frac{1}{\\sqrt{2}}(|0\\rangle-|1\\rangle)=\\frac{1}{\\sqrt{2}}(|1\\rangle-|0\\rangle)=-\\frac{1}{\\sqrt{2}}(|0\\rangle-|1\\rangle)=-|-\\rangle$.\n\nЭтим интересными свойствами и пользуется phase kickback:\n\nЕсли мы применим контролируемый оператор (в нашем случае $CNOT$) к собственному состоянию (например, $|-\\rangle$), то собственное значение для этого состояния запрыгнет в **фазу управляющего кубита (или даже регистра)**. Такого мы не ожидали! Мы думали, что управляющий кубит (или регистр) неизменны, но это не так. Это математическая особенность нам будет очень на руку при реализации алгоритма Гровера.\n\n\nВот так например можно найти собственные вектора и значения для поизвольных операторов:", "_____no_output_____" ] ], [ [ "## TODO compute eigenstates on X\nimport numpy as np\nX = np.array([[0., 1], [1, 0]])\nevals, evecs = np.linalg.eig(X)\nfor i in range(X.shape[0]):\n print(\"Собственное значение:\", evals[i], end='\\t')\n print(\"Собственный вектор:\", evecs[:, i])", "Собственное значение: 1.0\tСобственный вектор: [0.70710678 0.70710678]\nСобственное значение: -1.0\tСобственный вектор: [-0.70710678 0.70710678]\n" ] ], [ [ "Как проверить этот эффект на $CNOT$? Очень просто. Если подготовить:\n- управляющий кубит в $|+\\rangle$,\n- управляемый кубит в $|-\\rangle$,\n\nто после применения CNOT можно ожидать, что управляющий $\\frac{1}{\\sqrt{2}}(|0\\rangle+|1\\rangle)$ превратится в $\\frac{1}{\\sqrt{2}}(|0\\rangle+(-1)|1\\rangle)=|-\\rangle$. 
А значит, применив $H$, мы должны будем получить $|1\\rangle$!", "_____no_output_____" ] ], [ [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, Aer, execute\nfrom qiskit.visualization import plot_histogram\nimport matplotlib.pyplot as plt\n\nqr = QuantumRegister(2, \"q\")\ncr = ClassicalRegister(1, \"c\")\nqc = QuantumCircuit(qr, cr)\n\n# Чтобы увидеть эффект, подготовим состояние |+> в управляющем кубите. \n# Если бы не работал, мы бы увидели - |0> -> |+> -> |0>,\n# Но мы увидим |0> -> |+> -> (!)|-> -> |1>\n\n#################################\n## TODO\n#################################\n\nqc.barrier()\n\n# подгтовим состояние |-> - собственное для X in qr[1]\n#################################\n## TODO\n#################################\n\nqc.barrier()\n\n# Controlled NOT\nqc.cx(0, 1)\nqc.barrier()\n\n# готовим однозначно измеримое состояние\n#################################\n## TODO\n#################################\n\nqc.measure(qr[0], cr[0])\nprint(qc.draw())\n\njob = execute(qc, Aer.get_backend('qasm_simulator'), shots=1)\ncounts = job.result().get_counts(qc)\nplot_histogram(counts)", "_____no_output_____" ] ], [ [ "[решение](./Q65R_Phase_Kickback_Solution.ipynb)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb37312ef55fc3a2a47d65bf4adc79e2cc800f74
16,767
ipynb
Jupyter Notebook
driver/makeDBs_ml-latest.ipynb
TheManohar/movie_recommender
eb7b2ab8824331142bdec5e5d2cd4cf4c432b285
[ "MIT" ]
2
2018-10-12T07:57:52.000Z
2018-10-12T08:02:21.000Z
driver/makeDBs_ml-latest.ipynb
TheManohar/movie_recommender
eb7b2ab8824331142bdec5e5d2cd4cf4c432b285
[ "MIT" ]
null
null
null
driver/makeDBs_ml-latest.ipynb
TheManohar/movie_recommender
eb7b2ab8824331142bdec5e5d2cd4cf4c432b285
[ "MIT" ]
null
null
null
26.404724
218
0.345619
[ [ [ "# def sqliteDBfromIMDB():\nimport pandas as pd\n#import postgres as pg\nimport sqlite3\nimport sys\nsys.path.insert(0, '../drivers')", "_____no_output_____" ], [ "df1 = pd.read_csv('../rawData/ml-latest/movies.csv', index_col=0) \ndf1.head()", "_____no_output_____" ], [ "df2 = pd.read_csv('../rawData/ml-latest/ratings.csv', index_col=1) \ndf2.head()", "/home/manohar/anaconda3/lib/python3.7/site-packages/numpy/lib/arraysetops.py:522: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n mask |= (ar1 == a)\n" ], [ "df3 = pd.read_csv('../rawData/ml-latest/tags.csv', index_col=1) \ndf3.head()", "_____no_output_____" ], [ "df4 = pd.read_csv('../rawData/ml-latest/genome-scores.csv', index_col=0) \ndf4.head()", "_____no_output_____" ], [ "df5 = pd.read_csv('../rawData/ml-latest/genome-tags.csv', index_col=0) \ndf5.head()", "_____no_output_____" ], [ "df6 = pd.read_csv('../rawData/ml-latest/links.csv', index_col=0) \ndf6.head()", "_____no_output_____" ], [ "db = sqlite3.connect('../database/ML_latest.db')\n#db.executescript(DB_SETUP)\ndf1.to_sql('ml_movies', db)", "_____no_output_____" ], [ "df2.to_sql('ml_ratings', db)", "_____no_output_____" ], [ "df3.to_sql('ml_tags', db)", "_____no_output_____" ], [ "df4.to_sql('ml_genome-scores', db)", "_____no_output_____" ], [ "df5.to_sql('ml_genome-tags', db)", "_____no_output_____" ], [ "df6.to_sql('ml_links', db)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb373712386d82800bc8528d71a43df4ca61c534
62,555
ipynb
Jupyter Notebook
pycaret.ipynb
hnguyen1174/ds-tutorials
88d21260ff77b8189653e9e9bde22f300ffae1bf
[ "MIT" ]
null
null
null
pycaret.ipynb
hnguyen1174/ds-tutorials
88d21260ff77b8189653e9e9bde22f300ffae1bf
[ "MIT" ]
null
null
null
pycaret.ipynb
hnguyen1174/ds-tutorials
88d21260ff77b8189653e9e9bde22f300ffae1bf
[ "MIT" ]
null
null
null
84.533784
1,631
0.630533
[ [ [ "## 1. Intro", "_____no_output_____" ], [ "* Source: https://towardsdatascience.com/introduction-to-binary-classification-with-pycaret-a37b3e89ad8d\n* Dataset: Lichman, M. (2013). UCI Machine Learning Repository. Irvine, CA: University of California, School of Information and Computer Science.\n* **Value proposition:** PyCaret is an open-source, low-code machine learning library in Python that automates machine learning workflows.\n* Website: https://www.pycaret.org/\n* Github: https://github.com/pycaret/pycaret", "_____no_output_____" ] ], [ [ "!pip install pycaret", "Collecting pycaret\n Downloading pycaret-2.3.5-py3-none-any.whl (288 kB)\n\u001b[K |████████████████████████████████| 288 kB 2.7 MB/s eta 0:00:01\n\u001b[?25hCollecting pandas-profiling>=2.8.0\n Downloading pandas_profiling-3.1.0-py2.py3-none-any.whl (261 kB)\n\u001b[K |████████████████████████████████| 261 kB 20.7 MB/s eta 0:00:01\n\u001b[?25hCollecting spacy<2.4.0\n Downloading spacy-2.3.7-cp38-cp38-macosx_10_9_x86_64.whl (10.3 MB)\n\u001b[K |████████████████████████████████| 10.3 MB 11.9 MB/s eta 0:00:01\n\u001b[?25hCollecting numpy==1.19.5\n Downloading numpy-1.19.5-cp38-cp38-macosx_10_9_x86_64.whl (15.6 MB)\n\u001b[K |████████████████████████████████| 15.6 MB 1.6 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: seaborn in /opt/anaconda3/lib/python3.8/site-packages (from pycaret) (0.11.1)\nCollecting scikit-learn==0.23.2\n Downloading scikit_learn-0.23.2-cp38-cp38-macosx_10_9_x86_64.whl (7.2 MB)\n\u001b[K |████████████████████████████████| 7.2 MB 12.8 MB/s eta 0:00:01\n\u001b[?25hCollecting mlxtend>=0.17.0\n Downloading mlxtend-0.19.0-py2.py3-none-any.whl (1.3 MB)\n\u001b[K |████████████████████████████████| 1.3 MB 26.7 MB/s eta 0:00:01\n\u001b[?25hCollecting pyod\n Downloading pyod-0.9.5.tar.gz (113 kB)\n\u001b[K |████████████████████████████████| 113 kB 29.4 MB/s eta 0:00:01\n\u001b[?25hCollecting gensim<4.0.0\n Downloading gensim-3.8.3-cp38-cp38-macosx_10_9_x86_64.whl (24.2 MB)\n\u001b[K |████████████████████████████████| 24.2 MB 22.5 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: nltk in /opt/anaconda3/lib/python3.8/site-packages (from pycaret) (3.6.1)\nCollecting scikit-plot\n Downloading scikit_plot-0.3.7-py3-none-any.whl (33 kB)\nCollecting umap-learn\n Downloading umap-learn-0.5.2.tar.gz (86 kB)\n\u001b[K |████████████████████████████████| 86 kB 17.6 MB/s eta 0:00:01\n\u001b[?25hCollecting yellowbrick>=1.0.1\n Downloading yellowbrick-1.3.post1-py3-none-any.whl (271 kB)\n\u001b[K |████████████████████████████████| 271 kB 19.9 MB/s eta 0:00:01\n\u001b[?25hCollecting lightgbm>=2.3.1\n Downloading lightgbm-3.3.1-py3-none-macosx_10_14_x86_64.macosx_10_15_x86_64.macosx_11_0_x86_64.whl (1.2 MB)\n\u001b[K |████████████████████████████████| 1.2 MB 24.7 MB/s eta 0:00:01\n\u001b[?25hCollecting Boruta\n Downloading Boruta-0.3-py3-none-any.whl (56 kB)\n\u001b[K |████████████████████████████████| 56 kB 19.6 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: joblib in /opt/anaconda3/lib/python3.8/site-packages (from pycaret) (1.0.1)\nCollecting cufflinks>=0.17.0\n Downloading cufflinks-0.17.3.tar.gz (81 kB)\n\u001b[K |████████████████████████████████| 81 kB 22.7 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: wordcloud in /opt/anaconda3/lib/python3.8/site-packages (from pycaret) (1.8.1)\nRequirement already satisfied: plotly>=4.4.1 in /opt/anaconda3/lib/python3.8/site-packages (from pycaret) (5.4.0)\nCollecting mlflow\n Downloading mlflow-1.21.0-py3-none-any.whl (16.9 
MB)\n\u001b[K |████████████████████████████████| 16.9 MB 13.2 MB/s eta 0:00:01\n\u001b[?25hCollecting pyLDAvis\n Downloading pyLDAvis-3.3.1.tar.gz (1.7 MB)\n\u001b[K |████████████████████████████████| 1.7 MB 25.5 MB/s eta 0:00:01\n\u001b[?25h Installing build dependencies ... \u001b[?25ldone\n\u001b[?25h Getting requirements to build wheel ... \u001b[?25ldone\n\u001b[?25h Installing backend dependencies ... \u001b[?25ldone\n\u001b[?25h Preparing wheel metadata ... \u001b[?25ldone\n\u001b[?25hRequirement already satisfied: ipywidgets in /opt/anaconda3/lib/python3.8/site-packages (from pycaret) (7.6.3)\nCollecting imbalanced-learn==0.7.0\n Downloading imbalanced_learn-0.7.0-py3-none-any.whl (167 kB)\n\u001b[K |████████████████████████████████| 167 kB 24.0 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: pandas in /opt/anaconda3/lib/python3.8/site-packages (from pycaret) (1.2.4)\nCollecting textblob\n Downloading textblob-0.17.1-py2.py3-none-any.whl (636 kB)\n\u001b[K |████████████████████████████████| 636 kB 28.6 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: IPython in /opt/anaconda3/lib/python3.8/site-packages (from pycaret) (7.22.0)\nCollecting scipy<=1.5.4\n Downloading scipy-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl (29.0 MB)\n\u001b[K |████████████████████████████████| 29.0 MB 135 kB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: matplotlib in /opt/anaconda3/lib/python3.8/site-packages (from pycaret) (3.3.4)\nCollecting kmodes>=0.10.1\n Downloading kmodes-0.11.1-py2.py3-none-any.whl (19 kB)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /opt/anaconda3/lib/python3.8/site-packages (from scikit-learn==0.23.2->pycaret) (2.1.0)\nRequirement already satisfied: six>=1.9.0 in /opt/anaconda3/lib/python3.8/site-packages (from cufflinks>=0.17.0->pycaret) (1.15.0)\nCollecting colorlover>=0.2.1\n Downloading colorlover-0.3.0-py3-none-any.whl (8.9 kB)\nRequirement already satisfied: setuptools>=34.4.1 in /opt/anaconda3/lib/python3.8/site-packages (from cufflinks>=0.17.0->pycaret) (52.0.0.post20210125)\nCollecting smart-open>=1.8.1\n Downloading smart_open-5.2.1-py3-none-any.whl (58 kB)\n\u001b[K |████████████████████████████████| 58 kB 15.8 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: appnope in /opt/anaconda3/lib/python3.8/site-packages (from IPython->pycaret) (0.1.2)\nRequirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /opt/anaconda3/lib/python3.8/site-packages (from IPython->pycaret) (3.0.17)\nRequirement already satisfied: pygments in /opt/anaconda3/lib/python3.8/site-packages (from IPython->pycaret) (2.8.1)\nRequirement already satisfied: pickleshare in /opt/anaconda3/lib/python3.8/site-packages (from IPython->pycaret) (0.7.5)\nRequirement already satisfied: backcall in /opt/anaconda3/lib/python3.8/site-packages (from IPython->pycaret) (0.2.0)\nRequirement already satisfied: traitlets>=4.2 in /opt/anaconda3/lib/python3.8/site-packages (from IPython->pycaret) (5.0.5)\nRequirement already satisfied: jedi>=0.16 in /opt/anaconda3/lib/python3.8/site-packages (from IPython->pycaret) (0.17.2)\nRequirement already satisfied: decorator in /opt/anaconda3/lib/python3.8/site-packages (from IPython->pycaret) (5.0.6)\nRequirement already satisfied: pexpect>4.3 in /opt/anaconda3/lib/python3.8/site-packages (from IPython->pycaret) (4.8.0)\nRequirement already satisfied: ipykernel>=4.5.1 in /opt/anaconda3/lib/python3.8/site-packages (from ipywidgets->pycaret) (5.3.4)\nRequirement already satisfied: nbformat>=4.2.0 in 
/opt/anaconda3/lib/python3.8/site-packages (from ipywidgets->pycaret) (5.1.3)\nRequirement already satisfied: jupyterlab-widgets>=1.0.0 in /opt/anaconda3/lib/python3.8/site-packages (from ipywidgets->pycaret) (1.0.0)\nRequirement already satisfied: widgetsnbextension~=3.5.0 in /opt/anaconda3/lib/python3.8/site-packages (from ipywidgets->pycaret) (3.5.1)\nRequirement already satisfied: jupyter-client in /opt/anaconda3/lib/python3.8/site-packages (from ipykernel>=4.5.1->ipywidgets->pycaret) (6.1.12)\nRequirement already satisfied: tornado>=4.2 in /opt/anaconda3/lib/python3.8/site-packages (from ipykernel>=4.5.1->ipywidgets->pycaret) (6.1)\nRequirement already satisfied: parso<0.8.0,>=0.7.0 in /opt/anaconda3/lib/python3.8/site-packages (from jedi>=0.16->IPython->pycaret) (0.7.0)\nRequirement already satisfied: wheel in /opt/anaconda3/lib/python3.8/site-packages (from lightgbm>=2.3.1->pycaret) (0.36.2)\nRequirement already satisfied: pillow>=6.2.0 in /opt/anaconda3/lib/python3.8/site-packages (from matplotlib->pycaret) (8.2.0)\nRequirement already satisfied: cycler>=0.10 in /opt/anaconda3/lib/python3.8/site-packages (from matplotlib->pycaret) (0.10.0)\nRequirement already satisfied: python-dateutil>=2.1 in /opt/anaconda3/lib/python3.8/site-packages (from matplotlib->pycaret) (2.8.1)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in /opt/anaconda3/lib/python3.8/site-packages (from matplotlib->pycaret) (2.4.7)\nRequirement already satisfied: kiwisolver>=1.0.1 in /opt/anaconda3/lib/python3.8/site-packages (from matplotlib->pycaret) (1.3.1)\nRequirement already satisfied: jupyter-core in /opt/anaconda3/lib/python3.8/site-packages (from nbformat>=4.2.0->ipywidgets->pycaret) (4.7.1)\nRequirement already satisfied: jsonschema!=2.5.0,>=2.4 in /opt/anaconda3/lib/python3.8/site-packages (from nbformat>=4.2.0->ipywidgets->pycaret) (3.2.0)\nRequirement already satisfied: ipython-genutils in /opt/anaconda3/lib/python3.8/site-packages (from nbformat>=4.2.0->ipywidgets->pycaret) (0.2.0)\nRequirement already satisfied: attrs>=17.4.0 in /opt/anaconda3/lib/python3.8/site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets->pycaret) (20.3.0)\n" ], [ "# loading the dataset\nfrom pycaret.datasets import get_data\ndataset = get_data('credit')", "_____no_output_____" ] ], [ [ "**Columns:**\n\n* `ID`: ID of each client\n* `LIMIT_BAL`: credit limit (Taiwan NT)\n* `SEX`: 1 = Male; 2 = Female\n* `EDUCATION`: 1 = graduate school, 2 = university, 3 = high school, 4 = others, 5 = unknown, 6 = unknown\n* `MARRIAGE`: 1 = married; 2 = single; 3 = others\n* `AGE`: age in years\n* `PAY_0` to `PAY_6`: Repayment status by n months ago (PAY_0 = last month … PAY_6 = 6 months ago) (Labels: -1=pay duly, 1=payment delay for one month, 2=payment delay for two months, … 8=payment delay for eight months, 9=payment delay for nine months and above)\n* `BILL_AMT1` to `BILL_AMT6`: Amount of bill statement by n months ago ( BILL_AMT1 = last_month .. BILL_AMT6 = 6 months ago)\n* `PAY_AMT1` to `PAY_AMT6`: Amount of payment by n months ago ( BILL_AMT1 = last_month .. BILL_AMT6 = 6 months ago)\n* `default`: Default payment (1=yes, 0=no). 
Target Column", "_____no_output_____" ] ], [ [ "dataset.shape", "_____no_output_____" ], [ "# sample 5% of data to be used as unseen data\ndata = dataset.sample(frac=0.95, random_state=786)\ndata_unseen = dataset.drop(data.index)\ndata.reset_index(inplace=True, drop=True)\ndata_unseen.reset_index(inplace=True, drop=True)", "_____no_output_____" ], [ "# print the revised shape\nprint('Data for Modeling: ' + str(data.shape))\nprint('Unseen Data For Predictions: ' + str(data_unseen.shape))", "Data for Modeling: (22800, 24)\nUnseen Data For Predictions: (1200, 24)\n" ], [ "from pycaret.classification import *\ns = setup(data = data, target = 'default', session_id=123)", "_____no_output_____" ], [ "import lightgbm as lgb", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb375866fd5127ac57241e8224b31c57e15d8932
63,044
ipynb
Jupyter Notebook
language-translation/dlnd_language_translation.ipynb
thomasdunlap/deep-learning
bed57f90fc4ab70dcae11db0c6908f9541dec0ee
[ "MIT" ]
null
null
null
language-translation/dlnd_language_translation.ipynb
thomasdunlap/deep-learning
bed57f90fc4ab70dcae11db0c6908f9541dec0ee
[ "MIT" ]
null
null
null
language-translation/dlnd_language_translation.ipynb
thomasdunlap/deep-learning
bed57f90fc4ab70dcae11db0c6908f9541dec0ee
[ "MIT" ]
null
null
null
55.156605
1,977
0.622676
[ [ [ "# Language Translation\nIn this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.\n## Get the Data\nSince translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\nimport problem_unittests as tests\n\nsource_path = 'data/small_vocab_en'\ntarget_path = 'data/small_vocab_fr'\nsource_text = helper.load_data(source_path)\ntarget_text = helper.load_data(target_path)", "_____no_output_____" ] ], [ [ "## Explore the Data\nPlay around with view_sentence_range to view different parts of the data.", "_____no_output_____" ] ], [ [ "view_sentence_range = (0, 10)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\n\nprint('Dataset Stats')\nprint('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))\n\nsentences = source_text.split('\\n')\nword_counts = [len(sentence.split()) for sentence in sentences]\nprint('Number of sentences: {}'.format(len(sentences)))\nprint('Average number of words in a sentence: {}'.format(np.average(word_counts)))\n\nprint()\nprint('English sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(source_text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))\nprint()\nprint('French sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(target_text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))", "Dataset Stats\nRoughly the number of unique words: 227\nNumber of sentences: 137861\nAverage number of words in a sentence: 13.225277634719028\n\nEnglish sentences 0 to 10:\nnew jersey is sometimes quiet during autumn , and it is snowy in april .\nthe united states is usually chilly during july , and it is usually freezing in november .\ncalifornia is usually quiet during march , and it is usually hot in june .\nthe united states is sometimes mild during june , and it is cold in september .\nyour least liked fruit is the grape , but my least liked is the apple .\nhis favorite fruit is the orange , but my favorite is the grape .\nparis is relaxing during december , but it is usually chilly in july .\nnew jersey is busy during spring , and it is never hot in march .\nour least liked fruit is the lemon , but my least liked is the grape .\nthe united states is sometimes busy during january , and it is sometimes warm in november .\n\nFrench sentences 0 to 10:\nnew jersey est parfois calme pendant l' automne , et il est neigeux en avril .\nles états-unis est généralement froid en juillet , et il gèle habituellement en novembre .\ncalifornia est généralement calme en mars , et il est généralement chaud en juin .\nles états-unis est parfois légère en juin , et il fait froid en septembre .\nvotre moins aimé fruit est le raisin , mais mon moins aimé est la pomme .\nson fruit préféré est l'orange , mais mon préféré est le raisin .\nparis est relaxant en décembre , mais il est généralement froid en juillet .\nnew jersey est occupé au printemps , et il est jamais chaude en mars .\nnotre fruit est moins aimé le citron , mais mon moins aimé est le raisin .\nles états-unis est parfois occupé en janvier , et il est parfois chaud en novembre .\n" ] ], [ [ "## Implement Preprocessing Function\n### 
Text to Word Ids\nAs you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function `text_to_ids()`, you'll turn `source_text` and `target_text` from words to ids. However, you need to add the `<EOS>` word id at the end of `target_text`. This will help the neural network predict when the sentence should end.\n\nYou can get the `<EOS>` word id by doing:\n```python\ntarget_vocab_to_int['<EOS>']\n```\nYou can get other word ids using `source_vocab_to_int` and `target_vocab_to_int`.", "_____no_output_____" ] ], [ [ "def get_id_text(input, vocab_to_int):\n return [[vocab_to_int[word] for word in sentence.split()] for sentence in input]\n", "_____no_output_____" ], [ "def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):\n \"\"\"\n Convert source and target text to proper word ids\n :param source_text: String that contains all the source text.\n :param target_text: String that contains all the target text.\n :param source_vocab_to_int: Dictionary to go from the source words to an id\n :param target_vocab_to_int: Dictionary to go from the target words to an id\n :return: A tuple of lists (source_id_text, target_id_text)\n \"\"\"\n source_sentences = [sentence for sentence in source_text.split('\\n')]\n target_sentences = [sentence + ' <EOS>' for sentence in target_text.split('\\n')]\n \n source_id_text = get_id_text(source_sentences, source_vocab_to_int)\n target_id_text = get_id_text(target_sentences, target_vocab_to_int)\n \n return source_id_text, target_id_text\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_text_to_ids(text_to_ids)", "Tests Passed\n" ] ], [ [ "### Preprocess all the data and save it\nRunning the code cell below will preprocess all the data and save it to file.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nhelper.preprocess_and_save_data(source_path, target_path, text_to_ids)", "_____no_output_____" ] ], [ [ "# Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\nimport helper\nimport problem_unittests as tests\n\n(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()", "_____no_output_____" ] ], [ [ "### Check the Version of TensorFlow and Access to GPU\nThis will check to make sure you have the correct version of TensorFlow and access to a GPU", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom distutils.version import LooseVersion\nimport warnings\nimport tensorflow as tf\nfrom tensorflow.python.layers.core import Dense\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. 
Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))", "TensorFlow Version: 1.11.0\nDefault GPU Device: /device:GPU:0\n" ] ], [ [ "## Build the Neural Network\nYou'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:\n- `model_inputs`\n- `process_decoder_input`\n- `encoding_layer`\n- `decoding_layer_train`\n- `decoding_layer_infer`\n- `decoding_layer`\n- `seq2seq_model`\n\n### Input\nImplement the `model_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:\n\n- Input text placeholder named \"input\" using the TF Placeholder name parameter with rank 2.\n- Targets placeholder with rank 2.\n- Learning rate placeholder with rank 0.\n- Keep probability placeholder named \"keep_prob\" using the TF Placeholder name parameter with rank 0.\n- Target sequence length placeholder named \"target_sequence_length\" with rank 1\n- Max target sequence length tensor named \"max_target_len\" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0.\n- Source sequence length placeholder named \"source_sequence_length\" with rank 1\n\nReturn the placeholders in the following the tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length)", "_____no_output_____" ] ], [ [ "def model_inputs():\n \"\"\"\n Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.\n :return: Tuple (input, targets, learning rate, keep probability, target sequence length,\n max target sequence length, source sequence length)\n \"\"\"\n inputs = tf.placeholder(tf.int32, [None, None], name='input')\n targets = tf.placeholder(tf.int32, [None, None], name='targets')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n target_sequence_length = tf.placeholder(tf.int32, [None], name='target_sequence_length')\n max_target_len = tf.reduce_max(target_sequence_length,name='max_target_len')\n source_sequence_length = tf.placeholder(tf.int32, [None], name='source_sequence_length')\n \n return inputs, targets, learning_rate, keep_prob, target_sequence_length, max_target_len, source_sequence_length\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_model_inputs(model_inputs)", "Tests Passed\n" ] ], [ [ "### Process Decoder Input\nImplement `process_decoder_input` by removing the last word id from each batch in `target_data` and concat the GO ID to the begining of each batch.", "_____no_output_____" ] ], [ [ "def process_decoder_input(target_data, target_vocab_to_int, batch_size):\n \"\"\"\n Preprocess target data for encoding\n :param target_data: Target Placehoder\n :param target_vocab_to_int: Dictionary to go from the target words to an id\n :param batch_size: Batch Size\n :return: Preprocessed target data\n \"\"\"\n go = target_vocab_to_int['<GO>']\n ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])\n return tf.concat([tf.fill([batch_size, 1], go), ending], 1)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_process_encoding_input(process_decoder_input)", "Tests Passed\n" ] ], [ [ "### Encoding\nImplement `encoding_layer()` to create a Encoder RNN layer:\n * Embed the encoder input using 
[`tf.contrib.layers.embed_sequence`](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/embed_sequence)\n * Construct a [stacked](https://github.com/tensorflow/tensorflow/blob/6947f65a374ebf29e74bb71e36fd82760056d82c/tensorflow/docs_src/tutorials/recurrent.md#stacking-multiple-lstms) [`tf.contrib.rnn.LSTMCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/LSTMCell) wrapped in a [`tf.contrib.rnn.DropoutWrapper`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/DropoutWrapper)\n * Pass cell and embedded input to [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)", "_____no_output_____" ] ], [ [ "from imp import reload\nreload(tests)\n\ndef encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, \n source_sequence_length, source_vocab_size, \n encoding_embedding_size):\n \"\"\"\n Create encoding layer\n :param rnn_inputs: Inputs for the RNN\n :param rnn_size: RNN Size\n :param num_layers: Number of layers\n :param keep_prob: Dropout keep probability\n :param source_sequence_length: a list of the lengths of each sequence in the batch\n :param source_vocab_size: vocabulary size of source data\n :param encoding_embedding_size: embedding size of source data\n :return: tuple (RNN output, RNN state)\n \"\"\"\n embedded_encoder_input = tf.contrib.layers.embed_sequence(rnn_inputs, source_vocab_size, encoding_embedding_size)\n \n def make_cell(rnn_size):\n lstm = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))\n drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)\n return drop\n cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])\n rnn_out, rnn_state = tf.nn.dynamic_rnn(cell, embedded_encoder_input, sequence_length=source_sequence_length,\n dtype=tf.float32)\n \n return rnn_out, rnn_state\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_encoding_layer(encoding_layer)", "Tests Passed\n" ] ], [ [ "### Decoding - Training\nCreate a training decoding layer:\n* Create a [`tf.contrib.seq2seq.TrainingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/TrainingHelper) \n* Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder)\n* Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode)", "_____no_output_____" ] ], [ [ "\ndef decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \n target_sequence_length, max_summary_length, \n output_layer, keep_prob):\n \"\"\"\n Create a decoding layer for training\n :param encoder_state: Encoder State\n :param dec_cell: Decoder RNN Cell\n :param dec_embed_input: Decoder embedded input\n :param target_sequence_length: The lengths of each sequence in the target batch\n :param max_summary_length: The length of the longest sequence in the batch\n :param output_layer: Function to apply the output layer\n :param keep_prob: Dropout keep probability\n :return: BasicDecoderOutput containing training logits and sample_id\n \"\"\"\n training_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)\n training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, training_helper, encoder_state, output_layer)\n output, _ = tf.contrib.seq2seq.dynamic_decode(training_decoder, maximum_iterations = max_summary_length)\n \n return output\n\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN 
THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_decoding_layer_train(decoding_layer_train)", "_____no_output_____" ] ], [ [ "### Decoding - Inference\nCreate inference decoder:\n* Create a [`tf.contrib.seq2seq.GreedyEmbeddingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/GreedyEmbeddingHelper)\n* Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder)\n* Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode)", "_____no_output_____" ] ], [ [ "def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,\n end_of_sequence_id, max_target_sequence_length,\n vocab_size, output_layer, batch_size, keep_prob):\n \"\"\"\n Create a decoding layer for inference\n :param encoder_state: Encoder state\n :param dec_cell: Decoder RNN Cell\n :param dec_embeddings: Decoder embeddings\n :param start_of_sequence_id: GO ID\n :param end_of_sequence_id: EOS Id\n :param max_target_sequence_length: Maximum length of target sequences\n :param vocab_size: Size of decoder/target vocabulary\n :param decoding_scope: TenorFlow Variable Scope for decoding\n :param output_layer: Function to apply the output layer\n :param batch_size: Batch size\n :param keep_prob: Dropout keep probability\n :return: BasicDecoderOutput containing inference logits and sample_id\n \"\"\"\n infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(\n output_fn,\n encoder_state,\n dec_embeddings,\n start_of_sequence_id,\n end_of_sequence_id,\n maximum_length,\n vocab_size)\n \n infer_logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, decoder_fn=infer_decoder_fn, scope=decoding_scope)\n \n return infer_logits\n\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_decoding_layer_infer(decoding_layer_infer)", "_____no_output_____" ] ], [ [ "### Build the Decoding Layer\nImplement `decoding_layer()` to create a Decoder RNN layer.\n\n* Embed the target sequences\n* Construct the decoder LSTM cell (just like you constructed the encoder cell above)\n* Create an output layer to map the outputs of the decoder to the elements of our vocabulary\n* Use the your `decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob)` function to get the training logits.\n* Use your `decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob)` function to get the inference logits.\n\nNote: You'll need to use [tf.variable_scope](https://www.tensorflow.org/api_docs/python/tf/variable_scope) to share variables between training and inference.", "_____no_output_____" ] ], [ [ "def decoding_layer(dec_input, encoder_state,\n target_sequence_length, max_target_sequence_length,\n rnn_size,\n num_layers, target_vocab_to_int, target_vocab_size,\n batch_size, keep_prob, decoding_embedding_size):\n \"\"\"\n Create decoding layer\n :param dec_input: Decoder input\n :param encoder_state: Encoder state\n :param target_sequence_length: The lengths of each sequence in the target batch\n :param max_target_sequence_length: Maximum length of target sequences\n :param rnn_size: RNN Size\n :param num_layers: Number of layers\n :param target_vocab_to_int: Dictionary to go from the target words to an id\n :param 
target_vocab_size: Size of target vocabulary\n :param batch_size: The size of the batch\n :param keep_prob: Dropout keep probability\n :param decoding_embedding_size: Decoding embedding size\n :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)\n \"\"\"\n with tf.variable_scope('decoding') as decoding_scope:\n dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)\n dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, input_keep_prob=keep_prob, output_keep_prob=keep_prob)\n output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)\n \n with tf.variable_scope('decoding') as decoding_scope:\n train_logits = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob)\n\n with tf.variable_scope('decoding', reuse=True) as decoding_scope:\n infer_logits = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, target_vocab_to_int['<GO>'], target_vocab_to_int['<EOS>'], sequence_length - 1, vocab_size, decoding_scope, output_fn, keep_prob)\n \n \n return train_logits, infer_logits\n\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_decoding_layer(decoding_layer)", "_____no_output_____" ] ], [ [ "### Build the Neural Network\nApply the functions you implemented above to:\n\n- Encode the input using your `encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size)`.\n- Process target data using your `process_decoder_input(target_data, target_vocab_to_int, batch_size)` function.\n- Decode the encoded input using your `decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size)` function.", "_____no_output_____" ] ], [ [ "def seq2seq_model(input_data, target_data, keep_prob, batch_size,\n source_sequence_length, target_sequence_length,\n max_target_sentence_length,\n source_vocab_size, target_vocab_size,\n enc_embedding_size, dec_embedding_size,\n rnn_size, num_layers, target_vocab_to_int):\n \"\"\"\n Build the Sequence-to-Sequence part of the neural network\n :param input_data: Input placeholder\n :param target_data: Target placeholder\n :param keep_prob: Dropout keep probability placeholder\n :param batch_size: Batch Size\n :param source_sequence_length: Sequence Lengths of source sequences in the batch\n :param target_sequence_length: Sequence Lengths of target sequences in the batch\n :param source_vocab_size: Source vocabulary size\n :param target_vocab_size: Target vocabulary size\n :param enc_embedding_size: Decoder embedding size\n :param dec_embedding_size: Encoder embedding size\n :param rnn_size: RNN Size\n :param num_layers: Number of layers\n :param target_vocab_to_int: Dictionary to go from the target words to an id\n :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)\n \"\"\"\n enc_inputs = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)\n enc_state = encoding_layer(enc_inputs, rnn_size, num_layers, keep_prob)\n \n dec_inputs = process_decoding_input(target_data, target_vocab_to_int, batch_size)\n dec_embeddings = tf.Variable(tf.truncated_normal([target_vocab_size, dec_embedding_size], stddev=0.01))\n dec_embed_inputs = tf.nn.embedding_lookup(dec_embeddings, dec_inputs)\n \n train_logits, infer_logits = decoding_layer(\n 
dec_embed_inputs,\n dec_embeddings,\n enc_state,\n target_vocab_size,\n sequence_length,\n rnn_size,\n num_layers,\n target_vocab_to_int,\n keep_prob\n )\n \n \n return train_logits, infer_logits\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_seq2seq_model(seq2seq_model)", "_____no_output_____" ] ], [ [ "## Neural Network Training\n### Hyperparameters\nTune the following parameters:\n\n- Set `epochs` to the number of epochs.\n- Set `batch_size` to the batch size.\n- Set `rnn_size` to the size of the RNNs.\n- Set `num_layers` to the number of layers.\n- Set `encoding_embedding_size` to the size of the embedding for the encoder.\n- Set `decoding_embedding_size` to the size of the embedding for the decoder.\n- Set `learning_rate` to the learning rate.\n- Set `keep_probability` to the Dropout keep probability\n- Set `display_step` to state how many steps between each debug output statement", "_____no_output_____" ] ], [ [ "# Number of Epochs\nepochs = 7\n# Batch Size\nbatch_size = 256\n# RNN Size\nrnn_size = 512\n# Number of Layers\nnum_layers = 2\n# Embedding Size\nencoding_embedding_size = 10\ndecoding_embedding_size = 10\n# Learning Rate\nlearning_rate = 0.001\n# Dropout Keep Probability\nkeep_probability = 0.7\ndisplay_step = 256", "_____no_output_____" ] ], [ [ "### Build the Graph\nBuild the graph using the neural network you implemented.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nsave_path = 'checkpoints/dev'\n(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()\nmax_target_sentence_length = max([len(sentence) for sentence in source_int_text])\n\ntrain_graph = tf.Graph()\nwith train_graph.as_default():\n input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()\n\n #sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')\n input_shape = tf.shape(input_data)\n\n train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),\n targets,\n keep_prob,\n batch_size,\n source_sequence_length,\n target_sequence_length,\n max_target_sequence_length,\n len(source_vocab_to_int),\n len(target_vocab_to_int),\n encoding_embedding_size,\n decoding_embedding_size,\n rnn_size,\n num_layers,\n target_vocab_to_int)\n\n\n training_logits = tf.identity(train_logits.rnn_output, name='logits')\n inference_logits = tf.identity(inference_logits.sample_id, name='predictions')\n\n masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')\n\n with tf.name_scope(\"optimization\"):\n # Loss function\n cost = tf.contrib.seq2seq.sequence_loss(\n training_logits,\n targets,\n masks)\n\n # Optimizer\n optimizer = tf.train.AdamOptimizer(lr)\n\n # Gradient Clipping\n gradients = optimizer.compute_gradients(cost)\n capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]\n train_op = optimizer.apply_gradients(capped_gradients)\n", "_____no_output_____" ] ], [ [ "Batch and pad the source and target sequences", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\ndef pad_sentence_batch(sentence_batch, pad_int):\n \"\"\"Pad sentences with <PAD> so that each sentence of a batch has the same length\"\"\"\n max_sentence = max([len(sentence) for sentence in sentence_batch])\n return [sentence + [pad_int] * (max_sentence - 
len(sentence)) for sentence in sentence_batch]\n\n\ndef get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):\n \"\"\"Batch targets, sources, and the lengths of their sentences together\"\"\"\n for batch_i in range(0, len(sources)//batch_size):\n start_i = batch_i * batch_size\n\n # Slice the right amount for the batch\n sources_batch = sources[start_i:start_i + batch_size]\n targets_batch = targets[start_i:start_i + batch_size]\n\n # Pad\n pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))\n pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))\n\n # Need the lengths for the _lengths parameters\n pad_targets_lengths = []\n for target in pad_targets_batch:\n pad_targets_lengths.append(len(target))\n\n pad_source_lengths = []\n for source in pad_sources_batch:\n pad_source_lengths.append(len(source))\n\n yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths\n", "_____no_output_____" ] ], [ [ "### Train\nTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\ndef get_accuracy(target, logits):\n \"\"\"\n Calculate accuracy\n \"\"\"\n max_seq = max(target.shape[1], logits.shape[1])\n if max_seq - target.shape[1]:\n target = np.pad(\n target,\n [(0,0),(0,max_seq - target.shape[1])],\n 'constant')\n if max_seq - logits.shape[1]:\n logits = np.pad(\n logits,\n [(0,0),(0,max_seq - logits.shape[1])],\n 'constant')\n\n return np.mean(np.equal(target, logits))\n\n# Split data to training and validation sets\ntrain_source = source_int_text[batch_size:]\ntrain_target = target_int_text[batch_size:]\nvalid_source = source_int_text[:batch_size]\nvalid_target = target_int_text[:batch_size]\n(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,\n valid_target,\n batch_size,\n source_vocab_to_int['<PAD>'],\n target_vocab_to_int['<PAD>'])) \nwith tf.Session(graph=train_graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch_i in range(epochs):\n for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(\n get_batches(train_source, train_target, batch_size,\n source_vocab_to_int['<PAD>'],\n target_vocab_to_int['<PAD>'])):\n\n _, loss = sess.run(\n [train_op, cost],\n {input_data: source_batch,\n targets: target_batch,\n lr: learning_rate,\n target_sequence_length: targets_lengths,\n source_sequence_length: sources_lengths,\n keep_prob: keep_probability})\n\n\n if batch_i % display_step == 0 and batch_i > 0:\n\n\n batch_train_logits = sess.run(\n inference_logits,\n {input_data: source_batch,\n source_sequence_length: sources_lengths,\n target_sequence_length: targets_lengths,\n keep_prob: 1.0})\n\n\n batch_valid_logits = sess.run(\n inference_logits,\n {input_data: valid_sources_batch,\n source_sequence_length: valid_sources_lengths,\n target_sequence_length: valid_targets_lengths,\n keep_prob: 1.0})\n\n train_acc = get_accuracy(target_batch, batch_train_logits)\n\n valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)\n\n print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'\n .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))\n\n # Save Model\n saver = tf.train.Saver()\n saver.save(sess, 
save_path)\n print('Model Trained and Saved')", "_____no_output_____" ] ], [ [ "### Save Parameters\nSave the `batch_size` and `save_path` parameters for inference.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Save parameters for checkpoint\nhelper.save_params(save_path)", "_____no_output_____" ] ], [ [ "# Checkpoint", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport helper\nimport problem_unittests as tests\n\n_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()\nload_path = helper.load_params()", "_____no_output_____" ] ], [ [ "## Sentence to Sequence\nTo feed a sentence into the model for translation, you first need to preprocess it. Implement the function `sentence_to_seq()` to preprocess new sentences.\n\n- Convert the sentence to lowercase\n- Convert words into ids using `vocab_to_int`\n - Convert words not in the vocabulary, to the `<UNK>` word id.", "_____no_output_____" ] ], [ [ "def sentence_to_seq(sentence, vocab_to_int):\n \"\"\"\n Convert a sentence to a sequence of ids\n :param sentence: String\n :param vocab_to_int: Dictionary to go from the words to an id\n :return: List of word ids\n \"\"\"\n # TODO: Implement Function\n return None\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_sentence_to_seq(sentence_to_seq)", "_____no_output_____" ] ], [ [ "## Translate\nThis will translate `translate_sentence` from English to French.", "_____no_output_____" ] ], [ [ "translate_sentence = 'he saw a old yellow truck .'\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\ntranslate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)\n\nloaded_graph = tf.Graph()\nwith tf.Session(graph=loaded_graph) as sess:\n # Load saved model\n loader = tf.train.import_meta_graph(load_path + '.meta')\n loader.restore(sess, load_path)\n\n input_data = loaded_graph.get_tensor_by_name('input:0')\n logits = loaded_graph.get_tensor_by_name('predictions:0')\n target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')\n source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')\n keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')\n\n translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,\n target_sequence_length: [len(translate_sentence)*2]*batch_size,\n source_sequence_length: [len(translate_sentence)]*batch_size,\n keep_prob: 1.0})[0]\n\nprint('Input')\nprint(' Word Ids: {}'.format([i for i in translate_sentence]))\nprint(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))\n\nprint('\\nPrediction')\nprint(' Word Ids: {}'.format([i for i in translate_logits]))\nprint(' French Words: {}'.format(\" \".join([target_int_to_vocab[i] for i in translate_logits])))\n", "_____no_output_____" ] ], [ [ "## Imperfect Translation\nYou might notice that some sentences translate better than others. Since the dataset you're using only has a vocabulary of 227 English words of the thousands that you use, you're only going to see good results using these words. For this project, you don't need a perfect translation. However, if you want to create a better translation model, you'll need better data.\n\nYou can train on the [WMT10 French-English corpus](http://www.statmt.org/wmt10/training-giga-fren.tar). 
This dataset has a larger vocabulary and is richer in the topics discussed. However, it will take days to train, so make sure you have a GPU and that the neural network is performing well on the dataset we provided. Just make sure you play with the WMT10 corpus after you've submitted this project.\n## Submitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_language_translation.ipynb\" and save it as an HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission.", "_____no_output_____" ] ] ]
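Editorial note on the "Decoding - Inference" section of the notebook above: its markdown describes building the inference decoder from `tf.contrib.seq2seq.GreedyEmbeddingHelper`, `BasicDecoder`, and `dynamic_decode`, but the stored cell still calls the older `simple_decoder_fn_inference` interface and references undefined names. The sketch below is a hedged reconstruction that follows the markdown's description and the function signature given in the notebook; the exact wiring (tiling the `<GO>` id into `start_tokens`, `impute_finished=True`) is an assumption, not the author's code.

```python
# Hedged sketch of decoding_layer_infer() using the TF 1.x tf.contrib.seq2seq API that the
# notebook's markdown describes; keep_prob is kept only to match the stated signature.
import tensorflow as tf

def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
                         end_of_sequence_id, max_target_sequence_length,
                         vocab_size, output_layer, batch_size, keep_prob):
    # One <GO> id per sequence in the batch
    start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32),
                           [batch_size], name='start_tokens')
    # Greedy decoding: embed the argmax prediction and feed it back at each step
    helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings, start_tokens,
                                                      end_of_sequence_id)
    decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, encoder_state, output_layer)
    outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder, impute_finished=True, maximum_iterations=max_target_sequence_length)
    return outputs
```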
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb375be154bf3868b6c45c23da7df96bb783160d
76,506
ipynb
Jupyter Notebook
qc_eda.ipynb
biodev/cycIF-workflow
b272ebf2808dec6847f45c5e5ab8d8e2f6bb143f
[ "MIT" ]
null
null
null
qc_eda.ipynb
biodev/cycIF-workflow
b272ebf2808dec6847f45c5e5ab8d8e2f6bb143f
[ "MIT" ]
null
null
null
qc_eda.ipynb
biodev/cycIF-workflow
b272ebf2808dec6847f45c5e5ab8d8e2f6bb143f
[ "MIT" ]
null
null
null
32.335587
841
0.579615
[ [ [ "## Quality control/Exploratory data analysis Notebook\n\nBy: Megan Grout ([email protected])\n\nAdapted from code written by Dr. Marilyne Labrie and Nick Kendsersky\n\n\nLast updated: 20200527", "_____no_output_____" ], [ "Import external libraries.", "_____no_output_____" ] ], [ [ "import os\nimport random\nimport re\nimport subprocess\nimport pandas as pd\nimport numpy as np\nimport seaborn as sb\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mplc\n\n\n\nfrom scipy import signal\n\nimport plotly.figure_factory as ff\nimport plotly\nimport plotly.graph_objs as go\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot \nimport plotly.express as px\ninit_notebook_mode(connected = True)", "_____no_output_____" ] ], [ [ "Import function written for this project.", "_____no_output_____" ] ], [ [ "from cycif_modules import *", "_____no_output_____" ] ], [ [ "Define function to change header names. Not encapsutated in `cycif_modules`, so that user can change on the fly as necessary.", "_____no_output_____" ] ], [ [ "# This may change for each experiment, so I have not sequestered\n# this code in the cycif_modules.py file\n\n# This function takes in a dataframe, changes the names\n# of the column in various ways, and returns the dataframe.\n# For best accuracy and generalizability, the code uses\n# regular expressions (regex) to find strings for replacement.\ndef apply_header_changes(df):\n # remove lowercase x at beginning of name\n df.columns = df.columns.str.replace(\"^x\",\"\")\n # remove space at beginning of name\n df.columns = df.columns.str.replace(\"^ \",\"\")\n # replace space with underscore\n df.columns = df.columns.str.replace(\" \",\"_\")\n # fix typos\n #df.columns = df.columns.str.replace(\"typo\",\"correct_name\")\n return df", "_____no_output_____" ] ], [ [ "## Begin Workflow", "_____no_output_____" ], [ "### Get directories", "_____no_output_____" ] ], [ [ "# Base directory for project\nbase_dir = ''\n\n# Set name for of project\n# for use in directory creation\nproject_name = ''\n\n# Set string for current step, and for previous step\n# for use in file and direcotry naming\nstep_suffix = 'bs'\nprevious_step_suffix_long = \"_qc_eda\"\n\n# Initial input data directory\n#input_data_dir = r'/Users/groutm/Desktop/TMAdata'\n#input_data_dir = r'/Users/groutm/Desktop/ww_data'\ninput_data_dir = os.path.join(base_dir, project_name + previous_step_suffix_long)\n\n\n# BS directory\n#output_data_dir = r'/Users/groutm/Desktop/TMAoutputdata'\n#output_data_dir = r'/Users/groutm/Desktop/ww_outputdata'\noutput_data_dir = os.path.join(base_dir, project_name + \"_\" + step_suffix)\n\n# BS images subdirectory\n#output_images_dir = r'/Users/groutm/Desktop/TMAimages'\n#output_images_dir = r'/Users/groutm/Desktop/wwimages'\noutput_images_dir = os.path.join(output_data_dir,\"images\")\n\n# Metadata directories\nmetadata_dir = os.path.join(base_dir, project_name + \"_metadata\")\nmetadata_images_dir = os.path.join(metadata_dir,\"images\")\n\n# Create necessary directories for this step, if they don't already exist\nfor d in [base_dir, input_data_dir, output_data_dir, output_images_dir, \n metadata_dir, metadata_images_dir]:\n if not os.path.exists(d):\n os.makedirs(d)\n\n# Change directory to location of input files\nos.chdir(input_data_dir)\n\n", "_____no_output_____" ] ], [ [ "Create list of samples for use in this step of workflow. 
Do not include file extensions or steps labels.", "_____no_output_____" ] ], [ [ "# Provide list of samples whose files we want to read int\n# Needs to be a list of strings, which serve as bases for \n# input file names. Input files will be derived from base\n# sample names, previous step substring, and filetype \n# extension\n\n\nls_samples = ['TMA']", "_____no_output_____" ] ], [ [ "List of columns that are not marker intensities. It is okay if any of these are not actually present in a given dataframe. ", "_____no_output_____" ] ], [ [ "not_intensities = ['replicate_ID', 'cell_type', 'Nucleus_Roundness', 'Nucleus_Size', 'Cell_Size',\n 'Nuc_X', 'not','Nuc_X_Inv','Cell_ID','Nuc_Y_Inv','ROI_slide','ROI_index','Nuc_Y',\n 'cluster']", "_____no_output_____" ] ], [ [ "## Import segmentation files", "_____no_output_____" ], [ "First, ascertain header of first sample's input file. This information will be used as a template against which all other input data files' headers will be tested.", "_____no_output_____" ] ], [ [ "# Read in the first row of the file correpsonding to the first sample (index = 0)\n# in ls_samples\n\n# We do not need to specify a directory, since we earlier changed\n# the current working directory to be that containing these files\nfilename = ls_samples[0] + previous_step_suffix_long + \".csv\"\n\n# Read in only the first line\ndf = pd.read_csv(filename, index_col = 0, nrows = 1)\n\n# Verify that the ID column in input file became the index\n# For segmentation files, we need the first column to be the \n# cell index. In later steps, the cell index will actually not\n# be a proper dataframe data column, but the index of the saved\n# dataframe from the previous step.\nif df.index.name != \"ID\":\n print(\"Expected the first column in input file (index_col = 0) \"\n \"to be 'ID'. This column will be used to set the index names\"\n \"(cell number for each sample). 
It appears that the column '\"\n + df.index.name + \"' was actually the imported as the index \"\n \"column.\")\n\n# Apply the changes to the headers as specified in above funciton\ndf = apply_header_changes(df)\n\n# Set variable to hold default header values\nexpected_headers = df.columns.values", "_____no_output_____" ] ], [ [ "For this entry point into the workflow, we expect the first column to be the ID index.", "_____no_output_____" ] ], [ [ "df.index.name", "_____no_output_____" ] ], [ [ "FYI - What are the headers in our dataframe?", "_____no_output_____" ] ], [ [ "print(\"Used \" + ls_samples[0] + \".csv to determine the expected, corrected headers for all files.\")\nprint(\"There headers are: \\n\" + \", \".join([h for h in expected_headers]) + \".\")", "_____no_output_____" ] ], [ [ "#### Import segmentation files for analysis", "_____no_output_____" ] ], [ [ "# Set dictionary to hold all individual sample data\ndfs = {}\n\n# iterate through each sample in our list of samples\nfor sample in ls_samples:\n # open the file\n # set the index to be the first (0-based indexing, so 0th)\n # column in input file.\n df = pd.read_csv('{}.csv'.format(sample), index_col = 0)#,\n #nrows = 500) \n # use nrows = # to specify number of input rows if you want\n \n # Check for empty df\n # if so, don't continue trying to process df\n if df.shape[0] == 0:\n print('Zero content lines detected in ' + sample + ' file.'\n 'Removing from analysis...')\n # Remove from list, so further steps won't be looking\n # for data on this sample.\n # Note that for lists, we do not need to re-assign\n # the list when removing an item, i.e., we do not say\n # 'ls_samples = ls_samples.remove(sample)', since this\n # operation does not return anything.\n ls_samples.remove(sample)\n continue\n \n \n # Verify that the loaded df are the right length\n # commenting out because this code did not work on all\n # machines during testing (failed one PC, succeeded with\n # one PC and one MacBook)\n try:\n verify_line_no(sample + \".csv\", df.shape[0] + 1) \n except:\n pass\n # adding 1 because we expect the header was detected \n # during file import and not counted towards length of df\n \n # Manipulations necessary for concatenation\n df = apply_header_changes(df)\n # sort them alphanetically\n df = df[[x for x in sorted(df.columns.values)]]\n \n \n # Compare headers of new df against what is expected\n compare_headers(expected_headers, df.columns.values, sample)\n \n # Add Sample_ID column and set it equal to sample name for sample\n df['Sample_ID'] = sample\n \n \n \n # For cases where we have samples called TMA1.1, TMA1.2, TMA1.3, etc.\n # Using regular expressions (regex) to extract the characters in the\n # sample name from TMA to the following digits, stopping at the period\n #if 'ROI_index' in df.columns.values:\n # df['ROI_slide'] = re.findall(r'(TMA\\d+)',sample)[0]\n \n # Add to dictionary of dfs \n dfs[sample] = df\n \n\n\n\n#Merge dfs into one big df\ndf = pd.concat(dfs.values(), ignore_index=False , sort = False)\n# remove dfs from memory, since its big (relatively) and we\n# don't need a data struture of all samples' data separated\n# individually when we can extract information from the big\n# df using the Sample_ID column\ndel dfs\n\n# set index to Sample_ID + cell number\ndf = df.copy().reset_index(drop=True)\nindex = []\n# Iterate through each sample, and extract from the big\n# df just the rows corresponding to that sample. 
Then, \n# reassign the cell index based off of the Sample_ID value\n# and the row number within that chunk. Save that information\n# in a list of indices\nfor sample in ls_samples:\n df_chunk = df.loc[df['Sample_ID'] == sample,:].copy()\n old_index = df_chunk.index\n df_chunk = df_chunk.reset_index(drop=True)\n df_chunk = df_chunk.set_index(f'{sample}_Cell_' + df_chunk.index.astype(str))\n index = index + df_chunk.index.values.tolist()\n\n# Use our list of indices to reassign the big df index\ndf.index = index\n# Remove the 'level_0' and 'index' columns that resulted\n# from the above steps. This is not removing the actual index\n# of the df, just a data column CALLED index.\ndf = df.loc[:,~df.columns.isin(['level_0','index'])]", "_____no_output_____" ] ], [ [ "Let's take a look at a few features to make sure our dataframe is as expected. We want to make sure the data import and aggregation steps worked well.", "_____no_output_____" ] ], [ [ "df.index", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ] ], [ [ "Check for NaN entries (should not be any unless columns do not align), which can result from stitching together dfs with different values in their headers.", "_____no_output_____" ] ], [ [ "# if there are any null values, then print names of columns containing\n# null values\nif df.isnull().any().any():\n print(df.columns[df.isnull().any()])\n\n#in 'if' statement, false means no NaN entries True means NaN entries ", "_____no_output_____" ] ], [ [ "Check that all expected files were imported into final dataframe by comparing our sample names to the unique values in the Sample_ID column.", "_____no_output_____" ] ], [ [ "if sorted(df.Sample_ID.unique()) == sorted(ls_samples):\n print(\"All expected filenames present in big df Sample_ID column.\")\nelse:\n compare_headers(['no samples'], df.Sample_ID.unique(), \"big df Sample_ID column\")", "_____no_output_____" ] ], [ [ "List of header values that are not intensities. Can include items that aren't in a given header.", "_____no_output_____" ], [ "Need to save `not_intensities` list for future reference.", "_____no_output_____" ] ], [ [ "fn = os.path.join(metadata_dir,\"not_intensities.csv\")\n\n# If this file already exists, add only not_intensities items not already present in file\nif os.path.exists(fn):\n print(\"'not_intensities.csv' already exists.\")\n print(\"Reconciling file and Jupyter notebook lists.\")\n # Open file as read-only, extract data\n fh = open(fn, \"r\")\n file_ni = fh.read().splitlines()\n # Set difference to identify items not already in file\n to_add = set(not_intensities) - set(file_ni)\n # We want not_intensities to the a complete list\n not_intensities = list(set(file_ni) | set(not_intensities))\n fh.close()\n # Open file for appending, writing new items\n fh = open(fn, \"a\")\n for item in to_add:\n fh.write(item +\"\\n\")\n fh.close()\n \n# The file does not yet exist\nelse:\n print(\"Could not find \" + fn + \". Creating now.\")\n # Open file for writing (will over-write exisiting file),\n # write all items\n fh = open(fn, \"w\")\n for item in not_intensities:\n fh.write(item + \"\\n\")\n fh.close()", "_____no_output_____" ] ], [ [ "### Drop unwanted columns", "_____no_output_____" ], [ "Here, we are dropping a number of columns that we are totally uninterested in. For example, in the current workflow of QI Tissue, we can either export all columns (all markers in all cell components--cell, nucleus, cytoplasm) or individually check each and every one we want. 
It is faster and easier for the user, and maybe less error-prone, to export all columns and then drop those we are unintersted in here. Not every marker is expected to express in every location; this is why we might drop certain columns. Likewise, we may only be intersted in Average intensity in some features and Maximum intensity in others.", "_____no_output_____" ] ], [ [ "# For development purposes, we kept all marker columns in the Cell and that were Intensity Averages.\n# So the columns we want to keep:\n# not_intensities, and any intensity column that contains 'Intensity_Average'\n# We will be listing those columns we want to keep. Alternatively, you could name the columns you want to drop,\n# or a mixture of both tactics.\n\n# To get the 'Intensity_Average' columns, we use list comprehension:\n# first get a list of all df columns not in 'not_intensities', aka, \n# those that ARE intensities, 'x for x in df....'\n# Then, we only include them if they contain 'Intensity_Average',\n# \"...if 'Intensity_Average' in x\"\n\n## Explain how to add more, beyond Cell_Intensity_Average, etc.\n\nto_keep = not_intensities \\\n + [x for x in df.columns.values[~df.columns.isin(not_intensities)] if 'Cell_Intensity_Average' in x]\n\n# If there are more columns we want to keep, we could include them by\n# adding them to our 'to_keep' list\n# to_keep.append(another_column)\n# NOTE - do NOT reassign this to to_keep (to_keep = to_keep.append(item)),\n# since the return value is None, for some reason. So you would be saying:\n# to_keep = to_keep.append(item)\n# to_keep = None\n# to_keep --> would display 'None'\n# to _keep = to_keep + [list, of, columns]\n# here, you DO ressign (list = list + other_list)\n\n# In order to extract only the columns we want from our big df, \n# we need to only ask for those that are IN the df.\n# Our to_keep list contains items that might not be in our df headers!\n# These items are from our not_intensities list. 
So let's ask for only those items\n# from to_keep that are actually found in our df\ndf = df[[x for x in to_keep if x in df.columns.values]]\n\n\n# What if we want to drop certain markers by name?\n# Drop specific markers\n#df = df.drop(columns = [])", "_____no_output_____" ] ], [ [ "Let's take a look at column names to make sure they are as expected.", "_____no_output_____" ] ], [ [ "df.columns.values", "_____no_output_____" ] ], [ [ "### Nucleus size analysis", "_____no_output_____" ], [ "#### Distribution plots", "_____no_output_____" ] ], [ [ "# Plot only cells where nucleus_size is [0, 500]\nmake_distr_plot_per_sample(\n title = \"Initial dataframe nucleus sizes - 500 cutoff\",\n location = output_images_dir, dfs = [df], \n df_names = [\"Initial dataframe\"], colors = [\"blue\"], \n x_label = \"Nucleus Size\", \n legend = False, xlims = [0,500], markers = ['Nucleus_Size'])\n", "_____no_output_____" ], [ "# Plot only cells where nucleus_size is [0, 100]\nmake_distr_plot_per_sample(title = \"Initial dataframe nucleus sizes to 100\",\n location = output_images_dir, dfs = [df], \n df_names = [\"Initial dataframe\"], colors = [\"blue\"], \n x_label = \"Nucleus Size\", \n legend = False, xlims = [0,100], markers = ['Nucleus_Size'])\n", "_____no_output_____" ] ], [ [ "#### Peak analysis", "_____no_output_____" ], [ "Find valleys between peaks in nucleus size data - unfinished, but left here in case it aids future development.", "_____no_output_____" ] ], [ [ "# Unfinished, but could consider using the following function\nm = signal.find_peaks(df[\"Nucleus_Size\"], prominence = 10, threshold = 20)\nm[0].shape", "_____no_output_____" ] ], [ [ "#### Quantiles", "_____no_output_____" ], [ "Get quantiles (5th, 50th, 95th)", "_____no_output_____" ] ], [ [ "qs = [0.05,0.50,0.95] # list of nucleus size percentiles to extract \n# Extract quantiles\nnuc_sizes = pd.DataFrame(df[\"Nucleus_Size\"].quantile(q=qs))\nnuc_sizes['quantiles'] = nuc_sizes.index\nnuc_sizes = nuc_sizes.reset_index().drop(columns = ['index'])\n\n# Display df\nnuc_sizes\n## Save these data to file\nfilename = \"nuc_quantile_sizes.csv\"\nfilename = os.path.join(output_data_dir,filename)\nnuc_sizes.to_csv(filename, index = False)", "_____no_output_____" ] ], [ [ "#### Nucleus size and other feature scatter plot", "_____no_output_____" ], [ "Scatter plot – to be most informative, ideally this would be cell size vs nucleus size, where color = nucleus roundness. 
Not all data used to develop workflow had all necessary features, so the actual data plotted below may not be terribly useful.\n", "_____no_output_____" ] ], [ [ "# Set string variables\ntitle = \"Nucleus size by cell size for initial dataframe\"\nx_label = \"Cell Size\"\ny_label = \"Nucleus Size\" # cell size - weewin data only has Nuc size!\n\n# Create figure\nfig = px.scatter(df, x=\"Cell_Size\", y=\"Nucleus_Size\",\n color='Nucleus_Roundness')\n\n# Update layout for the aesthetic parameters we want\nfig.update_layout(title_text=title, font=dict(size=18), \n plot_bgcolor = 'white', showlegend = True )\n# Adjust opacity\nfig.update_traces(opacity=0.6)\n# Adjust x-axis parameters\nfig.update_xaxes(title_text = x_label, showline=True, linewidth=2, linecolor='black', \n tickfont=dict(size=18))\n # Adjust y-axis parameters\nfig.update_yaxes(title_text = y_label, showline=True, linewidth=2, linecolor='black',\n tickfont=dict(size=18))\n\n# Display plot\n#plot(fig)\nfilename = os.path.join(output_images_dir, title.replace(\" \",\"_\") + \".png\")\nfig.write_image(filename)", "_____no_output_____" ] ], [ [ "### Delete columns as necessary", "_____no_output_____" ], [ "Move forward with only the columns of interest", "_____no_output_____" ] ], [ [ "# Remove columns containing \"DAPI\"\n# use list comprehension to extract only column headers\n# that do not contain the string \"DAPI\"\ndf = df[[x for x in df.columns.values if 'DAPI' not in x]]\n\nprint(\"Columns are now...\")\nprint([c for c in df.columns.values])", "_____no_output_____" ] ], [ [ "### Create lists of full names and shortened names to use in plotting", "_____no_output_____" ], [ "We want a list of shortened marker intensity column header values for use in plotting. For example 'pATR_Cell_Intensity_Average' would display as 'pATR' for readability. In the case of more than one column present for a given marker, e.g., the inclusion of 'pATR_Nucleus_Cell_Intensity_Average', the pltoted labels would be 'pATR_Cell' and 'pATR_Cell'. We want to create dictionaries of both full to short names and short to full names.", "_____no_output_____" ] ], [ [ "full_to_short_names, short_to_full_names = \\\n shorten_feature_names(df.columns.values[~df.columns.isin(not_intensities)])", "_____no_output_____" ] ], [ [ "Save this data to a metadata file. These devices will be used throughout the workflow.", "_____no_output_____" ] ], [ [ "filename = os.path.join(metadata_dir, \"full_to_short_column_names.csv\")\nfh = open(filename, \"w\")\nfh.write(\"full_name,short_name\\n\")\nfor k,v in full_to_short_names.items():\n fh.write(k + \",\" + v + \"\\n\")\n \nfh.close()", "_____no_output_____" ], [ "filename = os.path.join(metadata_dir, \"short_to_full_column_names.csv\")\nfh = open(filename, \"w\")\nfh.write(\"short_name,full_name\\n\")\nfor k,v in short_to_full_names.items():\n fh.write(k + \",\" + v + \"\\n\")\n \nfh.close()", "_____no_output_____" ], [ "## Print contents to screen if the user wants\n\n#for key, value in full_to_short_names.items():\n# print(key + \": \" + value)", "_____no_output_____" ] ], [ [ "### Import exposure time metadata", "_____no_output_____" ], [ "Here, we want to end up with a data structure that incorporates metadata on each intensity marker column used in our big dataframe in an easy-to-use format. 
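Since the full-to-short name mappings saved just above are reused in later steps, a minimal sketch of reading one of those CSVs back into a dictionary (assuming the file written above) is:

# Hedged sketch: reload the full-to-short column name mapping written above
mapping_file = os.path.join(metadata_dir, "full_to_short_column_names.csv")
mapping_df = pd.read_csv(mapping_file)
full_to_short_reloaded = dict(zip(mapping_df['full_name'], mapping_df['short_name']))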
This is going to include the full name of the intensity marker columns in the big data frame, the corresponding round and channel, the target protein (e.g., CD45), and the segmentation localization information (cell, cytoplasm, nucleus)... We can use this data structure to assign unique colors to all channels and rounds, for example, for use in later visualizations.", "_____no_output_____" ], [ "Here, we expect this exposure time metadata file to have four columns (more are accepted). These are as follows:\n\n- Round: The round in which the marker was assessed. Should be in the form 'r#'\n- Target: The target/marker used. This should be a string whose contents match those in the imported segmentation data files. The capitalization does not need to be consistent. These values should be unique in this file, without duplicates.\n- Exp: The exposure time for this marker in this channel, in milliseconds. Not currently used in the workflow.\n- Channel: The channel in which the marker was assessed. Should be in the form 'c#'.\n", "_____no_output_____" ] ], [ [ "filename = \"Exposure_Time.csv\"\n#filename = \"Exposure_Time_full.csv\"\nfilename = os.path.join(metadata_dir, filename)\n\n\nexp_df = pd.read_csv(filename)", "_____no_output_____" ], [ "# Verify file imported correctly\n\n# This part is wrapped in a try/except block because \n# it wasn't working on the PC workstation, but worked\n# on MG's personal PC laptop and department loaner MacBook\ntry:\n verify_line_no(filename, exp_df.shape[0] + 1)\n print(\"Ran file size verification.\")\nexcept:\n pass\n\n# Headers\nprint(\"Assessing whether column headers are as expected.\")\nexpected_headers =['Round','Target','Exp','Channel']\ncompare_headers(expected_headers, exp_df.columns.values, \"Imported metadata file\")\n\n# Missingness\nif exp_df.isnull().any().any():\n print(\"\\nexp_df has null value(s) in row(s):\")\n print(exp_df[exp_df.isna().any(axis=1)])\nelse:\n print(\"No null values detected.\")", "_____no_output_____" ] ], [ [ "Check to make sure that there are not duplicate values in the Target column.", "_____no_output_____" ] ], [ [ "if len(exp_df['Target']) > len(exp_df['Target'].unique()):\n    print(\"One or more non-unique Target values in exp_df.
Currently not supported.\")", "_____no_output_____" ], [ "exp_df.sort_values(by = ['Target']).head()", "_____no_output_____" ], [ "# Create lowercase version of target\nexp_df['target_lower'] = exp_df['Target'].str.lower()\nexp_df.head()", "_____no_output_____" ] ], [ [ "Create dataframe that contains marker intensity columns in our df that aren't in `not_intensities`", "_____no_output_____" ] ], [ [ "intensities = pd.DataFrame({'full_column':df.columns.values[~df.columns.isin(not_intensities)]})", "_____no_output_____" ], [ "intensities.head()", "_____no_output_____" ] ], [ [ "Extract the marker information from the `full_column`, which corresponds to full column in big dataframe.", "_____no_output_____" ] ], [ [ "# Use regular expressions (regex) to isolate the part of the field that\n# begins (^) with an alphanumeric value (W), and ends with an underscore (_)\n# '$' is end of line\nintensities['marker'] = intensities['full_column'].str.extract(r'([^\\W_]+)')\n# convert to lowercase\nintensities['marker_lower'] = intensities['marker'].str.lower()", "_____no_output_____" ], [ "# Subset the intensities df to exclude any column pertaining to DAPI\nintensities = intensities.loc[intensities['marker_lower'] != 'dapi']", "_____no_output_____" ] ], [ [ "Now merge the `intensities` and `exp_df` together to create `metadata`", "_____no_output_____" ] ], [ [ "metadata = pd.merge(exp_df, intensities, how = 'left',\n left_on = 'target_lower',right_on = 'marker_lower')\nmetadata = metadata.drop(columns = ['marker_lower'])\n\n# Target is the capitalization from the Exposure_Time.csv\n# target_lower is Target in all caps\n# marker is the extracted first component of the full column in segmentation data, with corresponding capitalization", "_____no_output_____" ] ], [ [ "Add a column to signify marker target location.", "_____no_output_____" ] ], [ [ "# Use a lambda to determine segmented location of intensity marker column and update metadata accordingly\n# This function determines what the location of the marker is in the cell\n# It looks for 'cytoplasm', 'cell',' or 'nucleus' string inside the \n# 'full_column' column of a given row, and returns the identifyied\n# area of 'unknown' if none of them\ndef add_metadata_location(row):\n fc = row['full_column'].lower()\n if 'cytoplasm' in fc and 'cell' not in fc and 'nucleus' not in fc:\n return 'cytoplasm'\n elif 'cell' in fc and 'cytoplasm' not in fc and 'nucleus' not in fc:\n return 'cell'\n elif 'nucleus' in fc and 'cell' not in fc and 'cytoplasm' not in fc:\n return 'nulceus'\n else:\n return 'unknown'\n\n# apply the function\nmetadata['location'] = metadata.apply(\n lambda row: add_metadata_location(row), axis = 1)", "_____no_output_____" ] ], [ [ "A peek at our `metadata` dataframe:", "_____no_output_____" ] ], [ [ "metadata.head()", "_____no_output_____" ] ], [ [ "Save this data structure to the metadata folder.", "_____no_output_____" ] ], [ [ "# don't want to add color in because that's better off treating color the same for round, channel, and sample\nfilename = \"marker_intensity_metadata.csv\"\nfilename = os.path.join(metadata_dir, filename)\n\nmetadata.to_csv(filename, index = False)", "_____no_output_____" ] ], [ [ "### Import sample metadata if applicable", "_____no_output_____" ] ], [ [ "filename = \"ROI_Map.csv\"\nfilename = os.path.join(metadata_dir, filename)\n\nsample_metadata = pd.read_csv(filename)", "_____no_output_____" ], [ "# Verify file imported correctly\n\n# Verify size\n# This part is wrapped in a try/except block because \n# 
it wasn't working on the PC workstation, but worked\n# on MG's personal PC laptop and department loaner MacBook\ntry:\n verify_line_no(filename, sample_metadata.shape[0] + 1)\n print(\"Ran file length verification.\")\nexcept:\n pass\n\n# Headers\nprint(\"Assessing whether column headers are as expected.\")\nexpected_headers =['Sample_ID', 'ROI_slide','ROI_index', 'TMA_Core', 'TMA_row',\n 'TMA_column', 'tissue_long', 'tissue_short', 'Replicate', 'Type']\ncompare_headers(expected_headers, sample_metadata.columns.values, \"Imported metadata file\")\n\n# Missingness\nif sample_metadata.isnull().any().any():\n print(\"\\nsample_metadata has null value(s) in row(s):\")\n print(sample_metadata[sample_metadata.isna().any(axis=1)])\nelse:\n print(\"No null values detected.\")", "_____no_output_____" ] ], [ [ "In this case, `sample_metadata` does not need to be merged with any other df and then saved again.", "_____no_output_____" ] ], [ [ "sample_metadata.head()", "_____no_output_____" ] ], [ [ "## Establish colors to use throughout workflow", "_____no_output_____" ], [ "#### Channel colors", "_____no_output_____" ], [ "Channel colors - we want colors that are categorical, since Channel is a non-ordered category (yes, the channels are numbered, but arbitrarily). A categorical color palette will have dissimilar colors. However, we will typically use a prescribed set of channel colors that are consistent throughout experiments: c2 = green, c3 = orange, c4 = red, c5 = turquoise. The more automated channel color generation will be left below for reference.", "_____no_output_____" ] ], [ [ "# Get those unique colors\nif len(metadata.Channel.unique()) > 10:\n print(\"WARNING: There are more unique channel values than \\\n there are colors to choose from. Select different palette, e.g., \\\n continuous palette 'husl'.\")\nchannel_color_values = sb.color_palette(\"colorblind\",n_colors = len(metadata.Channel.unique()))#'HLS'\n# chose 'colorblind' because it is categorical and we're unlikely to have > 10\n\nprint(\"Unique channels are:\", metadata.Channel.unique())\n# Display those unique colors\nsb.palplot(sb.color_palette(channel_color_values))\n", "_____no_output_____" ] ], [ [ "Store in a dictionary", "_____no_output_____" ] ], [ [ "channel_color_dict = dict(zip(metadata.Channel.unique(), channel_color_values))\n\nchannel_color_dict", "_____no_output_____" ] ], [ [ "Let's choose our channel colors instead. We can use the function `matplotlib.colors.to_rgb(c)`, where `c` is a word color name, to convert to the (r, g, b) tuple needed for the workflow. At the top of the script, we imported `matplotlib.colors` as `mplc`, so we can save time and simply type the `mplc.to_rgb(c)` shorthand when using this function. Note that if you use any of the xkcd color survey colors (https://xkcd.com/color/rgb/), you will need to specify these as 'xkcd:colorname'.", "_____no_output_____" ], [ "I will demonstrate a couple of different ways of changing the colors we generated above, so the user can expand on the examples as necessary. We are holding all of our color information in several instances of a data structure called a dictionary. https://docs.python.org/3/library/stdtypes.html#typesmapping\n\nDictionaries are a way to store a collection of items where each item is composed of a mapped key-value pair.
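For reference, here is what the `mplc.to_rgb` conversion mentioned above returns (colors chosen arbitrarily for illustration):

# to_rgb() returns an (r, g, b) tuple of floats in [0, 1]
print(mplc.to_rgb('orange'))         # approximately (1.0, 0.647, 0.0)
print(mplc.to_rgb('xkcd:sky blue'))  # xkcd survey colors need the 'xkcd:' prefix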
In the case of this workflow, each color dictionary has a string key identifying the specific thing to be colored, e.g., 'c2', 'TMA', 'cluster1', or 'r5', and the corresponding value is a three-float tuple (r, g, b) that is the color of that thing. With dictionaries, we can remove a key-value pair, add a new key-value pair, or overwrite an existing key-value pair whenever we want. Keys can be many things, but often you will see them as strings. Values can be strings, lists, other dictionaries (as seen below for the heatmaps), etc. Nested dictionaries can be complicated to intuit, but they can be a good way to associate a bunch of information together easily, coding-wise. Dictionary keys should not be relied upon for ordering.", "_____no_output_____" ] ], [ [ "# get a new color for a channel, overwrite/replace the original channel color in the dictionary\n\nc2_color = \"green\"\nc2_color = mplc.to_rgb(\"green\")\nprint(\"Our new color in rgb form is \" + str(c2_color) + \".\")\n\nprint(\"Before replacement, c2 in the dictionary is: \" + str(channel_color_dict['c2']))\n\n# Replace value\nchannel_color_dict['c2'] = c2_color\nprint(\"After replacement, c2 in the dictionary is: \" + str(channel_color_dict['c2']))", "_____no_output_____" ], [ "# Here is how you delete an item from a dictionary\n\nprint(\"Keys in the channel color dictionary are: \" + str(channel_color_dict.keys()))\n\n# If we try to remove a key that is not in the dictionary, we will get a KeyError,\n# so we check that the key exists before popping it\nif 'c2' in channel_color_dict.keys():\n print(\"'c2' is in the dictionary. Removing now.\")\n channel_color_dict.pop('c2')\n \nprint(\"Keys in the channel color dictionary are: \" + str(channel_color_dict.keys()))", "_____no_output_____" ], [ "## Add in a new item\nprint(\"Keys in the channel color dictionary are: \" + str(channel_color_dict.keys()))\nprint(\"Adding in 'c2'...\")\nchannel_color_dict['c2'] = c2_color\nprint(\"Keys in the channel color dictionary are: \" + str(channel_color_dict.keys()))\n", "_____no_output_____" ], [ "## Let's finish the dictionary now\n\nchannel_color_dict['c2'] = mplc.to_rgb('green')\nchannel_color_dict['c3'] = mplc.to_rgb('orange')\nchannel_color_dict['c4'] = mplc.to_rgb('red')\nchannel_color_dict['c5'] = mplc.to_rgb('turquoise')", "_____no_output_____" ], [ "## And display the colors so we can see them\n\n# Instead of querying the dictionary to get each of our colors, THEN putting those colors in a list,\n# THEN feeding that list into the palplot/color_palette code as above, I will condense these steps\n# together. Here we are accessing each (r,g,b) color value in the dictionary using the key.\nprint(['c2','c3','c4','c5'])\nsb.palplot(sb.color_palette(\n [channel_color_dict['c2'],channel_color_dict['c3'],channel_color_dict['c4'],channel_color_dict['c5']]))\n", "_____no_output_____" ] ], [ [ "#### Round colors", "_____no_output_____" ], [ "Round colors - we want colors that are sequential, since Round is an ordered category. We can still generate colors that are easy to distinguish. Also, many of the categorical palettes cap at about 10 or so unique colors, and repeat from there.
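A quick, purely illustrative check of that repetition behavior:

# Ask a ~10-color qualitative palette for 15 colors; seaborn cycles the base colors,
# so fewer unique colors come back than were requested.
many_colors = sb.color_palette("colorblind", n_colors = 15)
print(len(many_colors), "colors requested;", len(set(many_colors)), "unique colors returned")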
We do not want any repeats!", "_____no_output_____" ] ], [ [ "round_color_values = sb.cubehelix_palette(\n len(metadata.Round.unique()), start=1, rot= -0.75, dark=0.19, light=.85, reverse=True)\n#round_color_values = sb.color_palette(\"cubehelix\",n_colors = len(metadata.Round.unique()))\n# chose 'cubehelix' because it is sequential, and round is a continuous process\n# each color value is a tuple of three values: (R, G, B)\nprint(metadata.Round.unique())\n\nsb.palplot(sb.color_palette(round_color_values))\n\n## TO-DO: write what these parameters mean", "_____no_output_____" ] ], [ [ "Store in a dictionary", "_____no_output_____" ] ], [ [ "round_color_dict = dict(zip(metadata.Round.unique(), round_color_values))\n\nfor k,v in round_color_dict.items():\n round_color_dict[k] = np.float64(v)", "_____no_output_____" ] ], [ [ "#### Sample colors", "_____no_output_____" ], [ "Sample colors - want colors that are neither sequential nor categorical. Categorical would be ideal if we could generate an arbitrary number of colors, but I do not think that we can. Hense, we will choose `n` colors from a continuous palette. First we will generate the right number of colors. Later, we will assign TMA samples to gray.", "_____no_output_____" ] ], [ [ "# Get those unique colors\ncolor_values = sb.color_palette(\"husl\",n_colors = len(ls_samples))#'HLS'\n# each color value is a tuple of three values: (R, G, B)\n\n# Display those unique colors\nsb.palplot(sb.color_palette(color_values))", "_____no_output_____" ] ], [ [ "Generate enough gray shades for all TMA samples in dataset.", "_____no_output_____" ] ], [ [ "# Get list of all TMA samples\n# by looking for substring 'TMA' in all unique Sample_ID values\nTMA_samples = [s for s in df.Sample_ID.unique() if 'TMA' in s]\n\n# Now make a list of unique gray shades,\n# whose length equals the length of the list above\nTMA_color_values = sb.color_palette(n_colors = len(TMA_samples),palette = \"gray\")\n\n# Show the gray color(s) to the user\nsb.palplot(sb.color_palette(TMA_color_values))", "_____no_output_____" ] ], [ [ "#### Store in a dictionary", "_____no_output_____" ] ], [ [ "# Now we will create a dictionary to hold this information\n# Here we are mapping the unique Sample_ID values in df\n# (note that sorted() ensures they are in alphabetical\n# order) with the color_values list we derived above.\n# This list does NOT have our TMA gray(s) in it.\n# After we associate the two groups of items together\n# with zip, we turn it into a dictonary: key = Sample_ID,\n# value = color for that Sample_ID\nsample_color_dict = dict(zip(\n sorted(df.Sample_ID.unique()), color_values\n ))\n\n# Edit our dictioanry\n# Replace all TMA samples' colors with gray by\n# iterating through all keys in sorted order\n# and replacing the color with a gray one. We are\n# moving through our list of gray colors using our\n# index 'i', so that each TMA gets a different gray.\ni = 0\nfor key in sorted(sample_color_dict.keys()):\n if 'TMA' in key:\n sample_color_dict[key] = TMA_color_values[i]\n i +=1", "_____no_output_____" ], [ "sample_color_dict", "_____no_output_____" ] ], [ [ "Look at the (r,g,b) values of the colors above. 
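A small sketch to confirm programmatically (rather than by eye) that each TMA sample received a gray, i.e. nearly equal r, g and b components:

# Illustrative only: report whether every TMA sample's color is (close to) gray
for s_id, (r, g, b) in sample_color_dict.items():
    if 'TMA' in s_id:
        print(s_id, "is gray:", abs(r - g) < 1e-6 and abs(g - b) < 1e-6)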
Any TMA sample should have r ~= g ~= b.", "_____no_output_____" ], [ "Display the colors:", "_____no_output_____" ] ], [ [ "print(\"Our samples and corresponding colors are:\")\nprint([key for key in sorted(sample_color_dict.keys())])\nsb.palplot(sb.color_palette([sample_color_dict[key] for key in sorted(sample_color_dict.keys())]))", "_____no_output_____" ] ], [ [ "### Save color information (mapping and legend) to metadata directory", "_____no_output_____" ] ], [ [ "# let's look at the metadata again...\nmetadata.head()\n", "_____no_output_____" ] ], [ [ "Add in the color information in both RGB (range 0-1) and hex values, for use in visualizations", "_____no_output_____" ] ], [ [ "metadata['round_color'] = metadata.apply(lambda row: round_color_dict[row['Round']], axis = 1)\nmetadata['channel_color'] = metadata.apply(lambda row: channel_color_dict[row['Channel']], axis = 1)", "_____no_output_____" ], [ "# This function takes in a dictionary cd, a column_name string\n# and returs a dataframe. This df has the information that was\n# in the dictionary--'rgb' is the (fl, fl, fl) tuple corresponding\n# to the color names given as the cd keys, an 'hex' is the corresponding\n# hexademical value.\ndef color_dict_to_df(cd, column_name):\n df = pd.DataFrame.from_dict(cd, orient = 'index')\n df['rgb'] = df.apply(lambda row: (np.float64(row[0]), np.float(row[1]), np.float64(row[2])), axis = 1)\n df = df.drop(columns = [0,1,2])\n df['hex'] = df.apply(lambda row: mplc.to_hex(row['rgb']), axis = 1)\n df[column_name] = df.index\n return df", "_____no_output_____" ] ], [ [ "Sample", "_____no_output_____" ] ], [ [ "# Create dataframe\ncolor_df = color_dict_to_df(sample_color_dict, \"Sample_ID\")\ncolor_df.head()\n\n# Save to file in metadatadirectory\nfilename = \"sample_color_data.csv\"\nfilename = os.path.join(metadata_dir, filename)\ncolor_df.to_csv(filename, index = False)", "_____no_output_____" ], [ "# Legend of sample info only\n\ng = plt.figure(figsize = (1,1)).add_subplot(111)\ng.axis('off')\nhandles = []\n# To change the order of items on the legend, do\n# for item in [item1, item2, item3]:\nfor item in sorted(sample_color_dict.keys()):\n h = g.bar(0,0, color = sample_color_dict[item],\n label = item, linewidth =0)\n handles.append(h)\nfirst_legend = plt.legend(handles=handles, loc='upper right', title = 'Sample'),\n # bbox_to_anchor=(10,10), \n # bbox_transform=plt.gcf().transFigure)\n\n# Save the legend to a file\nfilename = \"Sample_legend.png\"\nfilename = os.path.join(metadata_images_dir, filename)\nplt.savefig(filename, bbox_inches = 'tight')", "_____no_output_____" ] ], [ [ "Channel", "_____no_output_____" ] ], [ [ "# Create dataframe\ncolor_df = color_dict_to_df(channel_color_dict, \"Channel\")\ncolor_df.head()\n\n# Save to file in metadatadirectory\nfilename = \"channel_color_data.csv\"\nfilename = os.path.join(metadata_dir, filename)\ncolor_df.to_csv(filename, index = False)", "_____no_output_____" ], [ "# Legend of channel info only\n\ng = plt.figure(figsize = (1,1)).add_subplot(111)\ng.axis('off')\nhandles = []\n# To change the order of items on the legend, do\n# for item in [item1, item2, item3]:\nfor item in sorted(channel_color_dict.keys()):\n h = g.bar(0,0, color = channel_color_dict[item],\n label = item, linewidth =0)\n handles.append(h)\nfirst_legend = plt.legend(handles=handles, loc='upper right', title = 'Channel'),\n # bbox_to_anchor=(10,10), \n # bbox_transform=plt.gcf().transFigure)\n\n# Save the legend to a file\nfilename = \"Channel_legend.png\"\nfilename = 
os.path.join(metadata_images_dir, filename)\nplt.savefig(filename, bbox_inches = 'tight')", "_____no_output_____" ] ], [ [ "Round", "_____no_output_____" ] ], [ [ "# Create dataframe\ncolor_df = color_dict_to_df(round_color_dict, \"Round\")\ncolor_df.head()\n\n# Save to file in metadatadirectory\nfilename = \"round_color_data.csv\"\nfilename = os.path.join(metadata_dir, filename)\ncolor_df.to_csv(filename, index = False)", "_____no_output_____" ], [ "# Legend of round info only\n\nround_legend = plt.figure(figsize = (1,1)).add_subplot(111)\nround_legend.axis('off')\nhandles = []\n# To change the order of items on the legend, do\n# for item in [item1, item2, item3]:\nfor item in round_color_dict.keys():\n h = round_legend.bar(0,0, color = round_color_dict[item],\n label = item, linewidth =0)\n handles.append(h)\nfirst_legend = plt.legend(handles=handles, loc='upper right', title = 'Round'),\n # bbox_to_anchor=(10,10), \n # bbox_transform=plt.gcf().transFigure)\n\n# Save the legend to a file\nfilename = \"Round_legend.png\"\nfilename = os.path.join(metadata_images_dir, filename)\nplt.savefig(filename, bbox_inches = 'tight')", "_____no_output_____" ] ], [ [ "## EDA scatterplot", "_____no_output_____" ], [ "Scatterplot of nucleus size by nucleus roundness, colored by sample", "_____no_output_____" ], [ "This was not working on my computer, probably due to the size of the data. Let's run this chunk using just a subset of the data. Here, we will want the subset to maintain the same proportion of cells for each Sample_ID as we had in the original dataframe.", "_____no_output_____" ] ], [ [ "subset_row_count = 10000", "_____no_output_____" ], [ "subset_df = create_subset(df, 'Sample_ID', subset_row_count, 'original')", "_____no_output_____" ] ], [ [ "How many lines for each sample ID are in our subset df?", "_____no_output_____" ] ], [ [ "subset_df['Sample_ID'].value_counts().sort_index()", "_____no_output_____" ] ], [ [ "How do the proportions of cells in the original and subset dfs compare?", "_____no_output_____" ] ], [ [ "df['Sample_ID'].value_counts().sort_index()/df.shape[0]", "_____no_output_____" ], [ "subset_df['Sample_ID'].value_counts().sort_index()/subset_df.shape[0]", "_____no_output_____" ] ], [ [ "Perform the plotting.", "_____no_output_____" ] ], [ [ "#By sample ID only\n\n# initiate figure\nfig = go.Figure()\ntitle = 'Nucleus size by nucleus roundess by Sample ID'\n\n# plot each trace separately\nfor sample in ls_samples:\n fig.add_trace(go.Scatter(\n x = subset_df.loc[subset_df['Sample_ID']==sample,'Nucleus_Roundness'],\n y = subset_df.loc[subset_df['Sample_ID']==sample,'Nucleus_Size'],\n mode = 'markers',\n name = sample,\n marker=dict(\n color='rgb' + str(sample_color_dict[sample])),\n showlegend = True\n \n ))\n \n\n# Update figure for aesthetic details\nfig.update_layout(title = title, plot_bgcolor = 'white')\nfig.update_xaxes(title_text = \"Nucleus roundness\", linecolor = 'black')\nfig.update_yaxes(title_text = \"Nucleus size\", linecolor = 'black')\n\n# Output\n#plot(fig) # plot generates in new Chrome tab\n# Write to file\nfilename = os.path.join(output_images_dir, title.replace(\" \",\"_\") + \".png\")\nfig.write_image(filename)", "_____no_output_____" ] ], [ [ "## Initial heatmap", "_____no_output_____" ], [ "We will only be plotting ~10k cells in the interest of time/computing resources. 
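(A hedged aside: the workflow's own `create_subset` helper is used below; a rough plain-pandas equivalent of an 'equal'-style subset might look like the following sketch, which is not the actual implementation.)

# Sketch only -- sample up to an equal number of rows per Sample_ID, without replacement
per_sample = subset_row_count // df['Sample_ID'].nunique()
rough_subset = (df.groupby('Sample_ID', group_keys = False)
                  .apply(lambda g: g.sample(n = min(len(g), per_sample), random_state = 0)))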
We want these 10k lines in our original df to be sampled randomly, without replacement, with the caveat that the proportions of all samples in the data are equal to each other (unless a particular sample does not have enough corresponding lines for the desired final df size). If the size of the dataframe is > 10k rows, then we will proceed with the entire dataset.", "_____no_output_____" ] ], [ [ "subset_row_count = 10000", "_____no_output_____" ], [ "subset_df = create_subset(df, 'Sample_ID', subset_row_count, 'equal')", "_____no_output_____" ] ], [ [ "How many lines for each sample ID are in our subset df?", "_____no_output_____" ] ], [ [ "subset_df['Sample_ID'].value_counts().sort_index()", "_____no_output_____" ] ], [ [ "How do the proportions of cells in the original and subset dfs compare?", "_____no_output_____" ] ], [ [ "df['Sample_ID'].value_counts().sort_index()/df.shape[0]", "_____no_output_____" ], [ "subset_df['Sample_ID'].value_counts().sort_index()/subset_df.shape[0]", "_____no_output_____" ] ], [ [ "### Get data structures to map colors to columns and rows...", "_____no_output_____" ], [ "## Row colors", "_____no_output_____" ], [ "For the row colors, we essentially just need to map the information in a given feature to the colors that correspond to that value in the right color dictionary. For example, it might be sample_3, sample_3, sample_4, , so we need the row colors to be (1, 1, 1), (1, 1, 1), (0, 0.25, 0.6). These are the initialy colors--if we are clustering rows or columns, the labels will still match the data with which they're associated.", "_____no_output_____" ] ], [ [ "row_sample_colors = subset_df.Sample_ID.map(sample_color_dict)\n\nrow_sample_colors[1:5]", "_____no_output_____" ] ], [ [ "## Column rows", "_____no_output_____" ], [ "For column rows, matching up the information in each column with the appropriate color is more difficult. ", "_____no_output_____" ] ], [ [ "# Here, we want to translate marker columns to their corresponding channel information,\n# and then match that up with the right color, as with row columns\n\n# First, we merge the (L) non-intensity column values, transformed into a dataframe,\n# with the metadata df (R), matching on the \"0\" column present in the L,\n# which is the only column in there, with the \"full_column\" (aka df header name)\n# column in the R, only including all cases where there is a match and any unmatched\n# L cases ('both' [?] would be only cases where ther is is a match, and 'right' would\n# be cases with a match and any unmatched R columns).\ncolumn_channel_colors = pd.merge(pd.DataFrame(pd.Series(\n subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values)), \n metadata, how = 'left',\n left_on = 0, right_on = 'full_column'\n # From that resulting df, extract the '0' and 'Channel' objects,\n # then only 'Channel', then map to the right colors\n )[[0,'Channel']]['Channel'].map(channel_color_dict)\n\n# Set the index to be the names of the colors. 
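# (Hedged alternative sketch for the same column-to-color mapping, assuming every
#  intensity column appears exactly once in metadata['full_column']:
#  intensity_cols = subset_df.columns[~subset_df.columns.isin(not_intensities)]
#  channel_of_col = metadata.set_index('full_column')['Channel']
#  column_channel_colors_alt = channel_of_col.reindex(intensity_cols).map(channel_color_dict))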
There is only one column, and that is the corresponding\n# colors\ncolumn_channel_colors.index = subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values\n\ncolumn_channel_colors.head()", "_____no_output_____" ], [ "# Here, we want to translate marker columns to their corresponding round information,\n# and then match that up with the right color, as with row columns\n\n# First, we merge the (L) non-intensity column values, transformed into a dataframe,\n# with the metadata df (R), matching on the \"0\" column present in the L,\n# which is the only column in there, with the \"full_column\" (aka df header name)\n# column in the R, only including all cases where there is a match and any unmatched\n# L cases ('both' [?] would be only cases where ther is is a match, and 'right' would\n# be cases with a match and any unmatched R columns).\ncolumn_round_colors = pd.merge(pd.DataFrame(pd.Series(\n subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values)), \n metadata, how = 'left',\n left_on = 0, right_on = 'full_column'\n # From that resulting df, extract the '0' and 'Channel' objects,\n # then only 'Channel', then map to the right colors\n )[[0,'Round']]['Round'].map(round_color_dict)\n\n# Set the index to be the names of the colors. There is only one column, and that is the corresponding\n# colors\ncolumn_round_colors.index = subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values\n\ncolumn_round_colors.head()", "_____no_output_____" ] ], [ [ "### Annotations data structure", "_____no_output_____" ] ], [ [ "# Create data structure to hold everything we need for row/column annotations\n# annotations is a dictionary\n## IMPORTANT - if you use 'annotations', it MUST have both 'rows' and 'cols'\n## objects inside. 
These can be empty lists, but they must be there!\nannotations = {}\n\n# create a data structure to hold everything we need for only row annotations\n# row_annotations is a list, where each item therein is a dictioary corresponding\n# to all of the data pertaining to that particular annotation\n# Adding each item (e.g., Sample, then Cluster), one at a time to ensure ordering\n# is as anticipated on figure\nrow_annotations = []\nrow_annotations.append({'label':'Sample','type':'row','mapping':row_sample_colors,'dict':sample_color_dict,\n 'location':'center left','bbox_to_anchor':(0, 0.5)})\n# Add all row information into the annotations dictionary\nannotations['rows'] = row_annotations\n\n\n# Now we repeat the process for column annotations\ncol_annotations = []\ncol_annotations.append({'label':'Round','type':'column','mapping':column_round_colors,'dict':round_color_dict,\n 'location':'upper right','bbox_to_anchor':(1,0.50)})\n\ncol_annotations.append({'label':'Column','type':'column','mapping':column_channel_colors,'dict':channel_color_dict,\n 'location':'upper right','bbox_to_anchor':(1,0.75)})\nannotations['cols'] = col_annotations", "_____no_output_____" ] ], [ [ "#### Actually plot the heatmap", "_____no_output_____" ] ], [ [ "heatmap_function(\n data = subset_df.loc[:,~subset_df.columns.isin(not_intensities)],\n title = \"Initial dataframe\",\n # define method, metric, and color map\n method = 'ward', metric = 'euclidean',cmap = 'coolwarm',\n # colorbar info (legend coloring of main plot) \n cbar_kws = {'label':'Intens.'},\n # xticklabels - want to have the nicknames instead of full names,\n # so we translate from full to short names; we also only want to include\n # non_intensity columns, to match the data we fed into under 'data'\n xticklabels = [full_to_short_names[name] for name in \n subset_df.loc[:,\n ~subset_df.columns.isin(not_intensities)].columns.values],\n # where to save the df\n save_loc = output_images_dir,\n # how to cluster on rows and columns\n row_cluster = True, col_cluster = True,\n # provide the dictionary of row and column coloring information\n # and legend information, as established above.\n annotations = annotations\n )", "_____no_output_____" ] ], [ [ "### Bar plot of count of all cells in all samples - no filtering yet", "_____no_output_____" ] ], [ [ "# Get counts for each Sample_ID, sorted by Sample_ID\ncounts = pd.DataFrame(df.Sample_ID.value_counts()).sort_index()\n\n# rename Sample_ID to counts\ncounts = counts.rename(columns = {'Sample_ID':'counts'})\n# add Sample_ID back in, as what's currently the index\ncounts['Sample_ID'] = counts.index\n# add 'color', which is derived from the row's Sample_ID fed into the right\n# color dictionary\ncounts['color'] = counts.apply(lambda row: sample_color_dict[row['Sample_ID']], axis = 1)\ncounts.head()", "_____no_output_____" ], [ "ls_samples", "_____no_output_____" ], [ "# By sample ID only\n\n# establish figure\nfig = go.Figure()\ntitle = 'Initial Cell counts by Sample ID'\n\n# Changing the ordering of the bars is a easy as iterating through a list\n# with the samples in a different order! 
For example, this order below:\n#for sample in ['TMA', 'GZ7.2', 'GZ10.3', 'GZ7.1', 'GZ10.2', 'GZ10.1', 'GZ6']:\nfor sample in ls_samples:\n # add trace for each sample\n fig.add_trace(go.Bar(\n x=counts.loc[counts['Sample_ID']==sample,'Sample_ID'], \n y = counts.loc[counts['Sample_ID']==sample,'counts'],\n text = counts.loc[counts['Sample_ID']==sample,'counts'], \n textposition='outside',\n marker=dict(\n color='rgb' + str(sample_color_dict[sample])),\n showlegend = False\n \n ))\n \n# update aesthetic parameters\nfig.update_layout(title = title, plot_bgcolor = 'white')\nfig.update_xaxes(title_text = \"Sample ID\", linecolor = 'black')\nfig.update_yaxes(title_text = \"Cell count\", linecolor = 'black')\n\n# Display plot\n#plot(fig)\nfilename = os.path.join(output_images_dir, title.replace(\" \",\"_\") + \".png\")\nfig.write_image(filename)\n", "_____no_output_____" ] ], [ [ "## PCA", "_____no_output_____" ], [ "This is how you might save data for the PCA, if you'd like to.", "_____no_output_____" ] ], [ [ "## for PCA\nfilename = \"[filename]_PCA_test.csv\"\ndf.to_csv(filename, index = False)\n", "_____no_output_____" ] ], [ [ "### Drop any other rows or columns we want to before saving data", "_____no_output_____" ] ], [ [ "# Let's take a look\ndf.columns.values", "_____no_output_____" ] ], [ [ "For the sake of example, I will operate on a copy of df, called df_copy", "_____no_output_____" ] ], [ [ "# You MUST do df.copy()\n# 'df_copy = df' would essentially \n# give you two different names for the\n# SAME dataframe, so operating on one\n# would also operate on the other\ndf_copy = df.copy()", "_____no_output_____" ] ], [ [ "#### Operate on entire rows or columns", "_____no_output_____" ] ], [ [ "# Drop columns\nmy_cols = []\ndf_copy = df_copy.drop(columns = my_cols)", "_____no_output_____" ], [ "# Keep only specific columns (explained below)\nmy_cols = []\nmy_cols = df.columns.values\ndf_copy = df_copy.loc[:,my_cols]\n", "_____no_output_____" ] ], [ [ "#### Operate on rows and columns using filtering criteria", "_____no_output_____" ] ], [ [ "# Keep only certain rows based off of criteria\n\n# use df.loc[] to filter\n# df.loc[rows,columns]\n# df.loc[:,certain_cols] --> keep all rows ':', only certain cols\n# df.loc[certain_rows,:] --> keep only certain row, all cols ':'\n\n# Say we only want certain values for Sample_ID\nprint(df_copy.Sample_ID.unique())\n#keep = ['TMA1.1','TMA1.2','TMA1.3','TMA2.1','TMA2.2','TMA2.3']\nkeep = []\ndf_copy = df_copy.loc[df_copy['Sample_ID'].isin(keep),:]\nprint(df_copy.Sample_ID.unique())", "_____no_output_____" ], [ "# Filter on multiple criteria\n# '&' or 'and'\n# '|' or 'or'\n# you MUST have parentheses around each logic expression!\ndf_copy = df_copy.loc[\n (df_copy['Sample_ID'].isin(['TMA1.1','TMA1.2','TMA1.3'])) \\\n ## backslash above used to break line for readability, but tell Python to act like it's all one line\n | (df_copy['Sample_ID'].isin(['TMA2.1','TMA2.2','TMA2.3'])) , :]\nprint(df_copy.Sample_ID.unique())", "_____no_output_____" ], [ "# Remove rows based off of certain criteria\n# note the negating tilde '~'!\n\ndf_copy = df_copy.loc[\n (~df_copy['Sample_ID'].isin(['TMA1.1','TMA1.2','TMA1.3'])) \\\n ## backslash above used to break line for readability, but tell Python to act like it's all one line\n & (~df_copy['Sample_ID'].isin(['TMA2.1','TMA2.2','TMA2.3'])),:]\nprint(df_copy.Sample_ID.unique())\n\n## include example for cell types: cancer, stroma, immune", "_____no_output_____" ] ], [ [ "### Save the data by Sample_ID", 
"_____no_output_____" ] ], [ [ "# Check for existence of output file first\nfor sample in ls_samples:\n filename = os.path.join(output_data_dir, sample + \"_\" + step_suffix + \".csv\")\n if os.path.exists(filename):\n print(\"File by name \"+filename+\" already exists.\")", "_____no_output_____" ], [ "# Save output files\nfor sample in ls_samples:\n df_save = df.loc[df['Sample_ID'] == sample,:]\n filename = os.path.join(output_data_dir, sample + \"_\" + step_suffix + \".csv\")\n df_save.to_csv(filename, index = True)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "raw" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb376f8fda41d4ee79d5593181c9ff44dc48afd8
442,818
ipynb
Jupyter Notebook
Deep Learning - Project 2/18L-1811/P2_18L-1811.ipynb
taimurzahid/FAST-Deep-Learning
cfb2417f0bed43838e5c2401105f7f03552c6734
[ "MIT" ]
null
null
null
Deep Learning - Project 2/18L-1811/P2_18L-1811.ipynb
taimurzahid/FAST-Deep-Learning
cfb2417f0bed43838e5c2401105f7f03552c6734
[ "MIT" ]
null
null
null
Deep Learning - Project 2/18L-1811/P2_18L-1811.ipynb
taimurzahid/FAST-Deep-Learning
cfb2417f0bed43838e5c2401105f7f03552c6734
[ "MIT" ]
null
null
null
120.330978
22,564
0.775097
[ [ [ "# KALEEM WAHEED 18L-1811 Project 2", "_____no_output_____" ], [ "### Import Libraries", "_____no_output_____" ] ], [ [ "import os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport tensorflow as tf\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\nfrom keras.optimizers import SGD\nfrom keras import regularizers\nfrom keras.layers.normalization import BatchNormalization\nfrom keras import optimizers", "C:\\Users\\Sh-Ma\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ] ], [ [ "# Enable Intellisense", "_____no_output_____" ] ], [ [ "%config IPCompleter.greedy=True", "_____no_output_____" ] ], [ [ "## Global Variable ", "_____no_output_____" ] ], [ [ "data = []\nlabels = []\nim_width = 64\nim_height = 64\nnum_classes = 7", "_____no_output_____" ] ], [ [ "# PreProcessing Data", "_____no_output_____" ], [ "### Generate new images Handle Class Balance Issue \n#### Now each class have 1833 images \n#### Remove noise/Irrelevent Images ", "_____no_output_____" ] ], [ [ "for i in range(7):\n path=os.getcwd()+\"/Project2Data/\"+str(i+1)+'/'\n print(path)\n progress = 0\n image_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f))]\n for file_name in image_files:\n image_file = str(path + file_name)\n img = cv2.imread(image_file,cv2.IMREAD_GRAYSCALE)\n new_img = cv2.resize(img,(im_width,im_height))\n data.append(new_img)\n progress = progress+1\n# print(int(path[-2]))\n labels.append(int(path[-2])-1)\n if progress%100==0:\n print('Progress '+str(progress)+' Image done of Disease type:' + path[-2])", "F:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2Data/1/\nProgress 100 Image done of Disease type:1\nProgress 200 Image done of Disease type:1\nProgress 300 Image done of Disease type:1\nProgress 400 Image done of Disease type:1\nProgress 500 Image done of Disease type:1\nProgress 600 Image done of Disease type:1\nProgress 700 Image done of Disease type:1\nProgress 800 Image done of Disease type:1\nProgress 900 Image done of Disease type:1\nProgress 1000 Image done of Disease type:1\nProgress 1100 Image done of Disease type:1\nProgress 1200 Image done of Disease type:1\nProgress 1300 Image done of Disease type:1\nProgress 1400 Image done of Disease type:1\nProgress 1500 Image done of Disease type:1\nProgress 1600 Image done of Disease type:1\nProgress 1700 Image done of Disease type:1\nProgress 1800 Image done of Disease type:1\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2Data/2/\nProgress 100 Image done of Disease type:2\nProgress 200 Image done of Disease type:2\nProgress 300 Image done of Disease type:2\nProgress 400 Image done of Disease type:2\nProgress 500 Image done of Disease type:2\nProgress 600 Image done of Disease type:2\nProgress 700 Image done of Disease type:2\nProgress 800 Image done of Disease type:2\nProgress 900 Image done of Disease type:2\nProgress 1000 Image done of Disease type:2\nProgress 1100 Image done of Disease type:2\nProgress 1200 Image done of Disease type:2\nProgress 1300 Image done of Disease type:2\nProgress 1400 Image done of Disease 
type:2\nProgress 1500 Image done of Disease type:2\nProgress 1600 Image done of Disease type:2\nProgress 1700 Image done of Disease type:2\nProgress 1800 Image done of Disease type:2\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2Data/3/\nProgress 100 Image done of Disease type:3\nProgress 200 Image done of Disease type:3\nProgress 300 Image done of Disease type:3\nProgress 400 Image done of Disease type:3\nProgress 500 Image done of Disease type:3\nProgress 600 Image done of Disease type:3\nProgress 700 Image done of Disease type:3\nProgress 800 Image done of Disease type:3\nProgress 900 Image done of Disease type:3\nProgress 1000 Image done of Disease type:3\nProgress 1100 Image done of Disease type:3\nProgress 1200 Image done of Disease type:3\nProgress 1300 Image done of Disease type:3\nProgress 1400 Image done of Disease type:3\nProgress 1500 Image done of Disease type:3\nProgress 1600 Image done of Disease type:3\nProgress 1700 Image done of Disease type:3\nProgress 1800 Image done of Disease type:3\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2Data/4/\nProgress 100 Image done of Disease type:4\nProgress 200 Image done of Disease type:4\nProgress 300 Image done of Disease type:4\nProgress 400 Image done of Disease type:4\nProgress 500 Image done of Disease type:4\nProgress 600 Image done of Disease type:4\nProgress 700 Image done of Disease type:4\nProgress 800 Image done of Disease type:4\nProgress 900 Image done of Disease type:4\nProgress 1000 Image done of Disease type:4\nProgress 1100 Image done of Disease type:4\nProgress 1200 Image done of Disease type:4\nProgress 1300 Image done of Disease type:4\nProgress 1400 Image done of Disease type:4\nProgress 1500 Image done of Disease type:4\nProgress 1600 Image done of Disease type:4\nProgress 1700 Image done of Disease type:4\nProgress 1800 Image done of Disease type:4\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2Data/5/\nProgress 100 Image done of Disease type:5\nProgress 200 Image done of Disease type:5\nProgress 300 Image done of Disease type:5\nProgress 400 Image done of Disease type:5\nProgress 500 Image done of Disease type:5\nProgress 600 Image done of Disease type:5\nProgress 700 Image done of Disease type:5\nProgress 800 Image done of Disease type:5\nProgress 900 Image done of Disease type:5\nProgress 1000 Image done of Disease type:5\nProgress 1100 Image done of Disease type:5\nProgress 1200 Image done of Disease type:5\nProgress 1300 Image done of Disease type:5\nProgress 1400 Image done of Disease type:5\nProgress 1500 Image done of Disease type:5\nProgress 1600 Image done of Disease type:5\nProgress 1700 Image done of Disease type:5\nProgress 1800 Image done of Disease type:5\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2Data/6/\nProgress 100 Image done of Disease type:6\nProgress 200 Image done of Disease type:6\nProgress 300 Image done of Disease type:6\nProgress 400 Image done of Disease type:6\nProgress 500 Image done of Disease type:6\nProgress 600 Image done of Disease type:6\nProgress 700 Image done of Disease type:6\nProgress 800 Image done of Disease type:6\nProgress 900 Image done of Disease type:6\nProgress 1000 Image done of Disease type:6\nProgress 1100 Image done of Disease type:6\nProgress 1200 Image done of Disease type:6\nProgress 1300 Image done of Disease type:6\nProgress 1400 Image done of Disease type:6\nProgress 1500 Image done of Disease type:6\nProgress 1600 Image done of 
Disease type:6\nProgress 1700 Image done of Disease type:6\nProgress 1800 Image done of Disease type:6\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2Data/7/\nProgress 100 Image done of Disease type:7\nProgress 200 Image done of Disease type:7\nProgress 300 Image done of Disease type:7\nProgress 400 Image done of Disease type:7\nProgress 500 Image done of Disease type:7\nProgress 600 Image done of Disease type:7\nProgress 700 Image done of Disease type:7\nProgress 800 Image done of Disease type:7\nProgress 900 Image done of Disease type:7\nProgress 1000 Image done of Disease type:7\nProgress 1100 Image done of Disease type:7\nProgress 1200 Image done of Disease type:7\nProgress 1300 Image done of Disease type:7\nProgress 1400 Image done of Disease type:7\nProgress 1500 Image done of Disease type:7\nProgress 1600 Image done of Disease type:7\nProgress 1700 Image done of Disease type:7\nProgress 1800 Image done of Disease type:7\n" ], [ "data = np.array(data)\nprint(data.shape)", "(12831, 64, 64)\n" ], [ "labels = np.array(labels)\nlabels.shape", "_____no_output_____" ], [ "data = data.reshape((data.shape)[0],(data.shape)[1],(data.shape)[2],1)\ndata.shape", "_____no_output_____" ], [ "from keras.utils import np_utils", "_____no_output_____" ], [ "labels.astype('uint8')\nlabels = keras.utils.to_categorical(labels, num_classes)", "_____no_output_____" ] ], [ [ "# Shuffle Data", "_____no_output_____" ] ], [ [ "def shuffle(a, b):\n rng_state = np.random.get_state()\n np.random.shuffle(a)\n np.random.set_state(rng_state)\n np.random.shuffle(b)\n\n", "_____no_output_____" ], [ "for i in range(10):\n shuffle(data,labels)", "_____no_output_____" ] ], [ [ "### Building Convolutional Neural Network Model 1\n#### batch size 100 , epoch 50 , Adam optimizer Default Learning Rate", "_____no_output_____" ] ], [ [ "model = Sequential()\nmodel.add(Conv2D(kernel_size=(3,3),filters=64,input_shape=(64,64,1),activation=\"relu\",padding=\"valid\"))\nmodel.add(Conv2D(kernel_size=(3,3),filters=64,activation=\"relu\",padding=\"same\"))\nmodel.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))\nmodel.add(Conv2D(kernel_size=(3,3),filters=32,activation=\"relu\",padding=\"same\"))\nmodel.add(Conv2D(kernel_size=(2,2),filters=32,activation=\"relu\",padding=\"same\"))\nmodel.add(MaxPooling2D(pool_size=(3,3),strides=(2,2)))\nmodel.add(Conv2D(kernel_size=(2,2),strides=(2,2),filters=64))\nmodel.add(Flatten())\nmodel.add(Dropout(0.8))\nmodel.add(Dense(128,activation=\"relu\"))\nmodel.add(Dense(7,activation=\"softmax\"))\nmodel.summary()\nmodel.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])\n\n\n\n\n", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_6 (Conv2D) (None, 62, 62, 64) 640 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 62, 62, 64) 36928 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 31, 31, 64) 0 \n_________________________________________________________________\nconv2d_8 (Conv2D) (None, 31, 31, 32) 18464 \n_________________________________________________________________\nconv2d_9 (Conv2D) (None, 31, 31, 32) 4128 \n_________________________________________________________________\nmax_pooling2d_4 (MaxPooling2 (None, 15, 15, 32) 0 \n_________________________________________________________________\nconv2d_10 (Conv2D) 
(None, 7, 7, 64) 8256 \n_________________________________________________________________\nflatten_2 (Flatten) (None, 3136) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 3136) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 128) 401536 \n_________________________________________________________________\ndense_4 (Dense) (None, 7) 903 \n=================================================================\nTotal params: 470,855\nTrainable params: 470,855\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "# Fit model", "_____no_output_____" ] ], [ [ "history = model.fit(data, labels,\n batch_size=100,\n epochs=50,\n verbose=1, shuffle = True,validation_split=0.30)", "Train on 8981 samples, validate on 3850 samples\nEpoch 1/50\n8981/8981 [==============================] - 66s 7ms/step - loss: 2.7555 - acc: 0.1953 - val_loss: 1.8197 - val_acc: 0.3044\nEpoch 2/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 1.7289 - acc: 0.3274 - val_loss: 1.5590 - val_acc: 0.4556\nEpoch 3/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 1.5329 - acc: 0.4160 - val_loss: 1.3850 - val_acc: 0.5055\nEpoch 4/50\n8981/8981 [==============================] - 54s 6ms/step - loss: 1.3893 - acc: 0.4766 - val_loss: 1.2583 - val_acc: 0.5423\nEpoch 5/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 1.2859 - acc: 0.5222 - val_loss: 1.1353 - val_acc: 0.5909\nEpoch 6/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 1.2267 - acc: 0.5488 - val_loss: 1.0711 - val_acc: 0.6151\nEpoch 7/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 1.1394 - acc: 0.5732 - val_loss: 1.0313 - val_acc: 0.6301\nEpoch 8/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 1.0666 - acc: 0.6041 - val_loss: 1.0017 - val_acc: 0.6278\nEpoch 9/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 1.0368 - acc: 0.6210 - val_loss: 0.9361 - val_acc: 0.6621\nEpoch 10/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.9730 - acc: 0.6385 - val_loss: 0.8549 - val_acc: 0.6912\nEpoch 11/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.9278 - acc: 0.6572 - val_loss: 0.8204 - val_acc: 0.7023\nEpoch 12/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.8704 - acc: 0.6770 - val_loss: 0.7890 - val_acc: 0.7153\nEpoch 13/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.8403 - acc: 0.6980 - val_loss: 0.7618 - val_acc: 0.7239\nEpoch 14/50\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.8156 - acc: 0.6984 - val_loss: 0.7649 - val_acc: 0.7208\nEpoch 15/50\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.7821 - acc: 0.7135 - val_loss: 0.7092 - val_acc: 0.7366\nEpoch 16/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.7492 - acc: 0.7204 - val_loss: 0.7082 - val_acc: 0.7390\nEpoch 17/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.7300 - acc: 0.7348 - val_loss: 0.7056 - val_acc: 0.7353\nEpoch 18/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.6954 - acc: 0.7402 - val_loss: 0.6622 - val_acc: 0.7634\nEpoch 19/50\n8981/8981 [==============================] - 55s 6ms/step - loss: 0.6584 - acc: 0.7596 - val_loss: 0.6705 - val_acc: 0.7545\nEpoch 20/50\n8981/8981 [==============================] - 53s 6ms/step 
- loss: 0.6423 - acc: 0.7657 - val_loss: 0.6463 - val_acc: 0.7626\nEpoch 21/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.6064 - acc: 0.7736 - val_loss: 0.6938 - val_acc: 0.7494\nEpoch 22/50\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.6345 - acc: 0.7708 - val_loss: 0.6253 - val_acc: 0.7751\nEpoch 23/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.5670 - acc: 0.7933 - val_loss: 0.6660 - val_acc: 0.7540\nEpoch 24/50\n8981/8981 [==============================] - 56s 6ms/step - loss: 0.5788 - acc: 0.7876 - val_loss: 0.5964 - val_acc: 0.7966\nEpoch 25/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.5559 - acc: 0.7997 - val_loss: 0.5570 - val_acc: 0.8047\nEpoch 26/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.5237 - acc: 0.8075 - val_loss: 0.5884 - val_acc: 0.7914\nEpoch 27/50\n8981/8981 [==============================] - 55s 6ms/step - loss: 0.5101 - acc: 0.8161 - val_loss: 0.5359 - val_acc: 0.8148\nEpoch 28/50\n8981/8981 [==============================] - 56s 6ms/step - loss: 0.4797 - acc: 0.8240 - val_loss: 0.5440 - val_acc: 0.8112\nEpoch 29/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.5041 - acc: 0.8176 - val_loss: 0.5366 - val_acc: 0.8166\nEpoch 30/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.4668 - acc: 0.8304 - val_loss: 0.5468 - val_acc: 0.8166\nEpoch 31/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.4457 - acc: 0.8363 - val_loss: 0.5204 - val_acc: 0.8145\nEpoch 32/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.4089 - acc: 0.8506 - val_loss: 0.4967 - val_acc: 0.8288\nEpoch 33/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.4358 - acc: 0.8434 - val_loss: 0.4820 - val_acc: 0.8343\nEpoch 34/50\n8981/8981 [==============================] - 55s 6ms/step - loss: 0.4315 - acc: 0.8412 - val_loss: 0.4937 - val_acc: 0.8327\nEpoch 35/50\n8981/8981 [==============================] - 57s 6ms/step - loss: 0.4050 - acc: 0.8515 - val_loss: 0.4897 - val_acc: 0.8340\nEpoch 36/50\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.3921 - acc: 0.8571 - val_loss: 0.5096 - val_acc: 0.8327\nEpoch 37/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3872 - acc: 0.8601 - val_loss: 0.4845 - val_acc: 0.8353\nEpoch 38/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3727 - acc: 0.8619 - val_loss: 0.4550 - val_acc: 0.8475\nEpoch 39/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3553 - acc: 0.8693 - val_loss: 0.5131 - val_acc: 0.8208\nEpoch 40/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3852 - acc: 0.8573 - val_loss: 0.4719 - val_acc: 0.8392\nEpoch 41/50\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.3495 - acc: 0.8748 - val_loss: 0.4668 - val_acc: 0.8514\nEpoch 42/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3323 - acc: 0.8813 - val_loss: 0.4533 - val_acc: 0.8494\nEpoch 43/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3411 - acc: 0.8810 - val_loss: 0.4456 - val_acc: 0.8556\nEpoch 44/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3072 - acc: 0.8892 - val_loss: 0.4621 - val_acc: 0.8483\nEpoch 45/50\n8981/8981 [==============================] - 52s 6ms/step - loss: 0.2953 - acc: 0.8971 - val_loss: 0.4521 - val_acc: 0.8506\nEpoch 46/50\n8981/8981 
[==============================] - 53s 6ms/step - loss: 0.3194 - acc: 0.8861 - val_loss: 0.4352 - val_acc: 0.8600\nEpoch 47/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3360 - acc: 0.8814 - val_loss: 0.4372 - val_acc: 0.8535\nEpoch 48/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.2952 - acc: 0.8948 - val_loss: 0.4888 - val_acc: 0.8408\nEpoch 49/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3187 - acc: 0.8895 - val_loss: 0.4210 - val_acc: 0.8629\nEpoch 50/50\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.2882 - acc: 0.8980 - val_loss: 0.4168 - val_acc: 0.8634\n" ], [ "plt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "_____no_output_____" ], [ "#accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n#loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "_____no_output_____" ], [ "(loss, accuracy) = model.evaluate(data, labels, batch_size=batchsize, verbose=1)\n", "12831/12831 [==============================] - 24s 2ms/step\n" ] ], [ [ "## Building Convolutional Neural Network Model Architecture 2\n#### use loss = 'categorical_crossentropy' batch size 100 , epoch 60 ,Optimizer Adam Default Learning Rate", "_____no_output_____" ] ], [ [ "\nmodel = Sequential()\nmodel.add(Conv2D(kernel_size=(3,3),filters=64,input_shape=(64,64,1),activation=\"relu\",padding=\"valid\"))\nmodel.add(Conv2D(kernel_size=(3,3),filters=64,activation=\"relu\",padding=\"same\"))\nmodel.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))\nmodel.add(Conv2D(kernel_size=(3,3),filters=32,activation=\"relu\",padding=\"same\"))\nmodel.add(Conv2D(kernel_size=(2,2),filters=32,activation=\"relu\",padding=\"same\"))\nmodel.add(MaxPooling2D(pool_size=(3,3),strides=(2,2)))\nmodel.add(Conv2D(kernel_size=(2,2),strides=(2,2),filters=64))\nmodel.add(Flatten())\nmodel.add(Dropout(0.8))\nmodel.add(Dense(128,activation=\"relu\"))\nmodel.add(Dense(7,activation=\"softmax\"))\nmodel.summary()\nmodel.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_11 (Conv2D) (None, 62, 62, 64) 640 \n_________________________________________________________________\nconv2d_12 (Conv2D) (None, 62, 62, 64) 36928 \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, 31, 31, 64) 0 \n_________________________________________________________________\nconv2d_13 (Conv2D) (None, 31, 31, 32) 18464 \n_________________________________________________________________\nconv2d_14 (Conv2D) (None, 31, 31, 32) 4128 \n_________________________________________________________________\nmax_pooling2d_6 (MaxPooling2 (None, 15, 15, 32) 0 \n_________________________________________________________________\nconv2d_15 (Conv2D) (None, 7, 7, 64) 8256 \n_________________________________________________________________\nflatten_3 (Flatten) (None, 3136) 0 
\n_________________________________________________________________\ndropout_3 (Dropout) (None, 3136) 0 \n_________________________________________________________________\ndense_5 (Dense) (None, 128) 401536 \n_________________________________________________________________\ndense_6 (Dense) (None, 7) 903 \n=================================================================\nTotal params: 470,855\nTrainable params: 470,855\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "batchsize=100\nepochs=60", "_____no_output_____" ], [ "history = model.fit(data, labels,\n batch_size=batchsize,\n epochs=epochs,\n verbose=1, shuffle = True,validation_split=0.30)", "Train on 8981 samples, validate on 3850 samples\nEpoch 1/60\n8981/8981 [==============================] - 67s 7ms/step - loss: 2.3655 - acc: 0.2044 - val_loss: 1.8000 - val_acc: 0.3249\nEpoch 2/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 1.6596 - acc: 0.3648 - val_loss: 1.4690 - val_acc: 0.4306\nEpoch 3/60\n8981/8981 [==============================] - 55s 6ms/step - loss: 1.4435 - acc: 0.4594 - val_loss: 1.3206 - val_acc: 0.5127\nEpoch 4/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 1.3448 - acc: 0.4973 - val_loss: 1.2585 - val_acc: 0.5332\nEpoch 5/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 1.2389 - acc: 0.5366 - val_loss: 1.1519 - val_acc: 0.5748\nEpoch 6/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 1.1585 - acc: 0.5703 - val_loss: 1.1456 - val_acc: 0.5673\nEpoch 7/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 1.1022 - acc: 0.5872 - val_loss: 1.0020 - val_acc: 0.6273\nEpoch 8/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 1.0064 - acc: 0.6245 - val_loss: 0.9537 - val_acc: 0.6449\nEpoch 9/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.9525 - acc: 0.6421 - val_loss: 0.9480 - val_acc: 0.6582\nEpoch 10/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.9067 - acc: 0.6595 - val_loss: 0.8665 - val_acc: 0.6896\nEpoch 11/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.8713 - acc: 0.6846 - val_loss: 0.8365 - val_acc: 0.7091\nEpoch 12/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.8358 - acc: 0.6841 - val_loss: 0.8582 - val_acc: 0.6919\nEpoch 13/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.7803 - acc: 0.7047 - val_loss: 0.8072 - val_acc: 0.7164\nEpoch 14/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.7502 - acc: 0.7238 - val_loss: 0.7862 - val_acc: 0.7200\nEpoch 15/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.7108 - acc: 0.7347 - val_loss: 0.7267 - val_acc: 0.7468\nEpoch 16/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.6742 - acc: 0.7530 - val_loss: 0.7710 - val_acc: 0.7312\nEpoch 17/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.6565 - acc: 0.7589 - val_loss: 0.6926 - val_acc: 0.7631\nEpoch 18/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.6521 - acc: 0.7598 - val_loss: 0.7041 - val_acc: 0.7494\nEpoch 19/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.6024 - acc: 0.7804 - val_loss: 0.6312 - val_acc: 0.7852\nEpoch 20/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.5951 - acc: 0.7801 - val_loss: 0.7056 - val_acc: 0.7610\nEpoch 21/60\n8981/8981 
[==============================] - 53s 6ms/step - loss: 0.5838 - acc: 0.7902 - val_loss: 0.6624 - val_acc: 0.7821\nEpoch 22/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.5725 - acc: 0.7945 - val_loss: 0.6343 - val_acc: 0.7883\nEpoch 23/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.5532 - acc: 0.7976 - val_loss: 0.5978 - val_acc: 0.8010\nEpoch 24/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.5495 - acc: 0.7997 - val_loss: 0.6056 - val_acc: 0.7992\nEpoch 25/60\n8981/8981 [==============================] - 55s 6ms/step - loss: 0.4950 - acc: 0.8176 - val_loss: 0.5885 - val_acc: 0.8057\nEpoch 26/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.4826 - acc: 0.8234 - val_loss: 0.6067 - val_acc: 0.7894\nEpoch 27/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.4421 - acc: 0.8378 - val_loss: 0.5567 - val_acc: 0.8171\nEpoch 28/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.4328 - acc: 0.8457 - val_loss: 0.5554 - val_acc: 0.8145\nEpoch 29/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.4219 - acc: 0.8463 - val_loss: 0.5249 - val_acc: 0.8234\nEpoch 30/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.4393 - acc: 0.8416 - val_loss: 0.5931 - val_acc: 0.8078\nEpoch 31/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.4185 - acc: 0.8502 - val_loss: 0.5551 - val_acc: 0.8234\nEpoch 32/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.4062 - acc: 0.8497 - val_loss: 0.5457 - val_acc: 0.8205\nEpoch 33/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.3708 - acc: 0.8683 - val_loss: 0.5402 - val_acc: 0.8299\nEpoch 34/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.3752 - acc: 0.8657 - val_loss: 0.5277 - val_acc: 0.8288\nEpoch 35/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.3991 - acc: 0.8587 - val_loss: 0.5255 - val_acc: 0.8356\nEpoch 36/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3499 - acc: 0.8774 - val_loss: 0.4827 - val_acc: 0.8400\nEpoch 37/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.3522 - acc: 0.8709 - val_loss: 0.6630 - val_acc: 0.7948\nEpoch 38/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.3652 - acc: 0.8663 - val_loss: 0.5471 - val_acc: 0.8216\nEpoch 39/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.3418 - acc: 0.8763 - val_loss: 0.5145 - val_acc: 0.8361\nEpoch 40/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.3345 - acc: 0.8792 - val_loss: 0.4886 - val_acc: 0.8457\nEpoch 41/60\n8981/8981 [==============================] - 55s 6ms/step - loss: 0.3179 - acc: 0.8870 - val_loss: 0.4728 - val_acc: 0.8478\nEpoch 42/60\n8981/8981 [==============================] - 55s 6ms/step - loss: 0.3150 - acc: 0.8856 - val_loss: 0.4991 - val_acc: 0.8483\nEpoch 43/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.2956 - acc: 0.8969 - val_loss: 0.5077 - val_acc: 0.8460\nEpoch 44/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.2865 - acc: 0.8971 - val_loss: 0.5084 - val_acc: 0.8488\nEpoch 45/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.2753 - acc: 0.9034 - val_loss: 0.5047 - val_acc: 0.8434\nEpoch 46/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.3118 - acc: 0.8899 - val_loss: 0.4915 - 
val_acc: 0.8481\nEpoch 47/60\n8981/8981 [==============================] - 56s 6ms/step - loss: 0.2814 - acc: 0.8957 - val_loss: 0.5066 - val_acc: 0.8356\nEpoch 48/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.2784 - acc: 0.9009 - val_loss: 0.4467 - val_acc: 0.8636\nEpoch 49/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.2823 - acc: 0.8978 - val_loss: 0.4749 - val_acc: 0.8558\nEpoch 50/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.2640 - acc: 0.9056 - val_loss: 0.4956 - val_acc: 0.8535\nEpoch 51/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.2647 - acc: 0.9072 - val_loss: 0.5113 - val_acc: 0.8545\nEpoch 52/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.2666 - acc: 0.9094 - val_loss: 0.5491 - val_acc: 0.8423\nEpoch 53/60\n8981/8981 [==============================] - 55s 6ms/step - loss: 0.2560 - acc: 0.9099 - val_loss: 0.4752 - val_acc: 0.8571\nEpoch 54/60\n8981/8981 [==============================] - 55s 6ms/step - loss: 0.2529 - acc: 0.9111 - val_loss: 0.5806 - val_acc: 0.8288\nEpoch 55/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.2645 - acc: 0.9065 - val_loss: 0.5207 - val_acc: 0.8486\nEpoch 56/60\n8981/8981 [==============================] - 55s 6ms/step - loss: 0.2521 - acc: 0.9123 - val_loss: 0.5350 - val_acc: 0.8548\nEpoch 57/60\n8981/8981 [==============================] - 54s 6ms/step - loss: 0.2501 - acc: 0.9101 - val_loss: 0.5157 - val_acc: 0.8452\nEpoch 58/60\n8981/8981 [==============================] - 55s 6ms/step - loss: 0.2587 - acc: 0.9075 - val_loss: 0.4554 - val_acc: 0.8610\nEpoch 59/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.2326 - acc: 0.9179 - val_loss: 0.4662 - val_acc: 0.8595\nEpoch 60/60\n8981/8981 [==============================] - 53s 6ms/step - loss: 0.2364 - acc: 0.9159 - val_loss: 0.4886 - val_acc: 0.8642\n" ], [ "(loss, accuracy) = model.evaluate(data, labels, batch_size=batchsize, verbose=1)\nprint(\"accuracy: {:.2f}%\".format(accuracy * 100))", "12831/12831 [==============================] - 25s 2ms/step\naccuracy: 95.14%\n" ], [ "#accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n#loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "_____no_output_____" ] ], [ [ "# Transfer learning on ResNet ", "_____no_output_____" ], [ "## Variables", "_____no_output_____" ] ], [ [ "data = []\nlabels = []\nim_width = 64\nim_height = 64\nnum_classes = 7", "_____no_output_____" ] ], [ [ "# Use provided Data \n### Remove irrelevant/Noise Data", "_____no_output_____" ] ], [ [ "for i in range(7):\n    path=os.getcwd()+\"/Project2DataClean/\"+str(i+1)+'/'\n    print(path)\n    progress = 0\n    image_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f))]\n    for file_name in image_files:\n        image_file = str(path + file_name)\n        img = cv2.imread(image_file)\n        new_img = cv2.resize(img,(im_width,im_height))\n        data.append(new_img)\n        progress = progress+1\n#         print(int(path[-2]))\n        labels.append(int(path[-2])-1)\n        if progress%100==0:\n            print('Progress '+str(progress)+' Image done of Disease type:' + path[-2])", "F:\\University\\Semester1\\Deep Learning\\Project\\Project 
2\\Code/Project2DataClean/1/\nProgress 100 Image done of Disease type:1\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/2/\nProgress 100 Image done of Disease type:2\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/3/\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/4/\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/5/\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/6/\nProgress 100 Image done of Disease type:6\nProgress 200 Image done of Disease type:6\nProgress 300 Image done of Disease type:6\nProgress 400 Image done of Disease type:6\nProgress 500 Image done of Disease type:6\nProgress 600 Image done of Disease type:6\nProgress 700 Image done of Disease type:6\nProgress 800 Image done of Disease type:6\nProgress 900 Image done of Disease type:6\nProgress 1000 Image done of Disease type:6\nProgress 1100 Image done of Disease type:6\nProgress 1200 Image done of Disease type:6\nProgress 1300 Image done of Disease type:6\nProgress 1400 Image done of Disease type:6\nProgress 1500 Image done of Disease type:6\nProgress 1600 Image done of Disease type:6\nProgress 1700 Image done of Disease type:6\nProgress 1800 Image done of Disease type:6\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/7/\n" ] ], [ [ "# Use 3-Channel Images", "_____no_output_____" ] ], [ [ "data = np.array(data)\nlabels = np.array(labels)\nprint(len(data))\nprint(len(labels))\nprint(data.shape)\nprint(labels.shape)", "2317\n2317\n(2317, 64, 64, 3)\n(2317,)\n" ], [ "data = data.astype('uint8')\nlabels = keras.utils.to_categorical(labels, 7)\nprint(labels.shape)", "(2317, 7)\n" ], [ "import numpy as np\nimport os\nimport time\n#from resnet50 import ResNet50\nfrom keras.preprocessing import image\nfrom keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten\n#from imagenet_utils import preprocess_input\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras.utils import np_utils\nfrom sklearn.utils import shuffle\nfrom sklearn.cross_validation import train_test_split", "C:\\Users\\Sh-Ma\\Anaconda3\\lib\\site-packages\\sklearn\\cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. 
This module will be removed in 0.20.\n \"This module will be removed in 0.20.\", DeprecationWarning)\n" ], [ "#Shuffle the dataset\nx,y = shuffle(data,labels, random_state=2)", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)", "_____no_output_____" ] ], [ [ "# Keras ResNet50 Model 1 With Transfer learning\n### Input(shape=(64, 64, 3)),include_top=False,weights='imagenet'\n### batch_size=40, epochs=15 Dropout 0.6 and 0.4\n#### Don't want to train all model \n##### Last 5 layer retrain \n", "_____no_output_____" ] ], [ [ "image_input = Input(shape=(64, 64, 3))\nmodel = keras.applications.resnet50.ResNet50(input_tensor=image_input, include_top=False,weights='imagenet')", "C:\\Users\\Sh-Ma\\Anaconda3\\lib\\site-packages\\keras_applications\\resnet50.py:265: UserWarning: The output shape of `ResNet50(include_top=False)` has been changed since Keras 2.2.0.\n warnings.warn('The output shape of `ResNet50(include_top=False)` '\n" ], [ "last_layer = model.output", "_____no_output_____" ], [ "\n# add a global spatial average pooling layer\nx = GlobalAveragePooling2D()(last_layer)\n", "_____no_output_____" ], [ "# add fully-connected & dropout layers\nx = Dense(512, activation='relu',name='fc-1')(x)\n", "_____no_output_____" ], [ "x = Dropout(0.4)(x)\nx = Dense(128, activation='relu',name='fc-2')(x)\nx = Dropout(0.6)(x)\n# a softmax layer for 7 classes\nout = Dense(7, activation='softmax',name='output_layer')(x)", "_____no_output_____" ], [ "custom_resnet_model2 = Model(inputs=model.input, outputs=out)\n\ncustom_resnet_model2.summary()\n", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_9 (InputLayer) (None, 64, 64, 3) 0 \n__________________________________________________________________________________________________\nconv1_pad (ZeroPadding2D) (None, 70, 70, 3) 0 input_9[0][0] \n__________________________________________________________________________________________________\nconv1 (Conv2D) (None, 32, 32, 64) 9472 conv1_pad[0][0] \n__________________________________________________________________________________________________\nbn_conv1 (BatchNormalization) (None, 32, 32, 64) 256 conv1[0][0] \n__________________________________________________________________________________________________\nactivation_390 (Activation) (None, 32, 32, 64) 0 bn_conv1[0][0] \n__________________________________________________________________________________________________\npool1_pad (ZeroPadding2D) (None, 34, 34, 64) 0 activation_390[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_9 (MaxPooling2D) (None, 16, 16, 64) 0 pool1_pad[0][0] \n__________________________________________________________________________________________________\nres2a_branch2a (Conv2D) (None, 16, 16, 64) 4160 max_pooling2d_9[0][0] \n__________________________________________________________________________________________________\nbn2a_branch2a (BatchNormalizati (None, 16, 16, 64) 256 res2a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_391 (Activation) (None, 16, 16, 64) 0 bn2a_branch2a[0][0] \n__________________________________________________________________________________________________\nres2a_branch2b (Conv2D) 
(None, 16, 16, 64) 36928 activation_391[0][0] \n__________________________________________________________________________________________________\nbn2a_branch2b (BatchNormalizati (None, 16, 16, 64) 256 res2a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_392 (Activation) (None, 16, 16, 64) 0 bn2a_branch2b[0][0] \n__________________________________________________________________________________________________\nres2a_branch2c (Conv2D) (None, 16, 16, 256) 16640 activation_392[0][0] \n__________________________________________________________________________________________________\nres2a_branch1 (Conv2D) (None, 16, 16, 256) 16640 max_pooling2d_9[0][0] \n__________________________________________________________________________________________________\nbn2a_branch2c (BatchNormalizati (None, 16, 16, 256) 1024 res2a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn2a_branch1 (BatchNormalizatio (None, 16, 16, 256) 1024 res2a_branch1[0][0] \n__________________________________________________________________________________________________\nadd_128 (Add) (None, 16, 16, 256) 0 bn2a_branch2c[0][0] \n bn2a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_393 (Activation) (None, 16, 16, 256) 0 add_128[0][0] \n__________________________________________________________________________________________________\nres2b_branch2a (Conv2D) (None, 16, 16, 64) 16448 activation_393[0][0] \n__________________________________________________________________________________________________\nbn2b_branch2a (BatchNormalizati (None, 16, 16, 64) 256 res2b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_394 (Activation) (None, 16, 16, 64) 0 bn2b_branch2a[0][0] \n__________________________________________________________________________________________________\nres2b_branch2b (Conv2D) (None, 16, 16, 64) 36928 activation_394[0][0] \n__________________________________________________________________________________________________\nbn2b_branch2b (BatchNormalizati (None, 16, 16, 64) 256 res2b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_395 (Activation) (None, 16, 16, 64) 0 bn2b_branch2b[0][0] \n__________________________________________________________________________________________________\nres2b_branch2c (Conv2D) (None, 16, 16, 256) 16640 activation_395[0][0] \n__________________________________________________________________________________________________\nbn2b_branch2c (BatchNormalizati (None, 16, 16, 256) 1024 res2b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_129 (Add) (None, 16, 16, 256) 0 bn2b_branch2c[0][0] \n activation_393[0][0] \n__________________________________________________________________________________________________\nactivation_396 (Activation) (None, 16, 16, 256) 0 add_129[0][0] \n__________________________________________________________________________________________________\nres2c_branch2a (Conv2D) (None, 16, 16, 64) 16448 activation_396[0][0] \n__________________________________________________________________________________________________\nbn2c_branch2a (BatchNormalizati (None, 16, 16, 64) 256 res2c_branch2a[0][0] 
\n__________________________________________________________________________________________________\nactivation_397 (Activation) (None, 16, 16, 64) 0 bn2c_branch2a[0][0] \n__________________________________________________________________________________________________\nres2c_branch2b (Conv2D) (None, 16, 16, 64) 36928 activation_397[0][0] \n__________________________________________________________________________________________________\nbn2c_branch2b (BatchNormalizati (None, 16, 16, 64) 256 res2c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_398 (Activation) (None, 16, 16, 64) 0 bn2c_branch2b[0][0] \n__________________________________________________________________________________________________\nres2c_branch2c (Conv2D) (None, 16, 16, 256) 16640 activation_398[0][0] \n__________________________________________________________________________________________________\nbn2c_branch2c (BatchNormalizati (None, 16, 16, 256) 1024 res2c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_130 (Add) (None, 16, 16, 256) 0 bn2c_branch2c[0][0] \n activation_396[0][0] \n__________________________________________________________________________________________________\nactivation_399 (Activation) (None, 16, 16, 256) 0 add_130[0][0] \n__________________________________________________________________________________________________\nres3a_branch2a (Conv2D) (None, 8, 8, 128) 32896 activation_399[0][0] \n__________________________________________________________________________________________________\nbn3a_branch2a (BatchNormalizati (None, 8, 8, 128) 512 res3a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_400 (Activation) (None, 8, 8, 128) 0 bn3a_branch2a[0][0] \n__________________________________________________________________________________________________\nres3a_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_400[0][0] \n__________________________________________________________________________________________________\nbn3a_branch2b (BatchNormalizati (None, 8, 8, 128) 512 res3a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_401 (Activation) (None, 8, 8, 128) 0 bn3a_branch2b[0][0] \n__________________________________________________________________________________________________\nres3a_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_401[0][0] \n__________________________________________________________________________________________________\nres3a_branch1 (Conv2D) (None, 8, 8, 512) 131584 activation_399[0][0] \n__________________________________________________________________________________________________\nbn3a_branch2c (BatchNormalizati (None, 8, 8, 512) 2048 res3a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn3a_branch1 (BatchNormalizatio (None, 8, 8, 512) 2048 res3a_branch1[0][0] \n__________________________________________________________________________________________________\nadd_131 (Add) (None, 8, 8, 512) 0 bn3a_branch2c[0][0] \n bn3a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_402 (Activation) (None, 8, 8, 512) 0 add_131[0][0] 
\n__________________________________________________________________________________________________\nres3b_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_402[0][0] \n__________________________________________________________________________________________________\nbn3b_branch2a (BatchNormalizati (None, 8, 8, 128) 512 res3b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_403 (Activation) (None, 8, 8, 128) 0 bn3b_branch2a[0][0] \n__________________________________________________________________________________________________\nres3b_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_403[0][0] \n__________________________________________________________________________________________________\nbn3b_branch2b (BatchNormalizati (None, 8, 8, 128) 512 res3b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_404 (Activation) (None, 8, 8, 128) 0 bn3b_branch2b[0][0] \n__________________________________________________________________________________________________\nres3b_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_404[0][0] \n__________________________________________________________________________________________________\nbn3b_branch2c (BatchNormalizati (None, 8, 8, 512) 2048 res3b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_132 (Add) (None, 8, 8, 512) 0 bn3b_branch2c[0][0] \n activation_402[0][0] \n__________________________________________________________________________________________________\nactivation_405 (Activation) (None, 8, 8, 512) 0 add_132[0][0] \n__________________________________________________________________________________________________\nres3c_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_405[0][0] \n__________________________________________________________________________________________________\nbn3c_branch2a (BatchNormalizati (None, 8, 8, 128) 512 res3c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_406 (Activation) (None, 8, 8, 128) 0 bn3c_branch2a[0][0] \n__________________________________________________________________________________________________\nres3c_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_406[0][0] \n__________________________________________________________________________________________________\nbn3c_branch2b (BatchNormalizati (None, 8, 8, 128) 512 res3c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_407 (Activation) (None, 8, 8, 128) 0 bn3c_branch2b[0][0] \n__________________________________________________________________________________________________\nres3c_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_407[0][0] \n__________________________________________________________________________________________________\nbn3c_branch2c (BatchNormalizati (None, 8, 8, 512) 2048 res3c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_133 (Add) (None, 8, 8, 512) 0 bn3c_branch2c[0][0] \n activation_405[0][0] \n__________________________________________________________________________________________________\nactivation_408 (Activation) (None, 8, 8, 512) 0 add_133[0][0] 
\n__________________________________________________________________________________________________\nres3d_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_408[0][0] \n__________________________________________________________________________________________________\nbn3d_branch2a (BatchNormalizati (None, 8, 8, 128) 512 res3d_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_409 (Activation) (None, 8, 8, 128) 0 bn3d_branch2a[0][0] \n__________________________________________________________________________________________________\nres3d_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_409[0][0] \n__________________________________________________________________________________________________\nbn3d_branch2b (BatchNormalizati (None, 8, 8, 128) 512 res3d_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_410 (Activation) (None, 8, 8, 128) 0 bn3d_branch2b[0][0] \n__________________________________________________________________________________________________\nres3d_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_410[0][0] \n__________________________________________________________________________________________________\nbn3d_branch2c (BatchNormalizati (None, 8, 8, 512) 2048 res3d_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_134 (Add) (None, 8, 8, 512) 0 bn3d_branch2c[0][0] \n activation_408[0][0] \n__________________________________________________________________________________________________\nactivation_411 (Activation) (None, 8, 8, 512) 0 add_134[0][0] \n__________________________________________________________________________________________________\nres4a_branch2a (Conv2D) (None, 4, 4, 256) 131328 activation_411[0][0] \n__________________________________________________________________________________________________\nbn4a_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_412 (Activation) (None, 4, 4, 256) 0 bn4a_branch2a[0][0] \n__________________________________________________________________________________________________\nres4a_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_412[0][0] \n__________________________________________________________________________________________________\nbn4a_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_413 (Activation) (None, 4, 4, 256) 0 bn4a_branch2b[0][0] \n__________________________________________________________________________________________________\nres4a_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_413[0][0] \n__________________________________________________________________________________________________\nres4a_branch1 (Conv2D) (None, 4, 4, 1024) 525312 activation_411[0][0] \n__________________________________________________________________________________________________\nbn4a_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn4a_branch1 (BatchNormalizatio (None, 4, 4, 1024) 4096 res4a_branch1[0][0] 
\n__________________________________________________________________________________________________\nadd_135 (Add) (None, 4, 4, 1024) 0 bn4a_branch2c[0][0] \n bn4a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_414 (Activation) (None, 4, 4, 1024) 0 add_135[0][0] \n__________________________________________________________________________________________________\nres4b_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_414[0][0] \n__________________________________________________________________________________________________\nbn4b_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_415 (Activation) (None, 4, 4, 256) 0 bn4b_branch2a[0][0] \n__________________________________________________________________________________________________\nres4b_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_415[0][0] \n__________________________________________________________________________________________________\nbn4b_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_416 (Activation) (None, 4, 4, 256) 0 bn4b_branch2b[0][0] \n__________________________________________________________________________________________________\nres4b_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_416[0][0] \n__________________________________________________________________________________________________\nbn4b_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_136 (Add) (None, 4, 4, 1024) 0 bn4b_branch2c[0][0] \n activation_414[0][0] \n__________________________________________________________________________________________________\nactivation_417 (Activation) (None, 4, 4, 1024) 0 add_136[0][0] \n__________________________________________________________________________________________________\nres4c_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_417[0][0] \n__________________________________________________________________________________________________\nbn4c_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_418 (Activation) (None, 4, 4, 256) 0 bn4c_branch2a[0][0] \n__________________________________________________________________________________________________\nres4c_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_418[0][0] \n__________________________________________________________________________________________________\nbn4c_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_419 (Activation) (None, 4, 4, 256) 0 bn4c_branch2b[0][0] \n__________________________________________________________________________________________________\nres4c_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_419[0][0] \n__________________________________________________________________________________________________\nbn4c_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4c_branch2c[0][0] 
\n__________________________________________________________________________________________________\nadd_137 (Add) (None, 4, 4, 1024) 0 bn4c_branch2c[0][0] \n activation_417[0][0] \n__________________________________________________________________________________________________\nactivation_420 (Activation) (None, 4, 4, 1024) 0 add_137[0][0] \n__________________________________________________________________________________________________\nres4d_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_420[0][0] \n__________________________________________________________________________________________________\nbn4d_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4d_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_421 (Activation) (None, 4, 4, 256) 0 bn4d_branch2a[0][0] \n__________________________________________________________________________________________________\nres4d_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_421[0][0] \n__________________________________________________________________________________________________\nbn4d_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4d_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_422 (Activation) (None, 4, 4, 256) 0 bn4d_branch2b[0][0] \n__________________________________________________________________________________________________\nres4d_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_422[0][0] \n__________________________________________________________________________________________________\nbn4d_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4d_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_138 (Add) (None, 4, 4, 1024) 0 bn4d_branch2c[0][0] \n activation_420[0][0] \n__________________________________________________________________________________________________\nactivation_423 (Activation) (None, 4, 4, 1024) 0 add_138[0][0] \n__________________________________________________________________________________________________\nres4e_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_423[0][0] \n__________________________________________________________________________________________________\nbn4e_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4e_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_424 (Activation) (None, 4, 4, 256) 0 bn4e_branch2a[0][0] \n__________________________________________________________________________________________________\nres4e_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_424[0][0] \n__________________________________________________________________________________________________\nbn4e_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4e_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_425 (Activation) (None, 4, 4, 256) 0 bn4e_branch2b[0][0] \n__________________________________________________________________________________________________\nres4e_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_425[0][0] \n__________________________________________________________________________________________________\nbn4e_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4e_branch2c[0][0] 
\n__________________________________________________________________________________________________\nadd_139 (Add) (None, 4, 4, 1024) 0 bn4e_branch2c[0][0] \n activation_423[0][0] \n__________________________________________________________________________________________________\nactivation_426 (Activation) (None, 4, 4, 1024) 0 add_139[0][0] \n__________________________________________________________________________________________________\nres4f_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_426[0][0] \n__________________________________________________________________________________________________\nbn4f_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4f_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_427 (Activation) (None, 4, 4, 256) 0 bn4f_branch2a[0][0] \n__________________________________________________________________________________________________\nres4f_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_427[0][0] \n__________________________________________________________________________________________________\nbn4f_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4f_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_428 (Activation) (None, 4, 4, 256) 0 bn4f_branch2b[0][0] \n__________________________________________________________________________________________________\nres4f_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_428[0][0] \n__________________________________________________________________________________________________\nbn4f_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4f_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_140 (Add) (None, 4, 4, 1024) 0 bn4f_branch2c[0][0] \n activation_426[0][0] \n__________________________________________________________________________________________________\nactivation_429 (Activation) (None, 4, 4, 1024) 0 add_140[0][0] \n__________________________________________________________________________________________________\nres5a_branch2a (Conv2D) (None, 2, 2, 512) 524800 activation_429[0][0] \n__________________________________________________________________________________________________\nbn5a_branch2a (BatchNormalizati (None, 2, 2, 512) 2048 res5a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_430 (Activation) (None, 2, 2, 512) 0 bn5a_branch2a[0][0] \n__________________________________________________________________________________________________\nres5a_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_430[0][0] \n__________________________________________________________________________________________________\nbn5a_branch2b (BatchNormalizati (None, 2, 2, 512) 2048 res5a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_431 (Activation) (None, 2, 2, 512) 0 bn5a_branch2b[0][0] \n__________________________________________________________________________________________________\nres5a_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_431[0][0] \n__________________________________________________________________________________________________\nres5a_branch1 (Conv2D) (None, 2, 2, 2048) 2099200 activation_429[0][0] 
\n__________________________________________________________________________________________________\nbn5a_branch2c (BatchNormalizati (None, 2, 2, 2048) 8192 res5a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn5a_branch1 (BatchNormalizatio (None, 2, 2, 2048) 8192 res5a_branch1[0][0] \n__________________________________________________________________________________________________\nadd_141 (Add) (None, 2, 2, 2048) 0 bn5a_branch2c[0][0] \n bn5a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_432 (Activation) (None, 2, 2, 2048) 0 add_141[0][0] \n__________________________________________________________________________________________________\nres5b_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_432[0][0] \n__________________________________________________________________________________________________\nbn5b_branch2a (BatchNormalizati (None, 2, 2, 512) 2048 res5b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_433 (Activation) (None, 2, 2, 512) 0 bn5b_branch2a[0][0] \n__________________________________________________________________________________________________\nres5b_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_433[0][0] \n__________________________________________________________________________________________________\nbn5b_branch2b (BatchNormalizati (None, 2, 2, 512) 2048 res5b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_434 (Activation) (None, 2, 2, 512) 0 bn5b_branch2b[0][0] \n__________________________________________________________________________________________________\nres5b_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_434[0][0] \n__________________________________________________________________________________________________\nbn5b_branch2c (BatchNormalizati (None, 2, 2, 2048) 8192 res5b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_142 (Add) (None, 2, 2, 2048) 0 bn5b_branch2c[0][0] \n activation_432[0][0] \n__________________________________________________________________________________________________\nactivation_435 (Activation) (None, 2, 2, 2048) 0 add_142[0][0] \n__________________________________________________________________________________________________\nres5c_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_435[0][0] \n__________________________________________________________________________________________________\nbn5c_branch2a (BatchNormalizati (None, 2, 2, 512) 2048 res5c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_436 (Activation) (None, 2, 2, 512) 0 bn5c_branch2a[0][0] \n__________________________________________________________________________________________________\nres5c_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_436[0][0] \n__________________________________________________________________________________________________\nbn5c_branch2b (BatchNormalizati (None, 2, 2, 512) 2048 res5c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_437 (Activation) (None, 2, 2, 512) 0 bn5c_branch2b[0][0] 
\n__________________________________________________________________________________________________\nres5c_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_437[0][0] \n__________________________________________________________________________________________________\nbn5c_branch2c (BatchNormalizati (None, 2, 2, 2048) 8192 res5c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_143 (Add) (None, 2, 2, 2048) 0 bn5c_branch2c[0][0] \n activation_435[0][0] \n__________________________________________________________________________________________________\nactivation_438 (Activation) (None, 2, 2, 2048) 0 add_143[0][0] \n__________________________________________________________________________________________________\nglobal_average_pooling2d_8 (Glo (None, 2048) 0 activation_438[0][0] \n__________________________________________________________________________________________________\nfc-1 (Dense) (None, 512) 1049088 global_average_pooling2d_8[0][0] \n__________________________________________________________________________________________________\ndropout_17 (Dropout) (None, 512) 0 fc-1[0][0] \n__________________________________________________________________________________________________\nfc-2 (Dense) (None, 128) 65664 dropout_17[0][0] \n__________________________________________________________________________________________________\ndropout_18 (Dropout) (None, 128) 0 fc-2[0][0] \n__________________________________________________________________________________________________\noutput_layer (Dense) (None, 7) 903 dropout_18[0][0] \n==================================================================================================\nTotal params: 24,703,367\nTrainable params: 24,650,247\nNon-trainable params: 53,120\n__________________________________________________________________________________________________\n" ], [ "for layer in custom_resnet_model2.layers[:-5]:\n\tlayer.trainable = False", "_____no_output_____" ], [ "custom_resnet_model2.layers[-1].trainable\n\ncustom_resnet_model2.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])", "_____no_output_____" ], [ "t=time.time()\nhist = custom_resnet_model2.fit(X_train, y_train, batch_size=40, epochs=15, verbose=1, validation_data=(X_test, y_test))", "Train on 1853 samples, validate on 464 samples\nEpoch 1/15\n1853/1853 [==============================] - 50s 27ms/step - loss: 1.1671 - acc: 0.7437 - val_loss: 0.6049 - val_acc: 0.8039\nEpoch 2/15\n1853/1853 [==============================] - 15s 8ms/step - loss: 0.7090 - acc: 0.8009 - val_loss: 0.5801 - val_acc: 0.8448\nEpoch 3/15\n1853/1853 [==============================] - 15s 8ms/step - loss: 0.5853 - acc: 0.8251 - val_loss: 0.6037 - val_acc: 0.8578\nEpoch 4/15\n1853/1853 [==============================] - 15s 8ms/step - loss: 0.5261 - acc: 0.8408 - val_loss: 0.6003 - val_acc: 0.8685\nEpoch 5/15\n1853/1853 [==============================] - 15s 8ms/step - loss: 0.4593 - acc: 0.8602 - val_loss: 0.5992 - val_acc: 0.8642\nEpoch 6/15\n1853/1853 [==============================] - 15s 8ms/step - loss: 0.4286 - acc: 0.8645 - val_loss: 0.5813 - val_acc: 0.8642\nEpoch 7/15\n1853/1853 [==============================] - 14s 8ms/step - loss: 0.3653 - acc: 0.8770 - val_loss: 0.6013 - val_acc: 0.8707\nEpoch 8/15\n1853/1853 [==============================] - 15s 8ms/step - loss: 0.2979 - acc: 0.9056 - val_loss: 0.6362 - val_acc: 0.8685\nEpoch 9/15\n1853/1853 [==============================] - 
15s 8ms/step - loss: 0.2867 - acc: 0.9077 - val_loss: 0.6519 - val_acc: 0.8772\nEpoch 10/15\n1853/1853 [==============================] - 15s 8ms/step - loss: 0.2464 - acc: 0.9180 - val_loss: 0.7240 - val_acc: 0.8836\nEpoch 11/15\n1853/1853 [==============================] - 16s 9ms/step - loss: 0.2370 - acc: 0.9255 - val_loss: 0.7001 - val_acc: 0.8793\nEpoch 12/15\n1853/1853 [==============================] - 16s 9ms/step - loss: 0.2177 - acc: 0.9228 - val_loss: 0.9145 - val_acc: 0.8599\nEpoch 13/15\n1853/1853 [==============================] - 16s 8ms/step - loss: 0.1805 - acc: 0.9379 - val_loss: 0.8059 - val_acc: 0.8793\nEpoch 14/15\n1853/1853 [==============================] - 16s 8ms/step - loss: 0.1996 - acc: 0.9401 - val_loss: 0.6954 - val_acc: 0.8621\nEpoch 15/15\n1853/1853 [==============================] - 19s 10ms/step - loss: 0.1568 - acc: 0.9455 - val_loss: 0.9091 - val_acc: 0.8642\n" ], [ "print('Training time: %s' % (t - time.time()))\n(loss, accuracy) = custom_resnet_model2.evaluate(X_test, y_test, batch_size=32, verbose=1)\n\nprint(\"[INFO] loss={:.4f}, accuracy: {:.4f}%\".format(loss,accuracy * 100))", "Training time: -307.366849899292\n464/464 [==============================] - 3s 7ms/step\n[INFO] loss=0.9091, accuracy: 86.4224%\n" ], [ "#accuracy\nplt.plot(hist.history['acc'])\nplt.plot(hist.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n#loss\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "_____no_output_____" ] ], [ [ "# Keras ResNet50 \n### Input(shape=(64, 64, 3)),include_top=False,weights='imagenet'\n### batch_size=38, epochs=25 Remove fully-connected 2nd last dense layer\n#### loss='categorical_crossentropy',optimizer='adam'\n#### Don't want to train all model \n##### Last 3 layer retrain ", "_____no_output_____" ] ], [ [ "image_input = Input(shape=(64, 64, 3))\nmodel = keras.applications.resnet50.ResNet50(input_tensor=image_input, include_top=False,weights='imagenet')", "C:\\Users\\Sh-Ma\\Anaconda3\\lib\\site-packages\\keras_applications\\resnet50.py:265: UserWarning: The output shape of `ResNet50(include_top=False)` has been changed since Keras 2.2.0.\n warnings.warn('The output shape of `ResNet50(include_top=False)` '\n" ], [ "last_layer = model.output\n", "_____no_output_____" ], [ "# add a global spatial average pooling layer\nx = GlobalAveragePooling2D()(last_layer)\n# add fully-connected & dropout layers\n#x = Dense(512, activation='relu',name='fc-1')(x)\nx = Dropout(0.6)(x)\nx = Dense(128, activation='relu',name='fc-2')(x)\nx = Dropout(0.4)(x)\n# a softmax layer for 7 classes\nout = Dense(7, activation='softmax',name='output_layer')(x)", "_____no_output_____" ], [ "custom_resnet_model2 = Model(inputs=model.input, outputs=out)\ncustom_resnet_model2.summary()", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_6 (InputLayer) (None, 64, 64, 3) 0 \n__________________________________________________________________________________________________\nconv1_pad (ZeroPadding2D) (None, 70, 70, 3) 0 input_6[0][0] 
\n__________________________________________________________________________________________________\nconv1 (Conv2D) (None, 32, 32, 64) 9472 conv1_pad[0][0] \n__________________________________________________________________________________________________\nbn_conv1 (BatchNormalization) (None, 32, 32, 64) 256 conv1[0][0] \n__________________________________________________________________________________________________\nactivation_246 (Activation) (None, 32, 32, 64) 0 bn_conv1[0][0] \n__________________________________________________________________________________________________\npool1_pad (ZeroPadding2D) (None, 34, 34, 64) 0 activation_246[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_6 (MaxPooling2D) (None, 16, 16, 64) 0 pool1_pad[0][0] \n__________________________________________________________________________________________________\nres2a_branch2a (Conv2D) (None, 16, 16, 64) 4160 max_pooling2d_6[0][0] \n__________________________________________________________________________________________________\nbn2a_branch2a (BatchNormalizati (None, 16, 16, 64) 256 res2a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_247 (Activation) (None, 16, 16, 64) 0 bn2a_branch2a[0][0] \n__________________________________________________________________________________________________\nres2a_branch2b (Conv2D) (None, 16, 16, 64) 36928 activation_247[0][0] \n__________________________________________________________________________________________________\nbn2a_branch2b (BatchNormalizati (None, 16, 16, 64) 256 res2a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_248 (Activation) (None, 16, 16, 64) 0 bn2a_branch2b[0][0] \n__________________________________________________________________________________________________\nres2a_branch2c (Conv2D) (None, 16, 16, 256) 16640 activation_248[0][0] \n__________________________________________________________________________________________________\nres2a_branch1 (Conv2D) (None, 16, 16, 256) 16640 max_pooling2d_6[0][0] \n__________________________________________________________________________________________________\nbn2a_branch2c (BatchNormalizati (None, 16, 16, 256) 1024 res2a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn2a_branch1 (BatchNormalizatio (None, 16, 16, 256) 1024 res2a_branch1[0][0] \n__________________________________________________________________________________________________\nadd_81 (Add) (None, 16, 16, 256) 0 bn2a_branch2c[0][0] \n bn2a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_249 (Activation) (None, 16, 16, 256) 0 add_81[0][0] \n__________________________________________________________________________________________________\nres2b_branch2a (Conv2D) (None, 16, 16, 64) 16448 activation_249[0][0] \n__________________________________________________________________________________________________\nbn2b_branch2a (BatchNormalizati (None, 16, 16, 64) 256 res2b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_250 (Activation) (None, 16, 16, 64) 0 bn2b_branch2a[0][0] 
\n__________________________________________________________________________________________________\nres2b_branch2b (Conv2D) (None, 16, 16, 64) 36928 activation_250[0][0] \n__________________________________________________________________________________________________\nbn2b_branch2b (BatchNormalizati (None, 16, 16, 64) 256 res2b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_251 (Activation) (None, 16, 16, 64) 0 bn2b_branch2b[0][0] \n__________________________________________________________________________________________________\nres2b_branch2c (Conv2D) (None, 16, 16, 256) 16640 activation_251[0][0] \n__________________________________________________________________________________________________\nbn2b_branch2c (BatchNormalizati (None, 16, 16, 256) 1024 res2b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_82 (Add) (None, 16, 16, 256) 0 bn2b_branch2c[0][0] \n activation_249[0][0] \n__________________________________________________________________________________________________\nactivation_252 (Activation) (None, 16, 16, 256) 0 add_82[0][0] \n__________________________________________________________________________________________________\nres2c_branch2a (Conv2D) (None, 16, 16, 64) 16448 activation_252[0][0] \n__________________________________________________________________________________________________\nbn2c_branch2a (BatchNormalizati (None, 16, 16, 64) 256 res2c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_253 (Activation) (None, 16, 16, 64) 0 bn2c_branch2a[0][0] \n__________________________________________________________________________________________________\nres2c_branch2b (Conv2D) (None, 16, 16, 64) 36928 activation_253[0][0] \n__________________________________________________________________________________________________\nbn2c_branch2b (BatchNormalizati (None, 16, 16, 64) 256 res2c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_254 (Activation) (None, 16, 16, 64) 0 bn2c_branch2b[0][0] \n__________________________________________________________________________________________________\nres2c_branch2c (Conv2D) (None, 16, 16, 256) 16640 activation_254[0][0] \n__________________________________________________________________________________________________\nbn2c_branch2c (BatchNormalizati (None, 16, 16, 256) 1024 res2c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_83 (Add) (None, 16, 16, 256) 0 bn2c_branch2c[0][0] \n activation_252[0][0] \n__________________________________________________________________________________________________\nactivation_255 (Activation) (None, 16, 16, 256) 0 add_83[0][0] \n__________________________________________________________________________________________________\nres3a_branch2a (Conv2D) (None, 8, 8, 128) 32896 activation_255[0][0] \n__________________________________________________________________________________________________\nbn3a_branch2a (BatchNormalizati (None, 8, 8, 128) 512 res3a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_256 (Activation) (None, 8, 8, 128) 0 bn3a_branch2a[0][0] 
\n__________________________________________________________________________________________________\nres3a_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_256[0][0] \n__________________________________________________________________________________________________\nbn3a_branch2b (BatchNormalizati (None, 8, 8, 128) 512 res3a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_257 (Activation) (None, 8, 8, 128) 0 bn3a_branch2b[0][0] \n__________________________________________________________________________________________________\nres3a_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_257[0][0] \n__________________________________________________________________________________________________\nres3a_branch1 (Conv2D) (None, 8, 8, 512) 131584 activation_255[0][0] \n__________________________________________________________________________________________________\nbn3a_branch2c (BatchNormalizati (None, 8, 8, 512) 2048 res3a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn3a_branch1 (BatchNormalizatio (None, 8, 8, 512) 2048 res3a_branch1[0][0] \n__________________________________________________________________________________________________\nadd_84 (Add) (None, 8, 8, 512) 0 bn3a_branch2c[0][0] \n bn3a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_258 (Activation) (None, 8, 8, 512) 0 add_84[0][0] \n__________________________________________________________________________________________________\nres3b_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_258[0][0] \n__________________________________________________________________________________________________\nbn3b_branch2a (BatchNormalizati (None, 8, 8, 128) 512 res3b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_259 (Activation) (None, 8, 8, 128) 0 bn3b_branch2a[0][0] \n__________________________________________________________________________________________________\nres3b_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_259[0][0] \n__________________________________________________________________________________________________\nbn3b_branch2b (BatchNormalizati (None, 8, 8, 128) 512 res3b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_260 (Activation) (None, 8, 8, 128) 0 bn3b_branch2b[0][0] \n__________________________________________________________________________________________________\nres3b_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_260[0][0] \n__________________________________________________________________________________________________\nbn3b_branch2c (BatchNormalizati (None, 8, 8, 512) 2048 res3b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_85 (Add) (None, 8, 8, 512) 0 bn3b_branch2c[0][0] \n activation_258[0][0] \n__________________________________________________________________________________________________\nactivation_261 (Activation) (None, 8, 8, 512) 0 add_85[0][0] \n__________________________________________________________________________________________________\nres3c_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_261[0][0] 
\n__________________________________________________________________________________________________\nbn3c_branch2a (BatchNormalizati (None, 8, 8, 128) 512 res3c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_262 (Activation) (None, 8, 8, 128) 0 bn3c_branch2a[0][0] \n__________________________________________________________________________________________________\nres3c_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_262[0][0] \n__________________________________________________________________________________________________\nbn3c_branch2b (BatchNormalizati (None, 8, 8, 128) 512 res3c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_263 (Activation) (None, 8, 8, 128) 0 bn3c_branch2b[0][0] \n__________________________________________________________________________________________________\nres3c_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_263[0][0] \n__________________________________________________________________________________________________\nbn3c_branch2c (BatchNormalizati (None, 8, 8, 512) 2048 res3c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_86 (Add) (None, 8, 8, 512) 0 bn3c_branch2c[0][0] \n activation_261[0][0] \n__________________________________________________________________________________________________\nactivation_264 (Activation) (None, 8, 8, 512) 0 add_86[0][0] \n__________________________________________________________________________________________________\nres3d_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_264[0][0] \n__________________________________________________________________________________________________\nbn3d_branch2a (BatchNormalizati (None, 8, 8, 128) 512 res3d_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_265 (Activation) (None, 8, 8, 128) 0 bn3d_branch2a[0][0] \n__________________________________________________________________________________________________\nres3d_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_265[0][0] \n__________________________________________________________________________________________________\nbn3d_branch2b (BatchNormalizati (None, 8, 8, 128) 512 res3d_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_266 (Activation) (None, 8, 8, 128) 0 bn3d_branch2b[0][0] \n__________________________________________________________________________________________________\nres3d_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_266[0][0] \n__________________________________________________________________________________________________\nbn3d_branch2c (BatchNormalizati (None, 8, 8, 512) 2048 res3d_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_87 (Add) (None, 8, 8, 512) 0 bn3d_branch2c[0][0] \n activation_264[0][0] \n__________________________________________________________________________________________________\nactivation_267 (Activation) (None, 8, 8, 512) 0 add_87[0][0] \n__________________________________________________________________________________________________\nres4a_branch2a (Conv2D) (None, 4, 4, 256) 131328 activation_267[0][0] 
\n__________________________________________________________________________________________________\nbn4a_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_268 (Activation) (None, 4, 4, 256) 0 bn4a_branch2a[0][0] \n__________________________________________________________________________________________________\nres4a_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_268[0][0] \n__________________________________________________________________________________________________\nbn4a_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_269 (Activation) (None, 4, 4, 256) 0 bn4a_branch2b[0][0] \n__________________________________________________________________________________________________\nres4a_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_269[0][0] \n__________________________________________________________________________________________________\nres4a_branch1 (Conv2D) (None, 4, 4, 1024) 525312 activation_267[0][0] \n__________________________________________________________________________________________________\nbn4a_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn4a_branch1 (BatchNormalizatio (None, 4, 4, 1024) 4096 res4a_branch1[0][0] \n__________________________________________________________________________________________________\nadd_88 (Add) (None, 4, 4, 1024) 0 bn4a_branch2c[0][0] \n bn4a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_270 (Activation) (None, 4, 4, 1024) 0 add_88[0][0] \n__________________________________________________________________________________________________\nres4b_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_270[0][0] \n__________________________________________________________________________________________________\nbn4b_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_271 (Activation) (None, 4, 4, 256) 0 bn4b_branch2a[0][0] \n__________________________________________________________________________________________________\nres4b_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_271[0][0] \n__________________________________________________________________________________________________\nbn4b_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_272 (Activation) (None, 4, 4, 256) 0 bn4b_branch2b[0][0] \n__________________________________________________________________________________________________\nres4b_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_272[0][0] \n__________________________________________________________________________________________________\nbn4b_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_89 (Add) (None, 4, 4, 1024) 0 bn4b_branch2c[0][0] \n activation_270[0][0] 
\n__________________________________________________________________________________________________\nactivation_273 (Activation) (None, 4, 4, 1024) 0 add_89[0][0] \n__________________________________________________________________________________________________\nres4c_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_273[0][0] \n__________________________________________________________________________________________________\nbn4c_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_274 (Activation) (None, 4, 4, 256) 0 bn4c_branch2a[0][0] \n__________________________________________________________________________________________________\nres4c_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_274[0][0] \n__________________________________________________________________________________________________\nbn4c_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_275 (Activation) (None, 4, 4, 256) 0 bn4c_branch2b[0][0] \n__________________________________________________________________________________________________\nres4c_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_275[0][0] \n__________________________________________________________________________________________________\nbn4c_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_90 (Add) (None, 4, 4, 1024) 0 bn4c_branch2c[0][0] \n activation_273[0][0] \n__________________________________________________________________________________________________\nactivation_276 (Activation) (None, 4, 4, 1024) 0 add_90[0][0] \n__________________________________________________________________________________________________\nres4d_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_276[0][0] \n__________________________________________________________________________________________________\nbn4d_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4d_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_277 (Activation) (None, 4, 4, 256) 0 bn4d_branch2a[0][0] \n__________________________________________________________________________________________________\nres4d_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_277[0][0] \n__________________________________________________________________________________________________\nbn4d_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4d_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_278 (Activation) (None, 4, 4, 256) 0 bn4d_branch2b[0][0] \n__________________________________________________________________________________________________\nres4d_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_278[0][0] \n__________________________________________________________________________________________________\nbn4d_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4d_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_91 (Add) (None, 4, 4, 1024) 0 bn4d_branch2c[0][0] \n activation_276[0][0] 
\n__________________________________________________________________________________________________\nactivation_279 (Activation) (None, 4, 4, 1024) 0 add_91[0][0] \n__________________________________________________________________________________________________\nres4e_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_279[0][0] \n__________________________________________________________________________________________________\nbn4e_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4e_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_280 (Activation) (None, 4, 4, 256) 0 bn4e_branch2a[0][0] \n__________________________________________________________________________________________________\nres4e_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_280[0][0] \n__________________________________________________________________________________________________\nbn4e_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4e_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_281 (Activation) (None, 4, 4, 256) 0 bn4e_branch2b[0][0] \n__________________________________________________________________________________________________\nres4e_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_281[0][0] \n__________________________________________________________________________________________________\nbn4e_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4e_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_92 (Add) (None, 4, 4, 1024) 0 bn4e_branch2c[0][0] \n activation_279[0][0] \n__________________________________________________________________________________________________\nactivation_282 (Activation) (None, 4, 4, 1024) 0 add_92[0][0] \n__________________________________________________________________________________________________\nres4f_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_282[0][0] \n__________________________________________________________________________________________________\nbn4f_branch2a (BatchNormalizati (None, 4, 4, 256) 1024 res4f_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_283 (Activation) (None, 4, 4, 256) 0 bn4f_branch2a[0][0] \n__________________________________________________________________________________________________\nres4f_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_283[0][0] \n__________________________________________________________________________________________________\nbn4f_branch2b (BatchNormalizati (None, 4, 4, 256) 1024 res4f_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_284 (Activation) (None, 4, 4, 256) 0 bn4f_branch2b[0][0] \n__________________________________________________________________________________________________\nres4f_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_284[0][0] \n__________________________________________________________________________________________________\nbn4f_branch2c (BatchNormalizati (None, 4, 4, 1024) 4096 res4f_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_93 (Add) (None, 4, 4, 1024) 0 bn4f_branch2c[0][0] \n activation_282[0][0] 
\n__________________________________________________________________________________________________\nactivation_285 (Activation) (None, 4, 4, 1024) 0 add_93[0][0] \n__________________________________________________________________________________________________\nres5a_branch2a (Conv2D) (None, 2, 2, 512) 524800 activation_285[0][0] \n__________________________________________________________________________________________________\nbn5a_branch2a (BatchNormalizati (None, 2, 2, 512) 2048 res5a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_286 (Activation) (None, 2, 2, 512) 0 bn5a_branch2a[0][0] \n__________________________________________________________________________________________________\nres5a_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_286[0][0] \n__________________________________________________________________________________________________\nbn5a_branch2b (BatchNormalizati (None, 2, 2, 512) 2048 res5a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_287 (Activation) (None, 2, 2, 512) 0 bn5a_branch2b[0][0] \n__________________________________________________________________________________________________\nres5a_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_287[0][0] \n__________________________________________________________________________________________________\nres5a_branch1 (Conv2D) (None, 2, 2, 2048) 2099200 activation_285[0][0] \n__________________________________________________________________________________________________\nbn5a_branch2c (BatchNormalizati (None, 2, 2, 2048) 8192 res5a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn5a_branch1 (BatchNormalizatio (None, 2, 2, 2048) 8192 res5a_branch1[0][0] \n__________________________________________________________________________________________________\nadd_94 (Add) (None, 2, 2, 2048) 0 bn5a_branch2c[0][0] \n bn5a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_288 (Activation) (None, 2, 2, 2048) 0 add_94[0][0] \n__________________________________________________________________________________________________\nres5b_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_288[0][0] \n__________________________________________________________________________________________________\nbn5b_branch2a (BatchNormalizati (None, 2, 2, 512) 2048 res5b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_289 (Activation) (None, 2, 2, 512) 0 bn5b_branch2a[0][0] \n__________________________________________________________________________________________________\nres5b_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_289[0][0] \n__________________________________________________________________________________________________\nbn5b_branch2b (BatchNormalizati (None, 2, 2, 512) 2048 res5b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_290 (Activation) (None, 2, 2, 512) 0 bn5b_branch2b[0][0] \n__________________________________________________________________________________________________\nres5b_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_290[0][0] 
\n__________________________________________________________________________________________________\nbn5b_branch2c (BatchNormalizati (None, 2, 2, 2048) 8192 res5b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_95 (Add) (None, 2, 2, 2048) 0 bn5b_branch2c[0][0] \n activation_288[0][0] \n__________________________________________________________________________________________________\nactivation_291 (Activation) (None, 2, 2, 2048) 0 add_95[0][0] \n__________________________________________________________________________________________________\nres5c_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_291[0][0] \n__________________________________________________________________________________________________\nbn5c_branch2a (BatchNormalizati (None, 2, 2, 512) 2048 res5c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_292 (Activation) (None, 2, 2, 512) 0 bn5c_branch2a[0][0] \n__________________________________________________________________________________________________\nres5c_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_292[0][0] \n__________________________________________________________________________________________________\nbn5c_branch2b (BatchNormalizati (None, 2, 2, 512) 2048 res5c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_293 (Activation) (None, 2, 2, 512) 0 bn5c_branch2b[0][0] \n__________________________________________________________________________________________________\nres5c_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_293[0][0] \n__________________________________________________________________________________________________\nbn5c_branch2c (BatchNormalizati (None, 2, 2, 2048) 8192 res5c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_96 (Add) (None, 2, 2, 2048) 0 bn5c_branch2c[0][0] \n activation_291[0][0] \n__________________________________________________________________________________________________\nactivation_294 (Activation) (None, 2, 2, 2048) 0 add_96[0][0] \n__________________________________________________________________________________________________\nglobal_average_pooling2d_7 (Glo (None, 2048) 0 activation_294[0][0] \n__________________________________________________________________________________________________\ndropout_13 (Dropout) (None, 2048) 0 global_average_pooling2d_7[0][0] \n__________________________________________________________________________________________________\nfc-2 (Dense) (None, 128) 262272 dropout_13[0][0] \n__________________________________________________________________________________________________\ndropout_14 (Dropout) (None, 128) 0 fc-2[0][0] \n__________________________________________________________________________________________________\noutput_layer (Dense) (None, 7) 903 dropout_14[0][0] \n==================================================================================================\nTotal params: 23,850,887\nTrainable params: 23,797,767\nNon-trainable params: 53,120\n__________________________________________________________________________________________________\n" ], [ "for layer in custom_resnet_model2.layers[:-3]:\n\tlayer.trainable = False", "_____no_output_____" ], [ 
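"# Editor's sketch (hedged, not part of the original run): sanity-check that the freezing loop above\n# left only the classification head trainable. The layer names printed here ('fc-2', 'output_layer')\n# come from the cells above; nothing else is assumed.\nfor layer in custom_resnet_model2.layers[-6:]:\n    print(layer.name, layer.trainable)\n# Note: Keras only picks up changes to layer.trainable when the model is compiled, so the compile()\n# call in the next cell has to run after this freezing step.", "_____no_output_____" ], [ 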
"custom_resnet_model2.layers[-1].trainable\ncustom_resnet_model2.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])", "_____no_output_____" ], [ "t=time.time()\nhistory = custom_resnet_model2.fit(X_train, y_train, batch_size=38, epochs=25, verbose=1, validation_data=(X_test, y_test))", "Train on 1853 samples, validate on 464 samples\nEpoch 1/25\n1853/1853 [==============================] - 70s 38ms/step - loss: 1.5459 - acc: 0.7232 - val_loss: 0.6151 - val_acc: 0.8147\nEpoch 2/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.7419 - acc: 0.8095 - val_loss: 0.5617 - val_acc: 0.8384\nEpoch 3/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.5861 - acc: 0.8381 - val_loss: 0.5202 - val_acc: 0.8534\nEpoch 4/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.5567 - acc: 0.8332 - val_loss: 0.5139 - val_acc: 0.8491\nEpoch 5/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.4887 - acc: 0.8494 - val_loss: 0.5315 - val_acc: 0.8578\nEpoch 6/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.4568 - acc: 0.8640 - val_loss: 0.5953 - val_acc: 0.8362\nEpoch 7/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.4146 - acc: 0.8699 - val_loss: 0.6077 - val_acc: 0.8621\nEpoch 8/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.3758 - acc: 0.8780 - val_loss: 0.5719 - val_acc: 0.8642\nEpoch 9/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.3610 - acc: 0.8942 - val_loss: 0.5730 - val_acc: 0.8750\nEpoch 10/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.3624 - acc: 0.8851 - val_loss: 0.5515 - val_acc: 0.8728\nEpoch 11/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.3322 - acc: 0.8921 - val_loss: 0.5812 - val_acc: 0.8685\nEpoch 12/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.3341 - acc: 0.8926 - val_loss: 0.5758 - val_acc: 0.8685\nEpoch 13/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.2941 - acc: 0.9012 - val_loss: 0.5959 - val_acc: 0.8685\nEpoch 14/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.3266 - acc: 0.9072 - val_loss: 0.6331 - val_acc: 0.8685\nEpoch 15/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.2740 - acc: 0.9066 - val_loss: 0.5673 - val_acc: 0.8642\nEpoch 16/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.2506 - acc: 0.9120 - val_loss: 0.5560 - val_acc: 0.8772\nEpoch 17/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.2536 - acc: 0.9239 - val_loss: 0.5914 - val_acc: 0.8750\nEpoch 18/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.2731 - acc: 0.9120 - val_loss: 0.5814 - val_acc: 0.8599\nEpoch 19/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.2545 - acc: 0.9066 - val_loss: 0.5729 - val_acc: 0.8728\nEpoch 20/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.2418 - acc: 0.9212 - val_loss: 0.6652 - val_acc: 0.8599\nEpoch 21/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.2505 - acc: 0.9217 - val_loss: 0.6245 - val_acc: 0.8728\nEpoch 22/25\n1853/1853 [==============================] - 49s 27ms/step - loss: 0.2289 - acc: 0.9293 - val_loss: 0.6104 - val_acc: 0.8685\nEpoch 23/25\n1853/1853 [==============================] - 48s 26ms/step - loss: 0.2249 - acc: 0.9212 - val_loss: 0.5854 - val_acc: 
0.8728\nEpoch 24/25\n1853/1853 [==============================] - 49s 26ms/step - loss: 0.2326 - acc: 0.9234 - val_loss: 0.6275 - val_acc: 0.8728\nEpoch 25/25\n1853/1853 [==============================] - 49s 27ms/step - loss: 0.2218 - acc: 0.9234 - val_loss: 0.6937 - val_acc: 0.8664\n" ], [ "print('Training time: %s' % (time.time() - t))\n(loss, accuracy) = custom_resnet_model2.evaluate(X_test, y_test, batch_size=32, verbose=1)\nprint(\"[INFO] loss={:.4f}, accuracy: {:.4f}%\".format(loss,accuracy * 100))", "Training time: 1231.0342094898224\n464/464 [==============================] - 20s 43ms/step\n[INFO] loss=0.6937, accuracy: 86.6379%\n" ], [ "#accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n#loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "_____no_output_____" ] ], [ [ "# GoogleNet InceptionV3\n### Input(shape=(150, 150, 3)), include_top=False, weights='imagenet'\n### batch_size=30, epochs=20; the 2nd-to-last fully-connected dense layer is removed\n#### loss='categorical_crossentropy', optimizer='adam'\n#### The whole network is not retrained\n##### Only the last 3 layers are retrained", "_____no_output_____" ] ], [ [ "import numpy as np\nimport os\nimport time\n#from resnet50 import ResNet50\nfrom keras.preprocessing import image\nfrom keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten\n\n#from imagenet_utils import preprocess_input\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras.utils import np_utils\nfrom sklearn.utils import shuffle\nfrom sklearn.cross_validation import train_test_split", "_____no_output_____" ], [ "data = []\nlabels = []\nim_width = 150\nim_height = 150\nnum_classes = 7", "_____no_output_____" ], [ "for i in range(7):\n    path=os.getcwd()+\"/Project2DataClean/\"+str(i+1)+'/'\n    print(path)\n    progress = 0\n    image_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f))]\n    for file_name in image_files:\n        image_file = str(path + file_name)\n        img = cv2.imread(image_file)\n        new_img = cv2.resize(img,(im_width,im_height))\n        data.append(new_img)\n        progress = progress+1\n#         print(int(path[-2]))\n        labels.append(int(path[-2])-1)\n        if progress%100==0:\n            print('Progress '+str(progress)+' Image done of Disease type:' + path[-2])", "F:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/1/\nProgress 100 Image done of Disease type:1\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/2/\nProgress 100 Image done of Disease type:2\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/3/\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/4/\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/5/\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/6/\nProgress 100 Image done of Disease type:6\nProgress 200 Image done of Disease type:6\nProgress 300 Image done of Disease type:6\nProgress 400 Image done of Disease type:6\nProgress 500 Image done of Disease type:6\nProgress 600 Image done of Disease type:6\nProgress 700 Image done of Disease type:6\nProgress 800 Image done of Disease type:6\nProgress 900 Image done of Disease 
type:6\nProgress 1000 Image done of Disease type:6\nProgress 1100 Image done of Disease type:6\nProgress 1200 Image done of Disease type:6\nProgress 1300 Image done of Disease type:6\nProgress 1400 Image done of Disease type:6\nProgress 1500 Image done of Disease type:6\nProgress 1600 Image done of Disease type:6\nProgress 1700 Image done of Disease type:6\nProgress 1800 Image done of Disease type:6\nF:\\University\\Semester1\\Deep Learning\\Project\\Project 2\\Code/Project2DataClean/7/\n" ], [ "data = np.array(data)\nlabels = np.array(labels)\nprint(len(data))\nprint(len(labels))\nprint(data.shape)\nprint(labels.shape)", "2317\n2317\n(2317, 150, 150, 3)\n(2317,)\n" ], [ "data = data.astype('uint8')\nlabels = keras.utils.to_categorical(labels, 7)\nprint(labels.shape)", "(2317, 7)\n" ], [ "x,y = shuffle(data,labels, random_state=2)\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)", "_____no_output_____" ], [ "image_input = Input(shape=(150, 150, 3))\nmodel = keras.applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_shape=(150, 150, 3))", "_____no_output_____" ], [ "last_layer = model.output", "_____no_output_____" ], [ "# add a global spatial average pooling layer\nx = GlobalAveragePooling2D()(last_layer)\n# add fully-connected & dropout layers\n# x = Dense(128, activation='relu',name='fc-1')(x)\nx = Dropout(0.1)(x)\nx = Dense(128, activation='relu',name='fc-2')(x)\nx = Dropout(0.9)(x)\n# a softmax layer for 7 classes\nout = Dense(7, activation='softmax',name='output_layer')(x)", "_____no_output_____" ], [ "custom_resnet_model2 = Model(inputs=model.input, outputs=out)\ncustom_resnet_model2.summary()", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_11 (InputLayer) (None, 150, 150, 3) 0 \n__________________________________________________________________________________________________\nconv2d_392 (Conv2D) (None, 74, 74, 32) 864 input_11[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_377 (BatchN (None, 74, 74, 32) 96 conv2d_392[0][0] \n__________________________________________________________________________________________________\nactivation_426 (Activation) (None, 74, 74, 32) 0 batch_normalization_377[0][0] \n__________________________________________________________________________________________________\nconv2d_393 (Conv2D) (None, 72, 72, 32) 9216 activation_426[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_378 (BatchN (None, 72, 72, 32) 96 conv2d_393[0][0] \n__________________________________________________________________________________________________\nactivation_427 (Activation) (None, 72, 72, 32) 0 batch_normalization_378[0][0] \n__________________________________________________________________________________________________\nconv2d_394 (Conv2D) (None, 72, 72, 64) 18432 activation_427[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_379 (BatchN (None, 72, 72, 64) 192 conv2d_394[0][0] \n__________________________________________________________________________________________________\nactivation_428 (Activation) (None, 72, 72, 64) 0 batch_normalization_379[0][0] 
\n__________________________________________________________________________________________________\nmax_pooling2d_24 (MaxPooling2D) (None, 35, 35, 64) 0 activation_428[0][0] \n__________________________________________________________________________________________________\nconv2d_395 (Conv2D) (None, 35, 35, 80) 5120 max_pooling2d_24[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_380 (BatchN (None, 35, 35, 80) 240 conv2d_395[0][0] \n__________________________________________________________________________________________________\nactivation_429 (Activation) (None, 35, 35, 80) 0 batch_normalization_380[0][0] \n__________________________________________________________________________________________________\nconv2d_396 (Conv2D) (None, 33, 33, 192) 138240 activation_429[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_381 (BatchN (None, 33, 33, 192) 576 conv2d_396[0][0] \n__________________________________________________________________________________________________\nactivation_430 (Activation) (None, 33, 33, 192) 0 batch_normalization_381[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_25 (MaxPooling2D) (None, 16, 16, 192) 0 activation_430[0][0] \n__________________________________________________________________________________________________\nconv2d_400 (Conv2D) (None, 16, 16, 64) 12288 max_pooling2d_25[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_385 (BatchN (None, 16, 16, 64) 192 conv2d_400[0][0] \n__________________________________________________________________________________________________\nactivation_434 (Activation) (None, 16, 16, 64) 0 batch_normalization_385[0][0] \n__________________________________________________________________________________________________\nconv2d_398 (Conv2D) (None, 16, 16, 48) 9216 max_pooling2d_25[0][0] \n__________________________________________________________________________________________________\nconv2d_401 (Conv2D) (None, 16, 16, 96) 55296 activation_434[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_383 (BatchN (None, 16, 16, 48) 144 conv2d_398[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_386 (BatchN (None, 16, 16, 96) 288 conv2d_401[0][0] \n__________________________________________________________________________________________________\nactivation_432 (Activation) (None, 16, 16, 48) 0 batch_normalization_383[0][0] \n__________________________________________________________________________________________________\nactivation_435 (Activation) (None, 16, 16, 96) 0 batch_normalization_386[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_37 (AveragePo (None, 16, 16, 192) 0 max_pooling2d_25[0][0] \n__________________________________________________________________________________________________\nconv2d_397 (Conv2D) (None, 16, 16, 64) 12288 max_pooling2d_25[0][0] \n__________________________________________________________________________________________________\nconv2d_399 (Conv2D) (None, 16, 16, 64) 76800 activation_432[0][0] 
\n__________________________________________________________________________________________________\nconv2d_402 (Conv2D) (None, 16, 16, 96) 82944 activation_435[0][0] \n__________________________________________________________________________________________________\nconv2d_403 (Conv2D) (None, 16, 16, 32) 6144 average_pooling2d_37[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_382 (BatchN (None, 16, 16, 64) 192 conv2d_397[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_384 (BatchN (None, 16, 16, 64) 192 conv2d_399[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_387 (BatchN (None, 16, 16, 96) 288 conv2d_402[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_388 (BatchN (None, 16, 16, 32) 96 conv2d_403[0][0] \n__________________________________________________________________________________________________\nactivation_431 (Activation) (None, 16, 16, 64) 0 batch_normalization_382[0][0] \n__________________________________________________________________________________________________\nactivation_433 (Activation) (None, 16, 16, 64) 0 batch_normalization_384[0][0] \n__________________________________________________________________________________________________\nactivation_436 (Activation) (None, 16, 16, 96) 0 batch_normalization_387[0][0] \n__________________________________________________________________________________________________\nactivation_437 (Activation) (None, 16, 16, 32) 0 batch_normalization_388[0][0] \n__________________________________________________________________________________________________\nmixed0 (Concatenate) (None, 16, 16, 256) 0 activation_431[0][0] \n activation_433[0][0] \n activation_436[0][0] \n activation_437[0][0] \n__________________________________________________________________________________________________\nconv2d_407 (Conv2D) (None, 16, 16, 64) 16384 mixed0[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_392 (BatchN (None, 16, 16, 64) 192 conv2d_407[0][0] \n__________________________________________________________________________________________________\nactivation_441 (Activation) (None, 16, 16, 64) 0 batch_normalization_392[0][0] \n__________________________________________________________________________________________________\nconv2d_405 (Conv2D) (None, 16, 16, 48) 12288 mixed0[0][0] \n__________________________________________________________________________________________________\nconv2d_408 (Conv2D) (None, 16, 16, 96) 55296 activation_441[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_390 (BatchN (None, 16, 16, 48) 144 conv2d_405[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_393 (BatchN (None, 16, 16, 96) 288 conv2d_408[0][0] \n__________________________________________________________________________________________________\nactivation_439 (Activation) (None, 16, 16, 48) 0 batch_normalization_390[0][0] \n__________________________________________________________________________________________________\nactivation_442 (Activation) (None, 16, 16, 96) 0 batch_normalization_393[0][0] 
\n__________________________________________________________________________________________________\naverage_pooling2d_38 (AveragePo (None, 16, 16, 256) 0 mixed0[0][0] \n__________________________________________________________________________________________________\nconv2d_404 (Conv2D) (None, 16, 16, 64) 16384 mixed0[0][0] \n__________________________________________________________________________________________________\nconv2d_406 (Conv2D) (None, 16, 16, 64) 76800 activation_439[0][0] \n__________________________________________________________________________________________________\nconv2d_409 (Conv2D) (None, 16, 16, 96) 82944 activation_442[0][0] \n__________________________________________________________________________________________________\nconv2d_410 (Conv2D) (None, 16, 16, 64) 16384 average_pooling2d_38[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_389 (BatchN (None, 16, 16, 64) 192 conv2d_404[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_391 (BatchN (None, 16, 16, 64) 192 conv2d_406[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_394 (BatchN (None, 16, 16, 96) 288 conv2d_409[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_395 (BatchN (None, 16, 16, 64) 192 conv2d_410[0][0] \n__________________________________________________________________________________________________\nactivation_438 (Activation) (None, 16, 16, 64) 0 batch_normalization_389[0][0] \n__________________________________________________________________________________________________\nactivation_440 (Activation) (None, 16, 16, 64) 0 batch_normalization_391[0][0] \n__________________________________________________________________________________________________\nactivation_443 (Activation) (None, 16, 16, 96) 0 batch_normalization_394[0][0] \n__________________________________________________________________________________________________\nactivation_444 (Activation) (None, 16, 16, 64) 0 batch_normalization_395[0][0] \n__________________________________________________________________________________________________\nmixed1 (Concatenate) (None, 16, 16, 288) 0 activation_438[0][0] \n activation_440[0][0] \n activation_443[0][0] \n activation_444[0][0] \n__________________________________________________________________________________________________\nconv2d_414 (Conv2D) (None, 16, 16, 64) 18432 mixed1[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_399 (BatchN (None, 16, 16, 64) 192 conv2d_414[0][0] \n__________________________________________________________________________________________________\nactivation_448 (Activation) (None, 16, 16, 64) 0 batch_normalization_399[0][0] \n__________________________________________________________________________________________________\nconv2d_412 (Conv2D) (None, 16, 16, 48) 13824 mixed1[0][0] \n__________________________________________________________________________________________________\nconv2d_415 (Conv2D) (None, 16, 16, 96) 55296 activation_448[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_397 (BatchN (None, 16, 16, 48) 144 conv2d_412[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_400 (BatchN (None, 16, 16, 96) 288 conv2d_415[0][0] \n__________________________________________________________________________________________________\nactivation_446 (Activation) (None, 16, 16, 48) 0 batch_normalization_397[0][0] \n__________________________________________________________________________________________________\nactivation_449 (Activation) (None, 16, 16, 96) 0 batch_normalization_400[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_39 (AveragePo (None, 16, 16, 288) 0 mixed1[0][0] \n__________________________________________________________________________________________________\nconv2d_411 (Conv2D) (None, 16, 16, 64) 18432 mixed1[0][0] \n__________________________________________________________________________________________________\nconv2d_413 (Conv2D) (None, 16, 16, 64) 76800 activation_446[0][0] \n__________________________________________________________________________________________________\nconv2d_416 (Conv2D) (None, 16, 16, 96) 82944 activation_449[0][0] \n__________________________________________________________________________________________________\nconv2d_417 (Conv2D) (None, 16, 16, 64) 18432 average_pooling2d_39[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_396 (BatchN (None, 16, 16, 64) 192 conv2d_411[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_398 (BatchN (None, 16, 16, 64) 192 conv2d_413[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_401 (BatchN (None, 16, 16, 96) 288 conv2d_416[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_402 (BatchN (None, 16, 16, 64) 192 conv2d_417[0][0] \n__________________________________________________________________________________________________\nactivation_445 (Activation) (None, 16, 16, 64) 0 batch_normalization_396[0][0] \n__________________________________________________________________________________________________\nactivation_447 (Activation) (None, 16, 16, 64) 0 batch_normalization_398[0][0] \n__________________________________________________________________________________________________\nactivation_450 (Activation) (None, 16, 16, 96) 0 batch_normalization_401[0][0] \n__________________________________________________________________________________________________\nactivation_451 (Activation) (None, 16, 16, 64) 0 batch_normalization_402[0][0] \n__________________________________________________________________________________________________\nmixed2 (Concatenate) (None, 16, 16, 288) 0 activation_445[0][0] \n activation_447[0][0] \n activation_450[0][0] \n activation_451[0][0] \n__________________________________________________________________________________________________\nconv2d_419 (Conv2D) (None, 16, 16, 64) 18432 mixed2[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_404 (BatchN (None, 16, 16, 64) 192 conv2d_419[0][0] \n__________________________________________________________________________________________________\nactivation_453 (Activation) (None, 16, 16, 64) 0 batch_normalization_404[0][0] 
\n__________________________________________________________________________________________________\nconv2d_420 (Conv2D) (None, 16, 16, 96) 55296 activation_453[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_405 (BatchN (None, 16, 16, 96) 288 conv2d_420[0][0] \n__________________________________________________________________________________________________\nactivation_454 (Activation) (None, 16, 16, 96) 0 batch_normalization_405[0][0] \n__________________________________________________________________________________________________\nconv2d_418 (Conv2D) (None, 7, 7, 384) 995328 mixed2[0][0] \n__________________________________________________________________________________________________\nconv2d_421 (Conv2D) (None, 7, 7, 96) 82944 activation_454[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_403 (BatchN (None, 7, 7, 384) 1152 conv2d_418[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_406 (BatchN (None, 7, 7, 96) 288 conv2d_421[0][0] \n__________________________________________________________________________________________________\nactivation_452 (Activation) (None, 7, 7, 384) 0 batch_normalization_403[0][0] \n__________________________________________________________________________________________________\nactivation_455 (Activation) (None, 7, 7, 96) 0 batch_normalization_406[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_26 (MaxPooling2D) (None, 7, 7, 288) 0 mixed2[0][0] \n__________________________________________________________________________________________________\nmixed3 (Concatenate) (None, 7, 7, 768) 0 activation_452[0][0] \n activation_455[0][0] \n max_pooling2d_26[0][0] \n__________________________________________________________________________________________________\nconv2d_426 (Conv2D) (None, 7, 7, 128) 98304 mixed3[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_411 (BatchN (None, 7, 7, 128) 384 conv2d_426[0][0] \n__________________________________________________________________________________________________\nactivation_460 (Activation) (None, 7, 7, 128) 0 batch_normalization_411[0][0] \n__________________________________________________________________________________________________\nconv2d_427 (Conv2D) (None, 7, 7, 128) 114688 activation_460[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_412 (BatchN (None, 7, 7, 128) 384 conv2d_427[0][0] \n__________________________________________________________________________________________________\nactivation_461 (Activation) (None, 7, 7, 128) 0 batch_normalization_412[0][0] \n__________________________________________________________________________________________________\nconv2d_423 (Conv2D) (None, 7, 7, 128) 98304 mixed3[0][0] \n__________________________________________________________________________________________________\nconv2d_428 (Conv2D) (None, 7, 7, 128) 114688 activation_461[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_408 (BatchN (None, 7, 7, 128) 384 conv2d_423[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_413 (BatchN (None, 7, 7, 128) 384 conv2d_428[0][0] \n__________________________________________________________________________________________________\nactivation_457 (Activation) (None, 7, 7, 128) 0 batch_normalization_408[0][0] \n__________________________________________________________________________________________________\nactivation_462 (Activation) (None, 7, 7, 128) 0 batch_normalization_413[0][0] \n__________________________________________________________________________________________________\nconv2d_424 (Conv2D) (None, 7, 7, 128) 114688 activation_457[0][0] \n__________________________________________________________________________________________________\nconv2d_429 (Conv2D) (None, 7, 7, 128) 114688 activation_462[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_409 (BatchN (None, 7, 7, 128) 384 conv2d_424[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_414 (BatchN (None, 7, 7, 128) 384 conv2d_429[0][0] \n__________________________________________________________________________________________________\nactivation_458 (Activation) (None, 7, 7, 128) 0 batch_normalization_409[0][0] \n__________________________________________________________________________________________________\nactivation_463 (Activation) (None, 7, 7, 128) 0 batch_normalization_414[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_40 (AveragePo (None, 7, 7, 768) 0 mixed3[0][0] \n__________________________________________________________________________________________________\nconv2d_422 (Conv2D) (None, 7, 7, 192) 147456 mixed3[0][0] \n__________________________________________________________________________________________________\nconv2d_425 (Conv2D) (None, 7, 7, 192) 172032 activation_458[0][0] \n__________________________________________________________________________________________________\nconv2d_430 (Conv2D) (None, 7, 7, 192) 172032 activation_463[0][0] \n__________________________________________________________________________________________________\nconv2d_431 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_40[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_407 (BatchN (None, 7, 7, 192) 576 conv2d_422[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_410 (BatchN (None, 7, 7, 192) 576 conv2d_425[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_415 (BatchN (None, 7, 7, 192) 576 conv2d_430[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_416 (BatchN (None, 7, 7, 192) 576 conv2d_431[0][0] \n__________________________________________________________________________________________________\nactivation_456 (Activation) (None, 7, 7, 192) 0 batch_normalization_407[0][0] \n__________________________________________________________________________________________________\nactivation_459 (Activation) (None, 7, 7, 192) 0 batch_normalization_410[0][0] 
\n__________________________________________________________________________________________________\nactivation_464 (Activation) (None, 7, 7, 192) 0 batch_normalization_415[0][0] \n__________________________________________________________________________________________________\nactivation_465 (Activation) (None, 7, 7, 192) 0 batch_normalization_416[0][0] \n__________________________________________________________________________________________________\nmixed4 (Concatenate) (None, 7, 7, 768) 0 activation_456[0][0] \n activation_459[0][0] \n activation_464[0][0] \n activation_465[0][0] \n__________________________________________________________________________________________________\nconv2d_436 (Conv2D) (None, 7, 7, 160) 122880 mixed4[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_421 (BatchN (None, 7, 7, 160) 480 conv2d_436[0][0] \n__________________________________________________________________________________________________\nactivation_470 (Activation) (None, 7, 7, 160) 0 batch_normalization_421[0][0] \n__________________________________________________________________________________________________\nconv2d_437 (Conv2D) (None, 7, 7, 160) 179200 activation_470[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_422 (BatchN (None, 7, 7, 160) 480 conv2d_437[0][0] \n__________________________________________________________________________________________________\nactivation_471 (Activation) (None, 7, 7, 160) 0 batch_normalization_422[0][0] \n__________________________________________________________________________________________________\nconv2d_433 (Conv2D) (None, 7, 7, 160) 122880 mixed4[0][0] \n__________________________________________________________________________________________________\nconv2d_438 (Conv2D) (None, 7, 7, 160) 179200 activation_471[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_418 (BatchN (None, 7, 7, 160) 480 conv2d_433[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_423 (BatchN (None, 7, 7, 160) 480 conv2d_438[0][0] \n__________________________________________________________________________________________________\nactivation_467 (Activation) (None, 7, 7, 160) 0 batch_normalization_418[0][0] \n__________________________________________________________________________________________________\nactivation_472 (Activation) (None, 7, 7, 160) 0 batch_normalization_423[0][0] \n__________________________________________________________________________________________________\nconv2d_434 (Conv2D) (None, 7, 7, 160) 179200 activation_467[0][0] \n__________________________________________________________________________________________________\nconv2d_439 (Conv2D) (None, 7, 7, 160) 179200 activation_472[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_419 (BatchN (None, 7, 7, 160) 480 conv2d_434[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_424 (BatchN (None, 7, 7, 160) 480 conv2d_439[0][0] \n__________________________________________________________________________________________________\nactivation_468 (Activation) (None, 7, 7, 160) 0 batch_normalization_419[0][0] 
\n__________________________________________________________________________________________________\nactivation_473 (Activation) (None, 7, 7, 160) 0 batch_normalization_424[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_41 (AveragePo (None, 7, 7, 768) 0 mixed4[0][0] \n__________________________________________________________________________________________________\nconv2d_432 (Conv2D) (None, 7, 7, 192) 147456 mixed4[0][0] \n__________________________________________________________________________________________________\nconv2d_435 (Conv2D) (None, 7, 7, 192) 215040 activation_468[0][0] \n__________________________________________________________________________________________________\nconv2d_440 (Conv2D) (None, 7, 7, 192) 215040 activation_473[0][0] \n__________________________________________________________________________________________________\nconv2d_441 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_41[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_417 (BatchN (None, 7, 7, 192) 576 conv2d_432[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_420 (BatchN (None, 7, 7, 192) 576 conv2d_435[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_425 (BatchN (None, 7, 7, 192) 576 conv2d_440[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_426 (BatchN (None, 7, 7, 192) 576 conv2d_441[0][0] \n__________________________________________________________________________________________________\nactivation_466 (Activation) (None, 7, 7, 192) 0 batch_normalization_417[0][0] \n__________________________________________________________________________________________________\nactivation_469 (Activation) (None, 7, 7, 192) 0 batch_normalization_420[0][0] \n__________________________________________________________________________________________________\nactivation_474 (Activation) (None, 7, 7, 192) 0 batch_normalization_425[0][0] \n__________________________________________________________________________________________________\nactivation_475 (Activation) (None, 7, 7, 192) 0 batch_normalization_426[0][0] \n__________________________________________________________________________________________________\nmixed5 (Concatenate) (None, 7, 7, 768) 0 activation_466[0][0] \n activation_469[0][0] \n activation_474[0][0] \n activation_475[0][0] \n__________________________________________________________________________________________________\nconv2d_446 (Conv2D) (None, 7, 7, 160) 122880 mixed5[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_431 (BatchN (None, 7, 7, 160) 480 conv2d_446[0][0] \n__________________________________________________________________________________________________\nactivation_480 (Activation) (None, 7, 7, 160) 0 batch_normalization_431[0][0] \n__________________________________________________________________________________________________\nconv2d_447 (Conv2D) (None, 7, 7, 160) 179200 activation_480[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_432 (BatchN (None, 7, 7, 160) 480 conv2d_447[0][0] 
\n__________________________________________________________________________________________________\nactivation_481 (Activation) (None, 7, 7, 160) 0 batch_normalization_432[0][0] \n__________________________________________________________________________________________________\nconv2d_443 (Conv2D) (None, 7, 7, 160) 122880 mixed5[0][0] \n__________________________________________________________________________________________________\nconv2d_448 (Conv2D) (None, 7, 7, 160) 179200 activation_481[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_428 (BatchN (None, 7, 7, 160) 480 conv2d_443[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_433 (BatchN (None, 7, 7, 160) 480 conv2d_448[0][0] \n__________________________________________________________________________________________________\nactivation_477 (Activation) (None, 7, 7, 160) 0 batch_normalization_428[0][0] \n__________________________________________________________________________________________________\nactivation_482 (Activation) (None, 7, 7, 160) 0 batch_normalization_433[0][0] \n__________________________________________________________________________________________________\nconv2d_444 (Conv2D) (None, 7, 7, 160) 179200 activation_477[0][0] \n__________________________________________________________________________________________________\nconv2d_449 (Conv2D) (None, 7, 7, 160) 179200 activation_482[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_429 (BatchN (None, 7, 7, 160) 480 conv2d_444[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_434 (BatchN (None, 7, 7, 160) 480 conv2d_449[0][0] \n__________________________________________________________________________________________________\nactivation_478 (Activation) (None, 7, 7, 160) 0 batch_normalization_429[0][0] \n__________________________________________________________________________________________________\nactivation_483 (Activation) (None, 7, 7, 160) 0 batch_normalization_434[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_42 (AveragePo (None, 7, 7, 768) 0 mixed5[0][0] \n__________________________________________________________________________________________________\nconv2d_442 (Conv2D) (None, 7, 7, 192) 147456 mixed5[0][0] \n__________________________________________________________________________________________________\nconv2d_445 (Conv2D) (None, 7, 7, 192) 215040 activation_478[0][0] \n__________________________________________________________________________________________________\nconv2d_450 (Conv2D) (None, 7, 7, 192) 215040 activation_483[0][0] \n__________________________________________________________________________________________________\nconv2d_451 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_42[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_427 (BatchN (None, 7, 7, 192) 576 conv2d_442[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_430 (BatchN (None, 7, 7, 192) 576 conv2d_445[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_435 (BatchN (None, 7, 7, 192) 576 conv2d_450[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_436 (BatchN (None, 7, 7, 192) 576 conv2d_451[0][0] \n__________________________________________________________________________________________________\nactivation_476 (Activation) (None, 7, 7, 192) 0 batch_normalization_427[0][0] \n__________________________________________________________________________________________________\nactivation_479 (Activation) (None, 7, 7, 192) 0 batch_normalization_430[0][0] \n__________________________________________________________________________________________________\nactivation_484 (Activation) (None, 7, 7, 192) 0 batch_normalization_435[0][0] \n__________________________________________________________________________________________________\nactivation_485 (Activation) (None, 7, 7, 192) 0 batch_normalization_436[0][0] \n__________________________________________________________________________________________________\nmixed6 (Concatenate) (None, 7, 7, 768) 0 activation_476[0][0] \n activation_479[0][0] \n activation_484[0][0] \n activation_485[0][0] \n__________________________________________________________________________________________________\nconv2d_456 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_441 (BatchN (None, 7, 7, 192) 576 conv2d_456[0][0] \n__________________________________________________________________________________________________\nactivation_490 (Activation) (None, 7, 7, 192) 0 batch_normalization_441[0][0] \n__________________________________________________________________________________________________\nconv2d_457 (Conv2D) (None, 7, 7, 192) 258048 activation_490[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_442 (BatchN (None, 7, 7, 192) 576 conv2d_457[0][0] \n__________________________________________________________________________________________________\nactivation_491 (Activation) (None, 7, 7, 192) 0 batch_normalization_442[0][0] \n__________________________________________________________________________________________________\nconv2d_453 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0] \n__________________________________________________________________________________________________\nconv2d_458 (Conv2D) (None, 7, 7, 192) 258048 activation_491[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_438 (BatchN (None, 7, 7, 192) 576 conv2d_453[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_443 (BatchN (None, 7, 7, 192) 576 conv2d_458[0][0] \n__________________________________________________________________________________________________\nactivation_487 (Activation) (None, 7, 7, 192) 0 batch_normalization_438[0][0] \n__________________________________________________________________________________________________\nactivation_492 (Activation) (None, 7, 7, 192) 0 batch_normalization_443[0][0] \n__________________________________________________________________________________________________\nconv2d_454 (Conv2D) (None, 7, 7, 192) 258048 activation_487[0][0] 
\n__________________________________________________________________________________________________\nconv2d_459 (Conv2D) (None, 7, 7, 192) 258048 activation_492[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_439 (BatchN (None, 7, 7, 192) 576 conv2d_454[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_444 (BatchN (None, 7, 7, 192) 576 conv2d_459[0][0] \n__________________________________________________________________________________________________\nactivation_488 (Activation) (None, 7, 7, 192) 0 batch_normalization_439[0][0] \n__________________________________________________________________________________________________\nactivation_493 (Activation) (None, 7, 7, 192) 0 batch_normalization_444[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_43 (AveragePo (None, 7, 7, 768) 0 mixed6[0][0] \n__________________________________________________________________________________________________\nconv2d_452 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0] \n__________________________________________________________________________________________________\nconv2d_455 (Conv2D) (None, 7, 7, 192) 258048 activation_488[0][0] \n__________________________________________________________________________________________________\nconv2d_460 (Conv2D) (None, 7, 7, 192) 258048 activation_493[0][0] \n__________________________________________________________________________________________________\nconv2d_461 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_43[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_437 (BatchN (None, 7, 7, 192) 576 conv2d_452[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_440 (BatchN (None, 7, 7, 192) 576 conv2d_455[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_445 (BatchN (None, 7, 7, 192) 576 conv2d_460[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_446 (BatchN (None, 7, 7, 192) 576 conv2d_461[0][0] \n__________________________________________________________________________________________________\nactivation_486 (Activation) (None, 7, 7, 192) 0 batch_normalization_437[0][0] \n__________________________________________________________________________________________________\nactivation_489 (Activation) (None, 7, 7, 192) 0 batch_normalization_440[0][0] \n__________________________________________________________________________________________________\nactivation_494 (Activation) (None, 7, 7, 192) 0 batch_normalization_445[0][0] \n__________________________________________________________________________________________________\nactivation_495 (Activation) (None, 7, 7, 192) 0 batch_normalization_446[0][0] \n__________________________________________________________________________________________________\nmixed7 (Concatenate) (None, 7, 7, 768) 0 activation_486[0][0] \n activation_489[0][0] \n activation_494[0][0] \n activation_495[0][0] \n__________________________________________________________________________________________________\nconv2d_464 (Conv2D) (None, 7, 7, 192) 147456 mixed7[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_449 (BatchN (None, 7, 7, 192) 576 conv2d_464[0][0] \n__________________________________________________________________________________________________\nactivation_498 (Activation) (None, 7, 7, 192) 0 batch_normalization_449[0][0] \n__________________________________________________________________________________________________\nconv2d_465 (Conv2D) (None, 7, 7, 192) 258048 activation_498[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_450 (BatchN (None, 7, 7, 192) 576 conv2d_465[0][0] \n__________________________________________________________________________________________________\nactivation_499 (Activation) (None, 7, 7, 192) 0 batch_normalization_450[0][0] \n__________________________________________________________________________________________________\nconv2d_462 (Conv2D) (None, 7, 7, 192) 147456 mixed7[0][0] \n__________________________________________________________________________________________________\nconv2d_466 (Conv2D) (None, 7, 7, 192) 258048 activation_499[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_447 (BatchN (None, 7, 7, 192) 576 conv2d_462[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_451 (BatchN (None, 7, 7, 192) 576 conv2d_466[0][0] \n__________________________________________________________________________________________________\nactivation_496 (Activation) (None, 7, 7, 192) 0 batch_normalization_447[0][0] \n__________________________________________________________________________________________________\nactivation_500 (Activation) (None, 7, 7, 192) 0 batch_normalization_451[0][0] \n__________________________________________________________________________________________________\nconv2d_463 (Conv2D) (None, 3, 3, 320) 552960 activation_496[0][0] \n__________________________________________________________________________________________________\nconv2d_467 (Conv2D) (None, 3, 3, 192) 331776 activation_500[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_448 (BatchN (None, 3, 3, 320) 960 conv2d_463[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_452 (BatchN (None, 3, 3, 192) 576 conv2d_467[0][0] \n__________________________________________________________________________________________________\nactivation_497 (Activation) (None, 3, 3, 320) 0 batch_normalization_448[0][0] \n__________________________________________________________________________________________________\nactivation_501 (Activation) (None, 3, 3, 192) 0 batch_normalization_452[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_27 (MaxPooling2D) (None, 3, 3, 768) 0 mixed7[0][0] \n__________________________________________________________________________________________________\nmixed8 (Concatenate) (None, 3, 3, 1280) 0 activation_497[0][0] \n activation_501[0][0] \n max_pooling2d_27[0][0] \n__________________________________________________________________________________________________\nconv2d_472 (Conv2D) (None, 3, 3, 448) 573440 mixed8[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_457 (BatchN (None, 3, 3, 448) 1344 conv2d_472[0][0] \n__________________________________________________________________________________________________\nactivation_506 (Activation) (None, 3, 3, 448) 0 batch_normalization_457[0][0] \n__________________________________________________________________________________________________\nconv2d_469 (Conv2D) (None, 3, 3, 384) 491520 mixed8[0][0] \n__________________________________________________________________________________________________\nconv2d_473 (Conv2D) (None, 3, 3, 384) 1548288 activation_506[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_454 (BatchN (None, 3, 3, 384) 1152 conv2d_469[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_458 (BatchN (None, 3, 3, 384) 1152 conv2d_473[0][0] \n__________________________________________________________________________________________________\nactivation_503 (Activation) (None, 3, 3, 384) 0 batch_normalization_454[0][0] \n__________________________________________________________________________________________________\nactivation_507 (Activation) (None, 3, 3, 384) 0 batch_normalization_458[0][0] \n__________________________________________________________________________________________________\nconv2d_470 (Conv2D) (None, 3, 3, 384) 442368 activation_503[0][0] \n__________________________________________________________________________________________________\nconv2d_471 (Conv2D) (None, 3, 3, 384) 442368 activation_503[0][0] \n__________________________________________________________________________________________________\nconv2d_474 (Conv2D) (None, 3, 3, 384) 442368 activation_507[0][0] \n__________________________________________________________________________________________________\nconv2d_475 (Conv2D) (None, 3, 3, 384) 442368 activation_507[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_44 (AveragePo (None, 3, 3, 1280) 0 mixed8[0][0] \n__________________________________________________________________________________________________\nconv2d_468 (Conv2D) (None, 3, 3, 320) 409600 mixed8[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_455 (BatchN (None, 3, 3, 384) 1152 conv2d_470[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_456 (BatchN (None, 3, 3, 384) 1152 conv2d_471[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_459 (BatchN (None, 3, 3, 384) 1152 conv2d_474[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_460 (BatchN (None, 3, 3, 384) 1152 conv2d_475[0][0] \n__________________________________________________________________________________________________\nconv2d_476 (Conv2D) (None, 3, 3, 192) 245760 average_pooling2d_44[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_453 (BatchN (None, 3, 3, 320) 960 conv2d_468[0][0] \n__________________________________________________________________________________________________\nactivation_504 
(Activation) (None, 3, 3, 384) 0 batch_normalization_455[0][0] \n__________________________________________________________________________________________________\nactivation_505 (Activation) (None, 3, 3, 384) 0 batch_normalization_456[0][0] \n__________________________________________________________________________________________________\nactivation_508 (Activation) (None, 3, 3, 384) 0 batch_normalization_459[0][0] \n__________________________________________________________________________________________________\nactivation_509 (Activation) (None, 3, 3, 384) 0 batch_normalization_460[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_461 (BatchN (None, 3, 3, 192) 576 conv2d_476[0][0] \n__________________________________________________________________________________________________\nactivation_502 (Activation) (None, 3, 3, 320) 0 batch_normalization_453[0][0] \n__________________________________________________________________________________________________\nmixed9_0 (Concatenate) (None, 3, 3, 768) 0 activation_504[0][0] \n activation_505[0][0] \n__________________________________________________________________________________________________\nconcatenate_9 (Concatenate) (None, 3, 3, 768) 0 activation_508[0][0] \n activation_509[0][0] \n__________________________________________________________________________________________________\nactivation_510 (Activation) (None, 3, 3, 192) 0 batch_normalization_461[0][0] \n__________________________________________________________________________________________________\nmixed9 (Concatenate) (None, 3, 3, 2048) 0 activation_502[0][0] \n mixed9_0[0][0] \n concatenate_9[0][0] \n activation_510[0][0] \n__________________________________________________________________________________________________\nconv2d_481 (Conv2D) (None, 3, 3, 448) 917504 mixed9[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_466 (BatchN (None, 3, 3, 448) 1344 conv2d_481[0][0] \n__________________________________________________________________________________________________\nactivation_515 (Activation) (None, 3, 3, 448) 0 batch_normalization_466[0][0] \n__________________________________________________________________________________________________\nconv2d_478 (Conv2D) (None, 3, 3, 384) 786432 mixed9[0][0] \n__________________________________________________________________________________________________\nconv2d_482 (Conv2D) (None, 3, 3, 384) 1548288 activation_515[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_463 (BatchN (None, 3, 3, 384) 1152 conv2d_478[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_467 (BatchN (None, 3, 3, 384) 1152 conv2d_482[0][0] \n__________________________________________________________________________________________________\nactivation_512 (Activation) (None, 3, 3, 384) 0 batch_normalization_463[0][0] \n__________________________________________________________________________________________________\nactivation_516 (Activation) (None, 3, 3, 384) 0 batch_normalization_467[0][0] \n__________________________________________________________________________________________________\nconv2d_479 (Conv2D) (None, 3, 3, 384) 442368 activation_512[0][0] 
\n__________________________________________________________________________________________________\nconv2d_480 (Conv2D) (None, 3, 3, 384) 442368 activation_512[0][0] \n__________________________________________________________________________________________________\nconv2d_483 (Conv2D) (None, 3, 3, 384) 442368 activation_516[0][0] \n__________________________________________________________________________________________________\nconv2d_484 (Conv2D) (None, 3, 3, 384) 442368 activation_516[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_45 (AveragePo (None, 3, 3, 2048) 0 mixed9[0][0] \n__________________________________________________________________________________________________\nconv2d_477 (Conv2D) (None, 3, 3, 320) 655360 mixed9[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_464 (BatchN (None, 3, 3, 384) 1152 conv2d_479[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_465 (BatchN (None, 3, 3, 384) 1152 conv2d_480[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_468 (BatchN (None, 3, 3, 384) 1152 conv2d_483[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_469 (BatchN (None, 3, 3, 384) 1152 conv2d_484[0][0] \n__________________________________________________________________________________________________\nconv2d_485 (Conv2D) (None, 3, 3, 192) 393216 average_pooling2d_45[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_462 (BatchN (None, 3, 3, 320) 960 conv2d_477[0][0] \n__________________________________________________________________________________________________\nactivation_513 (Activation) (None, 3, 3, 384) 0 batch_normalization_464[0][0] \n__________________________________________________________________________________________________\nactivation_514 (Activation) (None, 3, 3, 384) 0 batch_normalization_465[0][0] \n__________________________________________________________________________________________________\nactivation_517 (Activation) (None, 3, 3, 384) 0 batch_normalization_468[0][0] \n__________________________________________________________________________________________________\nactivation_518 (Activation) (None, 3, 3, 384) 0 batch_normalization_469[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_470 (BatchN (None, 3, 3, 192) 576 conv2d_485[0][0] \n__________________________________________________________________________________________________\nactivation_511 (Activation) (None, 3, 3, 320) 0 batch_normalization_462[0][0] \n__________________________________________________________________________________________________\nmixed9_1 (Concatenate) (None, 3, 3, 768) 0 activation_513[0][0] \n activation_514[0][0] \n__________________________________________________________________________________________________\nconcatenate_10 (Concatenate) (None, 3, 3, 768) 0 activation_517[0][0] \n activation_518[0][0] \n__________________________________________________________________________________________________\nactivation_519 (Activation) (None, 3, 3, 192) 0 batch_normalization_470[0][0] 
\n__________________________________________________________________________________________________\nmixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_511[0][0] \n mixed9_1[0][0] \n concatenate_10[0][0] \n activation_519[0][0] \n__________________________________________________________________________________________________\nglobal_average_pooling2d_11 (Gl (None, 2048) 0 mixed10[0][0] \n__________________________________________________________________________________________________\ndropout_24 (Dropout) (None, 2048) 0 global_average_pooling2d_11[0][0]\n__________________________________________________________________________________________________\nfc-2 (Dense) (None, 128) 262272 dropout_24[0][0] \n__________________________________________________________________________________________________\ndropout_25 (Dropout) (None, 128) 0 fc-2[0][0] \n__________________________________________________________________________________________________\noutput_layer (Dense) (None, 7) 903 dropout_25[0][0] \n==================================================================================================\nTotal params: 22,065,959\nTrainable params: 22,031,527\nNon-trainable params: 34,432\n__________________________________________________________________________________________________\n" ], [ "for layer in custom_resnet_model2.layers[:-5]:\n\tlayer.trainable = False", "_____no_output_____" ], [ "custom_resnet_model2.layers[-1].trainable\ncustom_resnet_model2.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])", "_____no_output_____" ], [ "t=time.time()\nhistory = custom_resnet_model2.fit(X_train, y_train, batch_size=30, epochs=15, verbose=1, validation_data=(X_test, y_test))", "Train on 1853 samples, validate on 464 samples\nEpoch 1/15\n1853/1853 [==============================] - 43s 23ms/step - loss: 1.8310 - acc: 0.6465 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 2/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 1.1984 - acc: 0.7652 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 3/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 1.0619 - acc: 0.7868 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 4/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 1.0281 - acc: 0.7890 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 5/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 0.9385 - acc: 0.7917 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 6/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 0.9034 - acc: 0.7922 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 7/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 0.8836 - acc: 0.7917 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 8/15\n1853/1853 [==============================] - 29s 16ms/step - loss: 0.8725 - acc: 0.7922 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 9/15\n1853/1853 [==============================] - 29s 16ms/step - loss: 0.8345 - acc: 0.7922 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 10/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 0.8376 - acc: 0.7922 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 11/15\n1853/1853 [==============================] - 29s 16ms/step - loss: 0.8234 - acc: 0.7922 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 12/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 0.8050 - acc: 0.7922 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 13/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 0.7786 - acc: 0.7922 - val_loss: 
3.4390 - val_acc: 0.7866\nEpoch 14/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 0.8006 - acc: 0.7922 - val_loss: 3.4390 - val_acc: 0.7866\nEpoch 15/15\n1853/1853 [==============================] - 30s 16ms/step - loss: 0.7604 - acc: 0.7922 - val_loss: 3.4390 - val_acc: 0.7866\n" ], [ "print('Training time: %s' % (t - time.time()))\n(loss, accuracy) = custom_resnet_model2.evaluate(X_test, y_test, batch_size=33, verbose=1)\nprint(\"[INFO] loss={:.4f}, accuracy: {:.4f}%\".format(loss,accuracy * 100))", "Training time: -583.1648933887482\n464/464 [==============================] - 6s 14ms/step\n[INFO] loss=3.4390, accuracy: 78.6638%\n" ], [ "#accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n#loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "_____no_output_____" ] ], [ [ "# CNN Model 2 (60 Epochs)\n## Training Accuracy: 0.9159 validation accuracy: 0.8642", "_____no_output_____" ], [ "# ResNet Model 1(15 Epochs) \n### Training Accuracy: 0.9455 validation accuracy: 0.86", "_____no_output_____" ], [ "# GoogleNet\n### Training Accuracy: 0.79 validation accuracy: 0.78", "_____no_output_____" ], [ "# GoogleNet(Colab)\n### with different dataset \n### Training Accuracy: 0.85 validation accuracy: 0.13", "_____no_output_____" ] ], [ [ "CS5102-Deep Learning\nKALEEM WAHEED 18L-1811 Project 2(B)\n\nPreprocessing \n1) Generate new images for Handle Class Balance Issue \n2) Now each class have 1833 images \n3) Remove noise/Irrelevant Images\n\n1.\tClassify the diseases using best deep learning architecture for classification\nFirst Model\nBuilding Convolutional Neural Network Model 1\nBatch size 100, epoch 50, Adam optimizer Default Learning Rate\nTraining Accuracy: 0.8980 validation accuracy: 0.8634\n\n \n\n\n\n\nSecond Model\nI apply different model with changes in drop out for over fitting and increase layer and Epochs\nBuilding Convolutional Neural Network Model Architecture 2\nUse loss = 'categorical_crossentropy' \nBatch size 100, \nEpoch 60, Optimizer \nAdam Default Learning Rate\nModel 2 give better performance when increase Epochs\nTraining Accuracy: 0.9159 validation accuracy: 0.8642\n\n \n\n\n\n\n2. 
Use transfer learning on ResNet and GoogleNet to retrain part of the network\n\nResNet Model 1\nKeras ResNet50 Model 1 with transfer learning\nInput (shape=(64, 64, 3)), include_top=False, weights='imagenet'\nbatch_size=40, epochs=15, Dropout 0.6 and 0.4\nWe don't want to train the whole model, so only the last 5 layers are retrained\n\n\n\nTraining Accuracy: 0.9455 validation accuracy: 0.86\n\n \nResNet Model 2\nInput(shape=(64, 64, 3)), include_top=False, weights='imagenet', batch_size=38, epochs=25 \nRemove the fully-connected 2nd-to-last dense layer\nloss='categorical_crossentropy', optimizer='adam'\nWe don't want to train the whole model; only the last 3 layers are retrained, due to limitations\nTraining Accuracy: 0.92 validation accuracy: 0.86\n \n\nGoogleNet Model 1\nInput(shape=(150, 150, 3)), include_top=False, weights='imagenet'\nbatch_size=30, epochs=20, remove the fully-connected 2nd-to-last dense layer\nloss='categorical_crossentropy', optimizer='adam'\nWe don't want to train the whole model; only the last 3 layers are retrained\nTraining Accuracy: 0.79 validation accuracy: 0.78\n \nGoogleNet Model 2 (Colab)\nInput(shape=(150, 150, 3)), include_top=False, weights='imagenet'\nDecrease batch_size to 20, decrease epochs to 10, add a fully-connected 2nd-to-last dense layer\nloss='categorical_crossentropy', optimizer='adam'\nRetrain the last 300 layers\nI retrained the last 300 layers, but the validation accuracy is poor\nThe model did not train\nTraining Accuracy: 0.85 validation accuracy: 0.13\n \n\n\n\n\n\nGenerative adversarial network (GAN)\nUsing a GAN model to generate 1-channel images.\nIt highlights the skin disease.\nhttps://github.com/eriklindernoren/Keras-GAN/blob/master/dcgan/dcgan.py\n \n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "raw" ] ]
cb377be3331dee59736c70ea05b9f04f2b79e27f
218,449
ipynb
Jupyter Notebook
d2l/pytorch/chapter_multilayer-perceptrons/mlp.ipynb
541979210/xdf
ab99d242fbabe56a7b4d7723605cc17aab7888b2
[ "Apache-2.0" ]
2
2021-12-11T07:19:34.000Z
2022-03-11T09:29:49.000Z
d2l/pytorch/chapter_multilayer-perceptrons/mlp.ipynb
541979210/xdf
ab99d242fbabe56a7b4d7723605cc17aab7888b2
[ "Apache-2.0" ]
null
null
null
d2l/pytorch/chapter_multilayer-perceptrons/mlp.ipynb
541979210/xdf
ab99d242fbabe56a7b4d7723605cc17aab7888b2
[ "Apache-2.0" ]
null
null
null
41.443559
259
0.471872
[ [ [ "# 多层感知机\n:label:`sec_mlp`\n\n在 :numref:`chap_linear`中,\n我们介绍了softmax回归( :numref:`sec_softmax`),\n然后我们从零开始实现softmax回归( :numref:`sec_softmax_scratch`),\n接着使用高级API实现了算法( :numref:`sec_softmax_concise`),\n并训练分类器从低分辨率图像中识别10类服装。\n在这个过程中,我们学习了如何处理数据,如何将输出转换为有效的概率分布,\n并应用适当的损失函数,根据模型参数最小化损失。\n我们已经在简单的线性模型背景下掌握了这些知识,\n现在我们可以开始对深度神经网络的探索,这也是本书主要涉及的一类模型。\n\n## 隐藏层\n\n我们在 :numref:`subsec_linear_model`中描述了仿射变换,\n它是一个带有偏置项的线性变换。\n首先,回想一下如 :numref:`fig_softmaxreg`中所示的softmax回归的模型架构。\n该模型通过单个仿射变换将我们的输入直接映射到输出,然后进行softmax操作。\n如果我们的标签通过仿射变换后确实与我们的输入数据相关,那么这种方法就足够了。\n但是,仿射变换中的*线性*是一个很强的假设。\n\n### 线性模型可能会出错\n\n例如,线性意味着*单调*假设:\n任何特征的增大都会导致模型输出的增大(如果对应的权重为正),\n或者导致模型输出的减小(如果对应的权重为负)。\n有时这是有道理的。\n例如,如果我们试图预测一个人是否会偿还贷款。\n我们可以认为,在其他条件不变的情况下,\n收入较高的申请人总是比收入较低的申请人更有可能偿还贷款。\n但是,虽然收入与还款概率存在单调性,但它们不是线性相关的。\n收入从0增加到5万,可能比从100万增加到105万带来更大的还款可能性。\n处理这一问题的一种方法是对我们的数据进行预处理,\n使线性变得更合理,如使用收入的对数作为我们的特征。\n\n然而我们可以很容易找出违反单调性的例子。\n例如,我们想要根据体温预测死亡率。\n对于体温高于37摄氏度的人来说,温度越高风险越大。\n然而,对于体温低于37摄氏度的人来说,温度越高风险就越低。\n在这种情况下,我们也可以通过一些巧妙的预处理来解决问题。\n例如,我们可以使用与37摄氏度的距离作为特征。\n\n但是,如何对猫和狗的图像进行分类呢?\n增加位置$(13, 17)$处像素的强度是否总是增加(或降低)图像描绘狗的似然?\n对线性模型的依赖对应于一个隐含的假设,\n即区分猫和狗的唯一要求是评估单个像素的强度。\n在一个倒置图像后依然保留类别的世界里,这种方法注定会失败。\n\n与我们前面的例子相比,这里的线性很荒谬,\n而且我们难以通过简单的预处理来解决这个问题。\n这是因为任何像素的重要性都以复杂的方式取决于该像素的上下文(周围像素的值)。\n我们的数据可能会有一种表示,这种表示会考虑到我们在特征之间的相关交互作用。\n在此表示的基础上建立一个线性模型可能会是合适的,\n但我们不知道如何手动计算这么一种表示。\n对于深度神经网络,我们使用观测数据来联合学习隐藏层表示和应用于该表示的线性预测器。\n\n### 在网络中加入隐藏层\n\n我们可以通过在网络中加入一个或多个隐藏层来克服线性模型的限制,\n使其能处理更普遍的函数关系类型。\n要做到这一点,最简单的方法是将许多全连接层堆叠在一起。\n每一层都输出到上面的层,直到生成最后的输出。\n我们可以把前$L-1$层看作表示,把最后一层看作线性预测器。\n这种架构通常称为*多层感知机*(multilayer perceptron),通常缩写为*MLP*。\n下面,我们以图的方式描述了多层感知机( :numref:`fig_mlp`)。\n\n![一个单隐藏层的多层感知机,具有5个隐藏单元](../img/mlp.svg)\n:label:`fig_mlp`\n\n这个多层感知机有4个输入,3个输出,其隐藏层包含5个隐藏单元。\n输入层不涉及任何计算,因此使用此网络产生输出只需要实现隐藏层和输出层的计算。\n因此,这个多层感知机中的层数为2。\n注意,这两个层都是全连接的。\n每个输入都会影响隐藏层中的每个神经元,\n而隐藏层中的每个神经元又会影响输出层中的每个神经元。\n\n然而,正如 :numref:`subsec_parameterization-cost-fc-layers`所说,\n具有全连接层的多层感知机的参数开销可能会高得令人望而却步,\n即使在不改变输入或输出大小的情况下,\n也可能促使在参数节约和模型有效性之间进行权衡 :cite:`Zhang.Tay.Zhang.ea.2021`。\n\n### 从线性到非线性\n\n同之前的章节一样,\n我们通过矩阵$\\mathbf{X} \\in \\mathbb{R}^{n \\times d}$\n来表示$n$个样本的小批量,\n其中每个样本具有$d$个输入特征。\n对于具有$h$个隐藏单元的单隐藏层多层感知机,\n用$\\mathbf{H} \\in \\mathbb{R}^{n \\times h}$表示隐藏层的输出,\n称为*隐藏表示*(hidden representations)。\n在数学或代码中,$\\mathbf{H}$也被称为*隐藏层变量*(hidden-layer variable)\n或*隐藏变量*(hidden variable)。\n因为隐藏层和输出层都是全连接的,\n所以我们有隐藏层权重$\\mathbf{W}^{(1)} \\in \\mathbb{R}^{d \\times h}$\n和隐藏层偏置$\\mathbf{b}^{(1)} \\in \\mathbb{R}^{1 \\times h}$\n以及输出层权重$\\mathbf{W}^{(2)} \\in \\mathbb{R}^{h \\times q}$\n和输出层偏置$\\mathbf{b}^{(2)} \\in \\mathbb{R}^{1 \\times q}$。\n形式上,我们按如下方式计算单隐藏层多层感知机的输出\n$\\mathbf{O} \\in \\mathbb{R}^{n \\times q}$:\n\n$$\n\\begin{aligned}\n \\mathbf{H} & = \\mathbf{X} \\mathbf{W}^{(1)} + \\mathbf{b}^{(1)}, \\\\\n \\mathbf{O} & = \\mathbf{H}\\mathbf{W}^{(2)} + \\mathbf{b}^{(2)}.\n\\end{aligned}\n$$\n\n注意在添加隐藏层之后,模型现在需要跟踪和更新额外的参数。\n可我们能从中得到什么好处呢?\n你可能会惊讶地发现:在上面定义的模型里,我们没有好处!\n原因很简单:上面的隐藏单元由输入的仿射函数给出,\n而输出(softmax操作前)只是隐藏单元的仿射函数。\n仿射函数的仿射函数本身就是仿射函数,\n但是我们之前的线性模型已经能够表示任何仿射函数。\n\n我们可以证明这一等价性,即对于任意权重值,\n我们只需合并隐藏层,便可产生具有参数\n$\\mathbf{W} = \\mathbf{W}^{(1)}\\mathbf{W}^{(2)}$\n和$\\mathbf{b} = \\mathbf{b}^{(1)} \\mathbf{W}^{(2)} + \\mathbf{b}^{(2)}$\n的等价单层模型:\n\n$$\n\\mathbf{O} = (\\mathbf{X} \\mathbf{W}^{(1)} + \\mathbf{b}^{(1)})\\mathbf{W}^{(2)} + \\mathbf{b}^{(2)} = \\mathbf{X} \\mathbf{W}^{(1)}\\mathbf{W}^{(2)} + \\mathbf{b}^{(1)} \\mathbf{W}^{(2)} + \\mathbf{b}^{(2)} = \\mathbf{X} \\mathbf{W} + 
\\mathbf{b}.\n$$\n\n为了发挥多层架构的潜力,\n我们还需要一个额外的关键要素:\n在仿射变换之后对每个隐藏单元应用非线性的*激活函数*(activation function)$\\sigma$。\n激活函数的输出(例如,$\\sigma(\\cdot)$)被称为*活性值*(activations)。\n一般来说,有了激活函数,就不可能再将我们的多层感知机退化成线性模型:\n\n$$\n\\begin{aligned}\n \\mathbf{H} & = \\sigma(\\mathbf{X} \\mathbf{W}^{(1)} + \\mathbf{b}^{(1)}), \\\\\n \\mathbf{O} & = \\mathbf{H}\\mathbf{W}^{(2)} + \\mathbf{b}^{(2)}.\\\\\n\\end{aligned}\n$$\n\n由于$\\mathbf{X}$中的每一行对应于小批量中的一个样本,\n出于记号习惯的考量,\n我们定义非线性函数$\\sigma$也以按行的方式作用于其输入,\n即一次计算一个样本。\n我们在 :numref:`subsec_softmax_vectorization`中\n以相同的方式使用了softmax符号来表示按行操作。\n但是在本节中,我们应用于隐藏层的激活函数通常不仅按行操作,也按元素操作。\n这意味着在计算每一层的线性部分之后,我们可以计算每个活性值,\n而不需要查看其他隐藏单元所取的值。对于大多数激活函数都是这样。\n\n为了构建更通用的多层感知机,\n我们可以继续堆叠这样的隐藏层,\n例如$\\mathbf{H}^{(1)} = \\sigma_1(\\mathbf{X} \\mathbf{W}^{(1)} + \\mathbf{b}^{(1)})$和$\\mathbf{H}^{(2)} = \\sigma_2(\\mathbf{H}^{(1)} \\mathbf{W}^{(2)} + \\mathbf{b}^{(2)})$,\n一层叠一层,从而产生更有表达能力的模型。\n\n### 通用近似定理\n\n多层感知机可以通过隐藏神经元,捕捉到输入之间复杂的相互作用,\n这些神经元依赖于每个输入的值。\n我们可以很容易地设计隐藏节点来执行任意计算。\n例如,在一对输入上进行基本逻辑操作,多层感知机是通用近似器。\n即使是网络只有一个隐藏层,给定足够的神经元和正确的权重,\n我们可以对任意函数建模,尽管实际中学习该函数是很困难的。\n你可能认为神经网络有点像C语言。\nC语言和任何其他现代编程语言一样,能够表达任何可计算的程序。\n但实际上,想出一个符合规范的程序才是最困难的部分。\n\n而且,虽然一个单隐层网络能学习任何函数,\n但并不意味着我们应该尝试使用单隐藏层网络来解决所有问题。\n事实上,通过使用更深(而不是更广)的网络,我们可以更容易地逼近许多函数。\n我们将在后面的章节中进行更细致的讨论。\n\n## 激活函数\n:label:`subsec_activation_functions`\n\n*激活函数*(activation function)通过计算加权和并加上偏置来确定神经元是否应该被激活,\n它们将输入信号转换为输出的可微运算。\n大多数激活函数都是非线性的。\n由于激活函数是深度学习的基础,下面(**简要介绍一些常见的激活函数**)。\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport torch\nfrom d2l import torch as d2l", "_____no_output_____" ] ], [ [ "### ReLU函数\n\n最受欢迎的激活函数是*修正线性单元*(Rectified linear unit,*ReLU*),\n因为它实现简单,同时在各种预测任务中表现良好。\n[**ReLU提供了一种非常简单的非线性变换**]。\n给定元素$x$,ReLU函数被定义为该元素与$0$的最大值:\n\n(**$$\\operatorname{ReLU}(x) = \\max(x, 0).$$**)\n\n通俗地说,ReLU函数通过将相应的活性值设为0,仅保留正元素并丢弃所有负元素。\n为了直观感受一下,我们可以画出函数的曲线图。\n正如从图中所看到,激活函数是分段线性的。\n", "_____no_output_____" ] ], [ [ "x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.relu(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))", "_____no_output_____" ] ], [ [ "当输入为负时,ReLU函数的导数为0,而当输入为正时,ReLU函数的导数为1。\n注意,当输入值精确等于0时,ReLU函数不可导。\n在此时,我们默认使用左侧的导数,即当输入为0时导数为0。\n我们可以忽略这种情况,因为输入可能永远都不会是0。\n这里引用一句古老的谚语,“如果微妙的边界条件很重要,我们很可能是在研究数学而非工程”,\n这个观点正好适用于这里。\n下面我们绘制ReLU函数的导数。\n", "_____no_output_____" ] ], [ [ "y.backward(torch.ones_like(x), retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))", "_____no_output_____" ] ], [ [ "使用ReLU的原因是,它求导表现得特别好:要么让参数消失,要么让参数通过。\n这使得优化表现的更好,并且ReLU减轻了困扰以往神经网络的梯度消失问题(稍后将详细介绍)。\n\n注意,ReLU函数有许多变体,包括*参数化ReLU*(Parameterized ReLU,*pReLU*)\n函数 :cite:`He.Zhang.Ren.ea.2015`。\n该变体为ReLU添加了一个线性项,因此即使参数是负的,某些信息仍然可以通过:\n\n$$\\operatorname{pReLU}(x) = \\max(0, x) + \\alpha \\min(0, x).$$\n\n### sigmoid函数\n\n[**对于一个定义域在$\\mathbb{R}$中的输入,\n*sigmoid函数*将输入变换为区间(0, 1)上的输出**]。\n因此,sigmoid通常称为*挤压函数*(squashing function):\n它将范围(-inf, inf)中的任意输入压缩到区间(0, 1)中的某个值:\n\n(**$$\\operatorname{sigmoid}(x) = \\frac{1}{1 + \\exp(-x)}.$$**)\n\n在最早的神经网络中,科学家们感兴趣的是对“激发”或“不激发”的生物神经元进行建模。\n因此,这一领域的先驱可以一直追溯到人工神经元的发明者麦卡洛克和皮茨,他们专注于阈值单元。\n阈值单元在其输入低于某个阈值时取值0,当输入超过阈值时取值1。\n\n当人们的注意力逐渐转移到基于梯度的学习时,\nsigmoid函数是一个自然的选择,因为它是一个平滑的、可微的阈值单元近似。\n当我们想要将输出视作二元分类问题的概率时,\nsigmoid仍然被广泛用作输出单元上的激活函数\n(你可以将sigmoid视为softmax的特例)。\n然而,sigmoid在隐藏层中已经较少使用,\n它在大部分时候被更简单、更容易训练的ReLU所取代。\n在后面关于循环神经网络的章节中,我们将描述利用sigmoid单元来控制时序信息流的架构。\n\n下面,我们绘制sigmoid函数。\n注意,当输入接近0时,sigmoid函数接近线性变换。\n", "_____no_output_____" ] ], [ [ "y = torch.sigmoid(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))", 
"_____no_output_____" ] ], [ [ "sigmoid函数的导数为下面的公式:\n\n$$\\frac{d}{dx} \\operatorname{sigmoid}(x) = \\frac{\\exp(-x)}{(1 + \\exp(-x))^2} = \\operatorname{sigmoid}(x)\\left(1-\\operatorname{sigmoid}(x)\\right).$$\n\nsigmoid函数的导数图像如下所示。\n注意,当输入为0时,sigmoid函数的导数达到最大值0.25;\n而输入在任一方向上越远离0点时,导数越接近0。\n", "_____no_output_____" ] ], [ [ "# 清除以前的梯度\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))", "_____no_output_____" ] ], [ [ "### tanh函数\n\n与sigmoid函数类似,\n[**tanh(双曲正切)函数也能将其输入压缩转换到区间(-1, 1)上**]。\ntanh函数的公式如下:\n\n(**$$\\operatorname{tanh}(x) = \\frac{1 - \\exp(-2x)}{1 + \\exp(-2x)}.$$**)\n\n下面我们绘制tanh函数。\n注意,当输入在0附近时,tanh函数接近线性变换。\n函数的形状类似于sigmoid函数,\n不同的是tanh函数关于坐标系原点中心对称。\n", "_____no_output_____" ] ], [ [ "y = torch.tanh(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))", "_____no_output_____" ] ], [ [ "tanh函数的导数是:\n\n$$\\frac{d}{dx} \\operatorname{tanh}(x) = 1 - \\operatorname{tanh}^2(x).$$\n\ntanh函数的导数图像如下所示。\n当输入接近0时,tanh函数的导数接近最大值1。\n与我们在sigmoid函数图像中看到的类似,\n输入在任一方向上越远离0点,导数越接近0。\n", "_____no_output_____" ] ], [ [ "# 清除以前的梯度\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))", "_____no_output_____" ] ], [ [ "总结一下,我们现在了解了如何结合非线性函数来构建具有更强表达能力的多层神经网络架构。\n顺便说一句,这些知识已经让你掌握了一个类似于1990年左右深度学习从业者的工具。\n在某些方面,你比在20世纪90年代工作的任何人都有优势,\n因为你可以利用功能强大的开源深度学习框架,只需几行代码就可以快速构建模型,\n而以前训练这些网络需要研究人员编写数千行的C或Fortran代码。\n\n## 小结\n\n* 多层感知机在输出层和输入层之间增加一个或多个全连接隐藏层,并通过激活函数转换隐藏层的输出。\n* 常用的激活函数包括ReLU函数、sigmoid函数和tanh函数。\n\n## 练习\n\n1. 计算pReLU激活函数的导数。\n1. 证明一个仅使用ReLU(或pReLU)的多层感知机构造了一个连续的分段线性函数。\n1. 证明$\\operatorname{tanh}(x) + 1 = 2 \\operatorname{sigmoid}(2x)$。\n1. 假设我们有一个非线性单元,将它一次应用于一个小批量的数据。你认为这会导致什么样的问题?\n", "_____no_output_____" ], [ "[Discussions](https://discuss.d2l.ai/t/1796)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb37810e9b639c9ce901ebe96a781cf5afb814c4
4,997
ipynb
Jupyter Notebook
5_Geocoding_Tweets.ipynb
Rienje/Twitter_analysis_Dutch_elections_2021
e74e49fb1db085c6bd07846bdbc4a1d992991d37
[ "MIT" ]
null
null
null
5_Geocoding_Tweets.ipynb
Rienje/Twitter_analysis_Dutch_elections_2021
e74e49fb1db085c6bd07846bdbc4a1d992991d37
[ "MIT" ]
null
null
null
5_Geocoding_Tweets.ipynb
Rienje/Twitter_analysis_Dutch_elections_2021
e74e49fb1db085c6bd07846bdbc4a1d992991d37
[ "MIT" ]
null
null
null
33.763514
413
0.608965
[ [ [ "## Geocoding Tweets\nThis script showcases how we geocoded (i.e. added coordinates to) tweets containing a placename in the Netherlands. For this, we made use of cbsodata and Nominatim from geopy, which uses the OSM database for geocoding strings.", "_____no_output_____" ] ], [ [ "# Import needed libraries\nimport pandas as pd\nimport numpy as np\nimport re\nimport geopy\nfrom geopy.geocoders import Nominatim\nimport cbsodata\n\n# Load cleaned locations from previous script\ndf = pd.read_csv('cleaned_geo_tweets.csv')", "_____no_output_____" ] ], [ [ "#### Final check for geocoding\nIn order to be sure that the geolocator will pick up the names from our tweets, we run the tweets through a loop that compares them with an official list of residences (e.g. villages, cities, hamlets, etc.) from [CBS](https://opendata.cbs.nl/statline/portal.html?_la=nl&_catalog=CBS&tableId=84992NED&_theme=238), so that the geolocator won't crash.", "_____no_output_____" ] ], [ [ "# Retrieve metadata from cbsodata\nmetadata = pd.DataFrame(cbsodata.get_meta('84992NED', 'DataProperties'))\n\n# Save placenames as dataframe\nplaces = pd.DataFrame(cbsodata.get_data('84992NED', select = 'Woonplaatsen'))\n\n# Read the places csv file \n#places = pd.read_csv('Woonplaatsen_in_Nederland.csv',sep = ';')\n\n# Make sure the names are in lower case to match our names \nplaces['Woonplaatsen'] = places['Woonplaatsen'].str.lower()\n\n# Create an empty list for the place names to be added to\nlegit_locs = []\n# Create the loop: keep only tweet locations that match an official residence name\nfor i in df['location']:\n    for j in places['Woonplaatsen']:\n        if i == j:\n            legit_locs.append(i)\n        else:\n            pass\n\n# Create dataframe of location and count\nfnl_df = pd.DataFrame(legit_locs)\n\n# Name the column for clarity\nfnl_df.columns = ['Location']\n\n# Add a count column\nfnl_df['count'] = fnl_df.groupby('Location')['Location'].transform('count')\n\n# Remove the duplicates\nfnl_df.drop_duplicates(subset=['Location'], keep = 'first', inplace=True)", "_____no_output_____" ] ], [ [ "#### Geocoding\nNow, it's time for the actual geocoding. Be aware that this line takes around 10 minutes to locate all the placenames. The code was inspired by [this](https://medium.com/analytics-vidhya/exploring-twitter-data-using-python-part-iii-analyzing-the-data-e883aa340dff) tutorial. Alternatively, the 'tweets_with_location' file is also provided in the next notebook, so the user doesn't have to run the next part.", "_____no_output_____" ] ], [ [ "# Initiate the geolocator\ngeolocator = Nominatim(user_agent='twitter-analysis')\n# note that user_agent is an arbitrary name\n\n# Convert locations to a list\nfnl_locs = list(fnl_df.Location)\n\n# This line takes about 10 minutes to run!\ngeolocated = list(map(lambda x: [x,geolocator.geocode(x)[1] if geolocator.geocode(x) else None],fnl_locs))\n\n# Check the first few results (geolocated is still a plain list at this point)\nprint(geolocated[:5])\n\n# Transform to lat and long\ngeolocated = pd.DataFrame(geolocated)\ngeolocated.columns = ['locat','latlong']\ngeolocated['lat'] = geolocated.latlong.apply(lambda x: x[0])\ngeolocated['lon'] = geolocated.latlong.apply(lambda x: x[1])\ngeolocated.drop('latlong',axis=1, inplace=True)\n\n# Procedure to merge the sentiment and spatial analysis\ntweets_with_location = df.join(geolocated.set_index('locat'), on = 'location')\n\n# Export to csv for the final notebook!\ntweets_with_location.to_csv('tweets_with_location.csv', header=True, index=False)", "_____no_output_____" ] ] ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb378229128234f1ad261125e41818e81a9fde15
22,437
ipynb
Jupyter Notebook
NumPy_Hashtag/Cartilha Numpy.ipynb
thiagoreis96/General_Python_Exercises
ed18d2f2158fd1e33dd0b7c406b7f42ca9b66bc1
[ "MIT" ]
null
null
null
NumPy_Hashtag/Cartilha Numpy.ipynb
thiagoreis96/General_Python_Exercises
ed18d2f2158fd1e33dd0b7c406b7f42ca9b66bc1
[ "MIT" ]
null
null
null
NumPy_Hashtag/Cartilha Numpy.ipynb
thiagoreis96/General_Python_Exercises
ed18d2f2158fd1e33dd0b7c406b7f42ca9b66bc1
[ "MIT" ]
null
null
null
29.291123
6,004
0.592058
[ [ [ "# Numpy:", "_____no_output_____" ], [ "Documentação : https://numpy.org/doc/stable/user/absolute_beginners.html#numpy-the-absolute-basics-for-beginners", "_____no_output_____" ], [ "## Instalação:", "_____no_output_____" ], [ "Insira o código abaixo no Anaconda Prompt:\n<b>pip install numpy</b>\n\nhttps://numpy.org/install/", "_____no_output_____" ], [ "## Importação:\nhttps://numpy.org/doc/stable/user/absolute_beginners.html#how-to-import-numpy", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "## O que é um array?\nTipos de arrays:\nndarrays -> significam arrays com N dimensões<br>\n1-D array-> Possui apenas uma dimensão. Será comumente chamado de <b> vetor ou vector </b> <br>\n2-D array -> Possui 2 dimensões. Será comumente chamado de <b> matriz ou matrix</b> <br>\n3-D ou Mais array -> Possui 3 ou mais dimensões. Será comumente chamado de <b>tensor</b> \n \nhttps://numpy.org/doc/stable/reference/arrays.html#arrays", "_____no_output_____" ], [ "## Criando um Array:", "_____no_output_____" ], [ "### np.array()\nhttps://numpy.org/doc/stable/reference/generated/numpy.array.html?highlight=numpy%20array#numpy-array", "_____no_output_____" ] ], [ [ "a = np.array([[1, 2, 3, 4, 5, 6], [3, 4, 5, 6, 7, 8]])\nprint(a)\nprint(type(a))", "[[1 2 3 4 5 6]\n [3 4 5 6 7 8]]\n<class 'numpy.ndarray'>\n" ] ], [ [ "### np.zeros()\nhttps://numpy.org/doc/stable/reference/generated/numpy.zeros.html", "_____no_output_____" ] ], [ [ "zero_array = np.zeros(shape = (5, 3, 6))\nprint(zero_array)", "[[[0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0.]]\n\n [[0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0.]]\n\n [[0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0.]]\n\n [[0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0.]]\n\n [[0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0.]]]\n" ] ], [ [ "### np.ones()\nhttps://numpy.org/doc/stable/reference/generated/numpy.ones.html", "_____no_output_____" ] ], [ [ "one_array = np.ones(2)\nprint(one_array)", "[1. 1.]\n" ] ], [ [ "### no.empty()\nhttps://numpy.org/doc/stable/reference/generated/numpy.empty.html", "_____no_output_____" ] ], [ [ "vazio = np.empty((3, 4))\nprint(vazio)", "[[1.32802657e-311 3.16202013e-322 0.00000000e+000 0.00000000e+000]\n [0.00000000e+000 2.02374799e-052 1.28970185e+165 1.04784113e+165]\n [5.34848672e-038 1.04047556e-071 1.27913029e+165 2.70237624e-056]]\n" ] ], [ [ "### np.arange()\nhttps://numpy.org/doc/stable/reference/generated/numpy.arange.html", "_____no_output_____" ] ], [ [ "arr = np.arange(50, 201, 30)\nprint(arr)", "[ 50 80 110 140 170 200]\n" ] ], [ [ "### np.linspace()\nhttps://numpy.org/doc/stable/reference/generated/numpy.linspace.html", "_____no_output_____" ] ], [ [ "linear_array = np.linspace(0, 100, num = 40, retstep = True)\nprint(linear_array)", "(array([ 0. , 2.56410256, 5.12820513, 7.69230769,\n 10.25641026, 12.82051282, 15.38461538, 17.94871795,\n 20.51282051, 23.07692308, 25.64102564, 28.20512821,\n 30.76923077, 33.33333333, 35.8974359 , 38.46153846,\n 41.02564103, 43.58974359, 46.15384615, 48.71794872,\n 51.28205128, 53.84615385, 56.41025641, 58.97435897,\n 61.53846154, 64.1025641 , 66.66666667, 69.23076923,\n 71.79487179, 74.35897436, 76.92307692, 79.48717949,\n 82.05128205, 84.61538462, 87.17948718, 89.74358974,\n 92.30769231, 94.87179487, 97.43589744, 100. 
]), 2.5641025641025643)\n" ] ], [ [ "## Descobrindo o tamanho de um array:\nNúmero de dimensões : https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html <br>\nNúmero de items: https://numpy.org/doc/stable/reference/generated/numpy.ndarray.size.html\nFormato : https://numpy.org/doc/stable/reference/generated/numpy.ndarray.shape.html\n", "_____no_output_____" ] ], [ [ "zero_array = np.zeros(shape = (5,3,6))\nzero_array", "_____no_output_____" ], [ "# tamanhos das dimensões\nprint(zero_array.shape)\n\n# quantidade de elementos\nprint(zero_array.size)\n\n# quanditade de dimensões\nprint(zero_array.ndim)", "(5, 3, 6)\n90\n3\n" ] ], [ [ "### Mudando o tamanho de um array:\n\nhttps://numpy.org/doc/stable/reference/generated/numpy.reshape.html", "_____no_output_____" ], [ "### Rankeando um array:", "_____no_output_____" ], [ "https://numpy.org/doc/stable/reference/generated/numpy.sort.html", "_____no_output_____" ], [ "## Transformando um Vetor (1-D) em uma matrix(2-D)\n.newaxis:https://numpy.org/doc/stable/reference/constants.html?#numpy.newaxis <br>\n.expand_dims:https://numpy.org/doc/stable/reference/generated/numpy.expand_dims.html#numpy.expand_dims", "_____no_output_____" ] ], [ [ "a = np.array([1, 2, 3])\nprint(a.ndim)\nprint(a.shape)", "1\n(3,)\n" ], [ "a2_1 = a[np.newaxis,:]\nprint(a2.shape)\nprint(a2.ndim)\nprint(a2_1)", "(1, 3)\n2\n[[1 2 3]]\n" ], [ "a2_2 = a[:,np.newaxis]\nprint(a2.shape)\nprint(a2.ndim)\nprint(a2_2)", "(1, 3)\n2\n[[1]\n [2]\n [3]]\n" ], [ "print(a2_2[2][0])", "3\n" ] ], [ [ "### Concatenando arrays:\nhttps://numpy.org/doc/stable/reference/generated/numpy.concatenate.html", "_____no_output_____" ] ], [ [ "a = np.array([1, 2, 3])\nb = np.array([4, 5, 6])\n\nc = np.concatenate((a,b))\nd = np.concatenate((b,a))\n\nprint(f\"{c}\\n{d}\")", "[1 2 3 4 5 6]\n[4 5 6 1 2 3]\n" ] ], [ [ "### Consultando itens de uma array:\nhttps://numpy.org/doc/stable/user/absolute_beginners.html#indexing-and-slicing", "_____no_output_____" ] ], [ [ "a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])\n\nprint(a)\nprint(\"-=\"*10)\n\nmaior_8 = a[a>8]\nprint(maior_8)", "[[ 1 2 3 4]\n [ 5 6 7 8]\n [ 9 10 11 12]]\n-=-=-=-=-=-=-=-=-=-=\n[ 9 10 11 12]\n" ] ], [ [ "### Operações com Arrays:\nSoma : https://numpy.org/doc/stable/reference/generated/numpy.sum.html#numpy.sum <br>\nValor mínimo : https://numpy.org/doc/stable/reference/generated/numpy.ndarray.min.html<br>\nValor máximo : https://numpy.org/doc/stable/reference/generated/numpy.ndarray.max.html<br>\nMédia : https://numpy.org/doc/stable/reference/generated/numpy.ndarray.mean.html", "_____no_output_____" ] ], [ [ "a = np.array([1, 2, 3])\nprint(a.sum())\nprint(a.max())\nprint(a.min())\nprint(a.mean())", "6\n3\n1\n2.0\n" ] ], [ [ "### Gerando amostras aleatórias:\n", "_____no_output_____" ] ], [ [ "from numpy.random import default_rng", "_____no_output_____" ], [ "rng=default_rng()\naleatorio = rng.integers(20, size = (2, 4))\nprint(aleatorio)", "[[ 5 15 17 11]\n [11 1 0 4]]\n" ] ], [ [ "## Diferença entre Arrays e Listas:", "_____no_output_____" ] ], [ [ "a = np.array([1, 'Thiago', 2, 3, 4, 5, 6, 7, 8])\nprint(a)\nprint(type(a))\n\nprint('-='*25)\n\nlista_a = [1, 'Thiago', 2, 3, 4, 5, 6, 7, 8]\nprint(lista_a)\nprint(type(lista_a))", "['1' 'Thiago' '2' '3' '4' '5' '6' '7' '8']\n<class 'numpy.ndarray'>\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n[1, 'Thiago', 2, 3, 4, 5, 6, 7, 8]\n<class 'list'>\n" ] ], [ [ "## Comparando o processamento:", "_____no_output_____" ] ], [ [ "from time import process_time\n\nlista_a = 
list(rng.integers(10, 100, 10000000))\nlista_b = list(rng.integers(10, 100, 10000000))\n\n# lista_c = lista_a * lista_b\n\n# print(lista_c)", "_____no_output_____" ], [ "lista_c = []\n\nt1 = process_time()\nfor i in range(len(lista_a)):\n lista_c.append(lista_a[i]*lista_b[i])\n \nt2 = process_time()\nprint(t2-t1)", "2.484375\n" ], [ "a = rng.integers(10, 100, 10000000)\nb = rng.integers(10, 100, 10000000)\n\nt1a = process_time()\nc = a*b\nt2a = process_time()\n\nprint(t2a-t1a)", "0.03125\n" ], [ "(t2-t1)/(t2a-t1a)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\ndados_x = rng.integers(20, size = 30)\ndados_y = rng.integers(12, size = 30)\n\nplt.scatter(x = dados_x, y = dados_y)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb37829263ed0f12ddda87ee6a352a6fd09d2b87
301,664
ipynb
Jupyter Notebook
results/bernoulli/plot_data/nonlinear_bernouli_1stOrder.ipynb
blindedjoy/RcTorch
5582cf829d2ee2b10dba80b125e44d47aee27f82
[ "MIT" ]
3
2021-05-28T14:55:18.000Z
2022-01-18T08:38:11.000Z
results/bernoulli/plot_data/nonlinear_bernouli_1stOrder.ipynb
blindedjoy/RcTorch
5582cf829d2ee2b10dba80b125e44d47aee27f82
[ "MIT" ]
1
2021-05-07T13:47:59.000Z
2021-05-07T13:47:59.000Z
results/bernoulli/plot_data/nonlinear_bernouli_1stOrder.ipynb
blindedjoy/RcTorch
5582cf829d2ee2b10dba80b125e44d47aee27f82
[ "MIT" ]
1
2021-10-21T08:13:26.000Z
2021-10-21T08:13:26.000Z
196.65189
44,804
0.887978
[ [ [ "import numpy as np\nfrom numpy import loadtxt\nimport pylab as pl\nfrom IPython import display\nfrom RcTorchPrivate import *\nfrom matplotlib import pyplot as plt\nfrom scipy.integrate import odeint\n%matplotlib inline", "_____no_output_____" ], [ "#this method will ensure that the notebook can use multiprocessing on jupyterhub or any other linux based system.\ntry:\n mp.set_start_method(\"spawn\")\nexcept:\n pass\ntorch.set_default_tensor_type(torch.FloatTensor)\n%matplotlib inline", "_____no_output_____" ], [ "lineW = 3\nlineBoxW=2\n\nfont = {'family' : 'normal',\n 'weight' : 'normal',#'bold',\n 'size' : 22}\n\n#plt.rc('font', **font)\n#plt.rcParams['text.usetex'] = True", "_____no_output_____" ], [ "#helper functions\ndef pltTr(x,y,clr='cyan', mark='o'):\n plt.plot(x.detach().numpy(), y.detach().numpy(),\n marker=mark, color=clr, markersize=8, label='truth', alpha = 0.9)\n\ndef pltPred(x,y,clr='red', linS='-'):\n plt.plot(x.detach().numpy(), y.detach().numpy(),\n color=clr, marker='.', linewidth=2, label='RC')\nfrom decimal import Decimal\n\ndef convert2pd(tensor1, tensor2):\n pd_ = pd.DataFrame(np.hstack((tensor1.detach().cpu().numpy(), tensor2.detach().cpu().numpy())))\n pd_.columns = [\"t\", \"y\"]\n return pd_\n'%.2E' % Decimal('40800000000.00000000000000')\n\ndef param(t,N,y0):\n f = 1 - torch.exp(-t)\n f_dot = 1 - f\n #f = t\n #f_dot=1\n return y0 + f*N\n\n#define a reparameterization function\ndef reparam(t, y0 = None, N = None, dN_dt = None, t_only = False):\n f = 1 - torch.exp(-t)\n f_dot = 1 - f\n \n if t_only:\n return f, f_dot\n\n y = y0 + N*f \n if dN_dt:\n ydot = dN_dt * f + f_dot * N\n else:\n ydot = None\n return y, ydot\n\ndef reparam(t, order = 1):\n exp_t = torch.exp(-t)\n \n derivatives_of_g = []\n \n g = 1 - exp_t\n \n #0th derivative\n derivatives_of_g.append(g)\n \n g_dot = 1 - g\n \n #first derivative\n #derivatives_of_g.append(g_dot)\n \n# for i in range(order):\n# if i %2 == 0:\n# #print(\"even\")\n# derivatives_of_g.append(g_dot)\n# else:\n# #print(\"odd\")\n# derivatives_of_g.append(-g_dot)\n# return derivatives_of_g\n return g, g_dot\n\ndef force(X, A = 0):\n return torch.zeros_like(X)", "_____no_output_____" ], [ "q = 0.5\ndef custom_loss(X , y, ydot, out_weights, f = force, \n reg = True, ode_coefs = None, q = q, init_conds = None, enet_strength = None, enet_alpha = None):\n \n #with paramization\n L = ydot + ode_coefs[0]* y - f(X) + q*y**2\n \"\"\"\n if reg:\n weight_size_sq = torch.mean(torch.square(out_weights))\n weight_size_L1 = torch.mean(torch.abs(out_weights))\n L_reg = 0.1*(weight_size_sq + weight_size_L1)/2\n L = L + L_reg \n \"\"\"\n L = torch.mean(torch.square(L))\n return L", "_____no_output_____" ], [ "def plot_result(esn, xtrain, lams = [1], y0s = [1], plot_gt = True, loglog = False,\n ode_coefs = None, force_k = 0, fileName=None, q = q,backprop_f = None, time_result = True,\n solve = None):\n \n RC = esn\n fig = plt.figure(figsize = (8, 6)) \n t_pow = 0\n \n gts, ys, ydots, ws, bs, Ls = [], [], [], [], [], []\n \n for i, lam in enumerate(lams):\n y0 = y0s[i]\n ode_coefs[0] = lam\n #fit the echo state network\n \n train_args = {\"burn_in\" : int(BURN_IN), \n \"ODE_order\" : 1,\n #\"track_in_grad\" : self.track_in_grad,\n \"force\" : force,\n \"reparam_f\" : reparam,\n #\"nl_f\" : self.nl_f,\n \"init_conditions\" : [float(y0)],\n \"ode_coefs\" : ode_coefs,\n \"y\" : None,\n \"X\" : xtrain.view(-1,1),\n \"q\" : q,\n \"nl\" : True,\n \n\n }\n \n if not i:\n y, ydot = esn.fit(**train_args, SOLVE = solve)\n \n ode_coefs_copy = 
ode_coefs.copy()\n states_dict = {\"s\" : RC.states.clone(),\n \"s1\" : RC.states_dot.clone(), \n \"G\" : RC.G,\n \"ex\" : RC.extended_states.clone(),\n \"sb1\": RC.sb1,\n \"sb\" : RC.sb\n }\n if esn.ODE_order == 2:\n states_dict[\"s2\"] = RC.states_dot2.clone()\n states_dict[\"sb2\"] = RC.sb2.clone()\n t2 = time.perf_counter()\n else:\n y, ydot = RC.fit(preloaded_states_dict = states_dict, SOLVE = solve,\n **train_args)\n \n if backprop_f:\n weight_dict = backprop_f(esn)\n y, ydot = esn.fit(**train_args, out_weights = weight_dict, SOLVE = False)\n \n ode_coefs_copy = ode_coefs.copy()\n if ode_coefs[0] == \"t**2\":\n sp = esn.X**2\n t_pow = 2\n ode_coefs_copy[0] = sp\n\n\n def ODE_numSolver(y,t, q = q):\n k = 1\n# dydt = -k * y *t**t_pow + force_k*np.sin(t)\n dydt = -k * y -q*y**2\n return dydt\n y_truth = odeint(ODE_numSolver,y0,np.array(esn.X.cpu().view(-1,)))\n y_truth = torch.tensor(y_truth)\n# y_exac = y0*torch.exp(-lam*(esn.X))\n \n if y0==1:\n extraWidth = 2; color = 'k'\n else: extraWidth=0; color = 'b'\n #line to ensure that cuda tensors can move to cpu for plotti\n X = esn.X.cpu().detach()\n y = y.cpu().detach()\n y_truth = y_truth.cpu().detach()\n if not i:\n plt.plot(X, y,color, linewidth=lineW+extraWidth, label = \"pred\" )\n plt.plot(X, y_truth,'--r', linewidth=lineW, alpha=0.85, label = \"gt\")\n else:\n plt.plot(X, y,color, linewidth=lineW+extraWidth)\n plt.plot(X, y_truth,'--r', linewidth=lineW, alpha=0.85)\n \n ## Formating Figure\n # Changing spine style\n ax = plt.gca()\n for ps in ['top','bottom','left','right']:\n ax.spines[ps].set_linewidth(lineBoxW)\n\n plt.xlabel(r'$t$')\n plt.ylabel(r'$y(t)$')\n plt.legend()\n \n gts.append(y_truth.cpu())\n ys.append(y.cpu())\n ydots.append(ydot.cpu())\n if backprop_f:\n Ls.append(weight_dict[\"loss\"])\n #Ls.append(esn.backprop_args)\n bs.append(esn.LinOut.bias.data.cpu())\n ws.append(esn.LinOut.weight.data.cpu())\n if time_result:\n return t2, ys, ydots, gts, ws, bs, Ls\n else: \n return ys, ydots, gts, ws, bs, Ls\n \n# plt.savefig(fileName+\"Trajectories\",format='png')#, dpi=600,transparent=True)\n# plt.savefig(fileName+'Trajectories.eps',format='eps')\n\n# return residuals\ndef optimize_last_layer(esn, \n SAVE_AFTER_EPOCHS = 1,\n epochs = 30000,\n custom_loss = custom_loss,\n loss_threshold = 10 ** -8,\n EPOCHS_TO_TERMINATION = None,\n f = force,\n learning_rate = 0.01,\n plott = True,\n spikethreshold = 0.25):\n #define new_x\n new_X = esn.extended_states.detach()\n\n #force detach states_dot\n esn.states_dot = esn.states_dot.detach().requires_grad_(False)\n\n #define criterion\n criterion = torch.nn.MSELoss()\n \n try:\n assert esn.LinOut.weight.requires_grad and esn.LinOut.bias.requires_grad\n except:\n esn.LinOut.weight.requires_grad_(True)\n esn.LinOut.bias.requires_grad_(True)\n #define previous_loss (could be used to do a convergence stop)\n previous_loss = 0\n\n #define best score so that we can save the best weights\n best_score = 0\n\n #define the optimizer\n optimizer = optim.Adam(esn.parameters(), lr = learning_rate)\n\n #define the loss history\n loss_history = []\n \n if plott:\n #use pl for live plotting\n fig, ax = pl.subplots(1,3, figsize = (16,4))\n \n t = esn.X#.view(*N.shape).detach()\n g, g_dot = esn.G\n y0 = esn.init_conds[0]\n \n #optimizer = torch.optim.SGD(model.parameters(), lr=100)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)\n lrs = []\n \n floss_last = 0\n\n #begin optimization loop\n for e in range(epochs):\n\n optimizer.zero_grad()\n \n N = esn.forward( 
esn.extended_states )\n N_dot = esn.calc_Ndot(esn.states_dot)\n\n y = y0 + g *N \n \n ydot = g_dot * N + g * N_dot\n\n assert N.shape == N_dot.shape, f'{N.shape} != {N_dot.shape}'\n \n assert esn.LinOut.weight.requires_grad and esn.LinOut.bias.requires_grad\n \n assert False, f'{esn.LinOut.weight}'\n\n total_ws = esn.LinOut.weight.shape[0] + 1\n weight_size_sq = torch.mean(torch.square(esn.LinOut.weight))\n \n loss = custom_loss(esn.X, y, ydot, esn.LinOut.weight, reg = False, ode_coefs = esn.ode_coefs)\n loss.backward()\n optimizer.step()\n floss = float(loss)\n loss_history.append(floss)\n \n if not e and not best_score:\n best_bias, best_weight = esn.LinOut.bias.detach(), esn.LinOut.weight.detach()\n\n if e > SAVE_AFTER_EPOCHS:\n if not best_score:\n \n if floss <= min(loss_history):\n best_pred = y.clone()\n best_pred = y.clone()\n best_ydot = ydot.clone()\n best_bias, best_weight = esn.LinOut.bias.detach(), esn.LinOut.weight.detach()\n best_score = float(loss)\n else:\n if floss < best_score:\n best_pred = y.clone()\n best_ydot = ydot.clone()\n best_bias, best_weight = esn.LinOut.bias.detach(), esn.LinOut.weight.detach()\n best_score = float(loss)\n \n if not EPOCHS_TO_TERMINATION:\n if float(loss) < loss_threshold:\n EPOCHS_TO_TERMINATION = e + 100\n else:\n if e >= EPOCHS_TO_TERMINATION:\n backprop_args = {\"loss_history\" : loss_history, \n \"lr\" : learning_rate,\n \"epochs\" : epochs\n }\n return {\"weights\": best_weight, \n \"bias\" : best_bias, \n \"loss\" : backprop_args,\n \"ydot\" : best_ydot, \n \"y\" : best_pred}\n \n if e > 1:\n if float(np.log(floss_last) - np.log(floss)) > spikethreshold:\n lrs.append(optimizer.param_groups[0][\"lr\"])\n scheduler.step()\n for param_group in optimizer.param_groups:\n print('lr', param_group['lr'])\n floss_last = floss\n if plott:\n\n if e % 1500 == 0:\n ax[0].clear()\n logloss_str = 'Log(L) ' + '%.2E' % Decimal((loss).item())\n delta_loss = ' delta Log(L) ' + '%.2E' % Decimal((loss-previous_loss).item())\n\n print(logloss_str + \", \" + delta_loss)\n ax[0].plot(N.detach().cpu(), label = \"exact\")\n ax[0].set_title(f\"Epoch {e}\" + \", \" + logloss_str)\n ax[0].set_xlabel(\"epochs\")\n\n ax[1].set_title(delta_loss)\n ax[1].plot(N_dot.detach().cpu())\n #ax[0].plot(y_dot.detach(), label = \"dy_dx\")\n ax[2].clear()\n weight_size = str(weight_size_sq.detach().item())\n ax[2].set_title(\"loss history \\n and \"+ weight_size)\n\n ax[2].loglog(loss_history)\n\n [ax[i].legend() for i in range(3)]\n previous_loss = loss.item()\n\n #clear the plot outputt and then re-plot\n display.clear_output(wait=True) \n display.display(pl.gcf())\n backprop_args = {\"loss_history\" : loss_history, \n \"lr\" : learning_rate,\n \"epochs\" : epochs\n }\n return {\"weights\": best_weight, \n \"bias\" : best_bias, \n \"loss\" : backprop_args,\n \"ydot\" : best_ydot, \n \"y\" : best_pred}", "_____no_output_____" ], [ "#optimized_hyper_params", "_____no_output_____" ], [ "x0,xf, nsteps = 0, 5, 1000 #int(2000 * ratio_up)\nxtrain = torch.linspace(x0, xf, steps = nsteps, requires_grad=False)", "_____no_output_____" ], [ "BURN_IN = 500\n\ny0 = 1 ; lam = 1\n\n#the length of xtrain won't matter. 
Only dt , x0, and xf matter.\nxtrain = torch.linspace(x0, xf, steps = nsteps, requires_grad=False).view(-1,1)\nxtrain.shape", "_____no_output_____" ], [ "#q = 0.7\n\nhybrid_hps_q07 = {'dt': 0.01,\n 'n_nodes': 500,\n 'connectivity': 0.005200326335063122,\n 'spectral_radius': 4.063828945159912,\n 'regularization': 0.16819202592057847,\n 'leaking_rate': 0.07071314752101898,\n 'bias': 0.6888809204101562}\n\n\n#q = 0.5\n########################################################################################\n\nhybrid_hps_q05 = {'dt': 0.007943282347242814,\n 'n_nodes': 500,\n 'connectivity': 0.0003179179463749722,\n 'spectral_radius': 7.975825786590576,\n 'regularization': 0.3332787303378571,\n 'leaking_rate': 0.07119506597518921,\n 'bias': -0.9424528479576111}\n\n########################################################################################\n\n\n#q = 0.3\n########################################################################################\nexact_hps_q03 = {'dt': 0.007943282347242814,\n 'n_nodes': 500,\n 'connectivity': 0.0020952467703604792,\n 'spectral_radius': 0.37082985043525696,\n 'regularization': 0.361264334627276,\n 'leaking_rate': 0.012962563894689083,\n 'bias': 0.15055322647094727}\n\nanother_exact_03_run = {'dt': 0.007943282347242814,\n 'n_nodes': 500,\n 'connectivity': 0.00010646483429429022,\n 'spectral_radius': 9.755386352539062,\n 'regularization': 0.001061326151397624,\n 'leaking_rate': 0.015667859464883804,\n 'bias': -0.6486743688583374}\n\n# 3000 epochs\nhybrid_03_hps = {'dt': 0.007943282347242814,\n 'n_nodes': 500,\n 'connectivity': 0.000876183849077606,\n 'spectral_radius': 7.2928466796875,\n 'regularization': 0.6050492589156197,\n 'leaking_rate': 0.014219114556908607,\n 'bias': 0.18588018417358398}\n\n########################################################################################\n\n#q = 0.1\n########################################################################################\n#y0s = array([-1. 
, -0.25, 0.5 , 1.25])\nhybrid_hyper_params = {'dt': 0.007943282347242814,\n 'n_nodes': 500,\n 'connectivity': 0.0001340433236446365,\n 'spectral_radius': 7.1109442710876465,\n 'regularization': 0.0040541553015366605,\n 'leaking_rate': 0.022500092163681984,\n 'bias': 0.7761751413345337}\n\nexact_hyper_params = {'dt': 0.007943282347242814,\n 'n_nodes': 500,\n 'connectivity': 0.00457819326682001,\n 'spectral_radius': 4.214494228363037,\n 'regularization': 672.3718753390342,\n 'leaking_rate': 0.11203678697347641,\n 'bias': 0.7799162864685059}\n\n########################################################################################\n\n\n#esn.fit", "_____no_output_____" ], [ "dRay=0.75\nnp.arange(-1., 1 + dRay, dRay)", "_____no_output_____" ], [ "y0s = np.arange(-1., 1 + dRay, dRay)\n", "_____no_output_____" ] ], [ [ "dt -2.1\nn_nodes 500\nconnectivity -3.8727548122406006\nspectral_radius 7.1109442710876465\nregularization -2.392099618911743\nleaking_rate 0.022500092163681984\nbias 0.7761751413345337", "_____no_output_____" ] ], [ [ "log_vars = ['connectivity', 'llambda', 'llambda2', 'noise', 'regularization', 'dt']\n\nhps = {'dt': 0.01,\n 'n_nodes': 500,\n 'connectivity': 0.0008771738385033052,\n 'spectral_radius': 3.8187756538391113,\n 'regularization': 2.6243606290132924,\n 'leaking_rate': 0.05788800120353699,\n 'bias': -0.4182356595993042}\n\nfor key, val in hps.items():\n if key in log_vars:\n print(key, np.log10(val))\n else:\n print(key, val)", "dt -2.0\nn_nodes 500\nconnectivity -3.0569143295288086\nspectral_radius 3.8187756538391113\nregularization 0.4190235137939453\nleaking_rate 0.05788800120353699\nbias -0.4182356595993042\n" ], [ "#declare the bounds dict. See above for which variables are optimized in linear vs logarithmic space.\nbounds_dict = {\"connectivity\" : (-4, -0.1), \n \"spectral_radius\" : (1.5, 8.5),\n \"n_nodes\" : 500,\n \"regularization\" : (-2, 2),\n \"leaking_rate\" : (0, 0.1),\n #\"input_scaling\" : (0, 1),\n #\"feedback_scaling\" : (0, 1),\n \"dt\" : -2,\n \"bias\": (-1,1)\n }\n\n#declare the esn_cv optimizer: this class will run bayesian optimization to optimize the bounds dict.\nesn_cv = EchoStateNetworkCV(bounds = bounds_dict,\n interactive = True, \n batch_size = 1, cv_samples = 2, initial_samples = 50, #200\n subsequence_length = int(xtrain.shape[0] * 0.8),\n random_seed = 209, success_tolerance = 10,\n windowsOS =False, validate_fraction = 0.3, \n ODE_order = 1, length_min = 2 **(-7),\n esn_burn_in = BURN_IN, log_score = True\n )\n#optimize:\nopt = False\nif opt:\n \n opt_hps = esn_cv.optimize(y = None, x = xtrain,#.cuda(),\n reparam_f = reparam, \n ODE_criterion = custom_loss,\n init_conditions = [(y0s[0], y0s[-1])], \n force = force,\n rounds = 5,\n ode_coefs = [1, 1],\n backprop_f = optimize_last_layer, \n solve = False,\n q = q,\n n_outputs = 1,\n eq_system = False,\n nonlinear_ode = True) #\n#", "FEEDBACK: None , device: None\ncpu\n" ], [ "new_prop_hps = {'dt': 0.01,\n 'n_nodes': 500,\n 'connectivity': 0.001237975145359088,\n 'spectral_radius': 5.298933029174805,\n 'regularization': 18.616127927682236,\n 'leaking_rate': 0.0048981658183038235,\n 'bias': -0.40049731731414795}", "_____no_output_____" ], [ "#opt_hps", "_____no_output_____" ], [ "#assert False", "_____no_output_____" ], [ "esn = EchoStateNetwork(**hybrid_hps_q05 , \n random_state = 209, \n id_ = 10,\n dtype = torch.float32)", "\u001b[91mLoaded Reservoir is Nilpotent (max_eigenvalue =0.0), connectivity =0.00031792.. 
.regenerating\u001b[0m\n\u001b[91m1\u001b[0m \u001b[91m2\u001b[0m " ], [ "sns.heatmap(esn.LinOut.weight[:,:5].detach());", "_____no_output_____" ], [ "#1. get the linear trajectories\n\n#2. do a pure backprop training\nrez = plot_result(esn, xtrain.cpu(), \n lams = torch.ones_like(torch.tensor(y0s)),#np.array([1] * len(y0s)),#[1, 1, 1, 2]), \n y0s = y0s, #[1, 2, 3, 1],\n plot_gt = True, \n ode_coefs = [1,1],\n q = q, \n backprop_f = None,#optimize_last_layer,\n solve = True)\n", "_____no_output_____" ], [ " esn.init_conds\nRC =esn", "_____no_output_____" ], [ "RC.DH1[0:10,:]", "_____no_output_____" ], [ "sns.heatmap(RC.DH2); plt.show();\nsns.heatmap(matmul(-esn.DH2, esn.D_A).view(-1,1))", "_____no_output_____" ], [ "esn.init_conds", "_____no_output_____" ], [ "sns.heatmap(esn.LinOut.weight[0].detach().view(-1,1))", "_____no_output_____" ], [ "#t2_, ys_, gts_, ws_, bs_, l_trajs = rez\n#linear_trajs = \n_, l_trajs, l_trajs_dot, _, _, _, _ = plot_result(esn, xtrain.cpu(), \n lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), \n y0s = y0s, #[1, 2, 3, 1],\n plot_gt = True, \n ode_coefs = [1,1],\n q = q, \n backprop_f = None,#optimize_last_layer,\n solve = True)", "_____no_output_____" ], [ "esn.ode_coefs", "_____no_output_____" ], [ "esn.DH2.shape", "_____no_output_____" ], [ "sns.heatmap(esn.DH);\ntorch.mean(esn.D_A)", "_____no_output_____" ], [ "sns.heatmap(esn.DH2);\nplt.show();\ntorch.mean(esn.D_A)\nsns.heatmap(esn.LinOut.weight.detach())", "_____no_output_____" ], [ "sns.heatmap(esn.D_A)", "_____no_output_____" ], [ "assert False", "_____no_output_____" ], [ "t2, ys, ydots, gts, ws, bs, Ls = rez\nn = 3\nplt.loglog(Ls[n][\"loss_history\"], label = \"prop_only\")\n#plt.loglog(h[\"loss\"][n][\"loss_history\"], label = \"hybrid\")\nplt.legend();", "_____no_output_____" ], [ "assert False\nimport pickle\nfilename = 'bernoulli_q05_hybrid'\nwith open(filename + '_plot_data_.pickle', 'rb') as handle:\n h = pickle.load(handle)\n#filename = 'bernoulli_q05_backprop' \nwith open(filename + '_plot_data_.pickle', 'rb') as handle:\n b = pickle.load(handle)", "_____no_output_____" ], [ "with open(filename + '_plot_data_.pickle', 'rb') as handle:\n b = pickle.load(handle)", "_____no_output_____" ], [ "n = 3\nplt.loglog(b['loss'][n][\"loss_history\"], color = \"blue\", label = \"backprop_only\")\nplt.loglog(h['loss'][n][\"loss_history\"], color = \"red\", label = \"hybrid\")\nplt.legend()\n# for i, key in enumerate(b['loss']):\n# plt.loglog(key[\"loss_history\"], color = \"blue\")\n# for i, key in enumerate(a['loss']):\n# plt.loglog(key[\"loss_history\"], color = \"red\")", "_____no_output_____" ], [ "assert False", "_____no_output_____" ], [ "ls", "_____no_output_____" ], [ "import pickle\nfilename = 'bernoulli_q05_linear'\n#t2, ys, ydots, gts, ws, bs, Ls = rez\nq05_data = {\n# \"time\": esn.X, \n# \"ys\" : ys,\n# \"ydots\" : ydots,\n# \"gts\" : gts,\n# \"q\": 0.5,\n# \"loss\": Ls,\n \"linear_trajectories\" : l_trajs,\n \"linear_trajectories_dot\" : l_trajs_dot\n }\n #\"bprop_only_loss\" : Ls_bprop}\nwith open(filename + '_plot_data.pickle', 'wb') as handle:\n pickle.dump(q05_data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \nwith open(filename + '_plot_data.pickle', 'rb') as handle:\n b = pickle.load(handle)", "_____no_output_____" ], [ "b.keys()", "_____no_output_____" ], [ "for i in b['linear_trajectories']:\n plt.plot(i)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nwith open(filename + '_plot_data.pickle', 'rb') as handle:\n b = pickle.load(handle)\nb.keys()", "_____no_output_____" ], [ 
"plt.plot(b[\"ydots\"][0])", "_____no_output_____" ], [ "import pickle\n\nq05 = {\"time\": esn.X, \n \"hyper_params\" : hybrid_hps_q05,\n \"out_weights\" : {\"weights\": ws, \"bias\": bs}, \n \"burn_in\" : BURN_IN, \n \"epochs\" : 30000,\n \"learning_rate\": 0.0001,\n \"y0s\" : y0s,\n \"q\" : 0.5}\n\nwith open(filename + '_reproduce.pickle', 'wb') as handle:\n pickle.dump(q05, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open(filename + '_reproduce.pickle', 'rb') as handle:\n b = pickle.load(handle)", "_____no_output_____" ], [ "for param in esn.parameters():\n #print(param)\n if param.requires_grad:\n print(param)", "_____no_output_____" ], [ "plot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), \n y0s = y0s, #[1, 2, 3, 1],\n lam_title = 1, y0_title = \"[-5, 5]\", \n plot_gt = True, ode_coefs = [1,1], \n force_k = 0, fileName='population', backprop_f = optimize_last_layer,\n q = a)\n", "_____no_output_____" ], [ "opt_hps", "_____no_output_____" ], [ "plt.plot(esn.states[:,7])", "_____no_output_____" ], [ "correction = (esn.D_A.T * esn.gH.T @ esn.gH)", "_____no_output_____" ], [ "esn.DH.shape", "_____no_output_____" ], [ "(esn.DH1 + correction).shape", "_____no_output_____" ], [ "%%time\nesn = EchoStateNetwork(**hybrid_03_hps, \n random_state = 109, \n feedback = False, \n id_ = 10,\n backprop = False,\n dtype = torch.float32)\n# y0s = np.linspace(-2, 2, 10) \ndRay=0.75\ny0s = np.arange(-1., 1 + dRay, dRay)\n\n\n\nplot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), \n y0s = y0s, #[1, 2, 3, 1],\n lam_title = 1, y0_title = \"[-5, 5]\", plot_gt = True, ode_coefs = [1,1], \n force_k = 0, fileName='population',\n backprop_f = optimize_last_layer, q = 0.1)\n", "_____no_output_____" ], [ "%%time\nesn = EchoStateNetwork(**hybrid_hyper_params, \n random_state = 109, \n feedback = False, \n id_ = 10,\n backprop = False,\n dtype = torch.float32)\n# y0s = np.linspace(-2, 2, 10) \ndRay=0.75\ny0s = np.arange(-1., 1 + dRay, dRay)\n#A * torch.sin(X)\n\nplot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), \n y0s = y0s, #[1, 2, 3, 1],\n lam_title = 1, y0_title = \"[-5, 5]\", plot_gt = True, ode_coefs = [1,1], \n force_k = 0, fileName='population',\n backprop_f = None)#optimize_last_layer)\n", "_____no_output_____" ], [ "%%time\nesn = EchoStateNetwork(**exact_hyper_params, \n random_state = 109, \n feedback = False, \n id_ = 10,\n backprop = False,\n dtype = torch.float32)\n# y0s = np.linspace(-2, 2, 10) \ndRay=0.75\ny0s = np.arange(-1., 1 + dRay, dRay)\n\n\n\nplot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), \n y0s = y0s, #[1, 2, 3, 1],\n lam_title = 1, y0_title = \"[-5, 5]\", plot_gt = True, ode_coefs = [1,1], \n force_k = 0, fileName='population',\n backprop_f = optimize_last_layer, q = 0.1)\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3782d8ca59bbd07668f54eff8c338f886d417c
9,310
ipynb
Jupyter Notebook
Python4Scientitsts/1 mon.pdf/05. Booleans, Tuples, and, Dictionaries.ipynb
kbroaders/F20-Chem-291
3297e1b37c52e1bff246b654eac97d8a064f7d7a
[ "CC-BY-4.0" ]
null
null
null
Python4Scientitsts/1 mon.pdf/05. Booleans, Tuples, and, Dictionaries.ipynb
kbroaders/F20-Chem-291
3297e1b37c52e1bff246b654eac97d8a064f7d7a
[ "CC-BY-4.0" ]
null
null
null
Python4Scientitsts/1 mon.pdf/05. Booleans, Tuples, and, Dictionaries.ipynb
kbroaders/F20-Chem-291
3297e1b37c52e1bff246b654eac97d8a064f7d7a
[ "CC-BY-4.0" ]
null
null
null
17.051282
293
0.486359
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb37838abb9e60774736be44ae08adb930997d8d
47,212
ipynb
Jupyter Notebook
decision_tree_classification.ipynb
sonyParas1187/Classification-for-Social-ads-
f1945b14029ff2d4ab8cb48ca56a6ef6c72308eb
[ "Unlicense" ]
null
null
null
decision_tree_classification.ipynb
sonyParas1187/Classification-for-Social-ads-
f1945b14029ff2d4ab8cb48ca56a6ef6c72308eb
[ "Unlicense" ]
null
null
null
decision_tree_classification.ipynb
sonyParas1187/Classification-for-Social-ads-
f1945b14029ff2d4ab8cb48ca56a6ef6c72308eb
[ "Unlicense" ]
null
null
null
191.918699
23,128
0.898437
[ [ [ "# Decision Tree Classification", "_____no_output_____" ] ], [ [ "# Importing the libraries\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n", "_____no_output_____" ], [ "# Importing the dataset\n\ndataset = pd.read_csv('Social_Network_Ads.csv')\nX = dataset.iloc[:, [2, 3]].values\ny = dataset.iloc[:, 4].values", "_____no_output_____" ], [ "# Splitting the dataset into the Training set and Test set\n\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)", "C:\\Users\\armando_galeana\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\sklearn\\cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n \"This module will be removed in 0.20.\", DeprecationWarning)\n" ], [ "# Feature Scaling\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)", "C:\\Users\\armando_galeana\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\sklearn\\utils\\validation.py:475: DataConversionWarning: Data with input dtype int64 was converted to float64 by StandardScaler.\n warnings.warn(msg, DataConversionWarning)\n" ], [ "# Fitting Decision Tree Classification to the Training set\n\nfrom sklearn.tree import DecisionTreeClassifier\nclassifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)\nclassifier.fit(X_train, y_train)", "_____no_output_____" ], [ "# Predicting the Test set results\n\ny_pred = classifier.predict(X_test)", "_____no_output_____" ], [ "# Making the Confusion Matrix\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)", "_____no_output_____" ], [ "# Visualising the Training set results\n\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_train, y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('Decision Tree Classification (Training set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "# Visualising the Test set results\n\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_test, y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = 
j)\nplt.title('Decision Tree Classification (Test set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb378897c3b8d5829efcdc175051fb3600bc35bb
95,289
ipynb
Jupyter Notebook
examples/tensornet_basic.ipynb
shan18/TensorNet
c79a0c64152dbeb3499d204994772858326f668c
[ "MIT" ]
6
2020-06-04T16:01:38.000Z
2021-11-28T17:47:13.000Z
examples/tensornet_basic.ipynb
shan18/TensorNet
c79a0c64152dbeb3499d204994772858326f668c
[ "MIT" ]
22
2020-03-20T22:00:32.000Z
2021-02-08T19:32:32.000Z
examples/tensornet_basic.ipynb
shan18/TensorNet
c79a0c64152dbeb3499d204994772858326f668c
[ "MIT" ]
5
2020-03-24T11:29:22.000Z
2020-11-01T11:45:20.000Z
79.739749
24,838
0.696019
[ [ [ "# Using TensorNet (Basic)\n\nThis notebook will demonstrate some of the core functionalities of TensorNet:\n\n- Creating and setting up a dataset\n- Augmenting the dataset\n- Creating and configuring a model and viewing its summary\n- Defining an optimizer and a criterion\n- Setting up callbacks\n- Training and validating the model\n- Displaying plots for viewing the change in accuracy during training", "_____no_output_____" ], [ "# Installing Packages", "_____no_output_____" ] ], [ [ "!pip install --upgrade --no-cache-dir torch-tensornet", "_____no_output_____" ] ], [ [ "# Imports\nImporting necessary packages and modules", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\nfrom tensornet.data import CIFAR10\nfrom tensornet.models import mobilenet_v2\nfrom tensornet.models.loss import cross_entropy_loss\nfrom tensornet.models.optimizer import sgd\nfrom tensornet.utils import initialize_cuda, plot_metric\nfrom tensornet.engine.ops import ModelCheckpoint\nfrom tensornet.engine.ops.lr_scheduler import reduce_lr_on_plateau", "_____no_output_____" ] ], [ [ "## Set Seed and Get GPU Availability", "_____no_output_____" ] ], [ [ "# Initialize CUDA and set random seed\ncuda, device = initialize_cuda(1) # random seed is set to 1", "GPU Available? True\n" ] ], [ [ "## Setup Dataset\n\nDownloading and initializing `CIFAR-10` dataset and applying the following augmentations:\n- Horizontal Flip\n- Random Rotation\n- Cutout Augmentation", "_____no_output_____" ] ], [ [ "dataset = CIFAR10(\n train_batch_size=64,\n val_batch_size=64,\n cuda=cuda,\n num_workers=4,\n horizontal_flip_prob=0.2,\n rotate_degree=20,\n cutout_prob=0.3,\n cutout_dim=(8, 8),\n)", "Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to /content/tensornet/data/datasets/.cache/cifar-10-python.tar.gz\n" ] ], [ [ "## Data Visualization\nLet's see how our data looks like. This information will help us decide the transformations that can be used on the dataset.", "_____no_output_____" ] ], [ [ "# Fetch data\nclasses = dataset.classes\nsample_data, sample_targets = dataset.data()\n\n# Set number of images to display\nnum_images = 4\n\n# Display images with labels\nfig, axs = plt.subplots(1, 4, figsize=(8, 8))\nfig.tight_layout()\n\nfor i in range(num_images):\n axs[i].axis('off')\n axs[i].set_title(f'Label: {classes[sample_targets[i]]}')\n axs[i].imshow(sample_data[i])", "\n" ] ], [ [ "## Training and Validation Dataloaders\nThis is the final step in data preparation. 
It sets the dataloader arguments and then creates the dataloader", "_____no_output_____" ] ], [ [ "# Create train data loader\ntrain_loader = dataset.loader(train=True)\n\n# Create val data loader\nval_loader = dataset.loader(train=False)", "_____no_output_____" ] ], [ [ "# Model Architecture and Summary\n\nWe'll download a pretrained ResNet18 model and train it on our dataset using fine-tuning.", "_____no_output_____" ] ], [ [ "model = mobilenet_v2(pretrained=True).to(device) # Create model\nmodel.summary(dataset.image_size) # Display model summary", "Downloading: \"https://download.pytorch.org/models/mobilenet_v2-b0353104.pth\" to /root/.cache/torch/hub/checkpoints/mobilenet_v2-b0353104.pth\n" ] ], [ [ "# Model Training and Validation\n\n- Loss Function: `Cross Entropy Loss`\n- Optimizer: `SGD`\n- Callbacks: `Model Checkpoint` and `Reduce LR on Plateau`", "_____no_output_____" ] ], [ [ "criterion = cross_entropy_loss() # Create loss function\noptimizer = sgd(model) # Create optimizer with deafult learning rate\n\n# Create callbacks\ncheckpoint_path = 'checkpoints'\ncallbacks = [\n ModelCheckpoint(checkpoint_path, monitor='val_accuracy'),\n reduce_lr_on_plateau(optimizer, factor=0.2, patience=2, min_lr=1e-6),\n]", "_____no_output_____" ], [ "model.fit(\n train_loader,\n optimizer,\n criterion,\n device=device,\n epochs=10,\n val_loader=val_loader,\n callbacks=callbacks,\n metrics=['accuracy'],\n)", "Epoch 1:\n782/782 [========] - 30s 39ms/step - loss: 1.4347 - accuracy: 46.9997\nValidation set (took 0 minutes, 2 seconds): Average loss: 0.0162, accuracy: 65.59\n\nEpoch 2:\n782/782 [========] - 30s 39ms/step - loss: 0.8717 - accuracy: 69.0421\nValidation set (took 0 minutes, 2 seconds): Average loss: 0.0110, accuracy: 75.27\n\nEpoch 3:\n782/782 [========] - 31s 39ms/step - loss: 0.7492 - accuracy: 73.5877\nValidation set (took 0 minutes, 2 seconds): Average loss: 0.0112, accuracy: 75.26\n\nEpoch 4:\n782/782 [========] - 31s 40ms/step - loss: 0.6778 - accuracy: 76.0332\nValidation set (took 0 minutes, 2 seconds): Average loss: 0.0095, accuracy: 79.06\n\nEpoch 5:\n782/782 [========] - 31s 39ms/step - loss: 0.6273 - accuracy: 78.2375\nValidation set (took 0 minutes, 2 seconds): Average loss: 0.0089, accuracy: 80.71\n\nEpoch 6:\n782/782 [========] - 30s 39ms/step - loss: 0.5841 - accuracy: 79.8276\nValidation set (took 0 minutes, 2 seconds): Average loss: 0.0130, accuracy: 76.22\n\nEpoch 7:\n782/782 [========] - 30s 39ms/step - loss: 0.5600 - accuracy: 80.7387\nValidation set (took 0 minutes, 2 seconds): Average loss: 0.0323, accuracy: 40.15\n\nEpoch 8:\n782/782 [========] - 32s 41ms/step - loss: 0.5782 - accuracy: 78.2819\nValidation set (took 0 minutes, 2 seconds): Average loss: 0.0092, accuracy: 79.9\n\nEpoch 9:\n782/782 [========] - 31s 40ms/step - loss: 0.4930 - accuracy: 82.5756\nValidation set (took 0 minutes, 2 seconds): Average loss: 0.0077, accuracy: 83.37\n\nEpoch 10:\n782/782 [========] - 31s 40ms/step - loss: 0.4713 - accuracy: 83.3421\nValidation set (took 0 minutes, 2 seconds): Average loss: 0.0078, accuracy: 83.43\n\n" ] ], [ [ "## Result Analysis\n\nDisplaying the change in accuracy of the training and the validation set during training", "_____no_output_____" ] ], [ [ "plot_metric({\n 'Training': model.learner.train_metrics[0]['accuracy'],\n 'Validation': model.learner.val_metrics[0]['accuracy']\n}, 'Accuracy')", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb378b52c0ef4fdbe6e0f2053554909a95ab4901
87,473
ipynb
Jupyter Notebook
section_particle_filter/mcl2.ipynb
kentaroy47/LNPR_BOOK_CODES
f0d1bef336423ebdf04539ce833f0ce4cffc51f5
[ "MIT" ]
148
2019-03-27T00:20:16.000Z
2022-03-30T22:34:11.000Z
section_particle_filter/mcl2.ipynb
kentaroy47/LNPR_BOOK_CODES
f0d1bef336423ebdf04539ce833f0ce4cffc51f5
[ "MIT" ]
3
2018-11-07T04:33:13.000Z
2018-12-31T01:35:16.000Z
section_particle_filter/mcl2.ipynb
kentaroy47/LNPR_BOOK_CODES
f0d1bef336423ebdf04539ce833f0ce4cffc51f5
[ "MIT" ]
116
2019-04-18T08:35:53.000Z
2022-03-24T05:17:46.000Z
96.655249
49,951
0.754999
[ [ [ "import sys \nsys.path.append('../scripts/')\nfrom robot import *", "_____no_output_____" ], [ "class Particle: ###particle_class###\n def __init__(self, init_pose):\n self.pose = init_pose", "_____no_output_____" ], [ "class Mcl: ###Mcl_class###\n def __init__(self, init_pose, num):\n self.particles = [Particle(init_pose) for i in range(num)]", "_____no_output_____" ], [ "class EstimationAgent(Agent): ###EstimationAgent2###\n def __init__(self, nu, omega, estimator): #引数を追加\n super().__init__(nu, omega)\n self.estimator = estimator\n \n def draw(self, ax, elems):\n elems.append(ax.text(0, 0, \"hoge\", fontsize=10))", "_____no_output_____" ], [ "world = World(30, 0.1) ###1--15行目をmcl2_12として1行掲載\n\n### 地図を生成して3つランドマークを追加 ###\nm = Map()\nfor ln in [(-4,2), (2,-3), (3,3)]: m.append_landmark(Landmark(*ln))\nworld.append(m) \n\n### ロボットを作る ###\ninitial_pose = np.array([2, 2, math.pi/6]).T\nestimator = Mcl(initial_pose, 100) #パーティクルファイルタを作る\ncircling = EstimationAgent(0.2, 10.0/180*math.pi, estimator) #estimatorを渡す\nr = Robot(initial_pose, sensor=Camera(m), agent=circling) \nworld.append(r)\n\n### アニメーション実行 ###\nworld.draw()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb378c0cbeef12b304113650f40e2d89bfa9b350
126,432
ipynb
Jupyter Notebook
Step1 - Mask Generation.ipynb
Jasonnor/PConv-Keras-Food
046291e826d33fff92e74fcc782c16f417103dc4
[ "MIT" ]
1
2019-03-23T08:37:58.000Z
2019-03-23T08:37:58.000Z
Step1 - Mask Generation.ipynb
Jasonnor/PConv-Keras-Food
046291e826d33fff92e74fcc782c16f417103dc4
[ "MIT" ]
null
null
null
Step1 - Mask Generation.ipynb
Jasonnor/PConv-Keras-Food
046291e826d33fff92e74fcc782c16f417103dc4
[ "MIT" ]
1
2019-11-28T20:39:38.000Z
2019-11-28T20:39:38.000Z
1,204.114286
124,010
0.949554
[ [ [ "# Mask Generation with OpenCV\nIn the paper they generate irregular masks by using occlusion/dis-occlusion between two consecutive frames of videos, as described in [this paper](https://lmb.informatik.uni-freiburg.de/Publications/2010/Bro10e/sundaram_eccv10.pdf). \n\nInstead we'll simply be using OpenCV to generate some irregular masks, which will hopefully perform just as well. We've implemented this in the function `random_mask`, which is located in the `util.py` file int he libs directory", "_____no_output_____" ] ], [ [ "import itertools\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom libs.util import random_mask\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "Let us review of the code of this function", "_____no_output_____" ] ], [ [ "??random_mask", "_____no_output_____" ] ], [ [ "Finally, let's create some output samples with this function to see what it does", "_____no_output_____" ] ], [ [ "# Plot the results\n_, axes = plt.subplots(5, 5, figsize=(20, 20))\naxes = list(itertools.chain.from_iterable(axes))\n\nfor i in range(len(axes)):\n \n # Generate image\n img = random_mask(500, 500)\n \n # Plot image on axis\n axes[i].imshow(img*255)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb3790e428b1a73e3d07e99fe3fec9516c86f6e0
33,762
ipynb
Jupyter Notebook
regression/data/generate_samples.ipynb
zimmermant/tutorials
bdaf138d1832573d2e7c542a6824ce3bda5a6f57
[ "MIT" ]
3
2019-07-17T09:38:54.000Z
2021-07-07T18:57:52.000Z
regression/data/generate_samples.ipynb
zimmermant/tutorials
bdaf138d1832573d2e7c542a6824ce3bda5a6f57
[ "MIT" ]
13
2018-06-14T09:16:27.000Z
2019-07-20T17:04:09.000Z
regression/data/generate_samples.ipynb
zimmermant/tutorials
bdaf138d1832573d2e7c542a6824ce3bda5a6f57
[ "MIT" ]
7
2019-07-05T16:00:39.000Z
2021-12-11T07:34:47.000Z
65.303675
10,516
0.711214
[ [ [ "from sklearn.datasets import make_regression\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n%matplotlib inline", "_____no_output_____" ], [ "X, y = make_regression(n_samples=10000,\n n_features=4,\n noise=25,\n random_state=0)\nX = pd.DataFrame(data=X)\ny = pd.DataFrame(data=y)\n\ndf = pd.concat([y,X], axis=1)\ndf[4] = df[2].apply(lambda x: 1 if x > 0.2 else 0)\ndf.columns = ['predict_this', 'feature_1', 'feature_2', 'feature_3', 'feature_4', 'feature_5']", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.corr()", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "df.to_csv('../data/sample.csv', sep=',', encoding='utf-8', index=False)", "_____no_output_____" ], [ "plt.figure(figsize=(8,8), dpi=150);\nX.hist()\nplt.xlabel('X')\nplt.ylabel('counts')\nplt.legend('x')", "_____no_output_____" ], [ "X.hist(cumulative=True, density=1, bins=100)\nplt.xlabel('X')\nplt.ylabel('counts')\nplt.legend('x')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3793ca28d1459dc99262fe55cdafbd42aac93f
215,997
ipynb
Jupyter Notebook
docs/ipynb/micromagneticmodel.ipynb
gamdow/oommfc
de33ae2a8348ca78d9e16fe18bc562393703c215
[ "BSD-3-Clause" ]
null
null
null
docs/ipynb/micromagneticmodel.ipynb
gamdow/oommfc
de33ae2a8348ca78d9e16fe18bc562393703c215
[ "BSD-3-Clause" ]
null
null
null
docs/ipynb/micromagneticmodel.ipynb
gamdow/oommfc
de33ae2a8348ca78d9e16fe18bc562393703c215
[ "BSD-3-Clause" ]
null
null
null
99.813771
44,336
0.76639
[ [ [ "# Micromagnetic model\n\n## Mesh", "_____no_output_____" ] ], [ [ "import oommfc as oc", "_____no_output_____" ], [ "%matplotlib inline\nL = 100e-9\nd = 10e-9\nmesh = oc.Mesh(p1=(0, 0, 0), p2=(L, L, L), cell=(d, d, d), name=\"mesh\")\nmesh", "_____no_output_____" ] ], [ [ "The domain edge lengths in x, y, and z directions are:", "_____no_output_____" ] ], [ [ "mesh.l", "_____no_output_____" ] ], [ [ "The number of cells in all three directions:", "_____no_output_____" ] ], [ [ "mesh.n", "_____no_output_____" ] ], [ [ "The centre point of the mesh:", "_____no_output_____" ] ], [ [ "mesh.centre", "_____no_output_____" ] ], [ [ "## System", "_____no_output_____" ], [ "The basic block of our micromagnetic model is the system object. It is fully defined if:\n\n1. Hamiltonian,\n2. dynamics equation,\n3. magnetisation.\n\nare provided. The system is then \"driven\" in phase space using particular drivers.", "_____no_output_____" ] ], [ [ "system = oc.System(name=\"system\")", "_____no_output_____" ] ], [ [ "## Hamiltonian", "_____no_output_____" ] ], [ [ "exchange = oc.Exchange(A=1e-12)", "_____no_output_____" ], [ "exchange", "_____no_output_____" ], [ "exchange.A", "_____no_output_____" ], [ "repr(exchange)", "_____no_output_____" ], [ "hamiltonian = exchange + oc.Zeeman(H=(1e6, 0, 0))\n\ntype(hamiltonian)", "_____no_output_____" ], [ "system.hamiltonian = hamiltonian", "_____no_output_____" ], [ "system.hamiltonian", "_____no_output_____" ], [ "system.hamiltonian.exchange.A", "_____no_output_____" ], [ "system.hamiltonian.zeeman.H", "_____no_output_____" ], [ "system.hamiltonian += oc.Demag(name=\"energy_term_name\")", "_____no_output_____" ], [ "system.hamiltonian.energy_term_name", "_____no_output_____" ] ], [ [ "## Dynamics", "_____no_output_____" ] ], [ [ "system.dynamics = oc.Precession(gamma=2.211e5) + oc.Damping(alpha=0.1)", "_____no_output_____" ], [ "system.dynamics", "_____no_output_____" ] ], [ [ "## Magnetisation", "_____no_output_____" ] ], [ [ "import discretisedfield as df", "_____no_output_____" ], [ "Ms = 8e5\nm = df.Field(mesh, value=(1, 0, 1), norm=Ms, name=\"m\")", "_____no_output_____" ], [ "p = (50e-9, 50e-9, 5e-9)\nm(p) # sampling", "_____no_output_____" ], [ "m.f = lambda pos: (pos[0]+pos[2]+1, pos[2], 0)", "_____no_output_____" ], [ "system.m = m", "_____no_output_____" ] ], [ [ "## Drivers", "_____no_output_____" ], [ "### MinDriver", "_____no_output_____" ] ], [ [ "md = oc.MinDriver()\nmd.drive(system)", "2018/3/26 16:34: Calling OOMMF (system/system.mif) ... [0.7s]\n" ], [ "system.m.average", "_____no_output_____" ], [ "system.m.plot_plane(\"z\")", "_____no_output_____" ], [ "system.hamiltonian.zeeman.H = (0, 1e7, 0)", "_____no_output_____" ], [ "md.drive(system)", "2018/3/26 16:34: Calling OOMMF (system/system.mif) ... [0.6s]\n" ], [ "system.m.plot_plane(\"z\")", "_____no_output_____" ] ], [ [ "### Time driver", "_____no_output_____" ] ], [ [ "system.hamiltonian += oc.UniaxialAnisotropy(K1=5e3, u=(0, 0, 1))\nsystem.hamiltonian.zeeman.H = (1e6, 0, 0)", "_____no_output_____" ], [ "td = oc.TimeDriver()\ntd.drive(system, t=0.2e-9, n=50)", "2018/3/26 16:34: Calling OOMMF (system/system.mif) ... [1.2s]\n" ], [ "system.m.plot_plane(\"z\")", "_____no_output_____" ], [ "system.dt", "_____no_output_____" ], [ "system.dt.plot(\"t\", \"E\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb37ad7be95c2d312334ee5ee699ac328abf131b
11,407
ipynb
Jupyter Notebook
iml2020/examples/Model Selection with Sklearn.ipynb
AriJordan/IML_project
5f303931a2c2c4de907b84dfe1047afe6e0cf221
[ "MIT" ]
null
null
null
iml2020/examples/Model Selection with Sklearn.ipynb
AriJordan/IML_project
5f303931a2c2c4de907b84dfe1047afe6e0cf221
[ "MIT" ]
null
null
null
iml2020/examples/Model Selection with Sklearn.ipynb
AriJordan/IML_project
5f303931a2c2c4de907b84dfe1047afe6e0cf221
[ "MIT" ]
null
null
null
36.915858
249
0.473131
[ [ [ "# Code source: Sebastian Curi and Andreas Krause.\n\n# Python Notebook Commands\n%matplotlib inline\n%reload_ext autoreload\n%load_ext autoreload\n%autoreload 2\n\n# Numerical Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nrcParams['figure.figsize'] = (10, 5) # Change this if figures look ugly. \nrcParams['font.size'] = 16\n# IPython Libraries\nimport IPython\nimport ipywidgets\nfrom IPython.display import display\nfrom ipywidgets import interact, interactive, interact_manual\n\n\n# sklearn library\nimport sklearn\nfrom sklearn.datasets import make_regression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.pipeline import make_pipeline\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Custom Libraries\nfrom utilities import plot_helpers", "_____no_output_____" ] ], [ [ "# Model Selection \n\nIn this task we have noisy samples of the function $f(x) = x \\sin(x) $, and our objective is to learn it from data (here we're cheating because we already know the function). \n\nIn this demo we will see how model selection works and how to use K-fold cross-validation. ", "_____no_output_____" ] ], [ [ "rcParams['figure.figsize'] = (10, 5) # Change this if figures look ugly. \nrcParams['font.size'] = 16\n# Let's plot the function first (without noise)\ndef f(x):\n return x * np.sin(x) \n\nx_plot = np.linspace(-1, 11, 100)\nf_plot = f(x_plot)\nX_plot = x_plot[:, np.newaxis]\n\n\nplot_opts = {'x_label': '$x$', 'y_label': '$y$', 'y_lim': [np.min(f_plot)-3, np.max(f_plot)+3], \n 'legend':False, 'legend_loc': 'lower left'}\nplot_helpers.plot_data(x_plot, f_plot, fig=plt.subplot(111), options=plot_opts)", "_____no_output_____" ], [ "rcParams['figure.figsize'] = (10, 5) # Change this if figures look ugly. 
\nrcParams['font.size'] = 16\nnoise_widget = ipywidgets.FloatSlider(value=1,\n min=0,\n max=3,\n step=0.5,\n readout_format='.1f',\n description='Noise level:',\n style={'description_width': 'initial'},\n continuous_update=False)\nresample_button = ipywidgets.ToggleButton(description=\"Resample!\")\n\ndegree_widget = ipywidgets.IntSlider(min=1,\n max=19,\n step=1,\n description='Polynomial Degree:',\n style={'description_width': 'initial'},\n continuous_update=False)\nreg_widget = ipywidgets.Dropdown(\n options=[0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2],\n value=0,\n description='Regularizer:',\n disabled=False,\n style={'description_width': 'initial'},\n continuous_update=False\n)\n\ndef resample(b, noise):\n x = 10 * np.random.rand(20)\n\n y = f(x) + np.random.normal(size=(20,)) * noise\n\n # create matrix versions of these arrays\n X = x[:, np.newaxis]\n\n def change_degree(degree, reg):\n model = make_pipeline(PolynomialFeatures(degree), Ridge(alpha=reg))\n model.fit(X, y) \n \n fig = plt.figure()\n lw = 2\n plt.plot(x_plot, f_plot, color='cornflowerblue', linewidth=lw, label=\"Ground Truth\")\n y_plot = model.predict(X_plot)\n plt.plot(x_plot, y_plot, color='r', linewidth=lw, label=\"Degree %d\" % degree)\n \n plot_opts = {'x_label': '$x$', 'y_label': '$y$', 'y_lim': [np.min(f_plot)-3, np.max(f_plot)+3], \n 'legend':True, 'legend_loc': 'lower left'}\n \n opts = {'marker': 'b*', 'label': 'Training Points'}\n plot_opts.update(opts)\n \n plot_helpers.plot_data(X, y, fig=fig.gca(), options=plot_opts)\n plt.show()\n coefs = model._final_estimator.coef_\n coefs[0] = model._final_estimator.intercept_\n print(\"Estimated coefficients{}\".format(coefs))\n \n\n interact(change_degree, degree=degree_widget, reg=reg_widget);\ninteract(resample, b=resample_button, noise=noise_widget);\n", "_____no_output_____" ] ], [ [ "# K-Fold Cross-Validation\n\nThe idea of this method is to split the dataset into K different bins, use K-1 to learn and 1 to validate. Then you can interchange which split you validate on and make statistics on the different errors on each split (such as avg, std, etc). ", "_____no_output_____" ] ], [ [ "rcParams['figure.figsize'] = (10, 5) # Change this if figures look ugly. 
\nrcParams['font.size'] = 16\nfolds = 5\nN = 50\nn = int(N/folds)\nresample_button = ipywidgets.ToggleButton(description=\"Resample!\")\n\ndegree_widget = ipywidgets.IntSlider(value=1,\n min=1,\n max=19,\n step=1,\n description='Polynomial Degree:',\n style={'description_width': 'initial'},\n continuous_update=False)\nfold_widget = ipywidgets.ToggleButtons(value=1,\n options=np.arange(1, folds+1),\n description='Validation fold:',\n style={'description_width': 'initial'},\n continuous_update=False)\nnoise_widget = ipywidgets.FloatSlider(value=1, \n min=0, \n max=3, \n step=0.5, \n readout_format='.1f',\n description='Noise level:', \n style={'description_width': 'initial'},\n continuous_update=False)\n\nreg_widget = reg_widget = ipywidgets.Dropdown(\n options=[0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2],\n value=0,\n description='Regularizer:',\n disabled=False,\n style={'description_width': 'initial'},\n continuous_update=False\n)\n\ndef resample(b, noise):\n xraw = 10 * np.random.rand(N)\n # rng = np.random.RandomState(0)\n np.random.shuffle(xraw)\n\n #noise=1\n\n x = dict()\n y = dict()\n for i in range(folds):\n x[i] = xraw[n*i:n*(i+1)]\n y[i] = f(x[i]) + np.random.normal(size=(n,)) * noise\n\n \n def change_degree(degree, reg, fold):\n X = np.array(())\n Y = np.array(())\n for i in range(folds):\n if i == (fold-1):\n Xval = x[i]\n Yval = y[i]\n else:\n X = np.concatenate((X, x[i]))\n Y = np.concatenate((Y, y[i]))\n\n\n X = X[:, np.newaxis]\n Xval = Xval[:, np.newaxis]\n\n model = make_pipeline(PolynomialFeatures(degree), Ridge(alpha=reg))\n\n model.fit(X, Y)\n \n fig = plt.subplot(111)\n lw = 2\n plt.plot(x_plot, f_plot, color='cornflowerblue', linewidth=lw, label=\"Ground Truth\")\n y_plot = model.predict(X_plot)\n plt.plot(x_plot, y_plot, color='r', linewidth=lw, label=\"Degree %d\" % degree)\n \n opts = {'marker': 'b*', 'label': 'Training Points'}\n plot_helpers.plot_data(X, Y, fig=fig, options=opts)\n \n plot_opts = {'x_label': '$x$', 'y_label': '$y$', 'y_lim': [np.min(f_plot)-3, np.max(f_plot)+3], \n 'legend':True, 'legend_loc': 'lower left'}\n opts = {'marker': 'mX', 'label': 'Validation Points'}\n plot_opts.update(opts)\n plot_helpers.plot_data(Xval, Yval, fig=fig, options=plot_opts)\n \n plt.show()\n print(\"Train. Error: {:.2f}\".format(1/X.size * np.linalg.norm(model.predict(X) - Y, 2)))\n print(\"Valid. Error: {:.2f}\".format(1/Xval.size * np.linalg.norm(model.predict(Xval) - Yval, 2)))\n \n \n interact(change_degree, degree=degree_widget, reg=reg_widget, fold=fold_widget);\n\ninteract(resample, b=resample_button, noise=noise_widget);", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb37d87867f6cadccfe04ed7e36ab7c50cdc6e4c
1,863
ipynb
Jupyter Notebook
modules/alg_modules/Untitled-1.ipynb
akorzunin/binance_candle_plot
834991b2a95be045771a5560a1ccd6ac90895347
[ "Apache-2.0" ]
null
null
null
modules/alg_modules/Untitled-1.ipynb
akorzunin/binance_candle_plot
834991b2a95be045771a5560a1ccd6ac90895347
[ "Apache-2.0" ]
null
null
null
modules/alg_modules/Untitled-1.ipynb
akorzunin/binance_candle_plot
834991b2a95be045771a5560a1ccd6ac90895347
[ "Apache-2.0" ]
null
null
null
21.662791
80
0.528717
[ [ [ "import pandas as pd\nfrom datetime import datetime\nfrom alg_ma import AlgMa\nfrom random import randint\ndf = pd.DataFrame()\ndf['Test'] = [randint(0, 100) for i in range(1000)]\n# print(\nMA_lines = AlgMa.alg_main(df['Test'], MA_list=(7, 25, 100))\nprint(type(MA_lines[0]))\n# find intersections\ndf['Time'] = [datetime.fromtimestamp(i*10**6) for i in range(1000)]\n", "<class 'numpy.ndarray'>\n" ], [ "%%timeit -n 100\np = AlgMa.find_intersections(df['Time'], MA_lines[1], MA_lines[2])\n", "13.9 ms ± 744 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" ], [ "# 20.1 .. 20.6 enum\n# 21 .. 20.8\n#13.9 np array", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cb37e52dcffdf36b2de809ccf21ee7d3f57edff5
5,953
ipynb
Jupyter Notebook
LSTMInputGenerator.ipynb
jmonish/Deep-Learning-in-Market-Intelligence---Report-Classification
75bb000a1e34c5288fd0ceb80838fe322765f83a
[ "BSD-3-Clause" ]
null
null
null
LSTMInputGenerator.ipynb
jmonish/Deep-Learning-in-Market-Intelligence---Report-Classification
75bb000a1e34c5288fd0ceb80838fe322765f83a
[ "BSD-3-Clause" ]
null
null
null
LSTMInputGenerator.ipynb
jmonish/Deep-Learning-in-Market-Intelligence---Report-Classification
75bb000a1e34c5288fd0ceb80838fe322765f83a
[ "BSD-3-Clause" ]
null
null
null
44.096296
153
0.537544
[ [ [ "#Install bert package for tensorflow v1\n!pip install bert-tensorflow==1.0.1\nimport bert\nfrom bert import run_classifier\nfrom bert import optimization\nfrom bert import tokenization\n\nfrom datetime import datetime\nimport keras\nfrom keras import layers\nfrom keras.callbacks import ReduceLROnPlateau\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tqdm.notebook import tqdm #adds progress bars to show loop status\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n\nclass Generator(object):\n\n \"\"\"This class consists of functions to convert the training, validation and test datasets into a format acceptable by LSTM model. \n LSTM takes inputs with fixed width only. But the vector representations of every report are of variable length as different reports have\n different number of words and thus different number of text splits. Each generator function takes batches of given size, gets the size of \n the largest input and extends the remaining inputs to the size of the largest, filling them with a special value. This process is along all \n the data. This way, all batches sequences would have the same length. \"\"\"\n\n def __init__(self, config):\n self.config = config\n\n def train_generator(self, df):\n num_sequences = len(df['emb'].to_list())\n batch_size = self.config.training.batch_size_train\n batches_per_epoch = self.config.training.batches_per_epoch_train\n\n #make sure that all input data passes throught training\n assert batch_size * batches_per_epoch == num_sequences \n\n num_features= 768\n x_list= df['emb'].to_list()\n y_list = df.label.to_list()\n # Generate batches\n while True:\n for b in range(batches_per_epoch):\n longest_index = (b + 1) * batch_size - 1\n timesteps = len(max(df['emb'].to_list()[:(b + 1) * batch_size][-batch_size:], key=len))\n x_train = np.full((batch_size, timesteps, num_features), -99.)\n y_train = np.zeros((batch_size, 1))\n for i in range(batch_size):\n li = b * batch_size + i\n x_train[i, 0:len(x_list[li]), :] = x_list[li]\n y_train[i] = y_list[li]\n yield x_train, y_train\n\n def val_generator(self, df):\n num_sequences = len(df['emb'].to_list())\n batch_size = self.config.training.batch_size_val\n batches_per_epoch = self.config.training.batches_per_epoch_val\n\n #make sure that all input data passes throught training\n assert batch_size * batches_per_epoch == num_sequences\n\n num_features= 768\n x_list= df['emb'].to_list()\n y_list = df.label.to_list()\n # Generate batches\n while True:\n for b in range(batches_per_epoch):\n longest_index = (b + 1) * batch_size - 1\n timesteps = len(max(df['emb'].to_list()[:(b + 1) * batch_size][-31:], key=len))\n x_train = np.full((batch_size, timesteps, num_features), -99.)\n y_train = np.zeros((batch_size, 1))\n for i in range(batch_size):\n li = b * batch_size + i\n x_train[i, 0:len(x_list[li]), :] = x_list[li]\n y_train[i] = y_list[li]\n yield x_train, y_train\n\n def test_generator(self, df):\n num_sequences = len(df['emb'].to_list())\n batch_size = self.config.training.batch_size_test\n batches_per_epoch = self.config.training.batches_per_epoch_test\n\n #make sure that all input data passes throught training\n assert batch_size * batches_per_epoch == num_sequences\n \n num_features= 768\n x_list= df['emb'].to_list()\n y_list = df.label.to_list()\n # Generate batches\n while True:\n for b in range(batches_per_epoch):\n longest_index = (b + 1) * batch_size - 1\n timesteps = 
len(max(df['emb'].to_list()[:(b + 1) * batch_size][-31:], key=len))\n x_train = np.full((batch_size, timesteps, num_features), -99.)\n y_train = np.zeros((batch_size, 1))\n for i in range(batch_size):\n li = b * batch_size + i\n x_train[i, 0:len(x_list[li]), :] = x_list[li]\n y_train[i] = y_list[li]\n yield x_train, y_train", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
cb37f92fb834eab333bf556b8fcd4b496c77e65d
60,871
ipynb
Jupyter Notebook
Basic/Basic Fashion MNIST.ipynb
anishmo99/DL-Codes
d0df6e85912eb5784d609921f3de386278feae46
[ "MIT" ]
null
null
null
Basic/Basic Fashion MNIST.ipynb
anishmo99/DL-Codes
d0df6e85912eb5784d609921f3de386278feae46
[ "MIT" ]
null
null
null
Basic/Basic Fashion MNIST.ipynb
anishmo99/DL-Codes
d0df6e85912eb5784d609921f3de386278feae46
[ "MIT" ]
null
null
null
60,871
60,871
0.731235
[ [ [ "import tensorflow as tf\nprint(tf.__version__)", "2.3.0\n" ] ], [ [ "The Fashion MNIST data is available directly in the tf.keras datasets API. You load it like this:", "_____no_output_____" ] ], [ [ "mnist = tf.keras.datasets.fashion_mnist", "_____no_output_____" ] ], [ [ "Calling load_data on this object will give you two sets of two lists, these will be the training and testing values for the graphics that contain the clothing items and their labels.\n", "_____no_output_____" ] ], [ [ "(training_images, training_labels), (test_images, test_labels) = mnist.load_data()", "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz\n32768/29515 [=================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz\n26427392/26421880 [==============================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz\n8192/5148 [===============================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz\n4423680/4422102 [==============================] - 0s 0us/step\n" ] ], [ [ "What does these values look like? Let's print a training image, and a training label to see...Experiment with different indices in the array. For example, also take a look at index 42...that's a a different boot than the one at index 0\n", "_____no_output_____" ] ], [ [ "import numpy as np\nnp.set_printoptions(linewidth=200)\nimport matplotlib.pyplot as plt\nplt.imshow(training_images[0])\nprint(training_labels[0])\nprint(training_images[0])", "9\n[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 13 73 0 0 1 4 0 0 0 0 1 1 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 3 0 36 136 127 62 54 0 0 0 1 3 4 0 0 3]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 6 0 102 204 176 134 144 123 23 0 0 0 0 12 10 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 155 236 207 178 107 156 161 109 64 23 77 130 72 15]\n [ 0 0 0 0 0 0 0 0 0 0 0 1 0 69 207 223 218 216 216 163 127 121 122 146 141 88 172 66]\n [ 0 0 0 0 0 0 0 0 0 1 1 1 0 200 232 232 233 229 223 223 215 213 164 127 123 196 229 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 183 225 216 223 228 235 227 224 222 224 221 223 245 173 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 193 228 218 213 198 180 212 210 211 213 223 220 243 202 0]\n [ 0 0 0 0 0 0 0 0 0 1 3 0 12 219 220 212 218 192 169 227 208 218 224 212 226 197 209 52]\n [ 0 0 0 0 0 0 0 0 0 0 6 0 99 244 222 220 218 203 198 221 215 213 222 220 245 119 167 56]\n [ 0 0 0 0 0 0 0 0 0 4 0 0 55 236 228 230 228 240 232 213 218 223 234 217 217 209 92 0]\n [ 0 0 1 4 6 7 2 0 0 0 0 0 237 226 217 223 222 219 222 221 216 223 229 215 218 255 77 0]\n [ 0 3 0 0 0 0 0 0 0 62 145 204 228 207 213 221 218 208 211 218 224 223 219 215 224 244 159 0]\n [ 0 0 0 0 18 44 82 107 189 228 220 222 217 226 200 205 211 230 224 234 176 188 250 248 233 238 215 0]\n [ 0 57 187 208 224 221 224 208 204 214 208 209 200 159 245 193 206 223 255 255 221 234 221 211 220 232 246 0]\n [ 3 202 228 224 221 211 211 214 205 205 205 220 240 80 150 255 229 221 188 154 191 210 204 209 222 228 225 0]\n [ 98 233 198 210 222 229 229 234 249 220 194 215 217 241 65 73 106 117 168 219 221 215 217 223 223 224 229 29]\n [ 75 204 212 204 193 205 211 225 216 185 197 206 198 213 240 195 227 
245 239 223 218 212 209 222 220 221 230 67]\n [ 48 203 183 194 213 197 185 190 194 192 202 214 219 221 220 236 225 216 199 206 186 181 177 172 181 205 206 115]\n [ 0 122 219 193 179 171 183 196 204 210 213 207 211 210 200 196 194 191 195 191 198 192 176 156 167 177 210 92]\n [ 0 0 74 189 212 191 175 172 175 181 185 188 189 188 193 198 204 209 210 210 211 188 188 194 192 216 170 0]\n [ 2 0 0 0 66 200 222 237 239 242 246 243 244 221 220 193 191 179 182 182 181 176 166 168 99 58 0 0]\n [ 0 0 0 0 0 0 0 40 61 44 72 41 35 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]]\n" ] ], [ [ "You'll notice that all of the values in the number are between 0 and 255. If we are training a neural network, for various reasons it's easier if we treat all values as between 0 and 1, a process called '**normalizing**'...and fortunately in Python it's easy to normalize a list like this without looping. You do it like this:", "_____no_output_____" ] ], [ [ "training_images = training_images / 255.0\ntest_images = test_images / 255.0", "_____no_output_____" ] ], [ [ "Now you might be wondering why there are 2 sets...training and testing -- remember we spoke about this in the intro? The idea is to have 1 set of data for training, and then another set of data...that the model hasn't yet seen...to see how good it would be at classifying values. After all, when you're done, you're going to want to try it out with data that it hadn't previously seen!", "_____no_output_____" ], [ "Let's now design the model. There's quite a few new concepts here, but don't worry, you'll get the hang of them. ", "_____no_output_____" ] ], [ [ "model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), \n tf.keras.layers.Dense(128, activation=tf.nn.relu), \n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])", "_____no_output_____" ] ], [ [ "**Sequential**: That defines a SEQUENCE of layers in the neural network\n\n**Flatten**: Remember earlier where our images were a square, when you printed them out? Flatten just takes that square and turns it into a 1 dimensional set.\n\n**Dense**: Adds a layer of neurons\n\nEach layer of neurons need an **activation function** to tell them what to do. There's lots of options, but just use these for now. \n\n**Relu** effectively means \"If X>0 return X, else return 0\" -- so what it does it it only passes values 0 or greater to the next layer in the network.\n\n**Softmax** takes a set of values, and effectively picks the biggest one, so, for example, if the output of the last layer looks like [0.1, 0.1, 0.05, 0.1, 9.5, 0.1, 0.05, 0.05, 0.05], it saves you from fishing through it looking for the biggest value, and turns it into [0,0,0,0,1,0,0,0,0] -- The goal is to save a lot of coding!\n", "_____no_output_____" ], [ "The next thing to do, now the model is defined, is to actually build it. You do this by compiling it with an optimizer and loss function as before -- and then you train it by calling **model.fit ** asking it to fit your training data to your training labels -- i.e. have it figure out the relationship between the training data and its actual labels, so in future if you have data that looks like the training data, then it can make a prediction for what that data would look like. 
", "_____no_output_____" ] ], [ [ "model.compile(optimizer = tf.optimizers.Adam(),\n loss = 'sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(training_images, training_labels, epochs=5)", "Epoch 1/5\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.4967 - accuracy: 0.8265\nEpoch 2/5\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.3739 - accuracy: 0.8643\nEpoch 3/5\n1875/1875 [==============================] - 6s 3ms/step - loss: 0.3366 - accuracy: 0.8767\nEpoch 4/5\n1875/1875 [==============================] - 7s 3ms/step - loss: 0.3134 - accuracy: 0.8849\nEpoch 5/5\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.2949 - accuracy: 0.8914\n" ] ], [ [ "Once it's done training -- you should see an accuracy value at the end of the final epoch. It might look something like 0.9098. This tells you that your neural network is about 91% accurate in classifying the training data. I.E., it figured out a pattern match between the image and the labels that worked 91% of the time. Not great, but not bad considering it was only trained for 5 epochs and done quite quickly.\n\nBut how would it work with unseen data? That's why we have the test images. We can call model.evaluate, and pass in the two sets, and it will report back the loss for each. Let's give it a try:", "_____no_output_____" ] ], [ [ "model.evaluate(test_images, test_labels)", "313/313 [==============================] - 0s 1ms/step - loss: 0.3680 - accuracy: 0.8691\n" ] ], [ [ "For me, that returned a accuracy of about .8838, which means it was about 88% accurate. As expected it probably would not do as well with *unseen* data as it did with data it was trained on! As you go through this course, you'll look at ways to improve this. \n\nTo explore further, try the below exercises:\n", "_____no_output_____" ], [ "# Exploration Exercises", "_____no_output_____" ], [ "###Exercise 1:\nFor this first exercise run the below code: It creates a set of classifications for each of the test images, and then prints the first entry in the classifications. The output, after you run it is a list of numbers. Why do you think this is, and what do those numbers represent? ", "_____no_output_____" ] ], [ [ "test_images.shape", "_____no_output_____" ], [ "classifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(classifications.shape)\n# the picture is given to us\n# the model predicts the probability according to each label with the index\n# the index having the highest probability is the predicted label\n# here, index 9 has the highest probability\n# thus, it means that the item is most likely to be 9 (ankle boot)", "[2.8583752e-08 2.2256762e-08 3.9368930e-09 4.4951143e-08 1.2739662e-07 2.2488129e-03 1.1568574e-07 2.3034498e-02 6.2267384e-07 9.7471583e-01]\n(10000, 10)\n" ] ], [ [ "Hint: try running print(test_labels[0]) -- and you'll get a 9. Does that help you understand why this list looks the way it does? ", "_____no_output_____" ] ], [ [ "print(test_labels[0])", "9\n" ] ], [ [ "### What does this list represent?\n\n\n1. It's 10 random meaningless values\n2. It's the first 10 classifications that the computer made\n3. It's the probability that this item is each of the 10 classes\n", "_____no_output_____" ], [ "####Answer: \nThe correct answer is (3)\n\nThe output of the model is a list of 10 numbers. These numbers are a probability that the value being classified is the corresponding value (https://github.com/zalandoresearch/fashion-mnist#labels), i.e. 
the first value in the list is the probability that the image is of a '0' (T-shirt/top), the next is a '1' (Trouser) etc. Notice that they are all VERY LOW probabilities.\n\nFor the 9 (Ankle boot), the probability was in the 90's, i.e. the neural network is telling us that it's almost certainly a 7.", "_____no_output_____" ], [ "### How do you know that this list tells you that the item is an ankle boot?\n\n\n1. There's not enough information to answer that question\n2. The 10th element on the list is the biggest, and the ankle boot is labelled 9\n2. The ankle boot is label 9, and there are 0->9 elements in the list\n", "_____no_output_____" ], [ "####Answer\nThe correct answer is (2). Both the list and the labels are 0 based, so the ankle boot having label 9 means that it is the 10th of the 10 classes. The list having the 10th element being the highest value means that the Neural Network has predicted that the item it is classifying is most likely an ankle boot", "_____no_output_____" ], [ "##Exercise 2: \nLet's now look at the layers in your model. Experiment with different values for the dense layer with 512 neurons. What different results do you get for loss, training time etc? Why do you think that's the case? \n", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(1024, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])", "2.3.0\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n11493376/11490434 [==============================] - 0s 0us/step\nEpoch 1/5\n1875/1875 [==============================] - 14s 8ms/step - loss: 0.1856 - accuracy: 0.9444\nEpoch 2/5\n1875/1875 [==============================] - 17s 9ms/step - loss: 0.0746 - accuracy: 0.9767\nEpoch 3/5\n1875/1875 [==============================] - 14s 7ms/step - loss: 0.0491 - accuracy: 0.9840\nEpoch 4/5\n1875/1875 [==============================] - 14s 7ms/step - loss: 0.0337 - accuracy: 0.9887\nEpoch 5/5\n1875/1875 [==============================] - 14s 7ms/step - loss: 0.0275 - accuracy: 0.9909\n313/313 [==============================] - 1s 3ms/step - loss: 0.0787 - accuracy: 0.9774\n[2.59692534e-09 1.55522759e-08 3.75387749e-10 1.49690220e-06 8.16664783e-14 7.96652078e-10 4.12208245e-13 9.99998450e-01 4.48192455e-10 1.37411025e-08]\n7\n" ] ], [ [ "###Question 1. Increase to 1024 Neurons -- What's the impact?\n\n1. Training takes longer, but is more accurate\n2. Training takes longer, but no impact on accuracy\n3. Training takes the same time, but is more accurate\n", "_____no_output_____" ], [ "####Answer\nThe correct answer is (1) by adding more Neurons we have to do more calculations, slowing down the process, but in this case they have a good impact -- we do get more accurate. 
That doesn't mean it's always a case of 'more is better', you can hit the law of diminishing returns very quickly!", "_____no_output_____" ], [ "##Exercise 3: \n\nWhat would happen if you remove the Flatten() layer. Why do you think that's the case? \n\nYou get an error about the shape of the data. It may seem vague right now, but it reinforces the rule of thumb that the first layer in your network should be the same shape as your data. Right now our data is 28x28 images, and 28 layers of 28 neurons would be infeasible, so it makes more sense to 'flatten' that 28,28 into a 784x1. Instead of wriitng all the code to handle that ourselves, we add the Flatten() layer at the begining, and when the arrays are loaded into the model later, they'll automatically be flattened for us.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([#tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])", "2.3.0\nEpoch 1/5\n" ] ], [ [ "##Exercise 4: \n\nConsider the final (output) layers. Why are there 10 of them? What would happen if you had a different amount than 10? For example, try training the network with 5\n\nYou get an error as soon as it finds an unexpected value. Another rule of thumb -- the number of neurons in the last layer should match the number of classes you are classifying for. In this case it's the digits 0-9, so there are 10 of them, hence you should have 10 neurons in your final layer.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation=tf.nn.relu),\n tf.keras.layers.Dense(5, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])", "_____no_output_____" ] ], [ [ "##Exercise 5: \n\nConsider the effects of additional layers in the network. What will happen if you add another layer between the one with 512 and the final layer with 10. \n\nAns: There isn't a significant impact -- because this is relatively simple data. For far more complex data (including color images to be classified as flowers that you'll see in the next lesson), extra layers are often necessary. 
", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy',\n metrics = ['accuracy'])\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])", "_____no_output_____" ] ], [ [ "#Exercise 6: \n\nConsider the impact of training for more or less epochs. Why do you think that would be the case? \n\nTry 15 epochs -- you'll probably get a model with a much better loss than the one with 5\nTry 30 epochs -- you might see the loss value stops decreasing, and sometimes increases. This is a side effect of something called 'overfitting' which you can learn about [somewhere] and it's something you need to keep an eye out for when training neural networks. There's no point in wasting your time training if you aren't improving your loss, right! :)", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=30)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[34])\nprint(test_labels[34])", "_____no_output_____" ] ], [ [ "#Exercise 7: \n\nBefore you trained, you normalized the data, going from values that were 0-255 to values that were 0-1. What would be the impact of removing that? Here's the complete code to give it a try. Why do you think you get different results? ", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\n\n# training_images=training_images/255.0\n# test_images=test_images/255.0\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\n\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])", "_____no_output_____" ] ], [ [ "#Exercise 8: \n\nEarlier when you trained for extra epochs you had an issue where your loss might change. It might have taken a bit of time for you to wait for the training to do that, and you might have thought 'wouldn't it be nice if I could stop the training when I reach a desired value?' -- i.e. 
95% accuracy might be enough for you, and if you reach that after 3 epochs, why sit around waiting for it to finish a lot more epochs....So how would you fix that? Like any other program...you have callbacks! Let's see them in action...", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nclass myCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n if(logs.get('loss')<0.4):\n print(\"\\nReached 60% accuracy so cancelling training!\")\n self.model.stop_training = True\n\ncallbacks = myCallback()\n\nmnist = tf.keras.datasets.fashion_mnist\n\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\n\ntraining_images=training_images/255.0\ntest_images=test_images/255.0\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\n\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5, callbacks=[callbacks])\n", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb37f943050919452f757f3ba2c2e2c957426cc7
5,079
ipynb
Jupyter Notebook
notebooks/test_generate_dataset.ipynb
nodtem66/PINN_Implicit_SDF
74585fbcc691c9e0ecb4633c616f8b159448499f
[ "MIT" ]
null
null
null
notebooks/test_generate_dataset.ipynb
nodtem66/PINN_Implicit_SDF
74585fbcc691c9e0ecb4633c616f8b159448499f
[ "MIT" ]
null
null
null
notebooks/test_generate_dataset.ipynb
nodtem66/PINN_Implicit_SDF
74585fbcc691c9e0ecb4633c616f8b159448499f
[ "MIT" ]
null
null
null
28.374302
143
0.56645
[ [ [ "%load_ext autoreload\n%autoreload 2\n# Add parent directory into system path\nimport sys, os\nsys.path.insert(1, os.path.abspath(os.path.normpath('..')))\n\nfrom utils.dataset_generator import generate_dataset, ImplicitDataset, TestDataset, SliceDataset\nimport numpy as np\nfrom sdf import *\nimport math\n\n@sdf3\ndef gyroid(w = 3.14159, t=0):\n def f(p):\n q = w*p\n x, y, z = (q[:, i] for i in range(3))\n return (np.cos(x)*np.sin(y) + np.cos(y)*np.sin(z) + np.cos(z)*np.sin(x) - t)\n return f", "[pyigl_import] module igl not found. trying to import pyigl\n" ], [ "generate_dataset(box(1.0) & gyroid(w=math.pi*4, t=0), N_train=100*100*100, N_test=3e6, name='box_1f0_gyroid_4pi', save_dir='../datasets')", "Saved file at ../datasets\\box_1f0_gyroid_4pi\\raw.stl\nImplicitDataset (1000000 points)\nSliceDataset (10000 points)\nUniformMeshSDFDataset (2985984 points)\nRandomMeshSDFDataset[sobol] (12582912 points)\n" ], [ "name = 'box_1f0_gyroid_4pi'\ntrain = ImplicitDataset.from_file(f'../datasets/{name}/train.npz', device='cuda')\n#test = TestDataset(f'../datasets/{name}_test.npz')\n#slice_dataset = SliceDataset.from_file(f'../datasets/{name}_slice.npz')", "_____no_output_____" ], [ "from utils.dataset_generator import batch_loader\n\nfor i in batch_loader(train.points, batch_size=10000):\n print(i.shape)", "_____no_output_____" ], [ "from utils.geometry import Mesh\nmesh = Mesh(f'../datasets/{name}/raw.stl', doNormalize=True)\nbv, _ = mesh.bounding_box()\nnp.min(bv, axis=0)", "_____no_output_____" ], [ "print(train)\nprint(test)\nprint(slice_dataset)\nprint(test.random.points.shape)", "_____no_output_____" ], [ "ndim = round(train.sdfs.shape[0]**(1/3))\nprint(ndim)\npoints = train.points.reshape((ndim, ndim, ndim, 3))\nsdfs = train.sdfs.reshape((ndim, ndim, ndim))\ntrue_sdfs = train.true_sdfs.reshape((ndim, ndim, ndim))\n\ndx = points[1,1,1,:] - points[0,0,0,:]\n#grad = np.linalg.norm(np.array(np.gradient(sdfs, *dx)), axis=0)\ngrad = train.grads.reshape((ndim, ndim, ndim, 3))\nnorm_grad = np.linalg.norm(grad, axis=3)\n\ntrue_grad = train.true_grads.reshape((ndim, ndim, ndim, 3))\nnorm_true_grad = np.linalg.norm(true_grad, axis=3)\n\ncosine_similarity = np.einsum('ij,ij->i', train.grads, train.true_grads).reshape((ndim,ndim,ndim)) / norm_grad / norm_true_grad\n\nslice_z = 20\nfrom utils.visualizer import SDFVisualize\nvisualizer = SDFVisualize()\nvisualizer._plot(sdfs[:, :, slice_z], norm_grad[:, :, slice_z])\nvisualizer._plot(true_sdfs[:, :, slice_z], norm_true_grad[:, :, slice_z])\nvisualizer._plot(sdfs[:, :, slice_z] - true_sdfs[:, :, slice_z])\nvisualizer._plot(norm_grad[:, :, slice_z] - norm_true_grad[:, :, slice_z])\nvisualizer._plot(cosine_similarity[:,:,slice_z])\n", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n_norm_grad = norm_grad.reshape((ndim**3,))\n_norm_true_grad = norm_true_grad.reshape((ndim**3,))\nplt.hist(_norm_grad, bins=120)\nplt.show()\nplt.hist(_norm_true_grad, bins=120)\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb37feb1d0bb6714d4572e4b930ce67e0ed4fdb8
701,272
ipynb
Jupyter Notebook
Python Dataset Exploration.ipynb
DheerajKumar97/Dataset-Exploration-with-Python
01e1acad7d4cf27ce36dd20cd66531c23a172b31
[ "MIT" ]
3
2020-04-03T11:35:16.000Z
2020-04-23T09:38:10.000Z
Python Dataset Exploration.ipynb
DheerajKumar97/Dataset-Exploration-with-Python
01e1acad7d4cf27ce36dd20cd66531c23a172b31
[ "MIT" ]
null
null
null
Python Dataset Exploration.ipynb
DheerajKumar97/Dataset-Exploration-with-Python
01e1acad7d4cf27ce36dd20cd66531c23a172b31
[ "MIT" ]
null
null
null
448.670505
317,484
0.92905
[ [ [ "# Exploring Datasets with Python\n\nIn this short demo we will analyse a given dataset from 1978, which contains information about politicians having affairs.\n\nTo analyse it, we will use a Jupyter Notebook, which is basically a REPL++ for Python. Entering a command with shift executes the line and prints the result.", "_____no_output_____" ] ], [ [ "4 + 4", "_____no_output_____" ], [ "def sum(a, b):\n return a + b\n\nsum(40, 2)", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "affairs = pd.read_csv('affairs.csv')\naffairs.head()", "_____no_output_____" ], [ "affairs['sex'].head()", "_____no_output_____" ], [ "affairs['sex'].value_counts()", "_____no_output_____" ], [ "affairs['age'].describe()", "_____no_output_____" ], [ "affairs['age'].max()", "_____no_output_____" ], [ "affairs.describe()", "_____no_output_____" ], [ "affairs[affairs['sex'] == 'female'].head()", "_____no_output_____" ], [ "affairs[affairs['sex'] == 'female'].describe()", "_____no_output_____" ], [ "affairs['below_30'] = affairs['age'] < 30", "_____no_output_____" ], [ "affairs['below_30'].value_counts()", "_____no_output_____" ], [ "affairs.head()", "_____no_output_____" ], [ "rel_meanings = ['not', 'mildly', 'fairly', 'strongly']", "_____no_output_____" ], [ "affairs['religious'] = affairs['religious'].apply(lambda x: rel_meanings[min(x, 4)-1])", "_____no_output_____" ], [ "affairs.head()", "_____no_output_____" ] ], [ [ "# Visualize Data\n\nTo visualize our data, we will use Seaborn, a Python visualization library based on matplotlib. It provides a high-level interface for drawing attractive statistical graphics. Let's import it.", "_____no_output_____" ] ], [ [ "import seaborn as sns", "_____no_output_____" ], [ "%matplotlib inline\nsns.set()\nsns.set_context('talk')", "_____no_output_____" ] ], [ [ " Seaborn together with Pandas makes it pretty easy to create charts to analyze our data. We can pass our Dataframes and Series directly into Seaborn methods. We will see how in the following sections.\n\n# Univariate Plotting\n\nLet's start by visualizing the distribution of the age our our people. We can achieve this with a simple method called distplot by passing our series of ages as argument.", "_____no_output_____" ] ], [ [ "sns.distplot(affairs['age'])", "_____no_output_____" ], [ "sns.distplot(affairs['age'], bins=50, rug=True, kde=False)", "_____no_output_____" ], [ "sns.distplot(affairs['ym'], bins=10, kde=False)", "_____no_output_____" ] ], [ [ "The average age of our people is around 32, but the most people are married for more than 14 years!\n\n# Bivariate Plotting\n\nNumbers get even more interesting when we can compare them to other numbers! Lets start comparing the number of years married vs the number of affairs. 
Seaborn provides us with a method called jointplot for this use case.", "_____no_output_____" ] ], [ [ "sns.jointplot(affairs['ym'], affairs['nbaffairs'])", "_____no_output_____" ], [ "sns.jointplot(affairs['ym'], affairs['nbaffairs'], kind='reg')", "_____no_output_____" ], [ "sns.jointplot(affairs['ym'], affairs['age'], kind='kde', shade=True)", "_____no_output_____" ], [ "sns.pairplot(affairs.drop('below_30', axis=1), hue='sex', kind='reg')", "_____no_output_____" ], [ "sns.lmplot(x=\"ym\", y=\"nbaffairs\", hue=\"sex\", col=\"child\", row=\"religious\", data=affairs)", "_____no_output_____" ], [ "sns.boxplot(x=\"sex\", y=\"ym\", hue=\"child\", data=affairs);", "_____no_output_____" ], [ "sns.violinplot(x=\"religious\", y=\"nbaffairs\", hue=\"sex\", data=affairs, split=True);\n", "_____no_output_____" ], [ "affairs.corr()", "_____no_output_____" ], [ "sns.heatmap(affairs.corr(), cmap='coolwarm')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3803aa54680d8f055615894660cfa02f1810b7
29,512
ipynb
Jupyter Notebook
MSTICPy-TutorialNotebooks/MordorData.ipynb
argvssarma/AzureSentinel
09c13ad1a1ba64f42827acd98ef82688fd070e15
[ "MIT" ]
null
null
null
MSTICPy-TutorialNotebooks/MordorData.ipynb
argvssarma/AzureSentinel
09c13ad1a1ba64f42827acd98ef82688fd070e15
[ "MIT" ]
null
null
null
MSTICPy-TutorialNotebooks/MordorData.ipynb
argvssarma/AzureSentinel
09c13ad1a1ba64f42827acd98ef82688fd070e15
[ "MIT" ]
null
null
null
34.476636
660
0.514875
[ [ [ "# MSTICpy - Mordor data provider and browser\n\n### Description\nThis notebook provides a guided example of using the Mordor data provider and browser included with MSTICpy.\n\nFor more information on the Mordor data sets see the [Open Threat Research Forge Mordor GitHub repo](https://github.com/OTRF/mordor)\n\nYou must have msticpy installed to run this notebook:\n```\n%pip install --upgrade msticpy\n```\n\nMSTICpy versions >= 0.8.5\n\n### Contents:\n- Using the Mordor data provider to retrieve data sets\n - Listing queries\n - Running a query to retrieve data\n - Optional parameters\n - Searching for queries by Mordor property\n- Mordor Browser\n", "_____no_output_____" ], [ "## Using the Data Provider to download datasets\n\nUsing the data provider you can download and render event data as a pandas DataFrame.\n\n> **Note** - Mordor includes both host event data and network capture data.<br>\n> Although Capture files can be downloaded and unpacked<br>\n> they currently cannot be populated into a pandas DataFrame.\n> This is the case for most `network` datasets.<br>\n> `Host` event data is retrieved and populated into DataFrames.\n", "_____no_output_____" ] ], [ [ "from msticpy.data import QueryProvider\nmdr_data = QueryProvider(\"Mordor\", save_folder=\"./mordor\")\nmdr_data.connect()", "Retrieving Mitre data...\nRetrieving Mordor data...\n" ] ], [ [ "### List Queries\n\n> Note: Many Mordor data entries have multiple data sets, so we see more queries than Mordor entries.\n\n(Only first 15 shown)", "_____no_output_____" ] ], [ [ "mdr_data.list_queries()[:15]", "_____no_output_____" ] ], [ [ "### Retrieving/querying a data set", "_____no_output_____" ] ], [ [ "mdr_data.small.windows.credential_access.host.covenant_dcsync_dcerpc_drsuapi_DsGetNCChanges().head(3)", "https://raw.githubusercontent.com/OTRF/mordor/master/datasets/small/windows/credential_access/host/covenant_dcsync_dcerpc_drsuapi_DsGetNCChanges.zip\nExtracting covenant_dcsync_dcerpc_drsuapi_DsGetNCChanges_2020-08-05020926.json\n" ] ], [ [ "### Optional parameters\n\nThe data provider and the query functions support some parameters to control\naspects of the query operation.\n\n- **use_cached** : bool, optional<br>\n Try to use locally saved file first,\n by default True. If you’ve previously downloaded a file, it will use\n this rather than downloading a new copy.\n- **save_folder** : str, optional<br>\n Path to output folder, by default\n \".\". The path that downloaded and extracted files are saved to.\n- **silent** : bool<br>\n If True, suppress feedback. 
By default, False.\n\nIf you specify these when you initialize the data provider, the settings\nwill apply to all queries.", "_____no_output_____" ] ], [ [ "mdr_data = QueryProvider(\"Mordor\", save_folder=\"./mordor\")\nmdr_data.connect()", "Retrieving Mitre data...\nRetrieving Mordor data...\n" ] ], [ [ "Using these parameters in the query will override the provider settings\nand defaults for that query.", "_____no_output_____" ] ], [ [ "mdr_data.small.windows.credential_access.host.covenant_dcsync_dcerpc_drsuapi_DsGetNCChanges(silent=True, save_folder=\"./mordor\").head(2)", "_____no_output_____" ] ], [ [ "## Getting summary data about a query\n\nCall the query function with a single \"?\" parameter.", "_____no_output_____" ] ], [ [ "mdr_data.small.windows.credential_access.host.covenant_dcsync_dcerpc_drsuapi_DsGetNCChanges(\"?\")", "Query: covenant_dcsync_dcerpc_drsuapi_DsGetNCChanges\nData source: Mordor\nCovenant DCSync\n\nNotes\n-----\nMordor ID: SDWIN-200805020926\nThis dataset represents adversaries abusing Active Directory Replication services to retrieve secret domain data (i.e. NTLM hashes) from domain accounts.\n\nMitre Techniques: T1003: OS Credential Dumping\nMitre Tactics: TA0006: Credential Access\n\nParameters\n----------\nQuery:\nhttps://raw.githubusercontent.com/OTRF/mordor/master/datasets/small/windows/credential_access/host/covenant_dcsync_dcerpc_drsuapi_DsGetNCChanges.zip\n" ] ], [ [ "### Searching for Queries with QueryProvider.search_queries()\nSearch queries for matching attributes.\n\n#### Parameters\n\n**search** : str Search string. \n\nSubstrings separated by commas will be treated as OR terms - e.g. \"a, b\" == \"a\" or \"b\".<br>\nSubstrings separated by \"+\" will be treated as AND terms - e.g. \"a + b\" == \"a\" and \"b\"\n\n#### Returns\nList of matching query names.", "_____no_output_____" ] ], [ [ "mdr_data.search_queries(\"AWS\")", "_____no_output_____" ], [ "mdr_data.search_queries(\"Empire + T1222\")", "_____no_output_____" ], [ "mdr_data.search_queries(\"Empire + Credential\")", "_____no_output_____" ] ], [ [ "## Mordor Browser\n\nWe've also built a more specialized browser for Mordor data. This uses the metadata in the repository to let you view full details of the dataset.\n\nYou can also preview the dataset (if it is convertible to a DataFrame).\n\nFor details of the data shown please see the [Mordor GitHub repo](https://github.com/OTRF/mordor)<br> and the [Threat Hunter Playbook](https://threathunterplaybook.com/introduction.html)\n", "_____no_output_____" ] ], [ [ "from msticpy.data.browsers.mordor_browser import MordorBrowser\n\nmdr_browser = MordorBrowser()", "Retrieving Mitre data...\nRetrieving Mordor data...\n" ] ], [ [ "### Mordor Browser Details\nThe top scrollable list is a list of the Mordor datasets. Selecting one of these updates the data in the lower half of the browser.\n\n#### Filter Drop-down\nTo narrow your search you can filter using a text search or filter by Mitre Attack Techniques or Tactics.\n- The Filter text box uses the same syntax as the provider `search_queries()` function.\n - Simple text string will find matches for datasets that contain this string\n - Strings separated by \",\" are treated as OR terms - i.e. it will match items that contain ANY of the substrings\n - Strings separated by \"+\" are treated as AND terms - i.e. it will match items that contain ALL of the substrings\n- The Mitre Techniques and Tactics lists are multi-select lists. 
Only items that have techniques and tactics matching\n the selected items will be show.\n- Reset Filter button will clear any filtering.\n\n#### Main Details Window\n- title, ID, author, creation date, modification date and description are self-explanatory.\n- tags can be used for searching\n- file_paths (see below)\n- attacks - lists related Mitre Technique and Tactics. The item title is a link to the Mitre page describing the technique or tactic.\n- notebooks - if there is a notebook in the Threat Hunter Playbook site, a link to it is shown here. (multiple notebooks might be shown)\n- simulation - raw data listing the steps in the attack (and useful for replaying the attack in a demo environment).\n- references - links to any external data about the attack.\n\n#### File_paths\nThis section allows you to select, download and (in most cases) display the event data relating to the attack.\n\nSelect a file and click on the Download button.\n\nThe zipped file is downloaded and extracted. If it is event data, this is converted to a\npandas DataFrame and displayed below the rest of the data.\n\nThe current dataset is available as an attribute of the browser:\n```\n mdr_browser.current_dataset\n```\n\nDatasets that you've downloaded and displayed in this session are also cached in the browser and available in the \n`mdr_browser.datasets` attribute.\n\n#### Downloaded files\nBy default files are downloaded and extracted to the current folder. You can change this with the\n`save_folder` parameter when creating the `MordorBrowser` object.\n\nYou can also specify the `use_cached` parameter. By default, this is `True`, which causes downloaded files not\nto be deleted after extraction. These local copies are used if you try to view the same data set again.\nThis also works across sessions.\n\nIf `use_cache` is set to False, files are deleted immediately after downloading, extracting and populating the\nDataFrame.", "_____no_output_____" ], [ "### Using the standard query browser\n\n> **Note** - In the `Example` section, ignore the examples of parameters<br>\n> passed to the query - these are not needed and ignored.", "_____no_output_____" ] ], [ [ "mdr_data.browse_queries()", "_____no_output_____" ] ], [ [ "## Remove cached files", "_____no_output_____" ] ], [ [ "from pathlib import Path\nfor file in Path(\"./mordor\").glob(\"*\"):\n file.unlink()\nPath(\"./mordor\").rmdir()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb3812f2040f00c0c02b3d90c3c134e4f19967da
21,759
ipynb
Jupyter Notebook
Amex_recommendation.ipynb
BiswajeetNayak/Hackathons
ec12edb25882e6f40388ff162ab11bd79365b5af
[ "MIT" ]
null
null
null
Amex_recommendation.ipynb
BiswajeetNayak/Hackathons
ec12edb25882e6f40388ff162ab11bd79365b5af
[ "MIT" ]
null
null
null
Amex_recommendation.ipynb
BiswajeetNayak/Hackathons
ec12edb25882e6f40388ff162ab11bd79365b5af
[ "MIT" ]
null
null
null
40.671028
1,774
0.485179
[ [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib notebook\nsns.set()", "_____no_output_____" ], [ "#Customer Demographics\ncust_dem= pd.read_csv(r'D:\\Datasets\\Coupon recommendation\\Amex Coupon prediction_train\\customer_demographics.csv')\ncust_dem.shape", "_____no_output_____" ], [ "#train -Under preparation\ntrain= pd.read_csv(r'D:\\Datasets\\Coupon recommendation\\Amex Coupon prediction_train\\train.csv')\nprint(train.shape)\ntrain=pd.merge(train,cust_dem,on='customer_id')\ntrain.shape", "(78369, 5)\n" ], [ "train.head()", "_____no_output_____" ], [ "#item data and transaction data\nitem_data=pd.read_csv('D:\\Datasets\\Coupon recommendation\\Amex Coupon prediction_train\\item_data.csv')\nprint(item_data.shape)\ntrans_data=pd.read_csv('D:\\Datasets\\Coupon recommendation\\Amex Coupon prediction_train\\customer_transaction_data.csv')\nprint(trans_data.shape)\ntrans_data=pd.merge(trans_data,item_data,on='item_id')\ntrans_data.shape", "(74066, 4)\n(1324566, 7)\n" ], [ "del item_data,cust_dem\ntrans_data.head()", "_____no_output_____" ], [ "trans_data['category'].value_counts()", "_____no_output_____" ], [ "trans_data['item_id'].nunique()", "_____no_output_____" ], [ "trans_data1=trans_data.loc[(trans_data['category']=='Grocery') | (trans_data['category']=='Pharmaceutical'),:]\ntrans_data1.shape", "_____no_output_____" ], [ "#coupon and campaign data\ncamp_data=pd.read_csv(r'D:\\Datasets\\Coupon recommendation\\Amex Coupon prediction_train\\campaign_data.csv')\nprint(camp_data.shape)\n \ncoupon_data=pd.read_csv('D:\\Datasets\\Coupon recommendation\\Amex Coupon prediction_train\\coupon_item_mapping.csv')\nprint(coupon_data.shape)\n\ntrain=pd.merge(train,camp_data,on='campaign_id')\nprint(train.shape)\ntrain=pd.merge(train,coupon_data,on='coupon_id')\nprint(train.shape)\ntrain=pd.merge(train,trans_data1,on='customer_id')\ntrain.shape", "(28, 4)\n(92663, 2)\n(3807856, 18)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3827fbf2bec03440b5d72253bb4dcc59d4a600
656,036
ipynb
Jupyter Notebook
ACE2/Prepare Sequences for FoldX-unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen.ipynb
salesforce/genhance
4585f4c3325e3fcb00690bd91b7d45be6f1da77c
[ "BSD-3-Clause" ]
20
2021-07-08T13:13:40.000Z
2021-12-17T07:53:29.000Z
ACE2/Prepare Sequences for FoldX-unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen.ipynb
salesforce/genhance
4585f4c3325e3fcb00690bd91b7d45be6f1da77c
[ "BSD-3-Clause" ]
2
2021-07-12T07:47:45.000Z
2021-12-09T21:07:29.000Z
ACE2/Prepare Sequences for FoldX-unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen.ipynb
salesforce/genhance
4585f4c3325e3fcb00690bd91b7d45be6f1da77c
[ "BSD-3-Clause" ]
8
2021-07-23T10:13:22.000Z
2022-03-30T04:48:49.000Z
152.247853
202,444
0.755655
[ [ [ "import torch\nfrom transformers import MT5ForConditionalGeneration, MT5Config, MT5EncoderModel, MT5Tokenizer, Trainer, TrainingArguments\nfrom progeny_tokenizer import TAPETokenizer\nimport numpy as np\nimport math\nimport random\nimport scipy\nimport time\nimport pandas as pd\nfrom torch.utils.data import DataLoader, RandomSampler, Dataset, BatchSampler\nimport typing\nfrom pathlib import Path\nimport argparse\nfrom collections import OrderedDict\nimport pickle\nimport matplotlib.pyplot as plt\n\nfrom tape.metrics import spearmanr", "_____no_output_____" ], [ "before_foldx = False", "_____no_output_____" ] ], [ [ "# Analyze 250K gen seqs and prepare for FoldX", "_____no_output_____" ], [ "saved output tsv file to run FoldX inference", "_____no_output_____" ] ], [ [ "wt_seq = 'STIEEQAKTFLDKFNHEAEDLFYQSSLASWNYNTNITEENVQNMNNAGDKWSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ'\nconstant_region = 'NTNITEEN'\nwt_cs_ind = wt_seq.index(constant_region)", "_____no_output_____" ], [ "gen250k_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000.tsv'\n# gen250k_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqs260000.tsv'", "_____no_output_____" ], [ "gen250k_df = pd.read_table(gen250k_tsv_name)", "_____no_output_____" ], [ "gen250k_df", "_____no_output_____" ] ], [ [ "filter out sequences without constant region", "_____no_output_____" ] ], [ [ "indices_to_drop = []\ndropped_seqs = []\nfor index, row in gen250k_df.iterrows():\n seq = row['MT_seq']\n if constant_region not in seq:\n indices_to_drop.append(index)\n dropped_seqs.append(seq)\n else:\n cs_ind = seq.index(constant_region)\n if cs_ind != wt_cs_ind:\n indices_to_drop.append(index)\n dropped_seqs.append(seq)", "_____no_output_____" ], [ "print(len(indices_to_drop))\nprint(indices_to_drop)\nprint(dropped_seqs)", "1771\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 66, 124, 144, 278, 335, 468, 509, 580, 791, 814, 841, 916, 1039, 1253, 1302, 1374, 1395, 1464, 1465, 1652, 1982, 2176, 2199, 2602, 2673, 2751, 2991, 3030, 3033, 3110, 3121, 3139, 3203, 3412, 3512, 3581, 3787, 3943, 3975, 4089, 4091, 4310, 4494, 4551, 4627, 4688, 4773, 5082, 5195, 5403, 5488, 5516, 5761, 5777, 5828, 5858, 6156, 6198, 6205, 6807, 7035, 7362, 7987, 7997, 8071, 8084, 8125, 8346, 8464, 8514, 8560, 8594, 8662, 8782, 8894, 9090, 9201, 9337, 9486, 9650, 9935, 10028, 10033, 10413, 10513, 10750, 10916, 11005, 11056, 11246, 11352, 11434, 11461, 11511, 11669, 11751, 11860, 11959, 12419, 12508, 12533, 12783, 12830, 13066, 13614, 13795, 13943, 13960, 14328, 14781, 14782, 15063, 15280, 15341, 15473, 15606, 15843, 15984, 16076, 16172, 16258, 16334, 17015, 17036, 17169, 17202, 17213, 17259, 17729, 17935, 17939, 18273, 18358, 18500, 18506, 18601, 18718, 18982, 19134, 19190, 19202, 19232, 19395, 19481, 19501, 19552, 19623, 19883, 19948, 20305, 20474, 20535, 20660, 20707, 20789, 20816, 20875, 20889, 20931, 21043, 21108, 21135, 21386, 21430, 21493, 21561, 21883, 21976, 21981, 22271, 22273, 22310, 22347, 22591, 22858, 23001, 23045, 23244, 23385, 23412, 23588, 23883, 23989, 24079, 24895, 25359, 25519, 25692, 25802, 25880, 26114, 26221, 26373, 26839, 27151, 27475, 27603, 28004, 28149, 28592, 28628, 
28998, 29291, 29737, 29851, 30078, 30175, 30191, 30347, 30365, 30461, 30700, 30851, 30916, 30943, 31153, 31424, 31650, 31829, 31856, 31901, 32283, 32336, 32414, 32551, 32772, 33181, 33244, 33578, 33697, 33711, 33763, 33823, 34136, 34235, 34477, 35261, 35325, 35348, 35388, 35477, 35587, 35635, 35644, 35764, 35849, 35877, 35878, 35884, 35936, 36074, 36308, 36605, 36769, 37051, 37133, 37537, 38066, 38494, 38546, 38588, 38978, 39157, 39269, 39532, 39615, 39915, 40345, 40356, 40529, 40715, 40827, 40829, 40913, 40922, 41272, 41307, 41495, 41627, 41640, 41839, 41843, 41988, 42099, 42508, 42567, 42662, 43076, 43167, 43206, 43236, 43879, 44135, 44232, 44321, 44387, 44493, 44521, 44539, 44551, 44885, 44886, 45196, 45241, 45419, 45453, 45519, 45664, 45668, 45717, 46197, 46307, 46579, 47018, 47227, 47360, 47956, 48574, 48685, 49100, 49244, 49603, 49677, 49738, 49745, 49753, 50037, 50634, 50683, 51019, 51092, 51181, 51320, 51482, 51713, 51718, 51845, 51896, 52156, 52430, 52861, 52864, 53063, 53175, 53250, 53516, 53652, 53655, 54110, 54205, 54406, 54479, 54804, 55429, 55485, 56074, 56266, 56474, 56562, 56589, 56669, 57025, 57032, 57181, 57522, 57574, 57599, 57653, 57901, 57931, 58056, 58148, 58157, 58166, 58396, 58905, 59354, 59707, 60194, 60373, 60548, 60778, 60846, 60889, 60999, 61076, 61147, 61310, 61498, 61652, 61716, 61906, 62247, 62423, 62449, 62451, 62524, 62588, 62870, 62946, 63221, 63227, 63301, 63335, 63400, 63475, 63486, 63550, 63716, 63875, 63957, 64068, 64086, 64096, 64130, 64908, 64988, 65283, 65428, 65454, 65514, 65717, 65838, 66018, 66353, 66394, 66639, 66655, 67074, 67211, 67277, 67293, 67427, 67541, 67681, 67709, 67799, 67884, 68047, 68179, 68191, 68244, 68538, 68632, 68665, 68879, 68901, 69098, 69158, 69941, 70049, 70348, 70373, 70418, 70658, 70687, 70966, 70986, 71253, 71261, 71631, 71651, 71818, 71848, 71907, 72264, 72324, 72750, 72843, 72881, 73115, 73391, 73586, 73798, 74316, 74372, 74556, 74681, 75050, 75074, 75119, 75294, 75298, 75353, 75450, 75873, 75975, 76161, 76345, 76535, 76649, 76671, 76829, 76880, 76921, 76978, 77041, 77129, 77723, 77859, 78706, 78758, 78799, 78840, 78874, 78955, 79206, 79357, 79697, 79990, 80340, 80341, 80383, 80410, 80515, 80767, 81148, 81386, 81553, 81621, 81696, 81833, 82096, 82284, 82319, 82371, 82468, 82521, 82553, 82741, 82954, 83095, 83122, 83179, 83293, 83724, 83861, 83914, 84017, 84110, 84201, 84270, 84319, 85327, 85680, 85777, 86110, 86146, 86186, 86334, 86715, 86846, 87105, 87375, 87649, 87699, 87767, 87797, 88241, 88692, 88776, 89140, 89352, 89401, 89575, 89963, 90289, 90628, 91219, 91468, 91531, 91656, 91752, 92020, 92086, 92541, 92560, 92878, 93186, 93226, 93381, 93545, 93699, 93736, 94258, 94339, 94372, 94497, 94519, 94531, 94639, 94760, 94899, 94924, 95135, 95154, 95285, 95307, 95496, 95788, 96082, 96328, 96436, 96564, 96592, 96666, 97056, 98054, 98107, 98126, 98628, 98903, 99313, 99607, 99614, 99870, 100204, 100790, 100819, 100858, 100925, 100929, 101294, 101441, 101527, 101599, 101756, 101900, 102473, 102614, 102778, 102822, 103147, 103279, 103337, 103499, 103533, 103561, 103970, 104405, 104618, 104695, 104808, 104924, 105015, 105153, 105334, 105403, 105424, 105521, 105582, 105703, 105911, 106073, 106542, 106663, 106787, 106921, 106960, 107121, 107584, 107605, 108007, 108266, 108303, 108682, 109069, 109298, 109377, 109574, 109599, 109654, 110021, 111618, 112444, 112495, 112515, 112862, 112870, 113050, 113250, 113679, 113730, 113789, 114097, 114187, 114240, 114686, 114958, 115163, 115223, 115315, 115378, 115451, 115578, 115624, 115725, 
115973, 116035, 116481, 116835, 116949, 116964, 117122, 117178, 117243, 117249, 117354, 117376, 117601, 117721, 118053, 118067, 119269, 119358, 119719, 119948, 119987, 120324, 120359, 120430, 120665, 120697, 120846, 121595, 121917, 122476, 122500, 122791, 122854, 122999, 123025, 123035, 123402, 123562, 123939, 123957, 124127, 124142, 124255, 124495, 124884, 126069, 126080, 126097, 126253, 126350, 126352, 126684, 127086, 127506, 127577, 127706, 127726, 127826, 128022, 128170, 128212, 128414, 128575, 128708, 129022, 129072, 129089, 129308, 129476, 129692, 129959, 130005, 130153, 130344, 130498, 130948, 131002, 131149, 131362, 131873, 131987, 132020, 132409, 132477, 132529, 132632, 132637, 132658, 132992, 133420, 133711, 134170, 134378, 134379, 134386, 134428, 134467, 134738, 134838, 134860, 135084, 135146, 135423, 135572, 135864, 136085, 136345, 136377, 136502, 136694, 136739, 136908, 136912, 136963, 137432, 137662, 137878, 137893, 137912, 137995, 138323, 138330, 138454, 138659, 138779, 138835, 139333, 139618, 139668, 139782, 140134, 140182, 140279, 140354, 140634, 140646, 140699, 140706, 140974, 140997, 141282, 141308, 141496, 141652, 141667, 142607, 142680, 142748, 143590, 143975, 144118, 144546, 144727, 144774, 144958, 145007, 145071, 145428, 145682, 145738, 145799, 145949, 146808, 146903, 147058, 147138, 147280, 147406, 147607, 148305, 148341, 148469, 148904, 148918, 149139, 149417, 149473, 149603, 149772, 149820, 149838, 149965, 150082, 150562, 151034, 151048, 151167, 151306, 151479, 151510, 151635, 151843, 151930, 151998, 152134, 152275, 152278, 152379, 152438, 152723, 152736, 152899, 153020, 153119, 153219, 153236, 153243, 153565, 153819, 153875, 153924, 154417, 154432, 154517, 154682, 154772, 154808, 155205, 155491, 155821, 155858, 156007, 156079, 156303, 156390, 156609, 156647, 156777, 156887, 157848, 157979, 158031, 158242, 158253, 158306, 158379, 158537, 158552, 158588, 158604, 158959, 159147, 160218, 160252, 160270, 160298, 160405, 160425, 160491, 160530, 160548, 160788, 160805, 160863, 161178, 161270, 161462, 161691, 162011, 162089, 162387, 162403, 162409, 162451, 162980, 163026, 163406, 163424, 163560, 163777, 163833, 164135, 164183, 164365, 164484, 164526, 164552, 164657, 164797, 164962, 165562, 165954, 165980, 166059, 166141, 166533, 166719, 166914, 167212, 167247, 167363, 167370, 167424, 167486, 167710, 167737, 167782, 167815, 167889, 168185, 168295, 168359, 168711, 168908, 168947, 168973, 169264, 169272, 169353, 169616, 169734, 169814, 169912, 170167, 170282, 170368, 170444, 170446, 170496, 170669, 170676, 170682, 170734, 170793, 171289, 171958, 172222, 172312, 172758, 172782, 172987, 173026, 173034, 173186, 173286, 173400, 173569, 173579, 174219, 174549, 174653, 174922, 174980, 175024, 175033, 175037, 175069, 175180, 175284, 175317, 175465, 175607, 175681, 175744, 176221, 176353, 176503, 176567, 176807, 177433, 177543, 177671, 177835, 178086, 179301, 179321, 179426, 179448, 179716, 179834, 180164, 180346, 180436, 180502, 180540, 180648, 180772, 180888, 181216, 181266, 181407, 181435, 181496, 181515, 181752, 182098, 182142, 182155, 182162, 182198, 182218, 182441, 182464, 182831, 183804, 184106, 184371, 184380, 184428, 184544, 184552, 184911, 184988, 185074, 185130, 185154, 185342, 185491, 185531, 185706, 185863, 185947, 185996, 186128, 186197, 186540, 186562, 186665, 186802, 186902, 187068, 187149, 187350, 187406, 187789, 187838, 187922, 188062, 188392, 188469, 188733, 188787, 188903, 189141, 189180, 189573, 189577, 189634, 189855, 190354, 190392, 190745, 190872, 190927, 
191272, 191650, 192051, 192108, 192218, 192524, 192792, 193212, 193916, 194052, 194126, 194344, 194368, 194484, 194528, 194835, 195388, 195701, 195736, 195774, 195782, 195970, 196044, 196103, 196398, 196415, 196457, 196759, 196784, 196870, 197072, 197181, 197422, 197685, 197827, 198076, 198118, 198121, 198296, 198321, 198357, 198433, 198477, 198533, 198545, 198666, 198772, 199158, 199186, 199288, 199367, 199375, 199539, 199685, 200023, 200178, 200736, 200856, 200965, 201086, 201102, 201502, 201661, 201778, 202366, 202650, 202673, 202679, 202763, 202860, 202976, 202996, 203068, 203206, 203277, 203284, 203370, 203429, 203693, 203776, 203838, 204064, 204113, 204246, 204765, 205523, 205775, 205814, 205910, 206013, 206203, 206244, 206272, 206620, 206760, 206898, 207120, 207168, 207171, 207214, 207318, 207412, 207462, 207562, 207828, 208297, 208783, 209228, 209350, 209640, 209704, 210092, 210237, 210279, 210494, 210585, 210694, 210994, 211059, 211208, 211491, 211569, 211810, 211833, 211842, 212090, 212094, 212120, 212451, 212709, 212851, 212991, 213670, 213776, 214041, 214257, 214272, 214450, 214608, 214613, 214682, 214874, 214963, 215192, 215285, 215676, 216165, 216348, 216351, 216464, 216503, 216564, 216610, 216673, 216751, 216757, 216768, 217180, 217525, 217827, 218061, 218292, 218333, 218358, 218445, 218481, 218548, 218806, 218846, 219106, 219471, 219545, 219656, 219972, 220011, 220026, 220128, 220213, 220226, 220335, 220662, 220681, 220747, 221026, 221232, 221512, 221561, 221678, 221699, 221760, 221861, 222030, 222208, 222216, 222333, 222371, 222552, 222796, 222980, 223050, 223121, 223137, 223140, 223270, 223567, 223588, 223600, 223855, 223975, 224057, 224186, 224312, 224390, 224505, 224817, 224849, 224903, 225029, 225127, 225169, 225172, 225225, 225721, 226032, 226045, 226445, 226450, 226452, 226525, 226611, 226796, 226802, 226947, 226965, 227016, 227038, 227255, 227396, 227410, 227905, 228157, 228562, 228694, 228911, 229287, 229938, 230034, 230120, 230177, 230198, 230213, 230273, 230379, 230393, 230419, 230546, 230651, 230946, 231088, 231223, 231412, 231511, 231575, 231646, 231659, 231688, 231821, 231892, 232130, 232133, 232227, 232762, 232904, 232988, 233304, 233334, 233577, 233581, 233744, 233756, 233865, 233910, 234134, 234142, 234353, 234600, 234631, 234640, 234841, 234908, 235151, 235544, 235666, 235810, 235860, 235915, 236033, 236273, 236497, 236691, 236710, 236724, 236897, 236899, 237025, 237029, 237222, 237381, 237500, 237561, 237632, 237680, 237962, 238224, 238241, 238752, 238788, 238803, 238836, 238984, 238995, 239045, 239328, 239329, 239470, 239552, 239703, 240023, 240204, 240222, 240368, 240596, 240634, 240655, 240915, 241078, 241091, 241103, 241119, 241132, 241138, 241170, 241534, 241751, 241852, 241898, 242051, 242112, 242282, 242382, 242403, 242492, 242720, 242924, 243027, 243075, 243139, 243148, 243216, 243224, 243294, 243301, 243320, 243370, 243397, 243420, 243799, 243850, 243969, 244056, 244170, 244322, 244340, 244349, 244479, 244639, 244692, 244919, 245160, 245423, 245597, 245612, 245740, 245987, 246069, 246224, 246262, 246264, 246322, 246384, 246428, 246436, 246442, 246514, 246553, 246810, 246873, 246879, 246918, 246981, 247079, 247191, 247261, 247288, 247377, 247459, 247474, 247549, 247803, 247903, 248010, 248011, 248027, 248070, 248081, 248169, 248270, 248278, 248381, 248489, 248553, 248705, 248738, 248773, 248777, 248919, 249043, 249106, 249108, 249200, 249237, 249313, 249618, 249986, 250209, 250243, 250572, 250590, 250667, 250768, 250993, 251107, 251128, 251324, 
251352, 251356, 251362, 251490, 251568, 251577, 251656, 251705, 251842, 252187, 252388, 252411, 252416, 252530, 252797, 252852, 253057, 253128, 253150, 253228, 253325, 253336, 253763, 253777, 253805, 253822, 253866, 253916, 253926, 254166, 254296, 254614, 254662, 255042, 255087, 255120, 255124, 255211, 255237, 255294, 255357, 255451, 255514, 255529, 255570, 255680, 255713, 255779, 255883, 256037, 256051, 256073, 256133, 256154, 256272, 256496, 256591, 256608, 256678, 256742, 256743, 256796, 256997, 257077, 257101, 257207, 257331, 257379, 257483, 257622, 257637, 257672, 257714, 257941, 258025, 258090, 258193, 258231, 258249, 258362, 258402, 258403, 258484, 258527, 258543, 258768, 258789, 258855, 258997, 259156, 259169, 259171, 259209, 259225, 259277, 259298, 259308, 259314, 259357, 259360, 259370, 259376, 259383, 259390, 259549, 259602, 259623, 259626, 259649, 259650, 259683, 259705, 259741, 259775, 259777, 259824, 259844, 259854, 259858, 259892, 259898, 259906, 259918, 259943, 259947, 259948, 259956, 259957, 259965, 259969, 259974, 259978, 259980, 259984, 259994, 260010, 260013, 260026, 260046, 260053]\n['STIEEQAKTFLDKFNHEAEDLFYQSLLALMNYNTNYEEENVQNMNNALDKMSAFLKEQSTLAMMYPLQEIQNLTVKLKLQVLQ', 'STIEEQAKTFLDKFNHEAEDLVYQSMLAMMNLALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SSIEEQAKTFLDKFNHEAEDLFSQSSLASMNYELEKMSAFLKEVSTIAQMYPLQQIQNLTVKLQLQALLAQMYPLQEIE<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLYNMNYDLDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDNKYQMLLASMNLALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLAYMNYFLDKMSAFLKEQSTLAQMYPLQEIQNLTLKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFNHEAEDLFYQSSFASMNYNTNIEEENVQNMNNALDKMSRFLKEMKTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLMKFNHEAEDIFYQSSLASMNYKLDKMSAFLKEQSTLAQMYPLQEIQNLTVRLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDTFNHEAEDLFYQSFLTMLYYAGDKMSAFLKEQSTLAQMYPLVEIQNLTVKLMLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQAKTFLDKFNHEAEDLFYQMSLASMNYNTNITLAYMDALQLMLSAFLKEQSTLAQMYPLQEIQNMTVKLQLQALQSLM', 'FTLEEQSKEFLEKFNHEAEDLFYQSSLASMNYALDKMSAFLKSQRDLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'MY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'M<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYAAEKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEMLFYQSSLASMNNYGDKMYAFLKLQSTLAQMYPLQEIQNLTVLLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEYQAKTFLDKFV<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQKSLNSMNYNTNNTNENVQNMNNAMDKMSAFLKEISTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFMHEAEDLFYQSSLASMNYNTNIEEENVQNMNNALDKMSAFLKEQSTLAQMFPLQEIQNLTVKLQLQALQ', 'LTLDELLDLFLKELLTLALMYPLQEIQNLTVKLQLQVLI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITIEEENVQMMNNAGDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQA', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNIAAENVLAMNNALDKMSAFLKEASTRAQMYPLQEIQNLTVKLQLQALQ', 'H<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFEQSFLAMMNFAGDKMSAFLKEQSTLAQMIPLQEIANLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQYSLAKMMNNLMDKMSYFLKEQSTLIQMYPLHEIQNLPVKLQLQAMM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SSIEELAKTFLGKFNHEAEKLFYQSKLA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDMFYQSSLASMNYNTNIEEENVQNMLEALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'MTIELQAKTFLDKFR<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHMAEDLFYQSMLASMNYNTNITIEEENVQNMNNAGDKMSAFLHELSTLAQMYPLQEIQNLTVKLILQA', 'STIEEQAKTFLDKFNHEAEKLFYQSSLASMNYNTNNTNENNTNMNMALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIEEENVQNMNNAMDKMSAFLQEQTTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLDK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNSEAELLFYQSSLASMNYAGDKMSAFLKEQSTLAQMMPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'MTIEETAKTFLKEQSRLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SFIEERAKTFLDKFNHEAEILFYQSSLASMNYNTNIAEENFQNMNNALDKMSAFLKEQITLAQMYPLQEIQNLTVKLQLQALQ', '<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLAKFNHEAEDLFYYSALASMNYNTNIAEENVQMMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SGIEEQAKTFLDKFNHEAERLFYQSSLASMNYNTNFTEENVQNMNNALDKMSAFLKEQSALAQMYPLQEIQNLTVKLQLKALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNFTEENVQNMNYALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLRALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIAEENVQNMNRALDKMSAFLKEISTLAQMYPLQEIRNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNILEENVQNMNLALDKMSAFLKEQSTLAQMYPLKEIQNLTVKLQLQALQ', 'STIEEQAKSFLDKFNHEAEDLFYQSSLALMNYNTNYTEENVQNMNNALDKMSAFLKTQSTLAQLYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDSFNHEAEQLFRQSSLASMNYNTNIAEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLKLQALQ', 'STIEEQAKTFLDKFNREAEDLFYQSSLASMNYNTNILEENVQNMNNALDKMSAFLKTQSTLAQMYPLQMIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMIYNTNIAEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNMRAEDLFYQSSLASMNYNTNIAEENVQNMNNALDKMSAFLKEQSTLAQMYPLRIIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAIDLFYQSSLASMNYNTAITEENVDNMNNALDKMSAFLKEQSTLAQMFPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHESEDLFYQSALASMNYNVNITEENVLNMNNALDRMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLQKFNHEAEDLFYQSSLAMMNYNTNISEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLSLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNITIENVQNMNQALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SPIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNFTEENVQNMNNALDKMRAFLKEQSTLAQMYPLMEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFIHEAEDLFYQSMLASMNYNTNIVEENVQNMNNALDKMSAFLKEQSDLAQMYPLQKIQNLTVKLQLQALQ', 'STIEEQAKMFLDKFNHEAEYLFYQSSLASMNYNTNITIENVLNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAKDLFYQSMLASMNYNTNITIENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 
'SDIEEQAVTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNNALDKMSAFLKEQSKLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNNALDKMSAFLKEQSALAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAIMNYNTNIAEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQYMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTIKLQLQALQ', 'STIEEQAKTFLDKFNHFAEDLFYMSSLASMNYNTNILEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLKKFNHEASDLFYQSSLASMNYNTNIDEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'SGIEEQAKTFLDKFNHEAEDLFYQSSLASMNFNSNITEENEQNMNNALDKMSAFLKEQSTRAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLMKFNHEAEMLFYQSSLASMNYNTFITEENVQNMNNALDKMSAFLLEQSTRAQMYPLQEIQNLTVKLQLQAFQ', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNTTVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTQITEENVQNMNNALDKMSAFLKEQSRLAQMYPLQEIQNLTVKLQLLALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLARMNYNTNIAEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKEQLQALQ', 'MTIEEQAKTFLDKFNHEAEMLFYQSSLASMNYNTNIDEENVENMNIALDKMSAFLMEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTQITEENNQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDMFYQSSLASMNYNLNITEENLQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITNENMQNMNYALDKMSAFLKEQSTLAQMYPLQEIQNLMVKLQLQALQ', 'STIEEQAKTFLDKFNHEAELLFYQSQLASMNYNTNITSENVENMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEETAKTFLDKFNHEAEDLFYQFSLASMNYQTNITEENVQNMNRALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'TDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIFEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALK', 'STIEEQAKTFLDKFNHEAEDLFYYSKLASMNYNALITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYLSSLASMKYNLNITEENVQNMNMALDKMSAFLKDQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAERLFYQSSLASMNYNTNISEENVQNMNNALDKMSAFLKEQSALAQMYPLQEIQNLTVKHQLQALQ', 'SEIEEQAKTFLDKFNHEAEDLFYMSSLASMNYNTNITELNVQNMNNALDKMSAFLKEQSTIAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFMHEAEDLFYQSSLASMNYNTNITIENVQNMNFALDKMIAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDGFNHEAQDLFYMSSLASMNYMTNITEENVQMMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDLFYVSSLANMNYNTNITAENVQNMNVALDKMSAFLKEQSTLAQMYPLQEIQNLAVKLKLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSLLASMNYNTEITEENVQNMNNALDKMSAFLKSQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSKLASMNYNTNIDEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLEVKLQLQALQ', 'SDIEMQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIAEENVQNMYRALDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEQNVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDIFNHEAEDLFYQSSLAAMNYITNITEENVQNMNNALDKMSAFLKRQSTLAQMYPLQEIKNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAEDLFYHSSLASMNYNTNITERNVQNMNQALDKMSAFLKEQSALAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAKTFLDKFNHRAEHLFYQSQLASMNYNTNISEENTQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLVVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTFITEENVQNMNMALDKMSAFLKEQSTLAQMFPLQEIQNLTVKLQLQALQ', 'NTIEEQAKTFLDHFNHEAEDLFYQSMLASMNYNTNITEEKVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLMKFNHEAEDLFYQSSLAMMNYNTEITEENVQNMNNALDKMSAFLKQQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMMYNTNILEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEQIFYQSSLASMNYNLNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQFSIASMNYNNNITEENVQNMNNALDKMSAFLKEQSTLAKMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEARDLFYQSSLASMNYNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'VTIEEQAKTFLMKFNIEAEDLFYQSSLASMNYNLNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLKVKLQLQALQ', 
'STIEEQAKTFLDKFNHYAEDLIYQSSLASMNYNTFITEENVQYMNEALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAERLFYQSSLASNNYNTNIKEENVQNMNNAGDKMSAFLKMLSTLAQMYPLQEIQNLTVKLQLQALR', 'SDIEEQAKTFLDKFNHEAELLFYQSSLASMNYNTNITLENVQNMNNALDKMSAFLKEQSTLAQMYMLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMRLNTNITVENVLNMNNALDKMSAFLKYQSTLAQMIPLQEIQNLTVKLQLQALQ', 'ESIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITNENVQNMNNALDKMSAFLKEQSTLAQMYPLNEIQNLSVKLQLQALQ', 'STIEEQAKLFLDKFNHEAEDLFYQVSLALMNYNTNITAENVQNMNNALDKMSAFLKLQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKRFLDKFNHEAEDLFYQSSLASMNYNTNITAENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAELLFYQSQLASMNYNTIITEENVENMNNALDKMSAFLKEQSTLAQMYPLQEIQNMTVKLQLQALQ', 'SFIEEQAKTFLDKFNHEAEDLFYQSSLASMIYNTNLTEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKKFLDKFNHEAEDLFYDSSLASMNYNTNITEQNVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNALDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STLEEQAKTFLDKFNHEAEDLFYQSSLASMIYNSNITEENVQNMNNALDKMSAFLKEQSTLARMYPLQEIQNLTVKLQLQALQ', 'STIEFQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEINVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNLMVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQMSLASMNMNTNIVEENVQNMNIALDKMSAFLKEQSTLAQMFPLQMIQNLTVLLQLQALQ', 'SDIEEAAKTFLDKFNREAEDLFTQSSLASMNYNTNITEEFVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMYYNTNVTEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLMVKLQLQALQ', 'STIEEQAKFFLDKFNHEAEDLFYYSSLASMNYNTQITEENVQNMNNALDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKRFLDKFRHEAEDLFYQSMLASMNYNSNITEENVQNMNYALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKAFLDKFNHEAEDLFYQSYLASMNYNTNISEENVQNMNRALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKKFLDKFNHEAEDLFYQSSFASMYYNTNITEEEVQNMNHALDKMSAFLKEQSTLAQMYPLKEIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHFAEFLFYQSSLASMNYNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAQTFLDKFNHEAEDLFYQSSLASMMYNTNIIEENVQNMNRALDKMSAFLKEQSTLAQMYPLQEIQNETVKLQLQALQ', 'STIEEQAKIFLDKFNHEAEDLFYQFSLASMNYNANITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIDNLTVKLQLQALQ', 'STIEEYAKTFLDKFNHEAERLFYQSSLASMNYNTNITEERVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNNNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLMVKLQLQALQ', 'STIEEQAKTFLDKFNHEAERLFYQSSLASMKYNTMITEENVQDMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTEITEENVQIMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYLTNITEENVQNMNRALDKMSAFLKAQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMMYSTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLAVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITEKNVLNMNNALDKMSAFLKEQSTLALMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNIAEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQAIQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTFITEENVQNMNNALDKMSAFLKRQSTLAQMIPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAKDLFYQSSLASMNYNSNITEENVQNMNAALDKMSAFLKLASTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQARTFLDRFNHEAEDLMYQSSLQSMNYNTNITQENVQNMNNALDKMSAFLKEQTTLAQMYPLQMIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQYSLASMNYQTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLMLQALQ', 'STIEEQAKTFLDFFNHEAEDLFYQSLLASMNYNTNITEMNVLLVKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad>', 'SDIEEQAKTFLNKFNHEAEDLLFHSSSLASMNYNTNITEENVQNMNKALDQMSAFLKEQSTMAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTFITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEENAKTFLDMFIHEAEDLFYQSILASMAYQTNITEENVQNMNNAEDKMMAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYMSMLASMNYNSNITEENVQNMNNALDKMSAFLKEKSTLAQMYPLQEIQNLTVKLALQALQ', 
'STIEEQAKTFLHKFNHEAEDLFYQSLLASMNYNTNITEEMVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNAEAEDLFYQASLASMNYNLNITEENVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNDTVKLQLQALQ', 'SNIEEQAKTFIDKFNSEAEDLFYQSSLASMNYITNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'SDIEEQAKTFLDKFNLEAEDLFYQSSLASMNYNTEITEENVQAMNNSLDKMSAFLKERSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLALMNYNTNKTEENVQNMNNALDKMSAFLKEQSQLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKSMNYNTNIVEENVQNMNNALDKMSAFLKNQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SYIEEQAITFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLQVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMAYNTQITEENVQNMNNALDKMSAFLKEQATLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLEKFNHEAEDLFYQSMLASMNYNTQITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLKALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAIMNYNTKITEENVQNMNNALDKMLAFLKEASTLAQMYPLQEIQNFTVKLQLQSLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITIENVQNMNNALDKMSAFLREQSTLAKMYPLQEIQNLPVKLQLQALQ', 'SDIEEQAKTFLRKFNHEAEDFFYQSQLASMNYNTEITEENVQNMNNAGSKMSAFLKYQSTLAQMYPLQEIQNLTVKLQLQALQ', 'MTIEEQAKTFLDKFNHMAEMLFYQSSLASMNYNTNITNENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLLKFNHEAEDLFYQSSLASMNYNTNITEELVQNMNNALDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAITFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQMMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLMYQSSLASMNYNTNITEFNVQNMNDALDKMSAFLKDQSKLAQMYPLQMIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHVAEDLFYQSMLASMNYNSNITEENVQNMNNALDKMSAFLAEQSTLAQMYPLKEILNLMVKLQLQALQ', 'SGIEEQAKTFLDKFNHEAEDLIYQSSLASMNYKTNITEENVQNMNNAKDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYDTNITEENVHNMRNALDKMSAFLKESSTLAQMYPLQEIQNLTVKLELQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSRLASMNYKTNITEENVQNMNHALDKMSAFLIEQSTLAQMYPLQYIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTFITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNREAEDLFYQSSLASMNYNTFITEENMQNMNNALDKMSAFLKEKSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYLTNITEENVQNMNNALDKMSAFLKHQSTLAQMYPLKEIQNLKVKLQLQALQ', 'STIEEQAKTFLDDFNHEAEDLFYQSSLAMMNYNTIITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLQVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYFTNITEENMQNMNNALDKMSAFLKEQTSLAQMYPLQEIQNLKVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMMYNTNITEEIVQNMNNALDKMSAFLKEQSTLAQMYPLQRIQNLAVKLQLQALQ', 'SDIEEQARTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNALDKMSAFLKEQSTLAQMYPLQEIANLTVKLQLQALK', 'STIEEQAKSFLDKFNHEAEDLFYQSLLASMNYNTNITLENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLLKFNQEAEELFYQSSLASMNYNTNITEMNVQNMNNALDKMSAFLKELSTLAQMYPLQEIDNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAYMNYNTKITEENVQNMNNALDKMSAFLKRQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAERLFYQSSLASMNYNTNITEEQVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDIFNHEAEDLFYQSSLASMNYNTAITEENFQNMNNALDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNNNITEENVQNMNNALDKMSAFLKEQSDLASMYPLQEIQNLPVKLQLQALQ', 'SFIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEFVQNMNQALDKMSAFLKEQSTLAQMYPLQEIQNLRVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNMNITEENVQLMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEVQAKHFLDKFNHEAEQLFYQSSLASMNYNKNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEFAKTFLDKFNHEAEDLFYQSSLASMNYNTNIDEENLQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLLLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLARMNYNENITEENVQNMNFALDKMSAFLKEQSSLAQMYPLQEIQNLMVKLQLMALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEENVQNMNNALDKMSAFLKEQSTRAQMYPLQMIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSMASMNYFTNITEENYQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLKVKLQLLALQ', 
'STIEEQAKTFLDKFNHEAEDIFYQSSLASMNYNFNITEENVRFMNNAMDKMSAFLKEQSKLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEMLFYQSSLASMNYNTNITRENKQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTLKLQLQALQ', 'STIEEQAKTFLDKFNHEATELFYQSSLASMNYNTNITEELVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLDVKLQLQQLL', 'STIEEQAKTFLKKFNHEAEDLFYQSSLASMNYKTNITEENVQNMNNALDKMRAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAQDLFYQSSLASMNYNTNITNENVQNMNRALDKMSAFLKERSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLLYQSSLAKMNYNTNITEEKVQNMNNALDKMSAFLKTQRTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKSMNYNTNITEELVQNMNNALDKMSAFLKRQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNFEAEDLFYQSSLASMNYNTNITEFNVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLRALQ', 'SNIEEQAKTFLDKFNHEAEDLFYQSSLQSMNYNNNITEENVQNMNNALDKMSAFLKLQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKMNHEAEDLFYQMSLASMIYNTSITEENVQNMNNALDKMSAFLKEQSTLAQAYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLMKFNHEAEDLFYQSSLALMNYNTNITEEDVQNMNNALDKMSAFLKEQSTLAKMYPLQEIQNLTVKLQLQSLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENVHNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEMLFYQSSLAAMNYNTNINEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEHLFYQSSLLSMNYNTNITNENVQNMNDALDKMSAFLKELSMLAQMYPLQEIQNLTVKLQLQALQ', 'STLEEQAKTFLEKFNHEAEDLFYQSYLASMNYNTNITELNVQNMNNAEDKMSAFLKEASTLAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNNAGDKMSAFLKEQSNLASMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITNENVQNMNNALDKMSAFLKELSTLAQMYPLNEIQNLMVKLQLQALA', 'SRIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTLITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALL', 'SDIEEIAKTFLDKFNHEAMDLFYQSSLASMNYNTNITEKNVQNMNNALDKMSAFLKEQSDLAQMYPLQEIQNLTVKLQLQALI', 'STIEEQAKTFLDKFRHEAEDLQYISSLAEMNYFTNITEENLRNMNNALDKMSAFLKEQSTLAQNYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDMFNHEAEDLFYQSSLASMNYNTEITEENVQNMNMALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENFQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQS', 'STIEEQAKTFLDKFNHEAEILFYQSSLAIMNYNTEITEENVQVMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVQNMNNALDKMSAFLKEQRTLAQMYPLQKIQNLTVKLQLQALQ', 'SGIEEQAKTFLDKFNHEAEKLFYQSFLASMNYNTYITEENVQNMNNALDKMSAFLKEQSALAQMYPLQEIQNLTVKLQLQALQ', 'SYIEEQAKTFLDLFNHEASDLFYQSSLASMNYQTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQATTFLDKFNHEAEDLFYQSMLASMNYNTNITIENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKRQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAIMNYNANITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLMLQMLQ', 'STIEEQAKTFLDKFNHEAEDLAYQSSLASMNYLTNITEENVQNMNNALDKMSAFLKYMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLMKFNHEAEDLFYQSSLAAMNYNTNITELNVQSMNNASDKMSAFLKELSTLAQMYPPQEIQNLTVLKQLQALQ', 'STIEEQAKTFLDNFNHEAMDLFYQSSLASMNYLTNITEENVQNMNNALDKMSAFLKYQSTLAQMYPLQEIQNLTVKLQLQALI', 'STIEEQAKTFLDKFNHEAETLFYQSSLASMNYFTNITEENVQNMNNALDKMNAFLKEQSTLAQMYPLQEIQNLTVKLLLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNAFDKMSAFLKEQSTLAQMYPLMEIQNLMVKLQLQALQ', 'STIEEQAKTFLDKFNLEAEKLFYQSSLASMNYNTYITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIKNLTVKLQLQALQ', 'YDIEELTAKTFLDRFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKSQSTLAQMYPLQEIQNLTVKLQLQAL', 'ATIEEQAKTFLDKFNHEAEDLFYQSALASMNYNTNINEENVQNMNNALDKMSAFLKEQSTLAQSYPLQEIQNLPVKLQLQALQ', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVFNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLLLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLARMFYNTNILEENVQNMNNAGDKMSAFLKELSTLAQMYPPQEIKNLTVKLNLQALQ', 'STIEEQAKTFLDKFNHEAEKLFYQSSLASMNYNTNITFENVQNMNMALDKMSAFLKEQSTLAQMYPLMEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNRALDKMSAFLMEQSTLAQMYPLQEIQNLAVKLQLQALQ', 
'STIEEQAKTFLDKFNHEAESLFYQSSLFSMNYNLNITEENVQNMNNALDKMSAFLKEQSTLAQMYPPQEIQNLTVKLQLQALQ', 'STIEEQAKTFLYKFNHEAEDLFYQSMLASMNYNTNITEEFVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SNIEEYAKTFLDKFNHEAEDLFYQASLASMNYNTQITEENVQNMNNALDKMSAFLKENSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLIKFNHEAEDLFYQLASAIYNTNITEENVINMNNALDKMSAFLKEQSTLANMYPLQEIQNLTVKLKLQALQ<sep><pad>', 'STIEEQAKQFLDKFNHIAEDLFYTSSLASMNYNTNITEELLQNMNNALDKMAAFLKEMSTLAQMYPLQEIQNLTVKLQLAALQ', 'SSIEEQAKTFLDKFNHEAEDLFYQSNLASMNYNTNISEENVQNMNTALDKMSAFLKEQSTLAQMYPLQEIQNLSVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNYALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLLLQALQ', 'STIEEQAKTFLDKFNHEAQDLFYQSSLASMNYNTNITRENVQNMNMALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLILQALQ', 'STIEEQAKTFLEKFNHEALDLFYQSSLASMNYNTNITEEQVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKAQLQALQ', 'STIEEQAKTFLDKFTHEAEDLFYQSSLASMNYNTNIMEENVQNMNNALDKMSAFLKRQSLLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEMLFYQSSLASMNYKTNITEENVQNMNNALDKMSFFLKEQSTLAQMYPLQEIQNMTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQYSLASMNYNTNITMENVLNMNNALDKMSAFLKEQSTLAQMYPLQDIQNLDVKLQLQSLQ', 'STIEEQAKYFLRKFNHEAEDLFYQSLLASMNYNTNITEEIVQNMNNALDKMSAFLKEQSLLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLFKFNHEAEDLFYQSSLASMNFKTNITEENVQNMNNALDKMSAFLKEQSMLAQMYPLQEIQNLTVKLQLQALI', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENYQNMNNALDKMSAFLKEQSKLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKARNYNTNINEENVQNMNNALDKMSAFLKEQSTLASMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQASLASMNYFTNITEENVQNMNYAGDKMLAFLKSLSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTFITEENLQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVLLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNDNITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEEAKTFLDKFNHEAEELFYQSSLASMNYQTNITEENVQEMNNKLDKMSAFLQEQSTLAQMYPLQEIQNLLVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNINEENVQNMNNALDKMSAFLKEQSSLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHRAEDLFYQSMLASMNYNLNITEENVQNMNNALDKMSAFLKEQLTLAQMYPLQEIQNMTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNNTEENVQNMNNALDKMSAFLFEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLAKFNHEAELLFYQSALRSMNYNTNITFENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVRLQLQALQ', 'STIEEAAKTFLEKFNHEAEDLFYQSSLAMMFYNTYITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SEIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALT', 'STIEEYAKTFLDKFNHEAADLFYQASLASMNYNTNITYENVQNMNNALDKMSAFLKEQSTLAQMYPLQMIQNLTVKLQLQALQ', 'STIEEIAKTFLDKFNREAEDLFYQSSLASMNYNNNITEENIQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHAAEDLFYQSMAAMMYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMAYNTNITQENVQNMNNALDKMSAFLKEQSTRAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHFAEDLFYQSSLASMNYNTFITEENVQDMNNALDKMSAFLKEQITLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNINEENVQNMNLALDKMSAFLKNQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTFITEENVQFMNNALDKMSAFLKEQSTLAQMYPLQMIQNLEVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSRLASMNYNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALI', 'STIEEQAKTFLHKFNHEAEMLFYLSLLASMNYNTNITEQNVQNMNNANRKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLLALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNSNITEENVQNMNNALDKMSAFLKEQSTLAQLYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENAQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKFFLDKFNHEAEDLFYQMSLASMNYNANITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKSFLDKFNHEAEDLFYQSSLALMNYNTNITMENVQNMNNALDKMSAFLKTQSTLAQLYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLALMNYLTNITEENVQNMNNALDKMSAFLKEQSTLAMMYPLQEIQNLTVKLLLQALQ', 'STIEEQAKTFLYKFNHEAEDLFYMSSLARMNYNTNITELNVQNMNNALDKMSAFLKEQSTLAQMYPLQFIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHLAEDLFYTSSLASQNYNNNITEENVQNMNNALDKMSAFLKLMSTLAQMYPLQEIQNLTVKLQLQALR', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNLTEENVQNMNNALDKMSAFLKEQRTLAQMYPLQEIQNLPVKLHLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNYNITEENVQNMNNALDKMSAFLKEVSILAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDMFNHEAEDLFYQSKLASMNYYTNITEENYLNMNNALIKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFLHEAENLFYQSSIASMNYNNNITEENVQNMNNALDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENTQNMNNRLDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'MTIEELAKIFLLKFNHHEAEDLFYQSSLAMMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLMLQAL', 'SFIEEQAKTFLDKFNHEAEDLFYQFMLASMNYNTNITELNVQNMNNANDKMSAFLAEQSTLAQMYPLQEIQNLTVKLALQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQMSLASMNYNTNITEEQVQNMNNALDKMAAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEILFYNSAAMNMNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPLKLQLQALQ<sep><pad>', 'STIEEQAKTFLMKFNHYAEQLFYQSLLASMAYNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEELAKTFLDKFNHEAEDLFYASSLASMNYNTAITEENIQNMNRALDKMSAFLKEQSTLAQMYPLQDIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEKLFYQSSLASMNYNTNIDEENVQNMFNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SNIEEQAKEFLDKFNHEAEDLFYQSSLAMMYYNTRITEENVQNMNNALDKMSAFLKERSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMAYNTNITEEKVQNMNNASDKMSAFLKELSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLAYQSSLASMNYLTEITEENVQMMNNALDKMSAFLKAMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKKFLDKFNHEAEDLFYMSSLASMNYNTNITEEQVQNMNNALDKMRAFLKEQSTLAQMYPPQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEMVQNMNNALDKMSKFLKEQSTLAQMYPLKEIKNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLF<cls>QSSLTSMNYNTNITEENVQNMNNALSKMSAFLKELSTLAQMYPLQEIQQLTVKLQLQRLQ', 'STIEEQAKTFLDTFNHEAQDLFYQSSLASMNYNTYITEENVENMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLMALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTTITEENVQNMNNALDKMSAFLKELSMLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLIKFNLEAEMLFYQSSLASMNYNTNIAEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQNLQ', 'STIEEQAKLFLDKFNHEAEDLFYQSSLAMLNYNTNVTEENVYNMLNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALI', 'SDIEEQAKIFLDKFNHEAEELFYQSSLASMHYNTNITEEIVQNMNNQLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQTLL', 'SPIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTLITEENFFNMNNALDKMSAFLKEQSTLANMYPLQEIQNLPVKLALQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLARMNYNSNITEENVQNMNNALDKMSAFLKEQQTLAQMYPLQFIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEVNVQMMNNALDKMSAFLKEQSTLASMYPLQEIQNLTVKIQLQALF', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYLTNITEENVQNMNEARDKMTAFIKEQSMLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDTFVHEAEDLFYQYSLASMNYNFNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLAILLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYALSITEENVQNMNYALDKMSAFLKEQSTLASMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNNEAEDLFYQSSLASMNYNTFITEENYQNMNNALDKMSAFLKEQSQLAQMYPLQEIQNLTVKLQLQALQ', 'STIEFQAKTFLDKFQHEAEDLFYMSSLASMNYNTNITEEYVVNMNNALDKMSAFLKEISTLAQRYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYLTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLMLQALI', 'STIEEQAKTFLDKFMHEAEDLFQQSSLASMNYNTYITEENVQNMNNALDKMSAFLKRISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYYASDLMMMNYNTNITEENVQLMNNALDKMSAFLKEQSTLAQSYPLKEIQNLTVKLQLQAL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITYENVQIMNAALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAERLFYQSSLASMNYDTNITEENVQNMNAAGDKMSAFLKELSTLASMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVQNMNFALDKMSAFLKEQSTLANMYPLEEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDKFFHEAEDLFYQMSLASMNYNTEITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'SFIEEQAKTFLDKFNHEAEDLFYQSSLASMNYYTNITEENVQNMNQALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITEQNVQNMNNALDKMSAFLKEQSSLAQMYPLQEIQNLPVKLKLQALQ', 'STIEEQAKTFLDKFNHFAEDLFYQSSLASMNYNTNITEKNVQNMNNALDKMSAFLKEQSTLAQMYPLLEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEFVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLLLQALQ', 'SNIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEKNVQNMNNASDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEMLFHQSSLASMNYFTNITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYATNITEENVQNMNNALDKMSAFLKLQSTLAQMYPLQEIQNLTLKLQLQALQ', 'STIEEQAKLFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNVKDKMSAFLKEQSTLALMYPLQEIQNLPVKLQLQLLI', 'STIEEQAKTFLDKFNHEAKDLFYQSSLASMNYNAMITEENVQRMNNALDKMSAFLKEQSALAQMYPLQEIQNLTVKLQLQALA', 'STIEEQAKTFLDKFNHEAEDLFYASSLASMNYNTEITEENVQFMNNALDKMVAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNLEAESLLLFYQSSLASMNYNTNITEENVQNMNNMLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQA', 'STIEEQALTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNFYLDKMSAFLFMQSTLAQMYPLQRIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAIMNYNTNITIENVQNMNNALDKMSAFLKELSTLAQMYPLTEIQNLTVKLQLLALL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMLYNKNITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMFNALDKMSAFLKEVSTRALMYPLQEIQNLYVKLQL', 'SGIEEYAKTFLDKFNSEAEDLFYQSSLASMNYLTNITEENVQNMNNALDKMSAFLKEQATLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMMYNTNITEQNVQNMNQALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'QTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYLTEITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'SYIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTKITEENVQNMNNAMDKMQAFLKMQSTLAQMYPLQEIQNLTVKLQLQALK', 'STIEEQAKTFLKKFNHEAEDLFYQSSLASMNYNTNITEEQVQNMNNALDKMSAFLKEQSTLAQMYPLQDIQNLPVKLQLQALK', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNALDKMSAFLKEQLTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNLEAEDLFYQSRLASMNYLTNITEENVQNMNNFLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEREMLFYQSSLASMNYRTNITEENVRNMNSALDKMSAFLKEQSTLAQMYPLIEIQNLTVKLQLQLLI', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNISEENVQNMNNALDKMSAFLKEQSTLAQMYPLTMIQNLTPKLQLQALQ', 'STIEEQAKTFLDKFNHEAEFLFYQSSLASMNYNLNITEENVQNMNNAYDKMSAFLKEISTFAQMYPLQEIQNPTVKLQLQALQ', 'STMEEQAKTFLDKFNHEARDLKYQSSLASMNYNTNITEEFVQNMNNMLDKMSAFLKEQQTLAQMYPLQFIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHLAEDLFYQIMLASMNYNLNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKQFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQQMNNMLDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNREAEDLFAQSSLASRNYNTIITEENVINMNNAYDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLIKFNHEAEQLFSQSSLASMNYLTNITEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLILQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNLNITEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQLLG', 'STIEEQAKLFLDLFNHEAEDLFYQSMLASMNYNTNITELNVQNMNNALVKMSAFLKEQSTLAQMYPLQEIQNLTVKLMLQALQ', 'STIEEQAKTFLDKFNDEAEDLFYQSSLANMNYNTNITEEQVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQLLR', 'STIEEQAKTFLDKFNTEAEDLFYQSSLASMNYNTNITELNVQNMNNALDKMSAFLKELLTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEKAKKFLDKFNHEAEDLFYQSSLASMFYNTEITEENVQNMNNALDKMSAFLKEQSTLAQSYPLPEIKNLTVKLQLQTLL', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTAITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQMIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYISSLASMNYNTDITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLEVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYSSSLASMNYNTNITEELVQNMNNALDKMSAFLKEQSLLAQMYPLQEIQNFTVKLLLQALQ', 
'STIEEFAKTFLDKFNHEAEDLFMQSSLASMNYNTNITEELVQNMNNALDKMSAFLKEQSVLAQMYPLQEIKNLTVKLQLQALK', 'STIEEQAKTFLDMFNHEAEDLFYQLSLASMNYLTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQLIQNLTVKLQLQALA', 'SKIEEQAKTFLDKFNHEAEDLRYQSSMASMNYNTNISEENVQNMNNALDKMSAFLKDLSTLAQMYPLLEIQNLTVKLQLQALL', 'STIEEQAKTFLDRFNHEAEDLFYQSSLASMNYNTNITEEYVQNMLNALDKMSAFLKEQSTLAQRYPLQEIQKLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLALMNYLTNITEENVQNMNNALDKMSAFLKEQSTLAMMYPLQEIQNLTVKLQLQLLQ', 'STIEEQAKTFLDKFNHIAEDLFYQSSLASMNYNMNITEENVQNMNNAMDKMSAFLKEISTLAQMYPLQEIQMLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFLQSSLASMNYNLNITEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTIKLQLQALQ', 'STIEEQAKYFLDKFNHEAEDLFYRSSLLAMMYYNTNITEENVQNMKNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL', 'STIEELAKMFLMRFNHEAEFLFYQSSLASMNYLTNITEENVQNMNNAGDKMSAFLKEQSSLAQMYPPQEIQNLTVKLQLQALL', 'STIEEQAKTFLDKFMHEAEDLFYQSSLASMNYNTNITLENVQNMNNALDKMSAFLKELSTLAQMYPLQIIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEANDLFYASSLASMNYNQNITEENVQNMNNANDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALL', 'SEYEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLIEIQNLTVKLQLQALQ', 'STIEEQAKQFLDKFNHYAESLFYQSSLAMMNYNNNITEENVQKMNNALDKMAAFLKMQSTLAQMYPLQEIQNLTVKLQLMALQ', 'SNIEEQAKTFLDKFNHEAEDLFYQSSFASMNYNLNITEENLQNMTNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEKAKTFLDKFNHEAEDLFYDSSLASMNYNTNITLENVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNENITEENVQNMNNALDKMSAFLKEQSTLARMYPLQLIQNLTVKLMLQALQ', 'STLEEQAKTFLDKFNHEAEDLFYQSSLASMNYATNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAKLFKYSSLASMRYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLLEIQNLLVKLQLQALM<sep>', 'SNIEEQAKTFLDKFNHEAEDLFYESSLMSMNYNSNITEENYQNMNNAMMKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDSFNHEAEDIFYQSSLASMNYNTNITLENMNNALDKMSAFLEERSTLAQMYPLQEIQNLTVKLQLQALQLLL', 'SSAEEKAKTFLDKFNHEAEMLFYQSSLASMNYNENITEENRINMNNALDKMSAFVKELSLLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEMVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQQLI', 'STIEEQAKTFLDKFNHEAEDLFYQYSLASMNYNKNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIDLLPVKLQLQTLQ', 'STIEEQAKTFLDKFNHEAEVMFYQSSLASMNYNTNITE<cls>NVQNMNMAFDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'SNIEEQAKDFLDKFNHEAEDLFYQSSLAMMYHLTEITEENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITQENVQNMNNALDKMSAFLKLLSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEDAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEENVQNMNNALDKMSAFLKELSTLAQMYPLQLIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFLHEAEDLFYQSSLASMNYNANITEENVQNANNALDKMSAFLKFISTLAQMYPLQEIQNLTVKLQLQALK', 'STIEEQAKMFLDMFNHEAEQLFYQSSLASMNYNTNITEEFVQMMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYITNITEENVQNMNNAYDKMSAFLKEMKLLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKMFLDKFNHEAMDLFYQSMLAQMNYNTNITEEDVQNMNNALDKMSAFLKEQSTIAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKYFLDKFNHEAEDLFMQSSLASMNYNTFITEENVQFMNNALDKMSAFLKEQSTLAMMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDNFNHEAEDLFYQSSLASMNYNTNITEEIVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDYFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALI', 'STIEERAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNAFIKMKAFLKELSTLAQMYPLQEIQNRTVKLQLQALV', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTFITEENVQNMNNALDKMSAFLKEQSTLAQMYPPQKIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSMLSSLASMRYNTNITEENVQTMNNALDKMSAFLKEQSTLAGMYPLQEIQNLPVKLKLQ', 'STIEEQAKTFLDFFNHEAQDLFYQSSLASQNYNTNVTEENMQLMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'VTIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEEDVQNMNLALDKMSAFLKEQSTRAQMYPLQEIQNGTVKLQLQALQ', 'STIEEQAKTFLDKFNHMAEDLFYQSSLMSMNYNTFITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 
'SNIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEINVQNMYNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYATNITEENVQNMILALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEQLFYQSSLASMNYNTNITEQNVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEFAKTFLDKFMHEAEDLMYQSSLASMNYNTQITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHRAEDLFYQSSLASMIYNTNITAENVLNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTNITERNVQNMNNALDKMSAFLKEQSTLAQSYPLQEIQNLTVKLQLMALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMRYNANITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SYIEEQAKTFLDKFNHTAEDLFYQSSLASMNYNTNITAENVQNMNNALDKMSAFLKEQSSLAQMYPLQMIQNLFVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTFITEENVFRMNNALDYMSAFLLEQSTLAQMYPLQEIQNLAVKLQLKALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKSMNYNTNITQENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLQVKLQLQALQ', 'STIEEQAITFLDKFNHEAEDLFYQSSLASMMYNTNIVEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYMSSLASMNYNTNVTEENVQNMNNALDKMSAFLKEQSYLAQMYPLREIQNLTVKLQLQALQ', 'STIEEQALTFLDKFNHEAEDLFYQSSLASMNYNTLITEENMQNMNNAMDKMSAFLKEQSTLANMYPLQEIQNLTVKLQLQALK', 'STIEEQAKTFLDKFNKEAEDLFYYSSLASMNYDTNITEENVQNMNNRADKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALL', 'STYEEQAKTFLDKFNHEAEDLFYQFLQNSMASMMYNTNITEENVQNMNNAKDKMSAFLKNQSTLAQMYPLQEIQNLPVKLQLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEINVNNMNNALDKMSAFLKRQATLANMYPLQEIQNLRVKLQLQLLM', 'STIEEQAKTFLDKFNHEAEDLFQQYLKASSILASMNYNTNITEENVQNMNLALDKMSAFLKAQSILAQMYPLLEIQNLTVKLQ', 'STIEEQAKTFLLKFNHEAEDLFYQSSLASMNYNTNITQENVQNMNNKLDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDMFYQSSLASMNYNTNITEEQVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAETFLDKFNHEAEDLFYQKSLASMNYNANITEENVQNMNNALDKMSAFLKEISTLAQMIPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQYSLASMNFNSNITEENVQNMNNALDKMSAFLKETSTLAQMYPLQEIQNLTVKLQLQNLQ', 'STIEEQAKTFLDKFIHEAEDLFYQSMLASMNYNTLITEENVQNMNNALDKMSAFLKESLTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQRSLASMNYNTNVTEENVQNMNNLLDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENEQNMNNALDKMHAFLKEQSLLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVQNMNNALDKMSEFLKEQSTLAQMYPLQYIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITNENVQNMNNALDKMSAFLKELSTLAQMYPLSEIQNLPVKLMLQALI', 'STIEQQAKTFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNAYSKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIDEENVQYMNNAHDKMSAFLKEQSTLAQMYPPQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQMSLASMNYNTNITEEDMQNMNNALDKMSAFLMEQSTLAQMYPLQEIQNLTVKLILQALQ', 'STIEEQAKTFLDKFNHEAEDLFMQYSLASMNYDTNITEENVQNMNKALDKMSAFLKELSTLVQMYPLQEIQNLTVKLQLQALL', 'STIEEQAKTFLDKFNHEAELLFYQSSLASMNYNTNITEEAVQNMNMAMDKMSAFLKEQLTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDRFNHEAEDLFYQSSLKKMNYNTNINEENVQNMNNALDKMSAFLKEISTLAQMYPLNEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYYQMILALMNYNTNITEENVQMMNNALDKMSAFLKEQSALAQMYPLQEIQNLTVKLQLQAL', 'MDIEEQAKTFLHKFNIEAEDLFYTSNLASMMYNTNISEENVINMNNAGDKMMAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFIDEFNHEAEYLFYLSSLASMNYNTNITIENVVNMMIALDKMSAFLKELMTLAQMYPLQEIQNLTVKLRLQALQ', 'STIEEQAKTFLHKFNHEAEDLFQQSKLASMMYFLLSMNYNTNITEENVLVMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVK', 'STIEEQAKTFLDKFNHEASHLFYQSSLASMNYNTNITEELVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLFVKLQLLLLL', 'STIEEQAKTFLDKFNHEAEDLFYQKSLASMNYFTNITEENVQNMNNALDLMMAFLKMQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTYITEENVQNMNNAYTKMSAFLKELSTLAQMYPLQEIQTLPVKLQLETLA', 'STIEEQAFTFLDKFNHEAEDLFYQSSFASMNYNANITEENVQNMNNALDKMSAFLKEQSTLAQMYPLVEIQNLMVKLQLQALQ', 
'STIEEQAKTFLDKFNHEAEDLFYQSYLASMNYNTNITLENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTEITEENMNNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKSFLDKFNHEAEDLFYQSMLASMNYNTNKTEENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMTYNTNITELNVYNMNNADDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSQLASMNYNTNITNENVQNMNNAQDMMSAFLKEQSTLAQMYPLKEIQNLTVKLQLQALK', 'STIEEQAKTFLMKFNHEAEDLFYQSSLASMNMNTFITEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'MTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEAKQNMMNALEKMSAFLYNQSTLANMYPLQEIQNLTIKLQLQALQ', 'STMEEQAKTFLDKFNHEAEDLFLQLFLQSMLASMNYNTNITEENMQQMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEENVQNMNNANDKMSAFLKELSTLAQMFPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFFQSSLLSMNYQTNITEENVQEMNYALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITNENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLLLQALQ', 'STIEEQAKTFLDKFNAIAEDLFYQLLLQSMLASMNYNTNITEENVGNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQL', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIAEENVHNMNNAQDKMSAFLKEQSTEAQMYPLQEIQNLTVKLELQALQ', 'STIEEQAKSFLDKFNHEAEDLFYQSYLASMNYNTNISEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKLFLDKFNHEAEDLFYQSSLASMNYNENITEENVQNMNNALDKMSAFLKEQSTLAQMYPLLEIQNLTVKLQLQALQ', 'DTIEEDAKTFLDKFNHEAEDLFYQRMEALMSMNYNTNITEENVQLMNNALDKMSAFLKEMSTLAQMYPLQEIQNLDVKLQLQA', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAHMNYLTNITEENKQNMNFARDKMSAFLKEQSTLAQTYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLYKFNHEAEDLFYQSSLASMNYKTNITEENVQNMNNALFKMFAFLKEQMTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEALDLFYQSSLASMNYNENITEENNRNMNNALDKMSAFLIELSTLAQMYPLQELQMLTVKLQLQALR', 'STIEEQAKTFLDKFNHEAEQLFYQSSLASMIYNTNITEEHVQNMNNALDKMSAFLKEISTLAQMYPLQLIQNLTVKLQLQALQ', 'STIEEVAKTFLDKFNHEAEDLFYQSSYAAMNYNTNITEEQVQNMNNALDKMSAFLKEQSTLAQMYPLQYIQNLTVKLQLQALQ', 'YTIEEYAKTFLDAFNHEAEDLFSQSSLSSMNYNTEITEENVQNMNNFLDKMSAFLKEQSTLAQLYPLQEIQNLPVKLFLQALQ', 'STIEEMAKTFLDKFNMEAEDLFYQSSLASMNYNTLITEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'RTIEEQAKTFLDKFNHEAEDLLLQSFMASMNYNTNITEEKVQNMNNALDKMSAFLKENMSLAQMYPLQEIQNLTVKLQLQALK', 'STIEEQAKTFLYKFNHEAEDLFYQSSLASMNYNTNITELNVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALI', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEAVQNMNNALDKMSAFLKEQSRLAQMYPLQEIQNLVVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYMTNITEENVQNMNRAGDKMSAFLKEQSTLAQMYPLQEIQNLMVKLLLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNITEDNVRLMNNALDKMSAFLKEQMTLAQMYPLQEIQNLTVKLQLQSLI', 'STIEEQAKTFLDKFNLEAESLELLYQSSLASMNYNTNITEENVQNMNNMLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQA', 'ITIEEQAATFLDKFNHFAEDLFYQYSLASMNYNTQITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STLEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEDNMQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITNENVLYMNNALDKMSMFLKELSTLAQMYPLKEIQNLTVKLNLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTMITEENMLNMNNVFMKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLSSMNYNTNITEEDVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEANDLFYYFLLYSMSAASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQ', 'STIEEQAKYFLDKFNHEAEDLFYRSSLLAMMYYNTNITEENVQNMNAARDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKTFLDKFNHEAEDEFYQFLLQSMLASMNYNTNITEENKQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQL', 'FTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNTIDKMSKFLKEISTLAQMYPLLEIQNLTVKLQLQTLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEQVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTMKLQLQALQ', 'STIEEQAKDFLDKFNAEAEDLFYQSSLAAMHYNLNITEENVQNMNKAGDKMSAFLKELSTLAQMYPLQEIQNLTLKLQLAALQ', 
'STQEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNMLDKMSAFLKEQSTLAQMYPLQEIQNLMVKLQLQALQ', 'SAIEEQAKAFLIKFNHEAEDLFYQSSLASMNYNTNITNENVQNMNNALDKMSAFLKEQSTLAQMYPLQMIQNLTVKLQLQALQ', 'STMEEQAKTFLDKFNHEAEDLFYQSSLASMNYNINITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQMLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTYITEENVRNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEMQAKTFLDKFNHEAEDLFYQMILASMNYNANITEENVQNMNNALDKMSAFLKEQSTLATRYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDFFNREAEDLFYQSSLASMNYNLNITEENVQNMNNAMDKMNAFLKEQSTLAQMYPLQEIQNPTVKLQLQAIQ', 'STIEEQAKTFLDKFNHEMMMLLYYVSSLASMNYNTNITEENVQNMQNALDKMSAFLKEQSTLAQMYPLLEIQNLTVKLQLQAL', 'STIEEQAKTFLDKFNHEAEDIFYQSSLASMNYNTNITEEVVQNMNNALDKMSAFLKEQSSLAQMYPLQEIQTLLVKLQLQALK', 'STIEEQAKTFLDKFNAEAEDLFYQASLASMNYNTNITIENVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLDLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIYEENVQNMNNALDKMTAFLKEQSTLAQMYPLQEIQNLYVKLQLQALL', 'STIEEQAKTFLDKFNHEAEDLFYQSSYASMNYLTNITEENMQNMLNAFDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYVSLLASMNYNANITEENVQNMNNALDKMSAFLKYISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYMTNITEENVLNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEANINNMNNALDKMSAFLKEQSTLAQMYPLQEIQNRKVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVQNMNNAMDVMMAFLREQSTLAQMYPLQEIQNLQVKLQLQKLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEYVMNMNNFLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'MTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITKENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKIFLDKFNHEAEDLFYQSSLASMNYNTNITRENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQSMNNALDKMSAFLKRQSMLAQMYPLQEIQNLAVKLQLQALQ', 'MNIEEQAKTFLDTFNRMAEDLFYQFYLASMAYLIYNTNITEENVILMNNALDKMSAFLKEQATLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad>', 'MTIEEQAKSFLDSFNHAAAEDLFYYSSLAMMNYNTNITEENIQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYITNITEENVQNMNIALDKMSAFLKELSTLAQMYPLQEIQNLPVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEFVQNMNNALDKMSAFLKEQSMLANMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLALMNYNTNMTEENVQNMNNALDKMSAFLKEQMTLAQMYPLQEIQNLTVKLLLQLLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEMNVQNMNLALDKMSAFLKEQSTLAQMYPLQAIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEMNVLAMMYPLREIENQNVKLQLQALQAKMSAFLKELSSLAQMYPL', 'STIEEQAKTFLDKFNHAAEDLFYQSSLASMNYNTIITEENVQNMNNALDKMSAFLAEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTQITEENVQNMNNALDKMSAFLKQQSTLAQMYPLQEIQNLTVKLQLQALK', 'MTIEEQAKTFLDSFNHEAAEMLFEQSMLASMNYNTNITEENVQNMNNALDKMSSFLKEQSTMAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKTFLDKFNHEAEDLFMQSNLASMNYNSNITEENVQNMNTALDKMSAFLKEQSTLAQMYPLQEIQNLTLKLQLQALQ', 'STIEEYAKTFLDKFNVEAEDLFYQSMLKSMNYNTNITEEFVQNMNNAFDKMSAFLKELSSLAQMYPLKEIQNLTMKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFFYQKALKSMNYNTNITEENVRNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVYIMNNLLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEVLFYQMMLASMNYNTIITEENVSQMLKALDKMLAFLKEQSTLAQMYPLQEIQNLTVKLQLQALV', 'STIEEQAKTFLDKFNHVAEDLFYQQSLLFNMNYNTNITEENVQNMNYAGDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQAL', 'STIEQQAKDFLDKFNHEAEDLFYQSSLKSMNYNTNITEEKVQNMNNALDKMSAFLKEQATLAQMYPLQEIQNLTVKLQLQALK', 'STMEEQAKTFLDKFNHEAEDLFYLESMLASMNYNTNITEENVQEMNLALDKMEAFLKEQSTLAQMYPLQEIQNLQVKLQLQQM', 'STIEEQAKTFLDKFNHEAEDLFYQSALASMRYNANITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNSNITEENVQNMNDALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEKVQNMNMALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQVLQ', 
'STIEEQAETFLDKFNHEAEDLFYQKSLASMNYNTNITLENVQNMNNALDKMSAFLKEISTLAQMIPPQEIQNLTVKLQLQALQ', 'STIEERAKTFLDKFNHEAEDLFYQSSLAVMKYNTNISEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQASLASMNYLTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDEFNHQAEDLFYQSSLASMNYNTNITEEIVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLKLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSHLASMNYNTNITEEKVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYDTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SAIEEQAKTFLDKFNIEAEDLFYQSSLASMNYNTQITEENMQNMNNALDKMSAFLKKQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQAMLASMNYNTNIAEENVQNMNNAIDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAQTFLDKFNHEAEDLFYQSSLASMMYNTNITERNVQNMNRALDKMSAFLKEQSTLALMYPLQEIQNETVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAIMNYNTNITAENVHNMNNLLDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEVLFYQSSLASMNYNTSITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLAKFNHEAEPLFYQSLASANYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQIQALQ<sep>', 'STIEEQAKTFLDKFNHEAKDLFYQSSLASMNYNTNITEEEVQNMNAALDKMSAFLKETSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNENITEENVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNLTVKMQLQALQ', 'SSIEERAKTFLDKFNHEAEDLFVQSSLASMNYNMNITEENVQNMNNALDKMSAFLKEQSDLAQMYPLQEIQNLTVKLQLQALQ', 'MTIEEQALTFLDKFNHEAEDLFYQSSLASMNYNTNITEDNVQNMNNALDKMSTFLKEQSTLAQMYPPQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITKENVQNMNNAHDKMSAFLKELMTLAQMYPLQEIQNLTVKLQLRALQ', 'STIEEQAKTFLMKFNHEAEDLFYQSSLASMNYNTNITEQNVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHELEKLFYQSSLASMNYNTNITEEKVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENFQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQF', 'STIEEQAKTFLDKFNHEAEDLQYQSSLASMNYNTNITEFNVQNMNRAMDKMSAFMKEMSTLAQMYPLREIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYNTIITEENVQNMNNALDKMSAFLKETSTLAQMYPLQEIQNLDVKLQLQALI', 'HTIEEQAMTFLDKFNHMAEELFYQSSLASMNYLTNITEENVLNMNNAYDKMSAFLKTQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSLLASMNYNTFITEENVQNMYNAMDKMAAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAENLFYQSMLMSMNYNTRITEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQFMKNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQALTFLDKFNHEAEDLFYQSSLASMNYNENITEENVQNMENAFDKMLAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEYLFYQSSLASMNYLTNITEENVVNMNNALNKMSTFLKEQSTLALMYPLQEIQNLTVKLQLQALQ', 'STIELQAKTFLDKFNHEAEDLFYQSSLAYMDYNTNITTENVQRMNNALDKMSAFLKEQSTRAQMYPLQEIQNLKVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLANMNYNTNISEENVQNMNNFLDKMSAFLKEQSTLAQMYPLQMIQNRTVKLQLQALQ', 'NTIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYNTNISEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEIAKTFLDKFNHEAEDLFYMSSLASMNYNANITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNATVKLMLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSYASMNYSTNITEENVQNMNNALDKMSAFLKEQSTLARMYPLQMIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLAYQSSLASMNYNANITEENIQNMNNALDKMSAFLKSQSTLASMYPLQEIQNLTVKLQLQSLM', 'STIEEQAKTFLDFFNHEAEDLFYQSSLASMNYNTNITFENVVIMENAGDKMSAFLKTISTLAQMYPLQRIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEQEDLFYQSSLASMNYNTNITERNVINMNNAKDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITELNVQAMNNAKDKMSAFLKELSQLAQMYPLQEIQNLTMKLQLQALQ', 'SDIEEQAKTFLDDFNHEAEDLFYQSSLASMNYNTNITEQNVQNMNQAADKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQDLQ', 'YSIIEVQAKTFLDKFNHEAEDLFYQSSLASMMYNTNITEENVQNMNNALDKMSAFLKEISKLAQMYPLQEIQNLTLKLLLQML', 'STIEEQAKTFLDKFNHEAEDLFYQSSLALMNYNTNITEINVYNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTIKLQLQALQ', 
'STIEEQAKTFLDKFMHMAEDLFVQSSLASMNYNTNISEENVQNMNNVLDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDEFRQMLLASMFASMNYNTNITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQL', 'STIEDQALTFLDHFNHEAEDLFTQSSLRSMNYNTNITEEDVQNMNNAMLKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQTLA', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNARDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQALTFLDKFNHEAEDLFYQSYLASMNYNTNITEINVQYMNNASDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQMLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEINVQLMNNALDKMSAFLKEQSTLAQMYPLQDIQNLTVKLQLQALQ', 'STIEEQAQTFLDKFNHEAEDLFMQSSLASMNYNFNITEENVQNMNNALDKMAAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKSFLFKFNHEAEDLFYQSMLASMAYFMYNTNITEENVQEMNNAIEKMNAFLKEMSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNINITEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQA', 'STIEEQAKHFLDKFNHEAEDLFYQYSLASMNYNTNITEEEVTFMLNLLFKMSAFLKEQTTLAQMYPLQEIQNLTVKLQLQFLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLEYMNYNTNITIENVFNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLYVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITELNVQNMLNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKMQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEFVQNMNNALDKMSAFLKEQSTLASMYPLQKIQNLTVKLILQALQ', 'STIEEQAKTFLDKFNHAAEILFYQSSMALMNYNTIITEENVGNMNNALDKMSAFLLEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITFENVNNMNDKMSAFLKMQSTLAQMYPLQEIQNLQVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFMQSSLMSMNYFTNITEENVQLMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTPKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYSTNITEENVQNMNKALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYFSSLASMNYNTNITEEAVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLLLQALQ', 'STIEEQAKTFLDKFNHMAEDLFYQSSLASMAYNTNITNENVQNMNNALDKMSTFLKEQSTLAQLYPLQEIQNLTVKLLLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQMSMASMAYNANITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNAGDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNQHMAEDLFYQSKLASMNYNTNITEENVQMMNNAEEKMSAFLKEMSTLAQMYPLQEIQNLTVMLQLQAL', 'STIEEQAKTFLDKFNHEAEDLFMQASLASMNYNTNITEEIVQNMNYALDKMSAFLKSISTLAQMYPLQEIQNLTVKLQLLLM<sep>', 'SDIEEQAKTFLDKFNHEAKDLFYQSSLASMNYNTNIAEENVQNMNNAGHKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLARMNYNTNITEEEVQDMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLMLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTHITEENVFNMNFALDKMSAFLHEQSTLAQMYPLQEIQMLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNALDKMSAFLKEQITLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEYAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNTLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQNLQ', 'QTIERQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQYMNNALDKMSAFLKEQRTLAQMYPLQEIQNLTVKLQLQALR', 'STIEEQAKTFLDKFNHEAEMLLYDSSLASMNYNSNITEENVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNVTEENVQNMNNALDKMSAFLKEQSSLAQMYPLQEIVNLPVKLQLQALQ', 'STIEEQAKTFLYKFNHEAEDLFYQSSLASMNYNTNITEEIVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLSALK', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNALDKMFAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'FTIEEQAKTFLDKFNHEAEDLFYQMSLASMNYNTNITFENVQNMNNALDKMSSFLKEQSTLAQMYPLMEIQTLTVKLQLQALQ', 'STIEERAKTFLDKFNHEAEDLFYQSSLASMNYNTNMTEENVQNMNNAGAKMLAFLKELSTLAQMYPLQEIQNLRVKLQLVALQ', 'STIEETAKTFLDKFRHEAEDLFYQSSLASMNYNTNITSENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLRKFNHMAEDLFYQSSLASMNYNTNITEEDVQNMNNALDKMSAFLKEVSTLAQMYPLMEIQNLTVKLQLQALQ', 'FTIEEQAKTFLDKFNHEAEILFYQSSLASMNYNTNITEELVQNMNNADDKMSAFLKEQSTLAQMFPLQEIQNLMVKLQLQALQ', 'STIEEQAKYFLDKFNHEAEDLFYRSSLLARMNYNTNITEENVQNMFNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKTFLKKFNHEAEDLFYQSSLASMNYETNITEENVQNMNNAFRLMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAFTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYMTNITEENVMNMNNALDKMSAFLKEQSTYAQMYPLQEIQLLTVKLMLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTMITEENVQNMNNAFDRMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKEMLDKFNHEAEDLFEYQSNLALMNYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKTFLDKFNKEAEDLFYQYSLFSMNYNRNITEENVQAMNNANDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHNAEDLFYQSSFAAMNYNTDITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEQLFYQSILASMNYNDNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYNTNITQENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALR', 'STIEEQAKTFLDKFNHEAEDEFALFLLESSLASLNYNTNITEENVQNMNNALDKMSAFLKLMSTLAQMYPLQEIQNLTVKLQL', 'STIEEQAKQFLDKFNHEAEDLFYQSSLASMNYNTDITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLLALA', 'STIEEQAKTFLDAFNHEAEDLFYQSSLASMNYNSNITEENVQNMNLALDKMSAFLKEQSTLARMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNLNITEENVQNMNNALDKMSAFLKEQSTLAQMMPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMMSAMDKMSAFLKELSTLAQMYPLQEIQSLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSTLASMYYNTNITAENVLNMNNALDKMSAFLKEQSHLAQMYPLQEIQNLTVKLFLQALQ', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNDNITEENVQNMNYALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFMQSSLAHMNYNTNITEEKVQNMNNAKDKMSAFLKEQSTLAQMYPLQLIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAEDLFYSSSLMSMNYNTNITEEAVQAMNNALDKMSAFLKEQSTLAQMYILQEIQNLTVKLQLQALQ', 'SEIEEQAKTFLDKFNFEAEILSMQYNTNITEENVQNMMNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDRFNHEAEDLFYQSSLASMNYNTNITNENVQNMFLALDKMSAFLKEQSTLAQRYPLQEIQKLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAADLMYQSSLASMAYNTIITEENVQNMNNALDKMSAFLKEQSTLANMYPLQEIQNLTVKLQLQALQ', 'TTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNAIDKMSAFLKMISTLAQMYPLQIIQNLTPKLQLQALQ', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITQENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHFAEDLFYQSSLYSMNYNTNITEEYVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEANDLFYQSSLASMNYITNITEENVQNMNLALVKMSAFLKEVSTLAQMYPLQEIANLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAAMNYNTIITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQTKTFLDKFNHEAEDLFYQSSLLSMNYNTLITEENIQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQMIQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKSMNYNTNITEQNVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALY', 'STIEEQAKTFLDKFNHEAEDLFYQSSAASMNYNTNITELNVQAMNNALDKMRAFLKQQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEVAKTFLDKFNHEAEDLFYQSSLAMMNYNTIITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIELQAKKFLDKFNHEAEHLFYQSSLAIMNYNTIITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKRFLDKFNHEAEDLFYQSSLASMNYNTNITIENVQNMNNAFDKMSAFLKIQATLAQMYPLQEIQNLTVKLQLQALT', 'STIEEQAKTFLDKFNHEAEVLFYQSSLKSMNYNTHITEENVQNMNNALDKMSAFLKEQSTLAQHYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHVAMDLFYQSSLASMNYNTNITELNVQNMNNAMDKMSAFLKEQSQLAQMLPLQEIQNLMVKLQLQALQ', 'STIEEQAKYFLDKFNHEAEDLFYQSSLASMNYNTRITEENVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEVEDLFYQSSLAMMLYNSNITEENVQNMNNALDKMSAFLKNQSTLAQMYPLQEIQNLTLKLQLQALK', 'STIEEMAKVFLDKFNHEAEDLFYQSSLASMNYQTNITEENEQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSFLASMFYNSNITEENVQNMNNAIDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQQLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNLNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTFITEENVQNMNNAFDKMSAFLKEQKTLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLKKFLDKFNHEAEDLFYQSSLASMNYNTNITEENYQNMNNAMDKMSAFLKEQSTLAQRYPLQEIQNLTVKLQL', 'STIEEQAKTFLDKFNREAEDLFYQSSLKSMNYNLNITEENVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMYYNTAITEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLMVKLQLQALQ', 'STIEERAKTFLDKFNHEAEDLFYQSSLTSMNYNTNITEANVQNMNNALDKMSAFLKEQSTLASMYPLIEIQNLTVKLQLRALQ', 'STIEEQAKEFLDMFNHEAEDLFMDKSLQSQLANMRYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQL', 'STIEERAKTFLDKFNHEAEDLFYQSSLASMNYNTNILEENVQNMNNAKDKMSAFLKEQSTLAQMYPLKEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLQVKLQLQALQ', 'SELEEQAKTFLDKFNHEAEDLFYQSSLASMNFNTNQTEENVQNMNNAGDKMSAFLKEQSTLAQMYPLPEIQNLPVKLQLQALQ', 'STIEEQAKYFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQMMRNAFDKMSAFLAEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKMQAYLASMQYNTNITEENVQNMNNALDKMSAFLKEQSTLASMYPLQEIQNLTVKL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLRSMNYNTNITYENVRNMNNALDKMSAFLKETSMLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYDTNITEENVQNMNMALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTYITEENVANMNNALDKMSAFLKEQSTLAQMYPLQNIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASFNYNTNITYENVQNMNNAQDKMSAFLKEMSTLAQMYPLPEIQNLTVKLLLQALQ', 'SGIEEQAKTFLDKFNHFAEDLFYQSSLASMNYNTNITEEIVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNISEENVQNMNNELDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSQLQSMTYNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAELLSYQSSLASMNYNTNITEELVQLMNNAADKMSAFLKRLQSLAQELARQSMLAQMYPLQKIQLL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITYENVENMNFAMDKMSAFLKEQSTLAQMYPLAEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQVSAASMNYLTNITEENVQNMNNMLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLMKFNHEAEDLFYQSSLAYMNYNANITEENVQNMNHAGDKMSAFLKEQSTLAQMLPLQEIQNRTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAEILFYQSSLASMNYMTNITEENMQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTAKLQLIALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMMYNTNITAENVQNMNNAGDKMFAFLREISTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAITFLDKFNIEAEDLFIQSSLASMNYNAEITEENVQNMNNALDKMSAFLKEQSTLAQTYPLQEIQNPTVKLQLQAMQ', 'STIEEQAKTFLDKFNHNAEDYFYQSQLASMNYNTNINEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNLEAGDLFYQSSLTSMNYLTNITEENVQMMNNAHDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLYKFNHEAEDLFYQSSLASMNYNTNITEELVLQMMDLLLLMSLLALMYPLFEIQNLTVKLQLAKLLALA<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHMAEDLFYQSSLASMNYNTIITEENVINMNNALDKMSAFLKEQSTLAQMYPLMEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDEFLFMQSSLALMNYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQA', 'STIEEQAKTFLDKFIHEAEDLFYQSSLASMNYNANITEENMQNMNNAFDKMSAFLAEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSMMSMTYNLNITEENVQNMKNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEMAKVFLDKFNHEAEDLFYQSSLASMNYNTNLTEENEQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAHMNYNTNITEQNVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLDVKLQLQALQ', 'STIEEQAKIFLDKFNHEANDLFYQSSLASMNYNTNLTEENVQNMNNAFDKMSTFLKEISTLAQMYPLQFIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNIEAEDLFYQSSLASMNYNTNITIEEENVQNMNNALDKMSAFLKIQSTLAQMYPLQEIQNLTVKLQLMA', 'STIEEQAKTFLQKFNHEAEDLFYQSQLASMNYNTNITQENKQNMNNAHDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQSLQ', 'STIEEQAKTFLDKFNHEAEDLFYQMSLASMNYNTNISEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLILQALQ', 'MTIEEQAKTFLDKFNHEAEDLFYQSKLASMNYNTNITELNVQNMNNEQDKMSAFLKEQSTLAQMYPLQMIQNLTVKLQLQALQ', 'STIEEQAKFFLDKFNHEAEILFYQSSLASMNYNTEITEENVQNMNNAFDKMSAFLKEQSTLAQMYPLLEIQNLTTKLMLQALQ', 'STIEEQAYTFLDKFNHEAEDLFYQSSLASMNYNTNITEEKVQNMNFALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDKFNHEAELLFYQSSLANMNYNTNIQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad>', 'SNIEEQAKTFLKKFNHEARDLFYQSSLASMNYNTNITEYNVNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKVFLDKFNHEAEDLFYQSNLASMNYDTNITEENLTEMNNAMAKMSAFLKELSTLAQMYPLQEIQDLTVKLQLSALM', 'STIEEQAKTFLDKFRHEAEDLFYQSSLASMNYNTNITIENVQNMNNAYLKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEQVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLQVKLQLQALY', 'KNIEDQAKTFLDKFNHEAEDLFYQSSLAMMNYNINITEENVQNMNNAEDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNMAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYESSLALMSMEYNTNITEENRQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLAA', 'VTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIAEENVQNMNNALDKMSAFLKEVSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQRSLASMNYNTNITELNVQNMNNALDKMSAFLKEQSTLAQMYPLRMIQNLTVKLQLQALQ', 'STIEEIAKTFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFLLSSLALMNYFTDITEENVQNMNNALLKMSAFLKEQSTIAQMYPLQEIQNLTVKLQLMALM', 'STIEEQAKTFLDKFNHEAEAFLDLFSYLSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ', 'ITIEDQAKTFLAKFNHDEARDLFYQSSLASMNYNTNITEENVENMNNALDKMSAFLKEQSTLAQMYPLQEIQNLMVKLQLQAL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEQVQNMNNALDKMSSFLKEQKTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEMAKTFLDKFNHEAERLFYESSLASMNYNTNITEELVQNMNNAYDKMSAFLKEQGTLAQMYPLQEIQNLAIKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYFTNITEENVENMNNALDKMSAFLKEQSTLATMYPLPEIQNLTVKLQLQALQ', 'STIEEAAKTFLDKFNHEAEDLFYQSSLTSRNYNTNITERNVQNMNNAGDKMSAFLKELLTLANMYPLQEIQNLLVKLQLQALQ', 'SNIEEQAKTFLDKFNHEAYSLFYQSSLASMNYNTNITEELVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAMLFLDKFNHEAEDLFYQSSLASMIYDTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLILQALQ', 'STIEEQAKTFLTKFNHEAEDLGYQSSLASMNYLTNITEENVQNMNNALDKMSAFLKEISTLAQMYPLMEIQLLTVKLQLQALQ', 'STIEEQAKHFLDKFNHEAEMLFYQSSLASMNYNTNITEEQRQNMHNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHVAMDLFYQSSLASMNYNTNITEANVQNMNNALDKMSAFLKEQSNLAQMLPLQEIQNLMVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYLTNITEENVQNMFIALDKMSAFLVEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDTFNHEAEDLFYQSMAAMIYNTNITEENYQNMNNALDKMSAFLKEQSTLAQMYPLQVIQNLTVKLQLQALQ<sep>', 'STIEEAAKTFLDKFNHEAEFLFYQSSLASMNYNTRITEENVQNMNNMLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTSITEENVQNMMNALDKMSAFLKEQSTLAQMMPLQEIQNLTVKLQLQALQ', 'STIEEFAKTFLDHFNHEAEDLFYQSSLASMNYNTNITITEENVQLMNNALDKMSAFLKEQSTLAQMYPLREIQNLTVKLQLQA', 'STIEEQAKSFLDKFNHEAEDLFYQSSLASMNYNTNITEELKQNMNNAEDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQFLM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTKITEENVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'SQIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNQNITEENVQNMNNALQKMSAFLKEQSTLAQMYPLQELQNLLVKLQLQSLQ', 'STIEEQAKTFLDKFNHEMEDLAYQSILASMYYNTNITLENVINMNNALYKMSAFLLEQSTLAQMYPLQEIQNLTVKLQLQFLQ', 'STIEEQAKKFLDLFNMSAKDSMYSMLASMNYNTNITEENVQNMNNKLDRMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'SSIEEQAKTYLDKFNHEAEDLFYQSSLASMNYNANITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEMAKTFLDKFNHEAEDLFYQSSLASMNYNTNETEENVQNMNNALDKMSAFLKEKSTLAQMYPLQEIQNLTVKLQLMALQ', 'STIEELAKQFYLDKFNFEAEDLFYQISLASMNYNTNITEENVQNMNQALDKMSAFLKEQSTLAQMYPLQEIQNLQVKLQLQAL', 'SSIEEQAKTFLDKFNHTAEDLFYQSSLASMNYNSNITEENVQNMNNAEDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLFALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAAMNYITNITEENVQNMNNARDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMRYLTNITEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALL', 'STIEEQAKTFLDKFNHEAEDLFYQSFLASMNYNTNITEMNVAIDKMSAFLKQQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNSNITEENVQNMNNAYDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLMKFNHEAEDIFYQSSLASMNYNANITEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEMAKTFLDKFNHEAEDMFYSSLASMNYNTNITEENVQNMNNALDKMSAFLKEKSTRAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYYSMLASMNYNTNITIENVFNMNNAGDKMSAFLKEQSKLAQMYPLREIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAYFAFDLFYQSMLASMNYNTNITEENVQMMNNALDKMSAFLMEQSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNITEEQQQAMNNARDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNVALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEREDLFYQSSLASMNYNTIITEENVQEMNNAGDKMSAFLKEASTLAQMYPLQYIQNLTVKLQLQALQ', 'DTYEETAKTFLDKFNHEAEDLFNQSSMFSMDYNTNIKEENVKNMNNALDKMSAFLKELSTLAQMYPLQEIQNQTVKLQLQALF', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNITEQNVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKSMNYNTEITEENVQNMNNALDKMSAFLKEQSTLAQDYPLQEIQNLTVKLQLQALQ', 'STIEEQAKQFLDKFNHEAEPLFYQSSLASMNYNTNIFEENVQNMNNAGDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFDQSSLASMNYNTNITNENVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALK', 'STIEEQAKTFLDKFNHEAEVLFYQSSLKSMNYNTNITDENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNDTVKLQLQALQ', 'STIENQAKTFLDKFNHEAKDLFYQSSLAMMNYNTNIREENVQKMNNAFKKMSAFLKEQSTLAQMYPLQELKNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKSMSYNTNITEQNVQNMNNAGDKMSAFLKELSTLALMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYNTNITDENVQNMNNALDKMIAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITIENVQNMNNAMDKMSAFLKEQSTLAKMYPLQMIQNLTVKFQLQALQ', 'STVEEQAKTFLDKFNHEAEDLFSQSSLASMNYNTNITMENVLAQMNNALDKMSLFLKEQQTLAQMYPLQIIQNLLVKLQLQAL', 'STIEEQAFVFLDKFNHEAENLFYQSSLALMNYNTNITEEQVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLHVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSELKSMNSLLSIMSYLLSMNYNTNITEENVQNMNNALDKMSMFLKELSTLAQMYPLQEI', 'STIEEQAKTFLDKFNHFAEDLFYQSSLKSMNYITNITEENVQNMNYAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLIKFNHEAEDLFYQSLATMMYNTNITEENVMNMNNALDKMSAFLKEQSTLAQNYPLQEIQNLTVKLKLQSLQV', 'STIEEQAKTFLDKFNHEAEDLFNGSSLAMMNYNTNITEEDVKNMNNALDKMSAFLKTISTLAQMYPLQEIQLLDVKLQLQALQ', 'YAIEEQAKLLLKKFLDKFNHEAEDLFYQSSKASMNYNTNITEENVQNMNNALDKMKAFLKEQSTLAQMYPLQEIQNLTVKLQL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEENVENMNNAFDKMSAFLKEQSSLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYNANITEENVQNMNNALDKMSAFLNEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STMEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEELVQNMNNALDKMSAFLKAQSTLAQMYPLQEIQFLTVKLQLQALQ', 'ETIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITIENVQNMNNALDKMVAFLKEQSTLALMYPLQEIQNLTVKLELQALQ', 'STIEEQAKTFLDKFLHEAEDLFYQSSLAFMNYNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYQTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLDLKQLLKVKL', 'ETQEEQAKTFLIKFNHEARDLFYMSSLASMNYNTQITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAFVFLDKFNHEAENLFYQSSLALMNYNTNITEFNVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLSVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYYSSLASMNYNTNITFENVQNMNNAFDRMSAFLKEQSTLAQMYPLQEIQNLTVKLTLQALQ', 'STIEYQAKTFLDKFNHEAEDLFYQSSMASMNYLTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLIVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYNSSLASMNYNTNITIENVQNMNNMFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAQTFLDKFNHEAEDLFYQSSLASMNYNTMITEENTQNMNNALDKMSAFLKEQAKLAQMYPLQEIQNMTVKLQLQALQ', 'STIEEQAKTFLDKFNK<cls>ALDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLEEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAIMNYNTNITEINMQNMNNALDKMSAFLKEQMTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVQNMNLAHDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVQNMNEAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEVAKTFLDKFNHEAEDLFYQSSLAMMNYNTNITYENVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMKYNTKITEENVQNMNNALDKMSAFLKFQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLLKFNLEAEDLFYQSSLASMNYNTMITEENVQNMYNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLHLQALQ', 'MDIEEQAKTFLSHFNIEAEDLFESSALATMNYNTNITEEFMIMMNNALDKMSAFLKEQSTLAQMMPLQEIQNLTVKLQLQALM', 'STIEEQAKSFLDKFNHEAEDLFYQSSLALMNYQTNITEENVQNMNNALDKMSAFLKTQSTLALMYPLQSIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEATYLFYQSSLASMNYNTNITLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQN', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENFQNMNNALDKMSAFLKEQMTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHLAEDLFYQSSLASMNYATNITEENLQNMNNAGDKMSAFLKEQSTLAQTYPLQEIQNLPVKLQLQALS', 'STIEEQAKTFLDKFNHEAEDLFEQSSLASMNYLTNITEENVGNMNMALDKMSAFLLEQSYLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHLAEDLFYQSSLASLNYNTIITEENVQNMNNALDKMSHFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITERNVQNMNNALDKMSAFLKKQMTLAQMYPLQEIQNLTVKLQLQALF', 'STIEEQAKTFLDKFNHEAEDLFYVLSLANMNYNTQITEENVQNMNNALDKMSAFLKELSTLAQMYPRQEIQNLTMKLQLQMLQ', 'STIEEQAKTFLDKFNHEAMDLFYQSSLASMHYNTNITEINVQNMNVAKDKMAAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAYTFLDKFNHEAEDLFYQSSLASMNYNTNITEQNVQNMNNAGDKMSAFLKEQSTLAQMYPPQEIQNLTVKLQLQALR', 'STIEEQAMTFLDKFNHEAEDLLYQSSLASMNYNTNITAENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STMEEQAKTFLDKFNHEAETLMYQSSLASMNYNTNINEENVQNMNNALDKMSSFLKEQSTLAQMFPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFMQSSLASMNYNTNITENVVNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAQMYYNANITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYLTLITEELVLLYLTLILMNYLLLLLLLLDLLALMDLLLLMLLLLLLLL<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEMVQNMFNARDKMSAFLKEQSTYAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITMENVQNMNNAYDKMSAFLKEQSTLAQMFPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEMEDLFYQFLLRSMRYSMSMRYSMSASMRYNTNITEENVQNMNNRLDKMSAFLKEQSTLAQMYPLQE', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYATNITEENVQNMNNALNKMSAFLAEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSYLASMNYDTNITEENVKNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLKALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAEMNYLTNITEENVQNMNNAGGKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYMSSLASMNYDTNITEENQQNMNNAFDKMSAFMKEQSTLAQMYPLQEIQNLRVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMIYNTNITEYNVNLNKVLAKMNALDDKMSAFLKELSSLAQMYPLQEIQNLTVKL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNYTEENVQNMNNAYDKMSAFLKYQITLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEVLFYQSSLKSMNYNTNITEANVQNMNNALDKMSAFLKEQSTLAQMYPLQSIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEINVNNMNNAGDKMSAFLKEQSELAQMYPLQMIQNETVKLLLQALQ', 'SSIEEQAKTFLDRFNREAYDLFYQSSLAVMNYNTNISEENVQNMNNATDKMSAFLKEQSSLAQLYPLQEIQNLTVKLQLQALV', 'STIEEQAKTFLLKFNHEASDLFQQSSLASMNYNANITEENVQNMNNAGEMMSAFLKELSYLAQMYPLQEIQNLGTKLQLQALQ', 'STIEEQAKTFLKKFLDKFNHEAEDLFYQSSLASMNYNTNITEENYQNMNNAFDKMSAFLKEQSTLARMYPLQEIQNLTVKKQL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASYNYNTNITEEIVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALV', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVRNMANALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'MTIEEQAKMFLDKFNHEAEDLFYQSALASMNYNTYITEENVQNMNEAGDKMSAFLKLQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEDNVQNMNNALDKMSAFLKEQSTLARRYPLQEIQNLTVKLQLQLLQ', 'STIEEQAKTFLDKFNHEAEDMFYQSSLMSMNYNTNITEINVQNMNNALDKMIAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 
'SEIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNDNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLLEIQNLTVKLQLQALQ', 'STIEEQAKTFLIKFNHEAEDLFYLSSTAMNYNTNITEENVINMNNALDKMSAFLKEQSTLANMYPLQEIQNLTVKLKLQALQ<sep>', 'STIEEQAKTFLDKFNHNAEDLFYQSSLASMNYNTNITEEEVQNMNAAGDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQALI', 'STIEEQAFTFLDKFNHEAEDLFYQSSLASMSYNSNITEENVQNMNNANDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEDNVQNMNNALDKMSAFLKYQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKSFLDKFNHEAEDLFMQSSLASMNYNTNITEDLVVALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLMLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNALDKMSAFLKEQMTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'SDIEDQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIVEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQELTVKLQLQALQ', 'STIEEQAKTFLDKFNHEMEDLFYQSSLASMNYNTNITELNVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALK', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEAVQNMNNATDKMSAFLLEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYITNITEENVQNMYNAEDKMSAFLKEQSTLAQMYPLQQIQNLTVKLQLQALQ', 'STIEEQAKTFLKKFLDKFNHEAEDLFYQSSLASMNYNTNITEENYQNMNNAMDKMSAFLKEQSTLAQSYPLQEIQNLTVKKQL', 'STIEERAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEKVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLFKFNHEAEIFSSSSLASMNYNTNITEENVQNMNNFLDKMMAFLYEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLYVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMTYNTQITEENVQNMNLANDKMSAFLKEQSSLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNALDKMSAFLKEMSTLALMYPLNEIQNLTVKLNLQSLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASAMYATNITEENFQNMNNTKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYFSSLASMNYNTNITQENVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSRLASMNYQTNITEENVQNMNNANDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYNSNITEENVQNMNNAQDKMRAFLKEASTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKMFLMMFNHMASEFLDDFNHEAEDLFYQSSLAIMEYMTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQN', 'STIEEEAMSFLDKFNHEAEDLFYQSSLASMLYDTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEQLFYQSRFASMNYNTNKTEENVQNMNNAFDKMSAFLKEQSTSAQMYPLQEIQNLTVKLQLQALQ', 'SAIEEQAKTFLEKFNHELSDLFYQSSLASMNYNTNITEELVQNMNNALDKMSAFLKEQSLLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEALLDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLYKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNAKDMMSAFLKEQLTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNDAMKKMSAFLKEQSTLANMYPLQEIQNQTVKLQLQALQ', 'STIEETAKTFLDLFNHEAEDLFYQSRLASMNYNTYITEENVQNMNNALDKMSAFLKEITTPAQMYPLQEIQNLTVKLQLQALI', 'STIEEQAKTFLDKFQMLLYYLSYGLELFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLMEIKNLPVRL', 'STIEEQAFTFLDAFNHEAEQLFYQSSLASMNYNQNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALA', 'STIEEQAKTFLIKFNHEAEDVFYQSSLASMRFNTNETEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNVTEENFQNMNNAYDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLMKFNHEAEDLFYGSSLASMNYNSNITEENVQNMNNALDKMSSFLKEQSHLASMYPLQEIQNLTVKLQLQALI', 'STIEEQAKTFLHLFDDEEDLFYQSSLASMNYNTNITEENMQNMNNAFDKMSAFLKEISTLARMYPLQEIQNLTVKLQLQALQ<sep>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNINEENVQNMNNAGDKMSAFLKEQSTKAQMYPLQEIQNLTVKLQLQALQ', 'STIEEIAKTFLDKFNHEASLSFYNTNITEENMQNMNNALDKMSAFLKEMSTLAQMYPPQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQASLASMNYNDNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNQTVLLQLQKLK', 'STIEEQAKTFLFKFNHEAEILFYQSSLASMNYNTNITEEYVQSMNNFGDMMFAFLMEMSTLAQMYPLEEIQNMTVKMSAFLFM', 'ETIEEQAKTFLDKFNHEAEHLFYQSSLASMNYITNITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDIFNHEAEDLFYQSSLASMNYNTNITYENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFQHEAEDLFYQSSLASMSYNTSITEENVANMNNMLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLILQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITIEEINVNMNKASDKMSAFLKHQSQLAQMYPLQEIQNLSVKLQLKAL', 'NTIEEQAKTFLYFARDLFYQSSMASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEQLFYQSSLASMNYNTNITEEIVQAMNNAEDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNRNITEENVQNMNKASDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLKALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNDNITEENVMNMNNALDKMSAFLKEQRTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASRNYNVNITEENVQNMNNALDKMSAFLQEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STREEQAKTFLDKFNHEAEDLRYQSMLASMNYNTNKTEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKHFLDKFNHEAEDLFYQSSLASMNYNTNIVEENVQNMNNALDKMSAFLKEQSTLAQMYPLQFIQNLTVKLQLQALM', 'STIEEQAKDFLDMFNHEAEDLYDKFYQSKLTSMQYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKSMNYNTNITKENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLQVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITLENVFNMNNALDKMSAFLVEQSTLAQMYPLQEIQNLTVKLQLRALQ', 'SDIEEQAKTFLDKFNHEAEDLFYQSILHSMNYNTNISEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIANLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNQTEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SHIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEKVQNMNNAYDKMSAFLKEQSRLAQMYPLQEIQNLPPLLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNTASDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'YSIIEEIAKTFLDKFNHEAEDLFYQSSLASMMYNTNITEENVQNMNNALDKMSAFLKAQSTLAQMLPLKEIQNLTVKLELQAL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMYYNTNITNENVQNMNNAGLKMSAFLKEQSTLAQMYPLLEIQNLTVKLMLQQLQ', 'STIEEQAKTFLDKFRAEAEDLFYQSSLAHMNYNSNITEENVQNMNQALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDIFLDKFNHEAEDLFYQSSMASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL', 'STIEEQAKTFLDKFNHEARDLFYQSSLASMNYNTNLTEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLKYQSSLASMNYNTLITEENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKYFLDKFNHEAEDLFYQSSLASMNYNTNITEQNVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMKYNRNITEENVQNMNNASDKMSAFLKEQSTLAQMYPLLEIQNLTVKLQLQALQ', 'SNIEEQAKTFLQKFNHEAEDLFTQSSLASMNYNTAITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'QTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTYITEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLLVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMIYNANITEENQINMNNAGDKMLAFLKEQSTLAQMYPLQEIQNLTVLLQLQALQ', 'STIEEQAKIFLFKFNHMLHLSEDLYYQAMLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIDNLTVKLQLQ', 'MTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTSITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNMEAEDLFYQSSLASMNYNTNIVEENVQNMNNALDKMSAFLLEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITLENVHNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNMEAEDLFYQSSLASMNYNTNITEQNVQNMNNANDKMSAFLKELSTLAQMYPLTEIQNLSVKLQLQALQ', 'STFEEQAKTFLDKFNHEATDLFYQSSLASMNYNNNITEENVQNMNNARDKMSAFRKELSTLAQMYPLREIQNLTVKLQLQALI', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLAKMNYNTNNTEENVQNMRNATDKMSAFLKEQSTLAQMMPLQEIQNLTVKLQLQALQ', 'STIEEMAKTFLDKFNHEAEDLFYQSSLASMMYNTEITEENVFFMNNAGDKMSAFLKEQSLLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNKTEENVRNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SQIEEQAKTFLDQFNHEARDLFYQSSLASMNYNTNITEEIVQNMNNALDKMSAFFKEQSILAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFIHEAEDLFYQSSLASMNYNTAITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIENQAKTFLDNFNREAEDLFYQSSLAMNYNTNITEENVQNMNNAGDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQMLQ<sep>', 
'STIEEQAKTFLDKFNHEAEDLIYQSSLMSMNMNTNITAENVQNMNNALDKMSAFLKEQSTLANMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVMNMNNAGDKMSAFLKEQSTFAQMYPLQEIQNLPVKLMLQALQ', 'STIEEGAKTFLDKFNHEAEDLFYMSDLASMKYNTNITEQNVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLLALQ', 'STIEEQAKTFLDKFNHEAEDLYYQSSLASMNYNTNITIENVQNMNNALDKMSAFLKMQSTLATMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLKKFNHAAEDLFYQSSLASMNYNTNITEELVQNMNNALDKMSAFLKEASTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHKAEQLFYQSSLASMNYNNNITEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLFALQ', 'STIEEQAKTFLDKFNHEAYDLFYQSRLASMNYNTNITERNVQNMNNALDKMSAFLKEQSTLAQMYPLQEINNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNALQVMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAIDLFYQSSLASMNYNAFDKMSAFLKEASTLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEARDLFYQSSLASMNYNLNITEENVQPMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKEFLDKFNHEAEDLFYQSILASMFYNTNITQENVQNMNNLGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQTLQ', 'STIEEQAKIFLDKFNHKAEDLFYQSFLASMIYNENITEENVQNMNMAGDKMSAFLKEQSTLAQMYPLQEILNLRVKLQLSALM', 'LDMMEQQAKTFLDKFNHEAEDLFYQRSLASMNYNTNITEENVQNMNNALDKMSAFLKMQMTLAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKTFLDTFNHEAEDLFYQSSLASMNYNTNITDENVQNMNNAFDKMSAFLKEQSTLARMYPLQEIQNLTVKLKLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNQNITEENVQNMNKANDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEQVQNMNNAMDKMSYFLKEQSTLAQMYPLQEIQNQTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIDEENVNNMNNALDKMSAFLKEQSTLAQMYPLQEIQELTVKLQLQALI', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYITNITEENVQNMNNAYDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALV', 'STIEEQAKTFLDKFNHEAEDQFYYFYLASMAYLTYITEITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDTFNHEAEDLFYQSMLTILKYLVSMMYNTNITEENVTNMNNALDKMSAFLKEQSTLAQMYPLIEIQNLTVKM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVMNMYNAKDIMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'RTIEEQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITRENVQNMNNAGDKMSAFLKEQSTLANMYPLQEIQNLTVKLQLQALQ', 'SFIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTFITEENVQNMNTAMDKMSAFLKEQSTAAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNLNITEENVQNLNNALDKMSAFLDEQSTLAQMYPLQYIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNLEAEDLFYQSSLASMNYNQNITEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLYLQALQ', 'FYIEEYAKTFLDKFNHEAEDLFYQSSLASMMYNTNITEINVEENVQNMNNAVDKMSAFLKELSLLAQMYPLQEINNLTVKLQL', 'ETVEEQAKTFLDKFNHEAEDLFYQSKFANMNYLLSMNYNTNITEENVQNMNNAEKKMSAFLKEQSTLAQMYPLQEINNLEVKL', 'STYEEQAKTFLDKFNHEAEDLFHQSSLASMNYNTNITDENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLLVKLQLQALM', 'STIEEFAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVQNMNNAGDRMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALL', 'STIEEQAKTFLDKFNHEAEDLFYQSDLASMNYNTNITEEQVQNMNMAIDKMSAFLKEQSTLAQMYPLQEIQNLLVKLQLQALL', 'STIERMAKDFLDKFNHEAEDLFYQSSLASMNYNTFITEENVQNMNLAGLKMSAFLKEQSTLAQMYPLQEIQNLKVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYSTNITEENVQFMNNAMDKMSAFLKEQSTLAQMYPLQEIKNLTVKLQLQALQ', 'STIEEQAKTMLDKFNHEAEDLFYQSSLASMNYNTNITEITEENVQNMNNALDKMSSFLKELSTLAQMYPPQEIQNLPVKLQLQ', 'STIEEQAKTFLDKFMHEAEDLFYQSSLASMNYNTNITEEMVDSMLDLMDKMSAFLKEMSTLAQMYPLQEINNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFMQSSLASMNYNTNITEQNVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITNENVQNMNNAFDKMSAFLKEQSTLAQMYPLQIIQNLTVKLQLQALQ', 'STIEEMAVTFLDKFNHEAEDLFYQSSLASMNYMTNITEENVQNMNNALDKMSAFLVEQSTLAQMYPLLEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEQQQNMNNAEDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQSLK', 'SFIEEQAKRFLDKFNHEAEDLFYQSSLASMNYATNITEENVQNMNRAGSKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAMTFLDKFNHMAEDLFYQSSLASMNYNKFITEENFQNMNNAGDKMSAFLKEQSTLARMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENVQYMNNALDKMSAFLKEQMTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKIFLRKFNMDADLFYQMSLASMNYNTNITEENVQNMNNADDKMSAFLKEQSTLAQMYPLQEINNLPVKLQLQKLQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEINVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQKLQ', 'STIEKQAMTFLDKFNHEAYDLFYLSSLASMNYNTKITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEAQAKTFLGKFNHEAKDLQFQSSLARMNYNQNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAADLFYQSSLAVMNYNTYITEENVQNMNNAADKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAKTFLDKMNSEAEDLFYQSSLASMNYNTNISEENVQNMNNAGIKMSAFLKEQSTLAQMYPLQEIQNLQVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMRYNANITEENVQNMNNAGDKMSAFLKYQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEMQAKTFLDKFNHEAEKLFYQSSLASMNYNTNITEDVVNMNNALDKMSAFLKKQSTLAQMYPLQEIQNLTVKLQLQAIQ<sep>', 'STIEEMAKTFLIKFNHEAEDLFYQSSLASMNTNITITEENVQNMNNALDKMSAFLKMQSTLAQMYPLQEIQNLMVKLQLQALQ', 'MTIEEQAKTFLDKFNHEAEDIFYQSSLASMNYNANITEENVYNMNNAGNKMSAFLKLQSTLAQMYPLQQIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFMQSSLASMEYNTNISEENVQNMNNALDKMSAFLKEASTLAQMYPLQEIQNKTVKKQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAIMNYNTEITEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQVSLASMNYNTNINITEENVQQMNNAMDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITAENVQNMNNALDKMSAFLKEQSTLAAMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEANVQNMQNALDKMSAFLKEQSTLAQMYPLQEIQNLTMKLQLQALI', 'STIEEMAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQFMNNAGDKMSAFLKEQSTLAQMYPPQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNAMDKMSAFLKEQSTLAQMIPLQNIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITMENVNNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKAFLDKFNHEAEDLFYQSMIASMGYMTNITEENVQMMNLALDKMSAFLKEQVTLAQMYPLTEILNLTVLLQLFALQ', 'SYIEEQAKTFLDKFNHEAEDLFYQSMLAYMNYNTIITEENVQNMNNAGDMMSAFLKEQSTLAQMYPLQEIQNLTVKLDLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSALASMNYNANITEENVQNMNNAGDKMSAFLKEQATLAQMYPLQEIQNLMVKLQLQALQ', 'STIEEQAKDFLMKFNHEAEDLAYDKYLASQYYSMQYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQL', 'SNIEEQAKTFLDKFNHEAEQLFRQSSLASMNYNTNITEQNVQNMNNASDKMAAFLREQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITFENVQNMNNAHFKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNQNITEENVKNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKEQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNDAFDKMSAFLREQSHLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFDQSQLASMVYNTNITEELVQNMNNAGDKMSAFLKEKSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKFFLDKFNLEAEDLFYQSLLASMNYNTIITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'DYIEE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKYFLDKFNHEAEDLFYQSSLASMNYNLNITEENVQNMNNAGDKMSAFLKLQSHLAQMYPLEEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEVLFYELSLASMNYSTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALV', 'STIEEQAKTFLDKFNHEATDLFYQSSLSSMQYNSNITEENVQMMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLAALM', 'STIEEYAKTFLDKFNHEAEDLFYQSSLASMNYYTNITEENVQMMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITKENVNTMEENVNAMNKLADKMSAFLKELSRRAQMYPLQEIQNLTVK', 'PTIETQAKTFLDKFNHFAEDLFYQSELASMNYNTNITEINVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALR', 'STIEEQAATFLDIFNHEAEDLFYQSSLASMNYNTNITEINVQNMNNKLDKMSLFLKEQSTAAQMYPLQLIQNLTVKLRLQALQ', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTQITEENVQNMNNARDKMSAFLKEQSYLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKHFLKKFNHEAEDLFYQSSLASMNYNTNITAENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQLLQ', 'SDIEEQSKTFLDKFNHEAEDLFSQSSLASMNYSTNITEENVQNMNNASMKMSAFLKQQSKLAQMYPLQEIRNLTVKLQLQALM', 'QTIEEQAKTFLDKFNHEAEDLFYQSMLASFNYNSNITEENVQNMNNAGDKMSAFLMEQSTLAQMYPLQEIQNLRVKMQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAAMNYNTNITEEIVQNMNNARDKMSAFLQEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLQYQSIYAMQYNTNITEENVQNMNNALMKMSAFLKMQSTLAQMYPLMEIQNLTVKLQLQMLM<sep>', 'STIEEQAKSFLDKFNHEAEDLFLQSSLASMNYNTNITNENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'SGIEEQAKTFLDKFNHEAERLFYQSSLASMNYNTNKTEENVQNMNNAGDKMSAFLKEQSILANMYPLQEIQNLTVKLQLKALQ', 'STIEEQAKTFLDKFNEEAEDLFYQSSLASMNYNTQITEENVQFMNNAFDKMVAFLKEQSTLAQMYPLQEITNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNINEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQDIQNDTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNNNITEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLRVKLQLQALQ', 'SFIEELAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNMLDKMSAMLKEQSTLAQMYPLSEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQRSLAMMNYNTNITIENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHKAEDLFYQSSLARMNYITNITEENVQNMNIAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLILQALQ', 'SGIEEQAKTFLDKFNHRAEDLFYQSSLASMNYNTNKTEENVQNMNKAIDKMSAFLKEQITLAQMYPLQEIQNLTVKLQLQILQ', 'STIEEQAKTFLDKFNHEAEDLFRQSSLASMNYNMNITEENVLNMNNMGQKMSAFLKRLSTLAQMYRLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNKEAEDLFYQSSLASMNYNTNITEEQVQNMNNAKDKMSAFLKEQSSLAQMYPLQEIQFLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQRSLASMNYNTNITEELVQNMNNALDKMSAFLKEQSTLAAMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNLEMEDLFYASLASMNYNTNITEENVQNMNNALDKMSAFLKEQLTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHYAEDLFYQSSLMSMNYNTNITEEHVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKRQLRALQ', 'STIEEQAKTFLDKFNHEAEMLFQSSLASMNYNTNITEENVQNMNLAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEQLLYQSSLASMNYNSNITEENVQNMNNYSDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEIAKTFLDKFNHMAEDLFLQSSLASQNMFTNITEENVQNMNNAGDKMSAFLKEMSTLAQMYPPQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYKSSLASMNYETNITEENVQNMNNARKKMSAFLREQSTLAQMYPLQEIQNLTVKLQLDALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNEVDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEMAKTFLDKFNHEAEDLFYQSSLASMNYNTNITFENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'SDIEEQAKTFLNKFNHEAHDLFYQSSLESMNYNTNINEENNQNMNAAGDKMRAFLKEQSTLAQMYPLQEIQNLTVKLQLQALV', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENEQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALL', 'STIEEQAKFFLLKFNHEAEDLFYQSSLASMNYNTNITEDEVQNMNNALDKMSAFLKLQSTLAQMYPMQEIQNLMVKLQLQALM', 'STIEEQAKTMLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNAFMKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEMAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEENVQNMNNALDKMSAFLKEQSNLAQMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDMFYQSSLASMNYNTNITEFNVQNMNNAGDKMSAFLKEQSTLAQMYPLQRIQNLTVLLQLKALQ', 'MTIEEQAKTFLDDFNHEAMNLMYESSLASMNYNTNIDEENVQNMNQALDKMSAFLSEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFMQSSLASMNYNTNITEEQVQNMNNARDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SNIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVQNMNMAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMLNAHDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLIKFNHEAEDLFYQSSLASMNYNTNITEEIVQNMNHAADKMSAFLKEQSTLAQMYPLDEIQNLTVKLQLRALR', 'STIEEQAKFFLDKFNHEAEDLFMQSSLAAYMLYNTNITEENVQNMNNAGDKMSAFLKIQSTLAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKTFLDKFNSEAEDLFYQSSLASMNYNTNITEEMVQNMNNALDKMAAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFMDKFNHEAEDLFYQSKLARMNYNTNITEEKVQNMNNALDKMSAFLLELSTLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDKFNLEAEDLEGLALLLSLASMNYNTNITEENVQNMNNMLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNYEAEDLFYQSSLASMNYATNITEENVQHMNNAGDKMSAFLKEQSFLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAYMNYNTRITEENVQNMNNAGKKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLLALQ', 'STIEEQAKTFLDKFNHEARDLFYQSSLASMNYNTNIAEENVQNMNMAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'ATIEEQAKTFLHKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNAIDKMSAFLKEQSTLAQMYPLQFIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEALFYQSSLASMNYNTNIIEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDMFYQSSLASMNYNTNITEINMQNMNNAGDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEILFYQSSLASMNYNNNITEENVQNMNNANDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFSHEAFDLFYQSSLASMNYNTNITEFNVQNMNKALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYETNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLNLQALQ', 'STIEEQAKTFLDHFQHEAEDLFYQSNLASMNYNANITEENVQNMNNAYDKMSAFLKEQSTLAQKYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKSMNYNTNINEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLQVKLQLQKLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAQMNYNTNKTEENVQNMNNKLDKMSAFLKEQSTLAQMYPLQEIQNLTVKMQLQALM', 'STIEEQAKTFLDKFNHEAEDLFQQYFGLSYLMSMRYNTNITEENVQNMNGAMDKMSAFLKIQSTLAMMYPLQMIQNLTVKLML', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVQNMNNAIDKMSAFLKEMSTLAQMYPLQEIQNFTVKLQLQALQ', 'STIEEQAKFFLDKFNHFAEDLQSQASMNYNTNITEENVQNMNNAADKMSAFLKIQSTLANMYPLQEIQNLTVKLKLQALQ<sep><pad><pad>', 'STIEEQAKMFLMKF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SKIEEQAITFLDKFNHEAEDLFYQSSLAMMNLATNITEENVQNMNNALDKMSAFLKEQRTLAQMYPLQEIQNLTVKQQIQALQ', 'STIEEQAKTFLDKFNHEAEKLFYQSSLASANYKTNITEENVQNMRNAGDKMSAFLKEQSSLAQMYPLQEIQNLTVKLQLQMLQ', 'STIEEQAKTFLDKFNHEATDLFYQSSLLNMNYAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQIQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STTEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEENVQNMNNAYKKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STFEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNILEENVQNMNNALDKMSAFLKEQSTRAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNKTEENVQNMNKAMDKMSSFLKEQVTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEMNVQNMNFAHDKMSAFLKEQSTLAQNYPLQEIQNLTVKLQLQSIQ', 'STIEEQAKTFLDKFQMLFYGLSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLLEISNLPVKLQLQALQ<sep><pad><pad><pad>', 'STYEEQAKMFLDKFNHIAEDLFYQSSLAIMNYNTMITEENVQNMNNAGTKMSAFLKEQSTLAQMYPLQEIQNLTVKTQLQALQ', 'STIEEQAKTFLDMFNHEAEDLFYQSSLTSMNYNSNITEENVQNMNNAGTKMSAFLKIQSTLAQMYPLQEIQNPTVKLQLQALQ', 'STIEELAKTFLDKFNHEAEDLFMQSSLFSMNYNTNITEELVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITNENFQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLSALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEINVQNMNNAGDKMLAFLKEQSTLAQMYPLQEIQNLTVKLLLQALQ', 'NTIEELAKTFLDKFNHEAMDLFYQSSLASMNYNTNISEENVQNMNNAGDKMSAFLAEQSTLAQMYPLQEIQNLTVKLQLQMLQ', 'NTIEEQAKTFLDAFNMEAEDLFYQSSLASMNLNTNISEENVRNMNNALDKMSAFLKEQSTLAQMYPLVEIQNLTVKLRLQALQ', 'NTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNNNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFYHEAEDLFYQTSLASMNYNTNITENVKLMLDLMLYLALMLLLNLLLLLYLLQMEHLLLMLLLLVLLNML', 'YTIEELAKTFLDKFNHEAEDLFYQSSLASMNYNTYITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQMIQNLTVKLQLQALQ', 'NTIEEQAKSFLDKFNHEAEDLFYQSMLARNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 
'STIEEQAKTFLDMFNHEAKDLFYQSSLASMNYNTIITEENVQNMNAAGDKMSAFLKLQSTVAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNQAFETLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLASMYPLQEIQNLRVKLSLQALK<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDVFVHEEAYILFYQSSLASINYNTNITEENVQNMNNMLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL', 'SNIEEQAKTFLDKFNQEAEDLFYQSSLASMNYNTNITEMNVNMMDKLSAFLKEQSTLAQMIPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STKEEQAKTFLDKFNHEAEDLFYQSSLASMNYQTNITEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMHYQTNITEENVQNMNNARDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALA', 'STIEEFAKTFLDKFNHEAEDRFYQSSLASMNYNTIITEENVQNMNNALDKMSAFLKEQSLLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHQAEDLFYQSSLASMNYNTNITEDNVQNMNAAMDKMSAFMKEQSTLAQMYPLQEIQNLTVKLQLQALA', 'STIEEQAKMFLDKFNHEAEDLFFQSSLASMNYNANITEENVQVMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLKVKLQLQALQ', 'STIEEQAKKFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNAMDKMDAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'QTIEEKAKTFLDKFNHEAMDLFYQSSLASMNYFTNITEENVQNMNMAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAEDLSYQSSLASMNYNTNITRENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQMMNNAGDKMSAFLKEQSTLAQQYPLQEIQNLTVKLKLQALQ', 'STMEEQAKTFLDKFNHEAESLFYQSSLASMNYNTNITDENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'FTIEEQAKLFLDKFNHEAEDLFYQSSKASMNYNTNITEEQVQNMNNALDKMQAFLKEKLHLAQMLMSLLLLLLPGLALKLLLA', 'STIEEQAKTFLAKFNHEAEDLFYVSRLASMNYFTNITEENMQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNKTEENVQNMNNANDKMSAFLKEQATLAQMYPLREIQNLTVKLQLQALQ', 'STIEEYAKTFLVKFNHEAEDLFYQFSLASMNYETNITEENVSNMNNALDKMSAFLKEQSTLAVMYPLQEIQNLTVKLQLQALL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNINEENNQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLEALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEAVQNMNNAADKMSAFLKEQSTLAQRYPLEEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSTLYSSMIYNTNITEENVQNMNFALDKMSAFLKEQSILAQMYPLQELQNLLVKLQLQAM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITRENVQNMNNARDKMSAFLKEQSTIAQMYPLQEIQNLTVKLQLKALQ', 'STIEEQAKAFLDKFNHEAEDLFYQSSLLSMNYFTNITEENVFNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQVSLASMNYNTFITEENVQNMNIATDKMSAFLKEQSRLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITSENVQSMLNAGDKMSTFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAELLFYQSFLASMNYNSNITEENVQNMNNAGQKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDEFNHEAEDLFYQSSLASMNYNTNITEEKVQNMNNAMDKMSAFLKEQSTLAKMYPLQEILNMTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITQENVQNMNNARDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALK', 'STIEEQAKTFLDKFNHEAEDLFYQSILASMNYNTNITIENVQNMNNAMDKMSKFLKEQSTLAQMYPLQEIQNRTVKLQLQALQ', 'SDIEEQAKTFLDKFNHEAEDLNYQSSLASMNYNTEITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITYENVINMNNAQDKMSQFLKEQSTLAQMYPLQEIQNLTVALQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNNNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYTSSLASMNYNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLDVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNINITEENVENMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQA', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEDNVQNMNNANDKMSAFLKEQSTLAQMYPLQEIQDLTVKLLLQALQ', 'STIEEQAKTFLTKFNHEAEDLFYQSSLASMNYNTNITMENVQNMNNALDQMSAFLKEQSTLAQMYPLQEIQNITVKLQLVKLK', 'STIEEQAKTFLDKFNHEAEYLFMQSSLASMNYNTIITEENVQNMNNAGDKMSAFLREASTLAQMIPLQEIQNPTVKLQLQALQ', 'MTIEEQAKTFLDKFNGEAEDLFYQSSLASMIYNTNITELNVQNMNNAGDKMSAFLKRQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAHDLFYQASLKSMNYNTNINEENVQNMNQAGDKMEAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SKIEEQAKTFLDKFNHEAEDLFYQMSLASMNYDTNITEENVQHMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLLALQ', 
'STIEEQAKTFLDFFNHEAEDLFYQSSLASMNYNSNITEENDQNMNNAMDKMKAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SNIEEQAKTFLDKFAHEAEDLFYQHSLASMRYDGSITEENVQNMNNALDKMSAFLKTQSTLAQMYPLQEILNLTVKLMLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTLITEENFQNMNNAGDKMSFFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYNMSLLSMMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEAVINMNNANDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNISEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEELAKQFL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKVFLDKFNHEAEDLFYLSSLASMNYNTNITDENVQNMNNAHDKMSAFLKEQSTLAQMYPPQEIQNLIVKLQLQALQ', 'STIEEQAKTFLDYFNHEAEDLFYQSSLASMNYNTNITEEEVQMMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTYITEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEMALTFLDKFNHEAEDLFYQSSLASMTYATNITEENVQNMNNALDKMSAFLKEQSTLAQTYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLLYQSSLASMNMDTNITEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEISNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEASDLFYQSSLASMNYNQNITEENVQNMNNANDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLMALQ', 'STIEEQAKTFLDKFNHEAERLFYQSSLASMNYNTNITQENVQNMNEAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSQLASMQYNRNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAIMNYNTNITDENVQNMNNAGKKMSAFLKEASTLAQMYPLQEIQNLTVKLQLQSLQ', 'ATIEENAKTFLDKFNHEAEDLFYQYSKASMNYLTNITEENVLNMNNALDKMLAFLKEQSTLAQMYPLQEIQNLTKKLQLQYLL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNAGDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYDTNITEENVQNMNNARDKMSAFLKEQSLLAQMYPLIEIQNLTVKLQLQSLQ', 'SAIEQQAKTFLDKFNHEAEDLFYQSILASMNYNTNITEELVQNMNNAMDKMSAFLKEQTTAAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKQFLDKFMMEAEDLFYQSSLASMNYKTNITEENVQFMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSKLASMNYNTNITEEKVQAMNNAGDKMSAFLKEQSTLAQSYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNSEAEDLFYQSQLASMNYNTEITEENVQNMNNALDKMLALLKMLLDLFLLLMLLNLLLKMLLLLGLLSL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNKTEENVQNMNNASLKMSAFLKEQSTLAQMYPLQEIQNLTVLLQLQALQ', 'STIEEQAKTFLDKFNHEAHDLFYQSSLASMMYNTNITEEIVQNMNNAGDKMSAFLKEQSKLAQMFPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLLKFN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'N<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAELLFYQSSLASMNYNTNITEEKVQNMNNAGDKMSAFLKEASTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEVNVSNMNMALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAATFLDKFNHEALDAFYQSSLASMNYNTNITEEQKQNMNNAMRKMSAFLKEQSTLAQMYPLQIIQNLTVKLQLQALQ', 
'STIEEVAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEELVQNMNNALDKMSAFLKEQSTLAQMYRLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNAGDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIDEENVQNMNNALDKMNAFLKEQSTLAMMYPLQEIQNLTVKLQLQALM', 'STKEEQTKTFLDKFNHEAEDLTYQSSLASMNYNTNITEELVQNMNNALDKMSLFLKEQSMLAQMYPLQEIQNLLVKLQLQALA', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTDITEENVQNMNNAGDKMSAFLKEASTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFRHEAEDLFYQSSLASMNYNTNITEEDMQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'MTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITKENVQNMNNAGDKMSAFLKMQSTLAQMYPLQEIQNLAVKLQLMALQ', 'STIEDQAKTFLDKFNHEAEDLFYQSLLSMIYNTNITEENVQNMNRMGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALV<sep>', 'STIEYQAKTFLDKFNHEAEDLFYQSSLASMMYNTNITEEQKQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALK', 'STIEEQAKTFLQKFNQEAEDLFYQSSLASMNYNTNITEQNVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNAEITEENVQNMNNANDKMSTFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'GTIEEQAKTFLDKFNHEAEDLMYQSSLASMNYNTFITEENVQNMLMAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SGIEEQAKTFLDKFNHEAEDLF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLHKFNHEAEDLFYQSSLASMNYNTAITEENVQNMNFAVDKMSMFLKEQSTLAQMYPLQEIQNLDVKLQLQSLR', 'STIEEQAKDFLMKFNHEAEDLFEYQSNLAQMNYNTNITEENVQNMNNAGDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQAL', 'STLEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEENVQFMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLMKFNHEAEDLFYQSSLASMNYNTNVTEENVQNMNVAGDKMSAFLKEQSTLAQMYPLQEIQNLMVKLQLQALQ', 'SEIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEFNVTEENVQNMNNALDKMSAFLKEQSTLAQTYPLQEIQNLTVKLQ', 'STIEEQAKTFLLKFNTEAEDLFYQSSLASMNYNSNITEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDMFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNASDKMSAFLKEMSTVAQMYPLEEIQNLTVKLQLQALQ', 'STIEEQAMTFLDKF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQALTFLDTFNHEAEDLFYQSSLASMNYNTAITEENVQNMNNATDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLMALK', 'STGEEQAKTFLDKFNHEALDLFYQSSLASMNYKTNITEENLKNMNNALDKMSAFLKEQSTLAQMYPLMEIQNLTVKLQLQALQ', 'STKEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNINEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'MTIEEQAKTFLDKFNHMAEDLFYQSSLASMNYNTNITEEIYQNMNKAGTKMSTFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEKAKTFLDIFNHEAEDLFYQSSLASMNYITEITEENVQNMNNAHDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKYFLDKFNHMAEPLQYQSILASMNYNTNITEANVQNMNNALDKLSAFLKEQSTLAQMYPLTEIQNLTVKLQLQALI', 'SNIEEQAKTFLDQFNMEAEKLKYNYNTNITEENVNNMNNAGDKMSAFLSEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFNSEAHDLFYQSFLASMNYNTNITEMNVLGDKMSAFLKEQSTLAQMYPLQEIDNLTVKLQLQALQELQ<sep><pad>', 'STIEEQAKTFLDKFNKEAEDLFYQSSLASMNYNLNITEENVQNMNTAGDKMSAFLLEMSTLAQMYPLQEIQNLTVKLQLQALQ', 
'STFEEQAKTFLDKFNHEAEDLFYQSSLASMNYITNITEENVQNMNNARDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQQSLASMNYNSNITEENVQNMNAALDKMSAFLKEQSTLAQMYPLQSIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLYYQSSLASMNYITEITEENVQNMNNAYDKMSAFLNEQSTLAQMYPLMEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFQQSMNAYMMYASMNYNTNITEENVQNMNNAGDKMSRFLKIQSMLAQMYPLQEILNLTVKLQ', 'STIEEQAKSFLDKFNHLAEDLFYQSSLASMNINTNITIEEENVQNMYNAADKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQA', 'KTIEEQAKTFLFTFLDKFNHEAEDLFYQSSLSSMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMMPLQEIQNLTMKLQL', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYFTNITEENVQNMTAAGDKMSAFLKEQSTLAQMYPLFEIQNLEVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYDTNITEENVQNMNNAADKMSAFLKEQSTAAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMHYYTNITEENVQNMNNAGRKMSAFLKEQSTLAQMYPLMEIQNLTVKLQLQALQ', 'PTIEEQAKSFLDKFNHQAEDLFYQSSLASMNYNTPITEENVQNMNNALDKMSAFLKRQSTLAQMYPLQEIRNLTIKLLLQALQ', 'STIEEQAKTFLDKFNHEAEDLLYQSSMASMNYNQNITEENVQNMNNAQDKMSAFLIEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDEFQQMLLASMNLKMSDLASMNYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNL', 'STIEEQAKTFLDIFNHEAEDLFYQSSLASMNYNTNITITEENVQNMNNALDKMSAFLKEQSTLAQMYPLHEIQNLTVKLQLQA', 'STMEEQAKTFLDKFNHEAEDLFLMKSLASMDYNASYASMNYNTNITEENQQNMNNKLDLMSAFLKELSTLAQMYPLQEIQNLT', 'STFEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEDNVQNMNNAYMKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAFTFLDKFNHEAEDLFYQSSLASMNYNDNITEENVQNMNNADDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAHDLFYQSSLASMNYNTNITEEYVQNMNNAFDKMSAFLKEQSTEAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEVESYLASMNYNTNITEENVQNMNNALQKMSAFLKELSTLAQMYPLQEILNLTVKLQIQALQ<sep><pad><pad>', 'SGIEEQAKTFLDKFNHEMLDLFYQSSLASMNYNTNITEEKVQNMNNAEDKMSAFLKEQSTLAQMYPLQELQNLTVKLQLQALQ', 'STREEQAKTFLDKFNHEAEDLRYQSMLASMNYNTNIIEENVQNMNNAIDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYYSSLASMNYHTNITEENVQNMNNAGDKMVAFLKEQSTLAQMYPLFEIQNLTVKLLLQLLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIEEENVQNMNQFGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLIKFNHEAEDLFYQSLATMMYNTNITEENVQNMNVAGDKMSAFLKEQSTLANMYPLQEIQNLTVKLKLQALQ<sep>', 'IT<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDFFNHEAEDLFYQMSLASMNYNTNITERNVQNMNNAGDEMSAFLKEQSTLAQMYPLKEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMRYNTNISEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLSSMNYNTNKTEENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVNNMNNAMDKMSAFLTEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKNFLDKFNHEAEDLFYQSSLASMNLNTNKTEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLRKFNHEAEDLFYQSFLASMNYNTNIQEENVQNMNNAGDKMSAFLKEQSTLAQMYPLEEIQNLTVKLQLQALQ', 'STIEEHAKTFLDKFNHEAEDLFYQSSLASMNYNTNITERNVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'SDIEYQSSLASMYYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNNTVKLQLQALK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQATTFLHLFNHDAEDLFYQSTLASMNYNANITEENVANMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLRLQALK', 
'STIEEQAKTFLDKFQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'PTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYQTNITEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVVLQLQALI', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASYNYITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQPLQ', 'STIEEQAKTFLDKFNHEAEDLLYQSSLAVMVYNTNITEQNVQNMNNASDKMSAFLKEQSTLAQMYPLQEIQNLTVKLILQALQ', 'STIEEQAKTFLLKFNHEAEDLFYQSSLASMNYNTNITEEAVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLFALQ', 'SQIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEHAKTFLDKFNHEAEDLFYQSSLASMNYNTYITEENVQNMNYALDKMSNFLKEQSTLAQMYPLLEIQNLTVKLSLQALQ', 'STEELAATFLDKFNHEAEDLFYQSSLASMFYNTNITEENVQNMNNALDKMSAFLKEQSTLAQTYPLQEIQNLTVKLQLQALM<sep>', 'STIEEQAITFLDKFKHEAEDLFYQSSLASMNYKTNITEENVLNMNNAGDKMSAFLKEQSTLAQMYPLQYIQNLVVKLQLQALQ', 'STIEEQAKTFLLKFNHEAEDLFYQSSLASMKYNTNISEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQRIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNLEAEKLFYQSSLASMNYNQNITEENVQNMNNAGDKMSMFLKEQSTLAQMYPLQELQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAKMLFYQRSLASMNYNSNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLLALQ', 'STIEEQAKTFLDKFNHEAEKLFYQYSLASMNYNTNITEEEVQNMNNAMDKMSAFLKEQITLAQMLPMQEIQNLTVKLQLQALQ', 'NTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTRITEENLQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEMAKTFLDKFNHMAEDLFYQSSLASMNYFTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLAVKLQLQALQ', 'STIEEMAKTFLDKFNHEALDLFYQSSLASMNYNTNITITEENVQNMNNALDKMSAFLKEQSDLAQMYPLLEIQNLTVKLALQA', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAYMNYNTNITNENVQNMNNARDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNAGDKMSAFLKEKSTLAQMYPLQEIQNLTVKLFLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEINVLNMNNAGDKMSAFLKEQSHLANMYPLQEIQNLQVKLQLQELM', 'STLEEQAKTFLDKFNHEAEDLFYQSSMAMMNYNTNITEEDVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNQTVKLQLQALQ', 'STIEEQAKTFLDKFNHFAEDLFYQSSLASMNYNSNITEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNISEENVQNMNNAGDKMSAFLKEKSTLAQMYPLQEIQNLTVKLQLQALQ', 'SYIEEQAKTFLDKFNHEAEDLFIQSSLASMNYNTNITEEIVQNMNFAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEMQAKTFLDKFNIEAEDLFYQSSLAQMYPLYEIQNLPMKLQLQALT<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITRENVQNMNNAGFKMSAFLKLQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDIFYQSSLASMNYNTNITQENVQNMNNAYDKMSAFLKEQSTLAMMYPLQEIQNLTVKLQLQALQ', 'STIEEMAKTFLDKFDHEAEDLFYQSSLASMNYNTNITESNVQNMNNALDKMSMFLKEQSTLASMYPLQEIQNLTVKLQLQALQ', 'STMEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEFNVNAMDKMSAFLFEQSALAQMYPLQEIQFLTVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVQNMNNAFDKMDAFLKEQSTLAQMYPLQEIQSLTVKLQLQALQ', 'STIEEQAKDFLDKFNHEAEDLFYQKSLAKMNYNTNINEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIKNLTLKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYETNITEENVQNMNNAGTKMSAFLKEQSTLAQMYPLQEIQNLLVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQIMNNAVDKMSAFLKEQSTLAQMYPLQEIQNLMVKLQLQALQ', 'STIEEQAKTFLTKFNHEAEDLGYQSSLASMNYNALITITEENVQNMNNAHDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQA', 'STIEEKAKTFLDIFNHEAEDLFYQSSLASMNYNTNITEEQVQNMNNLGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITVENVQNMNNAIDKMSAFLKEQSKLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNKTEENVQRMNNMLDKMSAFLKEQATLAQMMPLQEISRLTVKLQLQALQ', 'STIEEQAKTFLRKMNFEAEDLFYQSQLASMNYNTNITERNVQNMNNAIDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLFALQ', 'STIEEQAKTFLIKFNHEAEDLFYLSSMASSMYILSSLASMIYNTNITEENVQNMNAALDKMSAFLKEQSTLANMYPLQEIQNL', 'STIEERAKDFLDKFNHMAEDLFYQSSLASMNYDTNITEENVQNMNNAIDKMSAFLKEVSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNDAHDKMSAFLKEQSTLAQMYPLQQIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNMEAEDLFYQSSLASMNYNTNITEELVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLMLQAMQ', 'STIEEQAETFLDKFNHEAEDLFYQSSLASMNYNTNITEYNVQNMRNAGDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQALI', 'STLEEQAKTFLDKFNHEAEALFYQSSLASMHYNTNKTEENVQNMNAAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENVANMNNALDKMSAFLKEQMTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTIITEENVKNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYITEENVQIMNNALDKMSAFLKEVSFLAQMYPLQEIQDLTVKLQLQALQPLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITENVQYMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLEVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNLEME<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLSSMNYNTNNTNITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNHEAEDLFLDSSLASMNYITNITEENVGNMNNALDKMSAFLKEQSTIAQMYPLQEIQNLTMKLQLQMLQ', 'RTIEEQAKTFLDKFNHEAEDLFL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLANMNYNSNITEENVQNMNNAGSKMSLFLKEQSMLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNIDEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKLFL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SIIEEQAKTFLDKFNHEAEDLFYQKSLASMNYNTNITNENVQNMNNAIDKMLAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'SNIEEQAKMFLDKFNHEAEDLFYQSSLASMQYNTNISEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNMTVKLQLQALQ', 'STIEEQAKTFLDKFNHEANILMMQSLASMNYNTNITEENVQNMNNMGMKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFKQSSLASMNYTTNITEENVQNMNNAGDKMSAFLKEQATLAQMYPLQEIQNLMVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNITENVQNMLNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLSALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNINEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLTVKIQLQALQ', 'STIETQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNALDKMSAFLKEQSTLAQMYPLQNIQNLLVKLQLQALM', 'STIEEQAKTFLDKFNHMAEVLFYQSSLRMMNYNTNITIENVLAKMNALNKMAFMMSALAKMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIHEENVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIANLFVKLQLQALQ', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLVSMNYETNITEENVQMMKNADDKMSAFLKEQSTLAQMYPPQEIQNLLVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEKLFYQSSLASMNYNSNITEENVQNMNNAGDKMSAFLKEQSRLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEYAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLLEIQNLTVKLQLQALQ', 'STIETQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEDNVQNMNNALDKMSAFLKEQSTLAQMYPLQQIQNLTVKLQLQLLQ', 'STIEEQAKTFIDKFNHEAEDLFYQMSLASMNYNTNITEEHVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SDIEYQSSLASMVYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLNEIQNLTVKLQLQALR<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKEMLDMFNHEAEDLTYKMLLNSMLALMNYNTNITEENVQNMNNAGDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQL', 'STIEEQAMTFLDKFNHMAEDLFYQSSLASMNYNTNITEEFVQNMNNAGDKMSAFLKEQSTLARMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDFFYQSSLASMNYNTNISEENVQVMNNALDKMIAFLKEASTLAQMYPLQTIQNLTVKLQLQALM', 'STIEEQAKPFLDKFNHEAEDLFYQSSLASMNYNTNITEEFMQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVMLMLQALQ', 'STIEEQAKTFLDKFIHEAEILFYQSSLMSMNYNTNITEINVQQMNNAGDKMSAFLKEQSTLAQMYPLMEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYRTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQMIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASRNYNTNISEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNPTVKLQLQALK', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITNENVQNMNNARDKMIRFLKEQSTLAQDYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIVEENVYNMNNASDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKMFLDKFNHMAEDLFYQSSLASMRYSTNITEENVQNMNNAGDKMSAFLKENSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEANVQNMNNAGDKMMAFLKEASTLAQMYPLQEIQNLTVKLQLLALQ', 'STIEEQAKTFLDKFNHHAYMLFYQSSLAMMNYNTNISEENVQNMNNAADKMSAFLKEQTTLAQMYKLKEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEALFYQSQLASMNYNTNISEENVQNMNNAGSKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLKFFLDKFNHEAEMLFYQHGLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL', 'SLIEEQAKTFLDKFNHEAEDLYYQSSLASMNYNTNKTEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLLVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNAGDKMSAFLMEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKAFLDKFNHEAEDLFYQSSLLSMNYNTTITEENVFNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'GTIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYNRNITEENVQNMNNAGDKMSAFLKMQQSLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNAEAEDLMYQSSLASMNYNTNITEEQVQNMNNAVDKMSAFLKEQSELAQTYPLQEIQNLTVKLQLQALK', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNVNITEENVQNMNNAIDKMSAFLMEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLQKFRHEAEDLFYQSSLASMNYNYNITEENVQNMNNKGDKMSAFLKEQSTLAQMYPLIEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLLEQFLLASMNLSMNITEENVQNMNNAEDKMSAFLKEMSTLFQMYPLLEIQYLTVKMGLASML', 'STIEEQAKTFLLKFNTEAEDLFYQSSLASMNYFTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'HTIEEQAKTFLDKFNHLAEDLFYQMSLASNNYNTNIFEENVQNMNNAGLKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLHKFNHEAEDLFYQSMVASMNYKTNITEENVQNMNNAGDKMSMFLKEQSTLARMYPLQEIQNLTVKLQLQSLQ', 'STIEEQAKTFLDKFNHEQEDLFYQSSLASMNYNTNITEEMVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMEILTNITEENVYNMNNAGDKMSMFLKEQSTLAGMYPLQEIQNLTVKLQLKALQ', 'STIEEQAKTFLDKFNHEAEDLFLQSMLASMNYNSNITEENVANMNNGLDKMSAFLKEQSTLAQMYPLQEIKNLTVKLQLQALQ', 'STIEEQAKTFLYKFNHEAEDLFYQSSLASMNYNTNITEEKVQNMNNAGDKMSAFLKEASTLAQMYPLQEIQNLTVKLQLQALQ', 'STIENQAKTFLDNFNREAEDLFYQSSLAMNYNTNITEENVQNMNNALDKMSAFLKEQITLAQMYPLQEIQNLTVKLQLQALM<sep>', 'STIEEYAKTFLDKFNHEAEYLFYQSSLASINYNTDITEENVQNMNTARDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEENQQNMNRAGDKMSAFLKEQSTLAQMYPLQVIQNLEVKLQLQALQ', 'SAIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIFEENVSNMNNAIIKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQATTFLDKFNHEAEDLFYQSSLAIENVQNMNNALDKMSAFLLIQSTLAQMYPLQEIQNLTVALQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNINITEENVQNMNNAFDKMSAFLKAQSQLAQMYPLQEIQNLTVKLYLQA', 'STIEEQAKTFLDKMNHEAEDLFYQFLLHSMRYSMSMSMKYSMSAFMFYNTNITEENVQNMNNRLDKMSAFLKEQSTLAQNYPL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLSKMNHEAQEAEMLFYQSSLASMNYNTNITEENVQNMNNAQDKMSAFLKELSTLAQMLPLQEIQNLTVKLQLQ', 'STIEEQAITFLMKFNHEAEDLFYQSSLASMNYNTNITKENVQNMNNAGDKMSAFLKERSTLAQMYPLQEIQNLTVKLQLQALQ', 'KTIEEQAKTFLDKFNHEAEDLFMQSSYASMNYNTFITEENVQNMNNAGDKMSYFLKEQSRLAQMYPLQSIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNEEANDGMELALDKMSAFLQTMSTLAQMYPLQEIQNLTVKLQLQAMQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SRIEEQAKTFLDKFNHEANFLRYQSSLASMNYNTNITEELVLAMLSLLQYLLKEALDLFLQLMLYLMDMLQLMLVLALMMNLL', 'STIEEQAKTFLDKFNHEALDLFYQVKL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEMQAKTFLDKFNIEAEDLFYQSSLAQMYPPQEIQNGMVKLQLQALV<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNLA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEKLFYQSSLASMNYNSNITEENVQNMNNAGDKMSAFLKEQSTLASMYPLQEIQNLTVKLQLQALQ', 'SAIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNQNITEENVSNMNNAIIKMSAFLKMQSVLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQKSLASMNYNTNISEENVQNMYNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'GTIEEQAKTFLDKFNHEAEDLFYYSSFASMNYNTNYTEENVQNMNNAGDKMSAFLKIQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKVFLDKFNHEAEDLFYQSSLAKMNYNTMITEENVQNMNNAGDKMYAFLKEQSTLAQMYPLQEIQNLTVKMQLQALQ', 'VTIEEQAKTFLDKFNHEAHDLFYQSSLAMMNYNTNQTEENVQNMNNALDKMSAFLKEQSALAQMYPLQEIQNLTVKLQLQAMQ', 'STIEELAKTFLDKFNHEAE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'FTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNTKDKMSAFLIEQSTLAQDYPLMEIQNLTVKLQLQTLL', 'SYIEEQAKTFLDKFNHEAEDLAYQSSLASMNYNTEITEENLQNMNNAGDKMSAFLSEQSTLAQMYPLQQIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNSFAEDLFYQSSLASMRYNTNISEENTQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLIYQSSLASMNYNTLITEENVQNMNNAFDVMSAFLKEQSTLAQMYPLQTIQNLTVKLQLQALQ', 'SGIEEQAKTFLDKFRHEAEDLFYQSSLASMNYNTQITEENVQNMNYAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEMEDLFYQSSLASMNYDTEITEENVQNMNNALDKMSSFLKEQKHLAQMMPLQEIQNLTVKLQLQALQ', 'NTIEEQAKTFLYKFIHKFNHEREDLFYQRSMASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL', 'STIEEQAKTFLDQFNHEAEDLFYQSSLASMNYNTNITEELVQNMNNASDKMNAFLKEQSTLAQMYPLFEISNLTVKLQLQALQ', 'STIEEQAKTFLMKFNHEAEDLFYQSSLASMNYNTNITNENVQNMNNAGMKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFMHEAEDLFFFLRLADSLASMNYNTNITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQL', 
'STIEEQAKTFLDKFNHEAL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQALTFLDKFRH<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAQLLTYQSSLASMNYKTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLPLIKMLTVKLQLQALL', 'ETIEEQAKTFLDKFNHEEEDLM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQVNLKSMNYNTAITEENVQNMNNAGDKMLAFLKEQSDLAQMYPLQEIINLTVKLLLQALM', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNNAGDKMSAFLKENSDLAQMYPLQEIQNLTVKLQLQALQ', 'STIEVQAKHFLDKFNHEAEQLFYQSSLASMNYDTNITEENVNNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEANVQNMNNAQDKMSAFLKEQSTLALMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKMFLDKFNHEAEMLFYQSSLASMNYATNITEENVQNMNNAGVKMSAFLKEQSTIAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAESLFYQSSLASMNYNTMITEENVQNMNNAEHKMSAFLKEQSTLAQMYPLQEIQNLTVKLNLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYETNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQTLI', 'SPIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEAVVNMNNALAKMYAFLLEQSTNAQMYPLQKIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIEEENVQNMNAASDKMSAFLKESSTLAQMYPLQEIQNDTVKLQLQALQ', 'NTIEEQAKTFLDMFNHEAEDLF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLQKFNHEAEDLFYQSSLAMMNYNTNISEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEMAKTFLDMFNHEAEDFFYQSSLASMNYNTVITEENVRNMMNAMDKMSAFLKEQSTLAMMYPLQEILNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLQSMNYNTNINITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLMA', 'STIEEQAKTFLDKFNHEAEKLFYQSSLASMNYNTNITNENVQNMNNAGDKMSAFLKEQSTLASMYPLQEIQNLTVKLQLQALQ', 'SFIEEQAKTFLDKFNHEAEKLFYQSMLASMNYNTKITEENVQNMNNAGDKMSAFLKEQSTLAQMYPMQEIQNLTVKLQLQMLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAHMNYNTNITAENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLEVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNINEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLRALQ', 'STIEEQAKTFLDKFNHEAEDLR<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASDNYNTNIAEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITLENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNATVKLQLQALQ', 'SGIEELAKTFLDKFNHEAEDLFYQKSLASMNYNTQITEENVRNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNVTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYDTNITEENIQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVRLQLQALQ', 
'STIEEQAKTFLDKFNHEAEMLFYQSSLASMNYNTNITEEDVQNMNNATDKMMAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'RTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITQENKLTMKDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEENVQNMNNALDKMSAFLKEQSTLAQMYDLQEIQNLTVKLQLQALM', 'STIEEQAKTFLEKFNHEAE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITTENVQNMNNAGDKMQAFLKEQSTLAQMYPLQKIQNLTVKLQLKALQ', 'SDIEEQAKTFLDKYNHEAEDLFYQSSLAS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQGSLASMNYNTNITDENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEDVQNMNNAADKMSAFMKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNISEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQRLTVKLQLQALQ', 'STIEEQAKNFLSSFNHEAEDLFYQSSLASMNYFTNITEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNQTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSGLASMVYNTNITEINVQNMNNKIDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'ETYEESAKTFLDKFNHEAEDLFYQSDMASMQYKLSMNYNTNITEENVQLMNNALDKMSAFLKEQSTLAKMYPLQEIQNLTVKL', 'STLEEQAKTFLDK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAMTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEKQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'ETIEEEAKTFLDKFNHEAESLFYQSSLASMNYNTNITEITVENVQNMNASLDKMSAFLMEQSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKSFLMKFNHEAEDLFYQSSLASMNYNTNDTEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEQVQNMNNAQDKMSAFLKEQSTLAQNYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFRHEAEDLF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKNFLSEFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNQTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITDENVQNMMMAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLFKFNHEAEDLFYQSSLASMNFNTNITEMNVLAKKKVLAKMYPLQEIQNLTVKLILQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLLKFNHEAEDLFYQSSLASMNYNTNITEANVQNMNNAGMKMSAFLKEQSFLAQMYPLQEIQNLTVKAQLQALQ', 'STIEEQAKTFLDKFNQHFNHELEHLFYQSSLASMNYNTNITEENVYNMNNALDKMSAFLKEQSSLAQMYPLQEIQNLTMKLQL', 'STIEEAAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEITVENVENMNNAGDKMSAFLKEISTLAQMYPLQEIQNPTVKLQLQ', 'STIEEQAKTFLDKFNR<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNNEAEDLFYQSSLASMNYNTNITEEQVQNMYNARDIMSAFLKEQSTLAIMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHIAEDLFYQSSLASMNYNTNITAENIQNMNNAGDKMSMFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STILEQAKTFLDKFNHEAEDLFYQSFLASMNYNSNITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNYTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDAEDLFYQSSLASMNYNTNITEENVQIMNNALDKMSAFLNEMSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNDEAEALFYQSSLLFMNYDTNITEENFQNMNNALDKMSAFLKEQSTLATAYPLQEIQNLTVKLQLQALQ', 'STIEEQAKNFLMKFNHEAEDLYNMNMLADDMSSLYYMSSLAQMNYNTNITEENVQNMNNAMDKMSAFLKEMSTLAQMYPLQEI', 'STIEFQAKTFLDKFNIEAEDLFYQSSLASMNYNTNITLEEVV<cls>EMVKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad>', 'SDIEEQAKTFLDKFNTEAEDLFYQSSLASMNYNTNITEEEVQNMNNALDKMSAFLKIQSTLAQMYPLQEIQNLTVKIQLQALQ', 'STIENQAKTFLDKFNAEAEDLFYQSSLASMNYNTNKTEENVQNMNNALAKMSAFLKEQSTLARMYPLQEIQNLTYKLQLQALM', 'STIEEQAKTFLQKFNHEAEDLFYQSSLASMNYNTNINITEENVQNMNNAMDKMSAFLKEQSTLARMYPLQEIQNLTVKLQLQL', 'STIEEQAKTFLDKFNHEAEDLFYQYSQASMNYNTNITEINVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVQNMNNAGFKMSAFLKEQLTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEIRDLFYISSLASMNYNTNITIENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQMLTVKLMLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNNNITEENGQNMNKAYDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLFALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNENITEENNYFMMELLEMMEPGNMMEMMSAFLKELSTLAQMYPLQEIQNLT', 'LTVLIQL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEFNYNTNITEENVQNMNNARDKMSAFLKEISTLAQMYPLQEIQNLT', 'STIEEQAKTFLDKFNHEARDL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNKEAEDLFYQSSLASMNYNTIITEENVQNMNTAVDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAVGLFYQSSLMYRNITEENVQNMNNALDKMSAFLKEQSTLALMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEDNVQNMNNAMDKMSAFLNEQSTLAQMYPLQDIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNRAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLLKFKHEAE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'ITIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTTITEENVQNMNNAGDKMSAFLMEMSTLALMYPLQEIQNLTVKLQLQALQ', 'KTIEEIAKTFLDKFNHEAEDLFYQSSLASMNYNTNATEENVQNMNNAGDKMSAFLKTQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLTKFNHEAVEATDLFYQSSLASMNYNTNITEENVQNMNKALDKMSAFLKEQSYLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTMLFKFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNAGDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIETQAKTFLDKFNHEAEDLFYQSSLAQMNYNTFITEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'ETIEEQAKTFLDNFNHEAEDLFYQSSLASMNYNNEITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTMKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYESSLKSMNYNTNITEFNVQAMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLTVMLQLQALQ', 'STIELQAKTFLDKFNFEAEDLFYQSSLASMNYNTNITEEDVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLLVKMSLQALQ', 
'STIEEQAKTFLDKFNHEANDLFYQSSLASMQYNSNITEENVQNMNNAHDKMSAFLKEQSTLAQMYPLQEIQNAMVKLQLQAMQ', 'DTIEEQAKTFLDDFNHEAEDLFYQSSLASMNYNTNILEENVQNMNNVLDKMSAFLKEQSTLAQMYPLLQIKLSLTLLGMLMHS', 'STIEEQAKTFLDKFNHEAESLFYQSSLASMNYNTNTTEENVQNMNNAGDKMSAFLKEASTLAQMYPLKEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHAAEDLFYQSSLASMNYNTQITEENVQNMNNAGDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEVLFYQSSLASMNYNTNITEANVQNMNNAGMKMSTFLKEASTLAQMYPLQEIQNLTVKLQLQALQ', 'FTIEETAKQFLQTLLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKSFSTLAQMYPLEEIQNLTVKLQL', 'SEIEEAAKTFLDTFNHEAEDLFYQSSLASMNYNTNISEENVQNMNNAGNKMSAFLAEQSFLAQMMPLAEIQNLTVKLQLQALL', 'STIEEQAKYFLDKFNHMAE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEQEDLFYYFLLASMAYLASMAYLTSIASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQ', 'STIEEQAFVFLDKFNHEAENLFYQSSLALMNYNTNITEEYVQNMNNAGDKMQAFLKEQSTLAQMYPLTEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNKNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNQEMNEQAERLFYQSSLASMNYNTNITEENVQMMNNAMDKMSAFLKEQRTLAQMYPLQEIQNLTVKLQM', 'STIEEQAKTFLDKFNHEAEDKFYQSSLASMNYNTNETEENVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTIKLNLQALA', 'STIEEQAKTFLDYFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNAGDKMSAFLKEQTTLAQMYPLQFIQNLTVKLQLQALM', 'STKEEQAKTFLDKFNHEAEDIFYQSSLASMNYNTNITEEEVQNMNNAGDKMSAFLKIQSTLAQMYPLQEIQNLYVKLQLQAMQ', 'STIEEQAKTFLDTFAEDLFYQSSLASMNYNTNITEENVQNMNNAEDKMSAFLKKISMLALQSTLAQMYPLQEIQNLLKKMQLL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNINEENVQNMNNALDKMSAFLKEQSTLAQMYPGQEIQNLTLKLQLQALQ', 'STIEEQAMTFLDHFLTLFLDKFYQHMAEDLFYQSHLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIDNLT', 'FSTEEEQADLFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMDAFLKAQSALAQMYPLQEIQNLTVKLQLQAL', 'STIEEQAKYFLDKFNHEKDKMSAFLKEQRTLSQMYELLEIQNLTVKLQLQARM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEELVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVILQLSALQ', 'STIEEQAKTFLDM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQASLASMNYNTNITDENVQNMNNAFLKMSAFLKEQSTLAQMYPLQEIQNLHVQMSLASMK', 'STIEEQAKTFLDKFNHEAEDLFYVLVTASMFLSMNYNTNITEENVQNMNMAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL', 'STIEEQARTFLDKFNHEAEDLFYQSSLASMNYNTNKTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEAYRSMNNAGDKMSAFLKEQSTLAQMYPLREIQNLTVKLQLQALQ', 
'STIEEQAKTFLLKFNTFAEELFYQS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEETAKTFLDKFNHEAETLFYQSSLASMNYNTNITEEKVQNMNNALKKMSAFLKEQSTLAQMYALQEIQNLRVKLQLQLDK', 'STIEEQAKMFLMKFNSRAEDLFYQVSLASQMYPTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLLKFNNEAEDLFYQSSLANMNYNTNITEELVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNPTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAVMNYNSNITEENVQNMNNAGDKMSAFLKTQSKLAQMYPLQEIQNLTVKLQLQALQ', 'NTIEEQAKTFLDSFNHEAEDLFYNSSLMSMNYNTNITEELVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLAVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLLSMNINTNITEEIVEYMNNALDKMSAFLKEKSTLALMYPLQELINLTVKKQLQDLQ', 'ATIEEALTQLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQQLTVKLILLLMKM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITKENVNLMNNAVDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQKLQ', 'STIEEEAKTFLDKFNHEAE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDYFM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIENQAKTFLDKFNMEMEKIFQQSLASMNYNTNITEENVQNMNNAFDKMSAFVKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQMSLASMNYNSNITEENVQSMNNAGDKMSAFLKEQTTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEELEDLFYQSSLASMNMNTNITEENTQNMLELLSKMSAFLKEQSTLAQMYPLQSIQNLTVKLSLQ', 'STIEEQAKTFMDKFNHEAEDLMSLLSMSSLASMNYNTNITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNHEEEDLFYQSSLASQSMLASMEYNTNITEENVENMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKL', 'MTIEIYTKTFLDKFNHQMEDLFFQQSLASMNYNTNIAEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTLKLQLQVLL', 'STIEEQALTFLDKFNHEAEDLFYKSSLASMNYNTNITEMNVNMDDKMSAFLKEQSTLAKMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STMEEQAKTFLDKFNHEAEDLFYQSSLAQMNYNTNITEANVQNMNNAGDKMSAFLKEQSTRAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEQEDLFYYFLLASMAYLAYLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLT', 'STIEEQALTFLDKFNHEAADLFYQASLASMNYNTNITETNVQNMNNAGDKMSAFMKEQSTLALMYPLGEIQNLTLKLQLQALR', 'STIEEQAKLFLDKFNHEAEDLFYQVSLALMNYQTNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLMALQ', 'STIEEQAKTFGDKFNHEAEDLFYQSSLASMKYNTLITEENVQNMNNALDKMSAFLKEISVLAQMYPLLEIQNLTVKLQLQALQ', 'STIEEQAKTFLDIFLDKFNHEAEDLFYQSSMASMNYNTNITEENVQNMNNAGDKMQAFLKEQSTLAQMYPLQEIQNLTVKLQL', 'STIEEQAATFLDKFNHEAEDLFYQSSLASMNYNTNNTNITEENVQNMNNAMDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEAAKTFLDKFEHEAEDLFYQSSLASMNYNTNITEMNVNSMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLLLQALQ<sep><pad><pad><pad>', 'STIEEQALTFLD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKLFLDKFNHEAEDLFYQSSLASFMYNTNITIEEENVEAMNYNTNITEENVQNMNNALDKMSAFLKEISMLAQMYPL', 
'SFIEENAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVENMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEEAKTFLDKFNHI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFFTEAEDLFYQSSLASMNYNTNITIENVQNMNNALDKMSAFLKEQSALAQMYPGQEIQMLTVKLQLQLLQ', 'LTIEEQAKTFLDKFNHEAEFLFYQSSLASMNYNTQITEENVQNMNNATDKMSAFLKEQSTLAQMYPLQEIDNLTVKLQLQALQ', 'STIEEQAKTFMDKFNHEAEDLFYQSLLASMNYNTNIKEENVQNMNLALDKMYAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SGIEEEAKRFLKKFNHEAEDLFYQS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKEELLKFLDKFNHEAEILDYNSSLKEMNYNTNITEENVQNMNNAGDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQL', 'STIEEQAKAFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVYNMVNAGDKMSTFLKEQATLAQMYPLQEIQNLTVKLQLLALQ', 'STIEEMAKTFLDKFNHEAEDFFYSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEKSTRAQMYPLQEIQNLTVKLQLQALQ<sep>', 'NTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIDEENVQNMNNAMDKMSAFLKEDSTLAQMYPLQEIQNLVVKLQLQALQ', 'STIEEQALTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNNFGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'FTIEEQAKTFLDKFNHEAEDLFAQSMLASMNYNTNITIENVMNMNNAMDKMSAFLKEQSTLAQAYPLQSIQILTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEYQSSLASMNYNTNITEENVRQMNNALDKMSAFLKEQSTLAQMYPLQFIQNLTVKLQLQALQPLM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVQAMNNAGDKMSAFLKEQSTLAQMYPLQFIQNLTVKLQLDALQ', 'STIEEQAKTFLDKFNHEAEDLFYQYLLQSKIASMNYNTNITEENVKNMNNAYDVMSAFLKMDSTLAQMYPLQEVQNLRVKLQL', 'STIEEFAKTFLDKFNHEAEDLFYQSSLASMNYNDNITEENVQNMNSASDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEYLFYQSSLASMNYNTQITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFDHEAEDLFYQSSLASMNYNTNYTEENVQNMNNAQDKMYAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEALDLFDQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQA', 'STIEEQAKTFLDYFNHEAEDLFYQSSSASMNYNTNITIENVQNMRNMVDKMSAFLKEQRTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQALTFLDKFNMEAEDLFYQSSLASMNYNTNITEEQVQNMNNAGDKMSAFMKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNMNANITEENVQNMNNAGYKMSAFLKERSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHAAEDLFYQSSLASMNYSTNITEENVQNMNNAGDKMSAFLSEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEALLFDFLDLFSMLYAMSVLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQN', 'STIEEQAKTFLDKFNHEAKDLQSMLASNNYNTNITEENLQNMNNAGDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad>', 'STIEEQAKTFLDKFNHEAELFLDLLLDLLDLLTVLLDLFDLDLLLGLLDLFDLLSLASMNYNTNITEENVQNMNNALDKMSAF', 'NTIEEQAKTFLDMFNHEAENLFYQSSLASMNYYTNITEENGQNMNNAGDKMQAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFLHEAEDLFYQSSLASMNYNTLITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKSFLRALQ', 'STIEEQAKTFLNKFNMEAEDLFYQSSLASMNYNTNITITEENVRNMNNAGLKMSAFLKEQSTLAQMYPLREIQNLTVKLQLQA', 'STIEEQAKTFLD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'SYIEEQAQTFLDKFNHEAEDL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'ETIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNNALDKMSAFLKEQSTLAQMYDLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQESLASMAYDLNITEENVQNMNNLGDKMSAFLKRQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAETLFYQSSLQSMNYNANITEENVQNMNRALDKMSAFLKELSTAAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQHLLQSMFYYMNITEENVQNMNLALTKMSVFLKEQSTLVQMYPLQEIQNRKVKLLVAVDY', 'STIEEQAKTFLDKFNHEAEDFFYQSSLASMNYNTKITEENVQNMNNAGDKMSAFLKEQSTLAQRYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYMSSLASMNYFTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQAIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHVAEDLFLQSSLASMFYNSNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQTSLASMNYNTNITYENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SQIEEQAKTFLDKFNHEAEDLFYQSSAAIMNYNTNIAEENVQNMNNAIDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYDTNITEENVQNMNNAHDKMSAFLKEQSTLAQMYPLQEIKNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAFDLFYQSSLASMNYNTNYTEENVQNMNNAGDKMSAFLKEQSTLAQRYPLQEIQNKTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSMASMNYNTNISEENVQNMNNARDKMSAFLKEQSTLSQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHFAEDLFYQSSLASMNYNTNNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ', 'SAIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEFVQNMNRAGDNMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQTLM', 'STIEEQAKTFLDKFNEEAAQFLDKFNHEAEDLFYQSSLAIMNYNTNITEENVENMLNALDKMSALMYLLLMMVLLLYNMLELM', 'SDIEEQAKTFFDKFNHEAEDLENQLSLANMNNAEDKMSAFLKFQSSLAQQYPLQEIQNLMVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'MTIEMQTKTFLDKFNHEAEDLFYNSSLASMNYNTNITDENVQNMNNAGDKMNAFLKEQSTLAQMYPLQEIQNLMVKLQLQALQ', 'ETIEEQAKTFLDKFNHEAEDLDYQSSLNSMNYNTNITEHNVFNMNKALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITLENVQNMNNALDKMSAFLKLILTLASMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNQEAEDLFYQMSLASMNYNTNISEENVQNMNNAGDKMTVFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLKKFNHEAEDLFYQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMRYDTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDEFNHEAEDLFYQSSLASMNYNANITEENVQNMNNAMDKMSAFLKEQSTLAMMYPLQEIQNLTVKGQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNALDKMSAFLKEQMTLAQMYPLQEIQNLTVKLQLQALQP', 'STIEEQAKHFLDKFNHEAEDLFYQSSLMSMNYNTNINEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLLKFNHEAEDLFV<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEEVQNMNNAGDKMFAFLKEQSTLAQMYPLQEIQNLTVKMQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITKENVQNMNNASDKMSAFLKEQSTLAQMYPLQEIQNLTVKLSLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSVASMNYNTNITEQNVQNMNNAKDKLSAFLKEQSTLAQMYPLDEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMNYNTNITEENYQNMNNAGDKMSAFLKEQMTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 
'STIEEQAETFLDKFNSEAEDLFYQVSLASMNYNTNITEELVQNMLLALLKMLALMNPLLLMMYLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKIFLRKFNHEAEDLFYQSSLASMNYNTAITEENVQNMNMALDKMSAFLKEARTLANMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLKSMNYNTNITEEQVMLMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DT<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SYIEEQAKTFLDKFNHEAEMLFYQSSLASMNYNTNIAEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLVVKLQLQALQ', 'STIELQAKTFLDKFNHEAEDLFYQSSLAHMNYNTHITEENVQYMNMAFDKMSAFLKEMSTLAMMSPLAQMYPLQEIQNLTVKL', 'SIKEEQAATFLDKFNHEAEDLFYQSSLASMNYNTNIHEENVQNMNNALDKMSAFLKQQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDTFNHEAEDLFYQSSYASMNYNTNITQENVYNMNNALDKMSAFLKEQSTLAQMYPMQEIQNLTVKLQLQALQ', 'ATEFLDKFNIEKEDLFYQSSLASMNYNTNITEENVQNMNNANDKMFAFLKEKSTLAQMFPLQEIQNITVKLQLQALM<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEREDLFYQSSLASNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKMQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SAIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNMGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNRTEENVQNMNNAGDKMSAFLKEQSSLAQYYPLQELQNLTVLLQLQALM', 'STIEEQAKQFLDKFNHEAEDLFYQSSLASMNDMTNITEENFTNMNRARDKMAAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'DTIEEQAKTFLD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKEFMHILDKFNHEAEDLFYQSSYASMNYNTNITEENVSLMNNALDKMSAFLKEQSTRAQMYPLQEIQNLLVKLQLQ', 'STIEEFAKTFLDAFNHE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNLNTNITNENVQNMNNAYDKMSAFLKEQSTLAQMYALQMIQNLTVKLQLQALQ', 'STIEEQAKTFLDVFNMEAEDLFYQSSLASMNYNTNISEENVQNMNNAGDKMLAFLKEQSTLAQMYPLQEIQNLTVKLRLQALQ', 'STIEEQAVTFLDKFNHEAEDLFYMSSLASMNYNTNITITNVNLMMVVLLVVVVVVVLVVVMSLLFAKMSAFLKEMSTLAQMYP', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNTAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEYQAKTFLDKFNHEAEDLFYQSSLASMNYQTNITEENVNNMNNALFEMSAFLKEQSTLAQMYPLQTIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITNENVMNMNNAYDKMSAFLMEQVTLAQMYPLQYIVNLTVKGQLQALQ', 
'ITIEEEAKTFLTKFNHEARDLFMQT<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHYAEDLFYQSSLASMNYNTNITEEIVQNMNNALDKMSAFLKEQSTLAQMYPLQFIQNLTVKLQLQALQ', 'SYIEEQAKTFLDKFRHEAEDLFYQSRLASMNYNTNIQEENVQNMNFAGDKMSAFLKEQSTLAQMYPGQEIQNLTVKLQLQALQ', 'STIEEQAKYFLLKFNHEAEDLFYQSSLA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEASSLMDMLKDLSDLAQMFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEI', 'STIEEQAKTFLDKFNHEAEDLFDLSTLASMNYDTNITEELVDNMNNALDKMSAFLKETSTLAQMYPLQEIQNLTMKLQLQMLQ', 'STIEEQAKTFLDKFNHEAEDLSYQSSLASMNYNTNITENVLDMNDAFDKMSAFLKEISTLAQMYPLQEIQNLTVKL<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFRDKFNHEAEDLFYQSSLASMNYNTNITITEENVQAMNLALDKMSAFLKTQSTLAQMYPLQEIQNLMVKLQLQA', 'STIEEQAKTFLDKFNAEAEDLFY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQAKIFLDKFNHEAEDLFYQSSLASMEYNTYITEENVQNMNNALDKMSAFLKEQSTLAQMYDLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLLKFNHEAEDLFYQSSLASMNYDTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLKEIQNLTVRLQLQALQ', 'STIEEQAKTFLDKFNHEAEALG<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHSAEDLFYQSSLALMFYNTDITEENVQNMNNAGVKMSAFLKEQSTLAQTYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLKYQSSQASMNYNTNINEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLLVKLQLQALF', 'STIELQAKTFLDKFNYEAEDLFYQSSLASMNYNTNITEEDVQNMNNASDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLANMNYNTNITEINVQLMNNAGDKMSAFLKNQSTLAQMYPLQEIQNLAVLMSLASMM', 'STIEEQAKTFLDKFNQIARDLFYQSSLASM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'TTIEETAKTFLDKFNHEAEDLFYQKSAASMNYKLSLASMLYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLE', 'ITIEEEAKTFLHKFNHEAEALFYQASLASMNMQTNITEENVQNMNNAGDKMSAFLMEQSTLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLMKFNHEAEALFYQSSLASMNYNTNILEENVQNMNNARDKMSAFLKEVSFLAQMYPLQEIQNLTVKLQKQALQ', 'STIEEQAKTFLDKFNHEAKD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEMAKTFFLDKFNHEAEDLFYQSKLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL', 'STIEEIAKTFLDKFNHEAQD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SRIEEQAKTSLDKFNHEAEDLFYQSLLASMNYNTNISEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLILQALQ', 'SDIEESAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEENVQNMNNAGRKMSAFLKEQSTLAQMYPLQEIQNLTLKLQMQAGQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNAMTNITEENVQNKHNARDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQLLI', 'SQIEEQAKTFLQKFNHEAE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNI<cls>MEDLFYQSSLASMNYNTNITEENVQNMNNAHDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAMM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMTLITEITNVNITEENVQNMNNRLDKMSAFLKEQSTFAQMYPLQEIQNLTVKLQ', 'STIEEQAKTFLDKFNHEAEDLFYQYSLASMNYNNNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLRALQ', 'STIEEQADMFLDKFNHEAEDLFIQNKLQSMSYKKSLASMQYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLT', 'STIEETAKFFLDKFNH<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'MTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTQITEENVQNMNNAGDKMSAFLKEQSDLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASYNYNTNITEFNVLALMYPLREIQNMTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKKFLDPFNH<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNQNITEENVQNMNNAFDKMSAFLKEQRTLAQMPPLQEILNLPVKLQLQALQ', 'STIEEQAKTFMDKFNHEASDLFYQSMLASMNYNTNITLAVMNYNTNITEENVQNMMNALDKMSAFLKEQSTLANMYPLQEIQN', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNIDNMSDKMIAFLKEMSTLAQMYPGQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad>', 'QTIEEQAKTFLHVFNMFAEDQYMSYDLSMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDAFNHEAEDLFYQSSLASMNYNTNIQEENVQNMNQSGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQSLQ', 'STIEEQAKTFLDKFTHEAEDLFSNSSISSTLASMNYNTNITEENVQNMNNAFDKMKAFLKLQSTLAQMYPLQEIQNLTVKLQL', 
'STIEEQAKTFLDKFNHEAEDYLYYSSLILMYLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEELLLYVNMDLLLNELLDLLLLLYLLELGELLLLMNDILLLVLYL', 'STIEEQAMTFLDEFNMEAEDLFYQSSLASMRYNTNISEENVQNMNNAGDKMSAFLKTQSTLAQSYPLQEIQNLTVRLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYLTNITEENVQNMNNAADKMSSFLKEQSTLAQMYPLQEIQNLTVKLQVQALQ', 'STIEEQADTFLLKFNHEAELLFYQSSLASMNYNTNITIENVQNMNYALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQMQALQ', 'STIEEQAKTFLDKFNHEAEDLFYTSKLKSMFSLLSINSMSVLLSMNYNTNITEENVQNMNNASYLMSVFLKELSTLAQMYPLQ', 'STIEEQAKTFLDKFNVEAH<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEITKELLDVLLDLTSILYLTNITEENVQNMNNMLDKMSAFLKEQSTLAQMYPLEEIQNLTVKLQLQALMALFTLLFGLG', 'STIEEQAKTFLDKFNSSLDKMQYDGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLFALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEFAKTFLDKFNHEAEDLFYQSSLASMNYFTNITEENVQNMNNAGDKMSAFLKEQSTMAQMYPLQRIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNH<cls>AEDLFYQSSLYSMYYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQARM', 'SLIEEQAKAFLDKFRHEAEDLFYQRSLASMNYNTEITEENVQNMNNAHDKMMAFLKEQSTLAAMYPLQEIQNLTVKLQLNALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASNNYNTNIAEENVQNMNNATDKMSAFLKEMSTLAQMYPLQEIQNLTVKLSLQALQ', 'STIEEQAKDFLMKFNHEAESLLEQSDLAQMYPLDEIQMLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLEKFNQEALDLFLDQFNHEAEDLFYQSSLAQMNYNTNITEENVQNMNNANDKMLAYLLLMQMLTLLALLFLLT', 'STIEEQAKTFLDKFNHYAEDLFYQSSLASMNYNSNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQAIQNLTVKLQLQALQ', 'STIEEQAKTFLDHFNHEAEDLFYQSSLMSMNYNTGITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTLITEENVQNMNNLLDKMLLFLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLYKFNHEAEDLFIQSSLASMNYNSNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'ITIEMQAKHFTDKFNHEAEDLFYQLSLASMNYNLNITEENVQNMNNALDKMSAFLKEQSFLAQMYPLQEIQNLTVKLMLQALQ', 'STIEEQAKTFLDKFKHEAEDLFYQSSLASMNYNTNITEEDVQSMNNATDKMSAFLAEMSTLAQMYPLQEIQNLTVKLSLQALQ', 'STIEEFAKTFLDKFNHEAEDLFYQSSLQSYAYDMNYNTNITEENVQNMNNARDKMSAFLKEISTLAQMLPLQEIEELMSKMSL', 'STIEEQAKTFLDKFNHEAEDLFDQSNLASMNYLTNITEENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNLPVKLQLQALQ', 'STIEEQAQTFLDKFNYEAEDLFYQSSLASMNYNTNKTEENVQNMMNASDKMSAFLKEQSTLATMYPLQEIQNLTVKLQLQALQ', 'SNIEEMAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEEQVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVRLQLQALQ', 'STIEEQAKTFLDKFNHEAEIESYYSSSSSSLALMSYNTNITEENVQNMNNALDKMSAFLKTLSTLAQMYPIQEIQNLTVKLLL', 'STIEEQAKTFMDKFAEDLFYQSMLASMQYNTNITEENVQNMNNMGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQMLQKKM', 'STIETQAKTFLKKFNHDLFYMSSLASMNYNTNITEENVQNMNNAGSKMSAFLKEQQTLAQMYPLQQIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAKDLFYQSSLASMNYNTNITNENVQNMLNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKMFLDMFNHEAEQLFYQSSLASMNYNTKITEENVQMMFNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'MNIEEQAKTFLDKFHTRAELLFYISSLASAFYDTNITEENVINMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 
'STIEEQATTFLRKFNHHAENLFYQSSLASMLYATNITEENVQNMNFAEDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALK', 'STIEEQAKSFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNALDKMSAFLKEQSTRAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAMTFLDKFNHEAELLFYSSLGSMNYNTNITEENVANMMNAGDKMSAFLKEQSLLAQMYPLQEIQMLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSFLASMNYNTNITEEEVNTMNNAGDKMSFFLMEMSALAQMYLLQEIQNGTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSLLASMNINTNNTNITEENVQNMNMALDKMSAFLKEQATLALMYPLQELSNLTVKLQLQ', 'STLEEQAKTFLDKFNHEAEDLFYQSSLASMN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQAKTFLDKFNHEAEDLLYQSSLASMNYNTNITPENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEVQNLTMKLQLQALQ', 'STIEEQAKTFLDKFNHYAEDLFYQHSLAQDNYQTNITEENVQNMNNALDKMSAFLKEQSTLASSYPLQEIQNLPVKLQLQAPM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNKNITEENVQNMNNALDKMSAFLKEQQSLALMYPLSEIMNLTTKLELQSLG', 'STIEEQAYTFLDKFNHEAEDLFYSSSLASMNYNTNITEEIVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNNTNITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNHEAEDVEDLFYQSSLASMNMNTNITEENVQKMNNVADKMKAFLKEQSTLAQMYPLQEIQNLTVKLNLQ', 'STIEEQAKTFLDKFNHEAEHLELDSLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEAVQNMNNMGDKMQAFLKEQSTLAQMYPLQEIQNLTVKMSAFLKE', 'STIEEQAKTFLDKFDHEIYDLFYQSSLASMNYNTNITEQDVKMMVDLSDMNIKSDKMSAFLKEQSTLAQMYPLKEIQNLPVKL', 'STIEEFAKTFLDKFNHEAEDLFYQSSLASMNYNTNITRENVQNMNNALDFMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEEQFQNMNNAGEKMSAFLREQSTLAQMYPTQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQMSLASMNYNTNITIENAQNMNNALDKMSAFLKEQSTLAQMYPLVEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYKSSLASYNYNTNITIENVRNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLTVQMSLASML', 'SLIEEQAKTFLDKFNHEAETLRYQSSLASQNYNASLASMNYNTNITEENVQNMNNALDKMSAFLKRQRILAMTLAQMFPLQEI', 'STIEEQAKTFLDKFNHAAEDLFYQSSLASMNYNTNITEMNVNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAKMNYNTNITENNVIDINITEKMSAFLKEQRTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNYEAEFLFYQSSLASMNYNASIEYMNYNTNITEENVFNMNNALDKMSAFLKEQMTLAQMYPLQEIQNLT', 'SDIEEQAKTFLDKFNHMAEDLFYQSSLASMLKLTNITEENVQNMNNALDKMSAFLKESSTLAQMYPLQEIQNLTVKLQMQALS', 'SIIEEQAKTFLDKFNHEAEDAQYQSSLASMNYNLNITEENVQNMNNAMDKMSAFLLEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKIFLDEFNHLFLDLFNYLRELEDLFYQSHLALMNYNTNITEENVQNMNNATDKMSAFLKEQSLLAQMYPLQEINNF', 'STIEESAKTFLDKFLHE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEESAKTFLDKFNHEAEDLFYQSSLASMNYNENITEENVQNMNNALDKMSAFLAEQSTLAQMYPLQEIQNLTVKLQLQMQS', 'DTLEEQAKTFLDKFNHEAEDLFYDSDLKSMNYMLSLASMNYNTNITEENVLQMNNALDKMSAFLKEQSTLAQMYPLQPIQNLT', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAAMNYNTNITITNITEENVQNMNNAGKKMSAFLKEISTLAQMYPLQEIQNLTVKLQ', 'STIEEQAKTFLDKFNHEAEDLFYTSSLASMNYQLSVASMSYNTNITEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLV', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYPTNITEENVQNMNNAGDKMSAFLKEMSTIAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDKFNHEAERLFYQSTLKSMNYNTNAEEENVQNMNHALDKMSAFLKEQSTLAQAYPLQEILNLTPKLQMQALQ', 'STIEEQAKTFLDKFNHEAEDEFLFFQLSLASMNLNTNITEENVQNMNNAGDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQA', 'STIEEQAKKFLDKFNLLAESLVYMYVLDEIRNLTVKLQMQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAED<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAIMN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKRFLDKFNHEAEDLFYMSSLASMNYNTNITELNVQNMNNAGDKLSAFLKEQTTLAQMYPLQMIQNLTVKLTLLALQ', 'STIEEQAKTFLGKFEHEAEDLFYQSLLASMNYNTNITEDNVQNMNNMMDKMSAFLKEISTLAQMYPLQEIQNLPMKMSLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYNMSLVLMNYNTNISEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNKNSNITEENVFTMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'DTIEEQAKTFLDKFNHDALDLFMQSSLASMFYNTNIKEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEMQNLTVKLQLQMLQ', 'STIEEQAKTFLFKFNHEAEIISSLYLMSSSLASMNYNTNITEENVQNMNNFLTKMKAFLFEQSTLAMMYPLQAIQSLTVKMSA', 'STIEEQAKQFLDKFNHEAEDLFYELSLASMNYNSNITEENVSFMNNAGDSMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIMEQAKTFLDKFNHEAEMAEDLFYQSSLASMNMNTNITEENVQMMNNLLDMMSAFLKEQSTLAQMYPLQEIQNLTVLLSLA', 'LTIEEQKLLLQLTLADMYPLEEIQNLTVKLQLQALI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHRAEDLFYQTSLASMNLNNNITEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAMQ', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYLTEITLLHVNYNTLITILDLMDITELTVLLLLFLQLLLLM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDIEDLFYQSSLASMNMNTNITEENVQMMNNAVNKMSKFLKEQSTLAQMYPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNHEANQGEYEDLFYQSSLLSMNYNTNITEENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNLTVKLQ', 'SEIEEQAKTFLEKFNHEAEDGSNLASMNLNEELEYMSELAQMYPLQEIQNLTVKLQLQALL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYHSSLASMFYNTNITETNVQMMNNAGDKMSAFLKEQSTLAQMYPDQEIQNLTVKLQLQALQ', 'STIEEQAKTFLHKFNHEAEDLFYQQSLLKMMEYLTKMTVLLSMGSLLSMMSSLASMLYNTNITEENVQNMNNALDKMSAFLKE', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMMYNTNITEMNVNIYDKMSAFLKEQSSLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 
'STIEEQAKTF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEATDLFYQSSLASMNYNTNINITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNGQVKLQLQA', 'SYIEEDAKTFLDKFNHEAEDLFYQSSLASMNFNTNITEMNYNTNITEENVQNMNNAGDKMSAFLKTQSNLAQMYPLQEIQNLQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEANFQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLAVKMSLQALQ', 'STIEEQAKTFLDKFNHEAGD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEELAKTFLDKFNHEAEDLLYSSSLASMNYNTNITEELVQNMLHLPDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM', 'STVE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTFITEENVQNMNNAIDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQMQALL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITENAVDMMNAGDKMSAFLTEISTLAQMYPLQEIQNLTVKLQLQALQP', 'STIEEQAKTFLDKFNHEAEILFNQSSLSMQYNTNITEENVQNMNNAGAKMSAFLKEQSTLAQMYPLQEIQNLP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFFDKFNTEAEDLFYQSSLASMNYNTAITEENVQNMNNAGEKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ', 'STMEEQAKTFLAKFNHEAEDGFYQSFLASMAYADSYAEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNMTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLLSNNYNTNILEENVINMNNALDKMSAFLKEQKTLAQMFPLQEMANLTVKLQLLMML', 'MTIEEQAKTFLDKENLEAEDLFYQSFLNSMNYNTNITITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQA', 'LTIEEQAKNFLDKFNNEAEDLFYQSSLASMNYNQEITNLTMKLQLQAMM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFMDKFNHEANDLFYMSSLASMNYNTNITLMSMNYNTNITEENVQNMNNALDKMSAFLKEQSTLANMYPLQEIQN', 'STIEEQAKTFLDKFNHEAEDLFYQSSIASMNYNTNITKENVQNMNNAMDKMSAFLKEQSTLAAMYPLQEIQNLTVKLQLQALQ', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEINVQNMNNAHDKMSAFLKEQSTLAQMIPLQEIQNLTVKLQLQALR', 'STIEEQAKTFQDKFNHEAEDLFYQSSLASMNYYTNITEENVQNMNNAGDKMSAFLKILSTLIQMYPLQEMQNLPVKLQLQALQ', 'STIEEHAKTFLDKFNHKADDFDDLFYQSSLASMNYNTNITEENVQSMNNALDKMSAFLKEQSTLAQMYRLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNHEAEDVFYQMSLASMNKNLTVKLQLYALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STGEEQAKMFLDKFNHEAEDLFYQSSLASMNYNSNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'EDLEYQSSLAQMNYNTNITEENVQNMNTAGDKMSAFLKEQSFLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNRNITEENVQNMNNLGDKMSAFLKRQSTLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDKFNHEAEDLGYMSSL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'FTIEEQAKTFLDKFNYEAEDLFYQSSLASMNYNTNITIENVGLMVDKMSAFLKELSTLAQMMPHQEIQNLTVKLQLQALQ<sep><pad><pad>', 'ETTEEQAKTFLDKFNHEAEDLFYQESEDLMMMDSLALMNYNTNITEENVQLMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTV', 'STIEEQAIKFLDKFNHEAEDLFFQLFLASMNYNTQITEENVHNMNNAGDKMSAFLKEQSTLASMYPLQEIQNLTVKLQLQIGL', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTMITEENVQNMVNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEYQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEKNVNDINVNDKMSAFLKEQSTLAQMYPLQEIQNLIVKLQLMALL<sep>', 'STIEEQAKTFLDKFNHVAEDLFYLSSLASMNYNTNIDEENVQNMNNAGDKMSAFLKENSTLAQMYPMQEIQNLVVKLQLQALM', 'STIEEQAKNFLSEFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNAYDKMSAFLKEQSTLAQMYPLQEIQDLTVKLQMQALQ', 'STIEEQAKTFLDKFNHEAEDLNYQSSGLSMNKNLALMSMNYNTNITEENVQNMNNAMDKMSSFLIEMSDLARMYPLREIQNLT', 'STIEEQAKTFLDKFNHEAEDVFYQSSLASMNYNTNITLYMQLLYLDMNYLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAYMNYNTNITEDNVQNMNNAMDKMSAFLKEQSTLAQMYPLREIQNLTVKLQLQALQ', 'ITIEHQAKTFLDAFDHELFYQSSLASMNYNTNITEENVKNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLMVKLQLQALQ<sep><pad><pad>', 'STIEEMAKTALDKFNHEAEDLFYQSSLASMQYNTNITEESVQNMNNAGDKMSAFLKSQSTLAQMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQRSLASMNYNNNITEENVQNMNNAGDKMSSFLKEQSTLAQMYPLLEIQNLEVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFTQMSYHLMMVYYLAMYYYDSMLASMSYNTNITEENVQNMNNALDKMSAFLKEQSFLAQMQS', 'SNIEEQAKTF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTAITEENYQNMNNAGDKMSAFLKYQSTLAQMYPLQEIQNLTVKLQMQALQ', 'STIERQAKTFLDKFNQEAED<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFNIDAEMLFYQSKLAKMNYNTNKTEENVQNMNNAIDKMSAFLKEQSTLAQMYPLQEIQNLTMKLQGQALQ', 'SDIEEQAITFLVKFNHEAIDLFYQSSLASMNYNTNITEANVNDVTEENVQNMNNALDKMSAFLKEQSTLAQAYPLQEIQALTV', 'STIEIQESLASMNYNTNITEENVKNMNNAMDKMSSFLKEQSTLAQMYPLQEIQNITVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKHFLDKFNHEIEQLTYDLFMQSSLASMNYNTNITEENVQAMNNAGDKMSAFLKEQSQLAQMYPLQEIQNLTVKGQL', 'KTIEELAKTFLDKFNHEAEDLFNQSFLALMNYMTNITEENVQNMNNALYLMSAMVKMLKMLSLLLSMSFLLSMTSDDLGDKMS', 'STIEEQAKMFLRKFNHEAEDLFYQMS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKNFLDKFNNEAEQSSYLLDEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNL', 'KTIEEQAKTFLISFNHEAEDLFYQSSLNSMNYNTNIEEENVQNMNNAADKMSAFLKEQSRLAQMYPLQELQNLTVKLQMQALM', 
'STIEETAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNANSKMMAFLKEQSTLAQMYPLSEIQNLTVKLQLQALQ', 'STIEELAKTFLDKFNHEAEDLFYQSSLASMNYNTEITEENVQNMNNAVDKMSAFLKMQSTLAQMYPLQEIQTLTVKLQLQALQ', 'STIEEQARTFLGKFNKEAEDLFYQSRLASM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'KEAEALFYSSSLRSMTYNTNITEENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQALTVKLLLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHYAEDLFFQSS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVQNMNNKGDKMAAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEFAKTFLSKFNHMAEDLFYQSTLASMLYYLSSLASMNYNTNITEENVLNMNNAGDKMSSFLKEQSTLAYMYPLQEISNL', 'STIEEYAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEMNVNTNITEENVQNMNNALDKMSAFLKEGSTLAQMIPLQEIQNLT', 'STIEEQAKTFLDKFNS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'NTIEEQAETFLDKFNHEAEDLFYQSSLKSMNYNTNITNENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLEKFNHEAEDLFYQSSLTSMNYNTNITEEQKQAMDKMLALLKLAQDLAGMVLGLKILLALLLLQLLLAFMDLL', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTAITEENVQNMNSAVDKMSAFLKEQRTLALMYPLQEIQNLTVKLQMQALQ', 'STIEEQAATFLDKFNHEAIDQFYQSSLASMNGNTNITEEDVQNMNNALRKMSAFLKEQSTLAQMYPLQIIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNREAEDLFYQSSLAMMNYNTNITEMNVLLEINYGLIKMLAGMEMSLAVMYPLAEIENLTVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEQNVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQMLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLSYQSSLASMNYNTNITKENVNLAQMYPLQFIQNLLVKLSLQALKFQSSRKLALAQKLKSQSS', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNANITEENVQNMNNAGDKMSAFLKEQSTLAQMFPLQEIQNLLVKLQMQALQ', 'STIENQAKTFLDKFNMEAKMLREGYGLAYYLASMNYNTNITEENVQNMNLAGDKMSAFVKEQSTLAQMYPLQEIQNLTVKLQL', 'SAIEEQAKTFLAKFFLDKFNHEAEDLFYQSSLASMNYNTNITEEIVV<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STFEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITERNVMEENVPNMNNAFDKMSRFLKEQMTLAQMYPLQEIQNLTVKLQ', 'STIENYKDKFNHEAKFLVYQSSLASMNYNTNITEENVQGMNNALDKMSAFLKEQSTLAQMYRLQEIQNLTVKLQLQLLM<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITKENVMNGNKAGDILSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEMQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITIENVQNMMYAFDKMSAFLKEQSTLNQQYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHELEDLFYQSSLASM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQVFLAAMNYSLSIMNYNTNITEENVLAMKKGMGDKMLAFLKEQAMLAQMYPLQEIQNLTV', 'MTIEEQAKTFLDHFNTFAEDLFYQASLASMSMMASKNYNTNITEENVIIMMSARLKMSSLLAFMNPVLDKMSAFLKEQSTLAQ', 'STIEEQAKTFQDKFNHEAEDLFFQSSLSSMNYNTNDTEENVQNMNNMGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 
'STIEEQAKTFLDKFNHEQEDLFYQSMLASMNYNTNISEENVQNMHNALDKMSAFLKEQSTIAQMYPLQEIQNLTVKLQFQALQ', 'STIEEIAKTFLDKFNMEAEDLFYQSSL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SRIEEQAKQFLDKFNHEAEDLNSQSSLASMNYNAEITEENVQNMNNALDKMSAFLKFQSMLAQYY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SEIEEQAKTFLDKFNHTASDLFYQSSLASMNYNTNITEELVE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEEFVGLMNYNTNITDENVREMLDLAGDKMSAFLKIQSTLAQMYPLQ', 'FTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTQITEENVQNMNNALDKMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLALMNYNTNITEELVQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQAKDFLDKFNHEQEMSEDLFYQSSLAIMNYNTNITEENVQNMNVAPDKMSAFLKEQSTLAQMMPLQEIQNLTVKLQLQ', 'STIEEQAKTFLDKFNMEAEDLFYQSSLKSMNYNTNITEEIVEELVKMSADLKEQADLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'KTIEEQAKTFLDKFNHEAEDLFYQSSLKSMNYNTKITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'ATIEEQATTFLDKFNHEAEDLFYQSSLAVMN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMAYNTNITELNVLALMNYNTNITEENVQNMNNAGDKMSAFLKERSTLAQMYPLQ', 'NFEDEFYQASLASMNYNTNITEENVQNMNNAQDKMSAFLRYQSTLAQMYPLQEIQNLLVKQSLA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDIFNHEREDLFYQSSLASMN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSQA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKYNHEAEDNLYQSSLASMNYNALDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPL', 'SPIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITELNVQNMNNAGDKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALP', 'ITYEEAAKTFLEKFNHEAEFLTYQSSLDSMNYNTNITEEAVQLSLDLMSMNALAPDLKMMLDKMSAFLKEISTLAQMYPLQEI', 'STIEEQALTFLDKFNAEAEDLFYQSMGALMNITEEITLALMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKSFLDKYYHEAEDLFLQSSR<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFMDKFNHEAEDLFYQSSLASMNYNTNITEMNVNTNITEENVQNMNVAGDKMSAFLKEQSTLAQMYPLQEIQNLM', 'STIEEQAKTFLDQFNHEAEDIFDLFYLSSLASMRYNTNITEENVLNMNNAGDKMSAFLKRAGLLMYRMMYPLKEYLSLMYLL<sep>', 'ATIEEQAKTFLDKFNHEAEDLFYQSS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDGEDLFYQSSLASMNMNTNITEENVQMMNNNGDKMLAFLKEQSTLAQMYPLQEIQNLTVKLQLQ', 'STYEEYAKTFLDKFNHEAEDLFYQASLASMFYNTNITEEMVKLSINLLLMLDLALINYNTNITEENVQNMNNAGDKMSAFLKE', 'STI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SHIEEQAKTFLDKFNHEAENGEIEEEAEDLFYQSSLASMNYNTNITEENVQFMNNALDKMSAFLKEQSTLAQMYQLQEIQNLS', 'S<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAETLFYQSS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNMESQDLFYQSSLAAS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLSYQSSLASMNYNTNINEENVQNMNNAADKMSAFLKRQSMLAQMKPLQSLKAQMVLAQSLQAQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLNVM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEEFVQNMNTVGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALP', 'STIEEQAKTFDDKFNHMAEDLFYQSSLASMNFNTNITAENVQNMNNAQDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQASRFVKEIEDLFYQSSLPSMNYNTNITEENVQNMNNALDKMSKFLKETSTLAQMYPLQEILNLTVKMQLAALA<sep><pad><pad><pad>', 
'ETIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTIITEENVQNMNNYLDKMLLDLML<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNISEEEVQNMNNAEDKMSAFLKEQSTQAMMYPLQEIQNLTVKLQLQALP', 'STIEEQAKTFLDKFNHYAEDLFYQSMLASMNYNTNITEFNVNYLLELVDLLVKLLTVKLQLDKMHALGLLLVKMSALGALASM', 'SDIEEQEKTFLDHFNHEAEDLFYQSSLASMSYNTNISEENVQNMNNAGDKMSAFRKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'SNIEEQAKTFLDKFNHEAYDLFFQSSLASMNYNTRITEENVQNMNNT<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEEEDLFYQSSLASKN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'YTIEEQAKTFLDKFNHEAEDLFYQSSGASMNYNTNITEKNVQAMMYPLLKMQYLMSALALMNYNTNITEENVQNMNNLTDKMS', 'STIEEQAKTFLTKFNHEAEDLFYQSSLASMNYNTNITEAQMLTNKLQLQALI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKPFLD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFMDKFNHEAEDLFFISQLASMNYQTKAASMSYSMSLASMNYNTNITEENVQNMNNAGDKMQAFLKEQSTLALMY', 'STIEEQAKTFGDKFNHEAEDL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKRFLDKFNHEAEDLFYQSSA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQDSLASMNYNTNITEELVQNMNNALDKMLAFLKELDTLAQMYPLQEIKNLTVKLQLQLLP', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAQMNYDLEDKNLDLVKMQSDLALMSMLQLMLDLFLKMQSTLAQML<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNDNITEKDVLALMLLLSDILAEMNILALMLLTLDKMNHLLLKMLLLKDLLD', 'STIEEQAKTFLFKFNHEAEVLFYQSSLASMNYNTNITEITVSSSLASMNYNTNITEENVQNMNNFLFKMMSTLAAMYSLRSML', 'STIEENAKTFLTSFNHEAEDLFYQSSSASMTYNTNITAESIDEAQAKMSAFLKLQSTLAQMYPLQQIQSLTAKMSKENIQNQ<sep>', 'SNIEEQAKTFLDKFNHEAEDGYKMEIDKMEEIDKMEIDKIEEEIDAMEKIEELMDKMDKMSAFLKEQSTRAQMYPLQEIQNLT', 'SAIEEQAKTFLDKFNHEATDLFYQSSLASMNYNTNITEELVQEMNNLTVKLQLQALS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQYQTQSTLAQMYPLQEIQNLNVKLQLTALF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'LTYEETAAT<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'TTIEEQAKTFLDKFNHEAEDLKYQSSLASMNYNTNITDENVQNMNNANDKMSAFLQEELTLALVLHLTRELELTELLTLLLVL', 'STIEEQAKTFLDKFNHQMENLPVNLQLQALF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDSFNHEAEDLFYQSSLALDNYNTKITEMNVLADKLDLYLLTLLLNLTLMLYLLLNMLIDGLLLASLLGLLDL', 'STIEEQSSLASENANTSITREDKTKAFLKQQSALAQMYPLQEIQNLTVKAQGQALQALMSLASKLTLQALITLAQQSSKALAK', 'STIEEQAKTFLDKFNHEAEDMFYQSSLASMNYNTNITEESVLAMMNYLTEITEEAVDSMNDMTDATDKMSAFLKEQSTLAYMY', 'ST<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SAIEEQAKTFLDKFNHEAEDFYQSSLASMNYNSNITEELVEMSNLASKISESSLALKNYLARMQAQASQLKERSALASMNALA', 'STLEEQYKLFLDKFNHEAEDLFYQSSLASMNYFTNITEENVQNMNNALDKMSAFLKEQSTVAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLSYQSSLASMNYLTEITEEEVV<cls>MSDLSSMSSLASMNYLTEITEEEVVMSMSALSEMSSSSLD', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITLEEEEE<cls>MDIEEEEEEEEEEM<cls>YDKEEEEEEE<cls>ELMDMNDLLLDMN', 'STIEEQYKTFLDKFNHEAEDLAYQSSLASMNYNTNITELNVQNMNNARDKMLAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEATDLFYQSSLASRSSSQALMSSLPSMSSMASMQSSGSSMSEISLYSSDLLMSSSLASMNYNTNITE']\n" ], [ "gen250k_df_dropped_nocon = gen250k_df.drop(indices_to_drop)", "_____no_output_____" ], [ "gen250k_df_dropped_nocon", "_____no_output_____" ] ], [ [ "filter out sequences with non-AA tokens", "_____no_output_____" ] ], [ [ "rejected_tokens = [\"<pad>\", \"<sep>\", \"<cls>\", \"<mask>\", \"<unk>\"]", "_____no_output_____" ], [ "indices_to_drop = []\ndropped_seqs = []\nfor index, row in gen250k_df_dropped_nocon.iterrows():\n seq = row['MT_seq']\n \n for rejected_token in rejected_tokens:\n if rejected_token in seq:\n indices_to_drop.append(index)\n dropped_seqs.append(seq)\n break\n ", "_____no_output_____" ], [ "print(len(indices_to_drop))\nprint(indices_to_drop)\nprint(dropped_seqs)", "1386\n[318, 2311, 5938, 7072, 15695, 17179, 20191, 20491, 22718, 24293, 30136, 35046, 35839, 36466, 37265, 37676, 38222, 38579, 39211, 41915, 45090, 46458, 46571, 49557, 51201, 54977, 55496, 55897, 56124, 57330, 57359, 57374, 60070, 60523, 63611, 64365, 66500, 66569, 69509, 69701, 70389, 71089, 71489, 73468, 74147, 74349, 74938, 75035, 76096, 76604, 77470, 78549, 79867, 80594, 81037, 83741, 83906, 85182, 85279, 85558, 86511, 87114, 89683, 90500, 91135, 91251, 91336, 92604, 92962, 93551, 93686, 94674, 95860, 97098, 101451, 101549, 101690, 101758, 102469, 102601, 103057, 103266, 103524, 104242, 104734, 105460, 106314, 106524, 106700, 106745, 106790, 107320, 107530, 108104, 108324, 108854, 109222, 109715, 110009, 110828, 110869, 111135, 111540, 111762, 113070, 113950, 114113, 114270, 114890, 115672, 117590, 117919, 118375, 118480, 118640, 121101, 121127, 121212, 
121383, 122463, 123031, 123666, 123676, 124597, 124621, 125694, 126021, 126294, 127048, 127337, 127395, 128471, 128824, 130162, 131186, 131562, 131845, 131927, 132186, 132270, 132387, 132488, 132503, 133179, 133520, 134434, 134527, 135965, 136062, 137778, 137887, 137974, 138200, 139099, 139759, 139835, 140562, 140851, 141389, 142511, 142557, 143734, 143987, 145300, 146367, 146577, 146699, 148198, 149530, 149667, 150555, 152257, 153853, 154066, 154391, 155004, 155320, 155474, 155815, 156656, 158432, 159772, 160120, 160199, 160236, 161625, 162150, 162174, 163354, 163523, 163911, 164029, 164535, 164706, 165066, 165611, 165792, 165961, 165964, 166284, 166844, 166850, 166985, 167839, 168458, 168538, 168620, 169557, 169694, 170334, 170467, 170797, 170803, 171688, 171799, 172131, 172399, 172497, 172526, 172962, 172986, 173311, 173378, 173396, 174475, 174513, 174574, 174859, 174941, 175158, 175244, 175742, 176337, 176646, 177420, 177989, 178047, 178135, 178159, 178920, 179184, 180392, 180424, 180934, 181633, 182057, 182105, 182477, 182715, 182784, 182956, 183182, 183528, 184102, 184401, 184620, 185280, 185389, 185390, 185442, 185553, 186436, 186558, 186579, 186671, 186823, 187552, 187605, 187659, 187811, 188459, 188542, 188572, 188627, 188689, 189501, 189643, 189682, 189738, 189875, 190282, 190451, 190600, 190721, 191733, 192286, 192292, 193202, 193371, 193980, 194235, 194537, 194630, 195453, 195840, 196286, 196344, 198150, 199307, 199655, 199670, 200025, 200080, 200703, 200764, 201375, 201497, 201524, 201528, 202371, 202463, 202523, 202660, 202980, 203577, 203678, 203944, 204022, 204141, 204322, 204336, 204360, 204435, 204493, 204498, 204655, 204784, 204865, 205105, 205130, 205503, 205532, 205673, 206340, 206407, 206859, 206905, 207116, 207681, 208156, 208170, 208496, 208549, 208953, 209241, 209761, 209844, 210321, 210851, 210893, 211373, 211465, 211878, 211882, 211899, 211908, 212068, 212140, 212175, 212380, 212436, 213118, 213335, 213426, 213536, 213862, 213865, 213870, 213990, 215242, 215389, 215762, 215896, 216219, 216417, 216441, 216659, 216784, 217042, 217187, 217722, 217794, 217868, 217881, 217987, 218192, 218818, 218919, 218978, 219004, 219287, 219434, 219672, 219724, 219938, 220040, 220183, 220693, 221112, 221295, 221456, 221490, 221671, 221812, 222102, 222283, 222532, 222553, 222691, 223083, 223105, 223288, 223318, 223340, 223688, 223706, 223838, 224385, 224602, 224644, 224756, 224801, 224920, 224932, 225042, 225315, 225325, 225355, 225431, 225513, 225668, 225736, 225766, 225894, 225995, 226123, 226191, 226207, 226365, 226649, 226680, 226821, 227036, 227093, 227192, 227553, 227568, 227695, 227877, 227944, 228044, 228197, 228206, 228390, 228494, 228949, 229461, 229470, 229728, 229772, 229891, 230058, 230070, 230188, 230190, 230420, 230639, 230914, 231063, 231126, 231218, 231372, 231404, 231780, 232024, 232078, 232137, 232372, 232574, 232751, 233140, 233516, 233651, 233757, 233847, 233888, 234250, 234330, 234363, 234449, 234463, 234708, 235007, 235169, 235230, 235260, 235830, 235901, 235999, 236107, 236120, 236243, 236322, 236356, 236367, 236370, 236410, 236455, 236641, 236822, 236855, 236940, 237080, 237208, 237267, 237284, 237342, 237540, 237542, 237545, 237697, 237717, 237724, 237955, 238203, 238210, 238304, 238328, 238355, 238385, 238415, 238416, 238498, 238703, 238705, 238855, 238910, 238911, 239223, 239277, 239469, 239853, 239898, 240035, 240084, 240088, 240172, 240190, 240277, 240282, 240326, 240336, 240476, 240537, 240545, 240606, 240662, 240693, 240977, 241196, 241544, 241635, 
241790, 241810, 241883, 241895, 241907, 241985, 242018, 242157, 242176, 242219, 242273, 242381, 242445, 242467, 242526, 242556, 242686, 242715, 242731, 242758, 242769, 242788, 242795, 242831, 242878, 243006, 243050, 243059, 243070, 243092, 243100, 243261, 243411, 243455, 243484, 243499, 243508, 243520, 243774, 243833, 243838, 243874, 243975, 244029, 244123, 244161, 244247, 244285, 244356, 244370, 244389, 244449, 244463, 244472, 244568, 244687, 244795, 244817, 244840, 244863, 244865, 244880, 244887, 244944, 244958, 245095, 245229, 245283, 245461, 245538, 245638, 245680, 245761, 245772, 245851, 245860, 245935, 245983, 245984, 245988, 246008, 246012, 246025, 246076, 246107, 246172, 246176, 246207, 246234, 246246, 246295, 246326, 246354, 246361, 246368, 246669, 246837, 246864, 246904, 246920, 246939, 246971, 246975, 246992, 247013, 247093, 247094, 247097, 247099, 247107, 247153, 247185, 247225, 247255, 247260, 247273, 247291, 247358, 247369, 247372, 247373, 247383, 247429, 247470, 247483, 247543, 247575, 247614, 247651, 247672, 247679, 247718, 247720, 247725, 247745, 247759, 247760, 247788, 247799, 247836, 247862, 247914, 247917, 247971, 247978, 248073, 248074, 248132, 248137, 248149, 248268, 248324, 248337, 248401, 248491, 248508, 248509, 248558, 248583, 248638, 248778, 248796, 248828, 248950, 248958, 249056, 249123, 249193, 249273, 249335, 249363, 249398, 249411, 249444, 249658, 249704, 249715, 249753, 249761, 249822, 249842, 249849, 249892, 249914, 249924, 249930, 249932, 249948, 249989, 249993, 250011, 250090, 250175, 250215, 250233, 250324, 250327, 250390, 250395, 250437, 250525, 250536, 250556, 250561, 250566, 250568, 250597, 250660, 250684, 250714, 250737, 250751, 250763, 250771, 250796, 250810, 250813, 250839, 250843, 250858, 250861, 250909, 250983, 251004, 251028, 251033, 251046, 251073, 251201, 251208, 251211, 251217, 251236, 251309, 251335, 251339, 251361, 251363, 251379, 251420, 251492, 251519, 251520, 251589, 251625, 251651, 251653, 251659, 251669, 251679, 251704, 251713, 251773, 251788, 251809, 251830, 251836, 251840, 251853, 251878, 251890, 251963, 252037, 252075, 252105, 252132, 252137, 252202, 252247, 252253, 252276, 252433, 252456, 252509, 252516, 252545, 252565, 252605, 252645, 252668, 252684, 252710, 252738, 252754, 252827, 252856, 252858, 252873, 252880, 252931, 252932, 252997, 253014, 253024, 253060, 253070, 253089, 253104, 253119, 253121, 253124, 253130, 253154, 253159, 253200, 253210, 253236, 253272, 253296, 253368, 253369, 253373, 253431, 253457, 253496, 253538, 253548, 253555, 253564, 253566, 253571, 253636, 253648, 253665, 253715, 253721, 253753, 253772, 253780, 253889, 253894, 253909, 253919, 253955, 253960, 253967, 253998, 254005, 254016, 254029, 254033, 254050, 254102, 254113, 254158, 254172, 254192, 254218, 254237, 254243, 254246, 254258, 254395, 254397, 254435, 254440, 254518, 254532, 254541, 254629, 254647, 254719, 254765, 254792, 254796, 254827, 254843, 254865, 254877, 254903, 254925, 254935, 255038, 255092, 255125, 255170, 255254, 255257, 255401, 255445, 255460, 255471, 255481, 255482, 255486, 255513, 255517, 255575, 255583, 255619, 255674, 255715, 255723, 255727, 255731, 255739, 255753, 255763, 255772, 255783, 255789, 255793, 255824, 255827, 255845, 255849, 255859, 255902, 255970, 255984, 256036, 256072, 256075, 256089, 256091, 256094, 256102, 256128, 256151, 256155, 256168, 256333, 256444, 256508, 256567, 256574, 256587, 256619, 256640, 256659, 256666, 256688, 256694, 256704, 256719, 256769, 256780, 256800, 256842, 256882, 256888, 256922, 256939, 256973, 
256996, 257035, 257060, 257065, 257117, 257141, 257165, 257173, 257205, 257222, 257224, 257233, 257275, 257324, 257396, 257502, 257516, 257570, 257613, 257615, 257624, 257633, 257651, 257667, 257693, 257704, 257708, 257711, 257760, 257762, 257767, 257781, 257783, 257806, 257840, 257842, 257849, 257854, 257857, 257861, 257866, 257914, 257942, 257945, 257974, 257988, 258012, 258016, 258040, 258042, 258045, 258053, 258055, 258061, 258105, 258127, 258196, 258217, 258224, 258225, 258257, 258259, 258264, 258266, 258273, 258281, 258293, 258295, 258330, 258336, 258339, 258347, 258366, 258367, 258372, 258376, 258377, 258380, 258417, 258432, 258457, 258459, 258483, 258492, 258502, 258504, 258516, 258524, 258551, 258567, 258572, 258579, 258586, 258596, 258618, 258626, 258630, 258633, 258643, 258653, 258659, 258666, 258668, 258679, 258687, 258714, 258720, 258725, 258726, 258739, 258750, 258751, 258756, 258763, 258765, 258785, 258787, 258797, 258800, 258802, 258803, 258805, 258807, 258813, 258816, 258817, 258819, 258822, 258824, 258831, 258834, 258837, 258853, 258861, 258863, 258873, 258880, 258882, 258902, 258910, 258915, 258917, 258921, 258925, 258926, 258929, 258932, 258935, 258936, 258954, 258955, 258989, 259003, 259021, 259041, 259042, 259046, 259058, 259064, 259070, 259074, 259076, 259077, 259078, 259085, 259087, 259092, 259100, 259101, 259106, 259108, 259119, 259129, 259133, 259135, 259147, 259149, 259160, 259168, 259170, 259179, 259183, 259184, 259210, 259215, 259218, 259219, 259223, 259229, 259230, 259235, 259242, 259252, 259254, 259261, 259264, 259267, 259270, 259278, 259280, 259283, 259285, 259289, 259290, 259292, 259300, 259302, 259305, 259309, 259333, 259337, 259339, 259343, 259344, 259346, 259347, 259349, 259364, 259365, 259368, 259378, 259389, 259401, 259410, 259411, 259419, 259420, 259421, 259431, 259438, 259440, 259441, 259443, 259444, 259446, 259447, 259453, 259459, 259469, 259474, 259477, 259479, 259482, 259483, 259487, 259489, 259491, 259492, 259496, 259497, 259501, 259502, 259516, 259523, 259524, 259526, 259527, 259532, 259534, 259536, 259546, 259550, 259555, 259559, 259565, 259568, 259569, 259580, 259583, 259585, 259600, 259601, 259603, 259609, 259611, 259612, 259614, 259618, 259627, 259629, 259634, 259636, 259642, 259648, 259653, 259655, 259661, 259662, 259665, 259667, 259668, 259669, 259670, 259671, 259673, 259675, 259676, 259678, 259679, 259681, 259685, 259686, 259687, 259700, 259703, 259707, 259708, 259714, 259721, 259722, 259724, 259726, 259737, 259743, 259748, 259750, 259755, 259758, 259761, 259772, 259773, 259774, 259780, 259782, 259786, 259787, 259794, 259797, 259800, 259802, 259803, 259805, 259806, 259811, 259816, 259822, 259828, 259830, 259834, 259835, 259837, 259839, 259843, 259845, 259847, 259851, 259855, 259856, 259857, 259860, 259863, 259872, 259874, 259876, 259877, 259880, 259891, 259893, 259894, 259899, 259900, 259913, 259916, 259921, 259931, 259934, 259940, 259945, 259951, 259955, 259958, 259962, 259963, 259964, 259975, 259979, 259983, 259988, 259989, 260023]\n['SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTIQLLL<sep><pad><pad><pad>', 'SFIEEQAKTFLDKFNIEAEDLFYQFSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTRAQMYPLQEIQNLTVKLQLQAL<sep>', 'SFIEEQAKTFLDKFNMEAEDLFYQFSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTRAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHMAEDLFYQSMLASMYYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 
'MTIEEQAKTFLLKFNHYAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQALQ<sep>', 'STIEEQAKMFLDKFNTEAEDLFYQSSLAQMNYNTNITEENVINMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEQLFYQYSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLALMNYNTNITEENVQNMNNALDKMDLFLKELSTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDTFNHEAEDLFYQSSLASMNYNTNITEENVNMMNAGRKMSAFLKFLSTMAQMYPLQEIQNLYVKLQLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLLLALL<sep>', 'STIEEQAKTFLDSFNHEAFDLLYQSSLALMIYNTNITEENVQNMNNAKDKMSAFLKELSTLAQMYPLQEIQNLTVKLNLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLQSMNYNTNITEENVQNMNNALDKMSAFLKEVSVLAQMYPLQEIQNLPVKLLLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMRYNTNITEENVQNMNEALDKMSAFLKRQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'YTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLAFLKEISTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNAFDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFMHEAQDLFYQFLLASMNYNTNITEENVQNMN<cls>AGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSLLAMMNYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEIQNLTVKLELQA<sep><pad>', 'STIEEQAKMFLDKFNTEAEDLFYQSSLASKNYNTNITEENVINMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKMQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLTSMNYNTNITEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLPLKLLL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNKEAEDILYQSSLASMRYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLLEIQNLLVKLLLALL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQA<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTNITEENVQNMNNALDKMSAFLKTLASMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMKYNTNITEENVQNMNNALDKMSAFLKFQSTLAQMYPLFEIQNLLVKLQLQAL<sep>', 'MTIEEQALTFLDFFNHFAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQKIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNSEMYRLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQKIQNLTVKLFLQ<sep><pad><pad>', 'STIEEQAETFLDKFNHEAEDLFYQSSLASMMYNTNITEENVQNMNNAYDKMSAFLKELSTLAQMYPLLEIQNLTLKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSQLASMNYNTNITEENVQNMMNALDKMSAFLKEQSTLAQMYPLQEIQN<cls>TVKLQLQALQ', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNRLDKMSAFLKLISTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLFKFNHEAEDLFYQSSLASMNYNTNITEENMNNMLDKMSAFLKEISTLAQMYPLQEIQNLTIKGMLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTNITEENVQNMNNALDKMSAFLKTLASMYPPQEIQNLTVKLQLQALQ<sep><pad><pad>', 'LTIEEQALTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMELFLKELLTLALMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIERQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITEENVQNMNLAMDKMSAFLKEQSTLAQMYPLLQILNLLVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENQNMNNALDKMSAFLKEQSTLAQDYPLQEIQNLTVKLQLQALQ<sep>', 'SYIEEQAKMFLDKFNHEAEDLFYQMSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTPKLQLQML<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNNALDKMHAFLKEQSTLAQMYPLQEIQNLTVKLQLQLL<sep>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLAFLKELSTLALMYPLQEIQNLTVKLQLLLL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNAFDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEIAKTFLDKFNNEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSSFLKEISTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMEYNTNITEENVQNMNRALDKMSAFLKRQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSFLASMKYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMRYNTNITEENVQNMNNAFDKMQAFLAQASLAQMYPLQEIQNLTVKLQLQAIG<sep>', 
'STIEEQAKTFLDVFNHLAKDLFYQSSLASMIYNTNITEENVQNMNEALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'SDIEEQAKTFLDKFNHEAEDLEYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEISTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASYNYNTNITEENMNNMLDRMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDVFNHLARDLFYQSSLASMIYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLKYQSSQASMNYNTNITEENRQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLPVLLLLALL<sep>', 'STIEEQAKTFLDKFNHEEEDLFYQMSLASMNYNTNITEENVFLMNIFLKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'MTIEEQAKTFLLKFNHYAEDLFYQSSLASMNYNTNITEENVQNMNNAGNKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQYMLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENMNNMLDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIERQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITEENVQNMNLAMDKMSAFLKEQSTLAQMYPLQKIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDVFNHLAQDLFYQSSLASMIYNTNITEENVQNMNEALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSLLAMMNYNTNITEENVQNMNNAGDKMSAFLKEMSTLAQMYPLQEIQNLMVKLQLQ<sep><pad><pad>', 'MTIEEQAKMFLDKFNREAEDLFYQSSLASMNYNTNITEENVQNMNNAGNKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPMKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNFNTNITEENVSLMKKLLLMLKKMSALAMMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNAFDKMSAFLMEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDVFNHLAKDLFYQSSLASMIYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAERLFYQSSLASMNYNTNITEENVQNMFNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDGFNHEAERLFYQSRLASMNYNTNITEENVQNMNNALDKMAAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNREAEDLFYQSSLASMNYNTNITEENVQEMNNALDKMSAFLKEQSTLAQMYPLQEIQNLLVKLLL<sep><pad><pad><pad>', 'STIEMQAKTFLDKFNHEAEDLFYQSSLASMSYNTNITEENVNLMIKMLAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDKFIHEAEDLFYQSSLASMNFNTNITEENVYNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLLML<sep>', 'STIEEQAKTFLDGFNHEAERLFYQSRLASMNYNTNITEENVQNMNNAMDKMFAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQKMNNARDKMSAFLKEQSRLAQMYPLQEIQNLPVKLQLKAL<sep>', 'STIEEQAKTFLDVFNHLAQDLFYQSSLASMIYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKKFLDKFNHEARDIFLQSSLASMRYNTNITEENVANMNNALDKMSAFLKEQSLLAQMYPLLEIQLLLVKLFL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAESLFLQSMLASMNFNTNITEENVQNMNNALDKMSAFLKEISTLAQMFPLQEIENLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNITEENVQNMNNAYQKMSAFLKEISTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENDDLMVKLSKFLKELSTLAQMYPLQEIQNLQVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTNITEENVQNMNNALDKMSAFLKEQSSLAQMYPLPEIQNLTVKLQLALL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLALMNYNTNITEENVQNMNNALDKMSAFLKDLSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDVFNHKALDLFYQSALASMIYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEMAKTFLDKFNHEAEDLFYYFSLASMNYNTNITEENQLLMVKLSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASFNYNTNITEENVQNMNNALDKMSMFLKELSTLAQMYPLIEIQNLTVKLLK<sep><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEMLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEASTLAQMYPLQEIQNLPVKLQLRLL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNITEENVYNMNNALDKMSAFLKEQITLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHDAEMLFYQMSLALMNYNTNITEENMQNMNNALDKMSAFLKEMSTLAQMYPPQEIRNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNRALDKMSAFLTRLSTLAQMFPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNALDKMSSFLKEQSTLANMYPLQEIQNLLMK<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLARMFYNTNITEENVQNMNNALDKMSAFLKEISTLAQMYPPQEIKN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQNITNLTVKLQLLLL<sep>', 'STIEEQAKTFLDKFNHQAEMLFYMSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQTL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAYMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLLEIQNLDVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLLYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPPLLIKLQLQALQ<sep><pad><pad><pad><pad>', 'STIEENAKTFLDKFNHEAEDLFYQSSFAFMNYNTNITEENVQMMNNAQKKMSAFLKELQQLAQMYPLQEIQNLTVKLNLQLL<sep>', 'STIEEQAKTFLDKFNHEAEDLRYQSMLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQLL<sep>', 'STIEEQAKTFLDLFNHEAERLFYQSRLASMNYNTNITEENVQNMNNALDKMFAFLKEQSTLAQMYPLQEIQNLTVKLQLQA<sep><pad>', 'STIEEQAKTFLDKFNHEAMDLFYQSSLASANYNTNITEENVQNMNNALDKMSAFLKTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDYFMQSSLASMNYNTNITEENVLLMLKLLSTLAGMYPLQEIQNLLVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEILFYQYSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSNLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQNMNNAGDKMSAFLKILSTLAQMYPLPEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAMRKMSAFLKEQSTLAQNYPLQEIQNLTVKLQLLAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAYMNYNTNITEENVQNMNNALDKMSAFLKELLLLLLLLLLYLLLYLLLLLL<sep><pad><pad><pad><pad>', 'STIEEQAKVFLDKFNHEAEDLFYQSSLAKMNYNTNITEENVDLFNNMLDKMSAFLKEISTLAQEYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'YTIEEQAKTFLDKFNHEAEDLFYQSSLFSMNYNTNITEENVQNMNNALDKMYAFLKEQSTLAQMYPLQEIQNLTVKLQLQKL<sep>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNLTVKDQLFLL<sep>', 'STIEEQAKTFLDKFGHHAEDLFYQSMLASMNYNTNITEENVQNMNNALDKMSAFLKEQSILARMYPLQEIQNLTVKLQLLAL<sep>', 'STIEEQAKTFLDKFNHEAEDLNYISSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSFLAQMYPLQEIQNLPVKLALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNNALDKMSIFLKEQSTLAQMYPLQEIQNLTVKLQLAAL<sep>', 'STIEEQSKTFLDKFNHEAEDLFYLISLASMNYNTNITEENVGNMNLKMSAFLKEISTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKRFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAYDKMSAFLKIQTTLAQMYPLQEIQNLTVKLQLQLL<sep>', 'GTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLAFLKEQSTLAQHYPLQEIQNLTVKLQLQLL<sep>', 'VTIEEQAKTFLDKFNHQAEKLFYQSSLASMNYNTNITEENVQNMNNAFDKMSAFLKTQSTLAQMYPLQEIQNLTVKLLLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSRLASMNYNTNITEENVELMLNLLLKQYLALMPLQEIQNLTVKLQLQRLM<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLLKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYELQEIQNLPVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMEYNTNITEENVQNMNRALDKMSAFLKSQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKLFLDKFNHEAEDLFYQSSLAMMAYNTNITEENVLNMNAFLKEQSTLAQMYPLQEIQNLTVKLQLQMLV<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHFAEDLFYQSSLASMNYNTNITEENMNMMNNALDKMSAFLKEQTSLAQMFPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAENIFYQSNLKSMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPPLLLL<sep><pad><pad><pad>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVENMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVKSQLQAM<sep>', 'MTIEEQAKTFLDMFNHMAEDLFYQSSLASMNYNTNITEENVQNMNYALDKMSAFLKEQSTLAMMYPLQEIQNLTVKLQLM<sep><pad><pad>', 'FTIEEIAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNFFDKMSAFLKEQATLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEYLFMQSSLASMNYNTNITEENVQNMNNALDKMSAFLKELSTLAQMIPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQNMNNALDKMSAFLKEQSVLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKTISTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENENLMDKMSAFLKELSDLAQMYPLQEINNLTVKLQLQQLQ<sep><pad><pad><pad>', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLFEQSTLAQMYPLQKIQNLTVKLQLLAL<sep>', 'STIEEQAKTFLDKFNKEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLDLKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLARMNYNTNITEENVQNMNLAGDKMSAFLKEQSSLAQMYPLQEIQNLLLKMLL<sep><pad><pad><pad>', 'MTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNMLDKMSAFLKEQSTLAQMYPLQQIQNLTVKLQLLAM<sep>', 'STIEEQAKTFLDVFNMEAEKLFYQSSLASMVYNTNITEENVQNMNEALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEMEDLFYQSSLAMMNYNTNITEENVQNMLNALDKMSAFLKEQSTLAQMYPLQEIQLLTVKLLL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNAEAEDLMYQSSLASMNYNTNITEENVQNMNNANDKMSAFLKELSTLASMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENYQNMNNALDKMSAFLKEQSTLASMYPLQEIQNLQVKLQLALM<sep>', 'STIEEQAKTFLDKFNHEAEDLFAQSLLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLFLLM<sep><pad>', 'STIEEQAKTFLDMFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNPTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNQEAEDMFYQSSLASMRYNTNITEENVQNMNNAGDKMSAFLKEQSLLAQMYPLLEIKNLLVKLLLALM<sep>', 'LTIEEQAKTFLDKFNNEAEDLFYQSSLASMNYNTNITEENVQNMNNFLDKMSAFLKEQSTLAQMYPLTEIQNLTVKLQLQLL<sep>', 'STIEEQAKTFLDKFNHEAEDLAYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKSLLAQMYPLQEIQNLTVKLQLQALQ<sep><pad>', 'STIEEQANTFLDKFNHEAQVLFYQSSLALMNYNTNITEENQSLMVKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STIEDQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPPQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STMEELAKTFLDKFNHEAEDLFYQSSMASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQLYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDFFNHEAEDLFYQSSLASMNYNTNITEENQNMMNALDKMSAFLKEKMTLAQMYPLQEIQNLTVKLALQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNKALDKMSAFLKEQSTLAQMYPLQEIQNFTLQLQLLQ<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLIFQSMLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLALL<sep>', 'STMEEQAKTFLDKFNHEAELLFYQYMLASMMYNTNITEENVQNMNEKQSKFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKHFLRKFNHEAEYLFYISSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'SMIEEQAKTFLLKFNHYAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLAYQSSLASMNYNTNITEENVDLMLLLGLHSMLALMSILAQMYPLQEIQNLTVKLQLQALQ<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLMYQSSLASMNYNTNITEENVQNMNNARDKMSAFLKEQSTLAQMYPLQEIQNLLVKLQLLAL<sep>', 'SDLEEQAKTFLDKFNHEAEDLFYESSLTSMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDIFYQSSLTSMNYNTNITEENVQNMNFKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDGFNHEAERLFYQSRLASMNYNTNITEENVQNMNNAMDKMFLFLKEQSTLAQMYPLQEIQNLTVKLKLQA<sep><pad>', 
'SFIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAMDKMMAFLKKQSTLAQMYPLQQLAVKLQLQALM<sep><pad><pad>', 'STIERQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITEENVQNMNLASDKMSAFLKEQSTLAQMYPLLEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSALASMMYNTNITEENVQNMNEALDKMSAFLKEQSTLALMYPLQEIQNLTVKLALALL<sep>', 'STIEEQAKTFLDKFNHEAEDIFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSLLFQMYLLGLLLLLLNLLKLLLL<sep>', 'STIEEQAETFLEKFNHEAEDLFYQVSLASMNYNTNITEENVQNMNNALHKMLLMLLGLFLILLLMYLGLMTLLQMLPLLLLM<sep>', 'STIEEMAKTFLDKFNHEAEDLFYQSSLQSMFYNTNITEENVQNMNNALVKMSAFLKELSTLAQMYPLQEIQNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLMYQSSLASMNYNTNITEENVQNMNNADDKMSAFLREISTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQLMNNALDKMSAFLKEISTLALMYPLGEIQNLTVLL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNEEAEFLFYQSSLAYMNYNTNITEENVQNMNNALDKMTAFLLELSLALMYPLQEIQNLIVKLDLQALF<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALAKMSAFLKEQSTLAQMYPLQQIQNLTVKLQLQSL<sep>', 'STIEEQAKTFLDKFNHMAEDLFYQSMLASMYYNTNITEENVQNMNNALDKMSTFLKEQSTLAQMYPLQEIQNLTVKLQVQAL<sep>', 'TTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENFQNMNNALDKMSAFLKDLASMYPLQEIQNLTVKLSLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNMLDKMSAFLKEQSTLAQSYPLQEIQNLTVKLQLAAL<sep>', 'YTIEEQAKTFLDKFNHEAEDLFYQSSGASMNYNTNITEENAQNMNNALDKMSAFLKEISTLAQMYPLQEIQNLTVKLLLL<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLDLQLQLLR<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENMNNMDDKMSAFLKYMSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAESLFYQSSLQLMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTIKLQL<sep><pad><pad><pad>', 'STIEEMASTFLDKFNHEAEDLFYQSSLASMYYNTNITEENVQNMNNALDKMSAFLKEQSTLAQTYPLQEIQNLTVKLQLQLL<sep>', 'MTIEEQAKTFLDKFNHEAEDLAYQSSLALMNYNTNITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIVNLMVLLFL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNREAEDLFYQSSLASMNYNTNITEENVQEMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQLL<sep>', 'SDIEEQARTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVNNMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQLNNENDKMSMFLKEQATLAQMYPLQEIQNLPVKLQLQALQ<sep>', 'STIEELAMLFLMDMNHEAEFLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQRYPLQEIQNLTVKLQLQAL<sep>', 'ETIEEQAKMFLDKFNHEAENLFVTLLLASMNYNTNITEENVQNMNNALLMFLLAQMYPLQEIQNLLVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVNNMLDKMSAFLKEQSMLAQMYPLQEIQNLTVKLQLQAMQ<sep><pad><pad>', 'STIEEQALTFLDTFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLA<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLREISTLAQMYPLQEIQNLPVKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLQSMNYNTNITEENVQNMNNAGDKMSAFLKIISTLAQMYPLQSIQNLTVKLLLQAL<sep>', 'STIEEEAKDFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEISTLAQMYPPLEI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDAFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNANDKMSAFLKEQSTRAQMYPLQEIQNLPLMELL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSNLASMNYNTNITEENVQNMFNAYNKMSAFLKEQSTLAQMYPLQEIQNLFVKLQLQA<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAYMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLLLS<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNARDKMSAFLKEISTLAQMYPLQHIQNLDVKLQL<sep><pad><pad><pad>', 'DTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLAFLKEQSTLALMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNRLDKMSTFLQEISTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEELAKTFLDKFNHYAERLFYQSSLASMNYNTNITEENVQNMNIAMDKMSAFLKEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQFELASMNYNTNITEENVQNMNNAGDKMSAFLKEKSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEMEDLFYQSSLASMNYNTNITEENVQNMLDKMSAFLKIQSELAQMYPLQEIQNQTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENLQNMNNALDKMSAFLREQSTLAQMYPLQEIQNLTVKLQRLL<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNITEENVQNMNNALDKMSSFLKELSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKKFLIKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALAFLLEISTLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQNMNNALDKMSAFLKEQSVLAQMYPLQEIENLTVKLQLQAL<sep>', 'PTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENFQNMNNALDKMSAFLKEQSTLAQMYPPQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLLKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLKVKL<sep><pad><pad><pad><pad><pad>', 'YTIEEMAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNFLDKMSAFLKEQTTLAQMYPLQEIQNLTVKLQLQAM<sep>', 'STMEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSYLAQMYPLQEIQNLTLKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMIYNTNITEENVENMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLALM<sep>', 'STIEEQAKTFLDKFNHEATDLFYQSSLASMNYNTNITEENRQNMNNALDKMSAFLKEQSKLAQMYPLQEIQNLTVKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEKLFYQSSLMSMNYNTNITEENVQNMNNAQDKMSAFLKEMSTLAQMYPKQEIQNLMMKMLL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMLPLQEIQNLTVKLQLQNL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNHALDKMEAFLKEVSTLAQMYPLQEIQILTVKLKLA<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSNLASMNYNTNITEENVQDMNNALDKMSAFLKEQSTLAMMYPLQEIQNLLVLMELQ<sep><pad><pad>', 'SDIEEQAKTFLDKFNHEAGDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQAKTFLDKFNREATDLFYQSMLASMNYNTNITEENVQNM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDTFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNQLDKMSAFLK<cls>QSTLALMYPLQEIQNLTVKLQLQALM', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPPQMIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFNHEAEQLFYQRSLASMNYNTNITEENVQNMNNALDHMSAFLREQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDRFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQQLLLLDLLYLLLLLLKMLDLLL<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFMQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEASTLAQMYPLQEIQNLLVKL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEANDLFYQSSLASMQYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLVVKLLMALL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAVMNYNTNITEENFQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQQALL<sep>', 'DTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAGMYPLQEIQNLTVKLQLQLL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQIMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLAIL<sep>', 'STIEEIAKTFLDKFNMVAEDLFYQSSLASMYYNTNITEENVQNMDNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQLL<sep>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQNMNNALDKMSAFLKEQSVLAQMYPLQEIRNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHESEDLFYQSSLAMMNYNTNITEENQQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLM<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAERLFYQSSLASMNYNTNITEENVQNMNNAGDKMSYFLKEQSTLAQMYPLQFIQNLTVKLQLQAL<sep>', 'LTIEEQAKTFLDKFNHEAENLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQLTLAQMYPLQEIQNLTPLLLL<sep><pad><pad><pad>', 
'SSIEEQAKTFLDKFNHEAEDQFKQSSLASMNYNTNITEENVQNMNNALDKMSAFLKRQSMLAQKYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAMTFLDKFNHEAEDLFYQSSLASMMYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLIEIQNLTVKLQL<sep><pad><pad><pad>', 'STIEEQAQTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQEMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQLL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSSLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLQKFNHESETLFYQFSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNAMDKMSAFLKEQMTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSNLASMNYNTNITEENVQYMNNALDKMSAFLKEQATLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTIAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SSIEEQAKTFLDKFNHEAHKLFSQSSLASMNYNTNITEENVLLMLKLQLLLMLMLH<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'TTLEEQAKTFLDKFNHEAEDLFYQSEMANMNYNTNITEENVQLMNNALDKMSAFLKEQSTLAQMYPLQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEEAKTFLDKFFHEAEDLRYQSSLASMNMNTNITEENAISMDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKMLL<sep><pad><pad><pad>', 'DTIEEQAKTFLDDFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNMLDKMSAFLKEQSTLAQMYPLQEIQDLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNNALDKMSAFLKEQSTLAQKYPLQEIQNLTVK<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNMALDKMYAFLLEQSLLAQMYPLEEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQMSLASMNYNTNITEENVQNMNNALDKMSAFLKNQSTLAQMYPLQEIQLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQFIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'HTIEMQAQTFLDKFNHEAEDLFYISSLASMNYNTNITEENMQIMNNAGDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'SNIEEFAKTFLDKFNHEAEDLFMQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STMEEQAKTFLDKFNHEAEMLFYQSSMASMIYNTNITEENVQNMNNALDKMSAFLKEQSVLAQMYPLQEIQNLTVKLQLAAM<sep>', 'STIEEYAKTFLDKFNMEAEDLFYQSSLAIMNYNTNITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKTLL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQKSLASMNYNTNITEENVQNMNNALAAMLMMSSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQNMNNAGDKMSIFLKEQSTLAQMYPPQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEKLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMEYNTNITEENVQNMNRAEDKMSAFLKQQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENLQNMNLALDKMSAFLKEQSTLAQMYPLQEIQNLTV<sep><pad><pad><pad><pad><pad><pad><pad>', 'DTIEEQAKTFLDDFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNFLDKMSAFLKEQSTLAQMYPLTEIQNLTVKLQLQLL<sep>', 'STIEEQAKTFLDKFIHEAEMLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHFAEDLFYQSSLASMNYNTNITEENVQTMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLMLQLL<sep>', 'STIEEQAKTFLDKFNHEQEDLFYQSSLASMNYNTNITEENLQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAERLFYQSSLAAMNYNTNITEENVQNMNNANDKMSAFLKEQSTIAQMYPLMEIQNLTVKLQLR<sep><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLAFMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLTLL<sep>', 'SAIEEQAKTFLDKFNHEAEDLAYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKELSTLAQQYPLQEIQNLTVKLQLQLL<sep>', 'STIEEQAKTFLDKFNHFAEDLFYQSSLASMNYNTNITEENFMNMNNALDKMSAFLKEQGTLASMYPLLEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHTARDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFMQFSLASMNYNTNITEENVVNMNRAGDKMSAFVKELSTLAQMYPLQEIQNLTVK<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQYSLASMNYNTNITEENVQNMNNAGDKMSAFLKLQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEEAKTFLDKFNHEAEDLFYQSSMASMNYNTNITEENVQNMNQALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNREAEDLFYQSSLASMVYNTNITEENVQAMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLTKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKEFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLGKMSAFLKESSTYAQMYPLQEIQNL<sep><pad><pad>', 'STIEEQAKIFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQALLKALSTLAQMFPLQEIQNLTVKLDLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNAEAEDLFYQSSEASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQLL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQKSLASMNYNTNITEENVQNMNNALDKMSAFLKEVSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEMAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNFFDKMSAFLKEQLTLALMYPLQEIQNLTVKLQLQAM<sep>', 'STIEEQAKTFLYKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLAIL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENQNMMDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLSALL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFYHEAEDLFYQSSLASMNYNTNITEENVENMNNALDKMSAFLLLELLNLSLKLLLMSLI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQAKTFLGKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNMAGDKMSAFLHEQSTLARMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENNQNMNNALDKMSAFLKEQSTLAQMYPPQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQNMNNALDKMSAFLKEQVTLAQMYPLQEIRNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHMAEDLFYQSMLASMYYNTNITEENVQNMNNAGDKMDAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLSSMNYNTNITEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQVLTVKMVL<sep><pad><pad><pad>', 'STIEERAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQTTLAQMYPLQQIQNLTVKLQL<sep><pad><pad><pad>', 'STYEEQIKTFLDKFNHEAETVFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSDRAQMYPLLEIQNLPLKLLL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNVKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNKLDKMSAFLKEMSILAQMYPLQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNEALDKMSAFLKEQSYLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEVLFYQSSLASMSYNTNITEENQNEMLALMSMLAQMVPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFAQSYMARMNFNTNITEENVQNMNMALDKMSAFLKLQMMLALMYPLQMLASMY<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLQSMNYNTNITEENVQNMNNARDKMSAFLKEQSFLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNDEAEDLMDQSSLASMNYNTNITEENVQIMNHFLKMMSSLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKIFLDSFNMQAYDLFYQSKLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIDNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'QTIEEQAKTFLDKFNHEAEDLFYQSSLASLNYNTNITEENVQNMNAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLAYQSSLASMNYNTNITEENIQNMNNAGMKMSAFLKELSTLAQMYPLQEIQNLPVKL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLM<sep><pad><pad>', 'STLEEQAKTFLDKFNHFAEDLFYQSSLASMNYNTNITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STKEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYHSSLASMNYNTNITEENVQNMNVALDKMSAFLKEISTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQYMLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHLAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKELLDLFLKELLYLMVKLQLQALM<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAELLFYQKSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSSLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STEEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNALDRMSAFLKEMSTLAQMYPLQEIQNLTVKEVLMVL<sep>', 'STIEEQAKTFLDKFNHEAEDVFYQSSLASMMYNTNITEENVQNMNNAHDKMSAFLKEQSTLAQMYPLQEIQNLTLKMLL<sep><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSALAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQYSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEINTL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLFKFNHFAEDLFYQSSLASMNYNTNITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAM<sep>', 'SGIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLYEISRLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKALLALM<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNDEAEDLFYQSSLAIMNYNTNITEENVKNMNNALDKMSAFLKEQSTLANMYPLQEIQNLTVKLQLRLL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSQASMNYNTNITEENMNNMNDKMSAFLKEMSTLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNYALDKMSAFLKLQSTLAQMYPLQEIQNLTV<sep><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMEYNTNITEENVQNMNRAGNKMSAFLKRQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'SDIEEQAKTFLDKMNHEAEDLFYQASLASMNYNTNITEENVQNMLNATDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSYLASMNYNTNITEENVQNMNNALDKMSAFLKENSTLAQMYPLQEISNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHMAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKVQSTLAQMYPLKEIQNLLVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAGDLFYQSSLASMNYNTNITEENVQNMMNASDKMSAFLKEQSYLAQMYPLQEIQNLMVKLQLQ<sep><pad><pad>', 'SSIEEQAKTFLDKFNHEALDLFYTSSLASMNYNTNITEENVQNMNAMLDKMSAFLKEQSTLAQMI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEASILFDQSSLLSMNYNTNITEENVQNMNNALDKMSAFLKEASILAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLLKFNHEAEDLFYQSSLFSMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQYIQNQMVKMQMY<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDAFYQSMLASMQYNTNITEENVQNMNMALDKMSAFLKEQSTLAQMYPLQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKAFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNYVLDKMSAFLKTQSTLAQMYPLQEIQNLTVKLQLALM<sep>', 'STIEEQAKTFLLKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYELQEIQNLLVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQASLASMNYNTNITEENVQNMNNALDKMSAFLKEQSNLAQMYPLQEIQLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKVFLDKFNHEAEDLFYQSSLAKMNYNTNITEENVQIMVNAHDKMSAFLKELSTLAQMYPLQEIQNLTVKLQLEFM<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNITEENVQNMNNALDKMSSFLKEQSTKAQMYPLQEIQNLTVK<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNKEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLPLQMVL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYAMSLASMRYNTNITEENVQNMNAALDKMSAFLKEQSTLAQMYPLIELQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLIVKLQLQA<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNITEENVYNMNNAGDKMSAFLKMQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEFLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFGKEQSTLAQMYPLREIKNLTVKLLLLLL<sep>', 'STIEEQAKTFLDIFNHEAEDLFYQSSLAMMNYNTNITEENVQNMNNALDKMSAFLKEQSGLAQMYPLQEIQNLM<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKQFLDKFNHEAEDLFYQSSLAMMNYNTNITEENVQNMNNALDKMSAFLHMISTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEPLFYQSSLASMMYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASLNYNTNITEENVQNMNNALDKMSAFLKEQRTLAQMYPLQEIQNKTVKLQLAAM<sep>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEPLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKMQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAKDLFYQSSLASMNYNTNITEENVQNMVDKMSAFLKKQSTLAQMYPLIEIQNLTVKMQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDYFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNLLLKMSYFLKELSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNKEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQDLTVKLL<sep><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQASLASMNYNTNITEENVQNMNNLLDKMSAFLKEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENKQNMNNAIDKMSAFLKEQSTLAQMYPLQEIQNLLVKLQLQVL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQRSLASMNYNTNITEENVNLMLKETSTLAQMYPLQEIQNLTVKLQLQMLQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEFAKTFLDKFNHEAEDFFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLLVKL<sep><pad><pad><pad><pad><pad>', 'SEIEEQAKTFLDKFNHEAEDLFYQSSVASMNYNTNITEENKQNMNNAGHKMSAFLKEQSTLAQMYPLQEIQNLPLKLQLAAM<sep>', 'STIEEQAKTFLDKFNHEAEDLFAQSKLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDVFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKQFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMNNATDKMSAFLKEQSQLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEYQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENQNEMLKMSAFLKEQSTLAQMYPLQEIQNLYVKLQLQALM<sep><pad><pad><pad>', 'SSIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNITEENVLAMSYLLEIYNLTVKLQLQALFVKMSGLALIYNLLALI<sep><pad><pad><pad>', 'STIEEQAKLFLDKFNHEAEDLFYMSSLASMNYNTNITEENVQNMNNALDKMRAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFMHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLRL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLSYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKELSTLAQMYPLQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'LTIEEQAKTFLDKFGHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEISTLAQSYPLQEIQNLTVKLFLQLL<sep>', 'STIEEQAKTFLDKFNHEAIDLFYQSSLASMNYNTNITEENNLLMMMYATLKMQSSLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad><pad>', 'STIEERAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNPTVKLQLQA<sep><pad>', 'STIEEQAKTFLDMFKHEAEDLFYQSSLTSMNYNTNITEENVQNMNNAGDKMSAFLMTLAQMYPLQEIQNLTVKLQLQSLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDAFYQSMLASMNYNTNITEENVQNMNRALDKMSAFLKEQSTLAQMYPLQGIQNLTVLL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAQLLFYKSSLASMFYNTNITEENVRNMNNAGDKMSAFLKERSTLAQMYPLQEIQNLTVKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQYSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEID<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEIAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMGDAMSAFLKEQSTLAQMYPLQEIANLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIVNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDIFYQSSLASMNYNTNITEENYQNMNNALDKMSAFLKEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEMAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAEDKMSAFLKEISTLAQMYPLQEIQNLTV<sep><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNMEEEDLFYQSSLASMTLNTNITEENVQNMNRAQDKMSAFLKMQSTLAQMYPLQEIQNLPVKLQLN<sep><pad><pad>', 'STIEEQANTFLDKFNHEAEDLFYQSFLASMVYNTNITEENMQNMNKALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKMISTLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEMEDLFYQSSLASMNYNTNITEENVQNMNNANDKMSAFLKEQSTLASMYPLQEIQNLPVKLQLQA<sep><pad>', 'SDIEEQAKTFLIKFNHEAIDLFYQSSLASMNYNTNITEENVQNMNNAFDKMSAFLKEQSTLAQAYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHQASDLFYQNSLASMNYNTNITEENVQNMNNALDKMSAFLKEMSLLAQMYPLEEIQNLTVKLQLQAL<sep>', 'ETIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAILLMGLLGALLKLAQMLPLEEIQNLTVKLQLQALM<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQSMNNALDKMSAFLKEQSRLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSQASMNYNTNITEENMQNMNNAYDKMNAFLMEQSTLAQMYPLQEIQNLTVKLQLQAM<sep>', 'STIEEQAKTFLDKFNHEAEMLFYQSSLASMNYNTNITEENDNGMDKMSAFLKEISFLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPPQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAMTFLDKFNHEAEDLFYQSSLMSMNYNTNITEENVQNMMNALYKMSAFLKIQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAMDKMSAFLKEQSTLALMYPLGLINVKVKLKLQALQ<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLALMNYNTNITEENVQNMNNAGDKMSAFLKEQMTLAQMYPLQEIQNLTVKLLLQLL<sep>', 'STIEEQAKTFLAKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGRKMSEFLKEISTLANMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFQRSYLMSLNYNTNITEENVKNMNYATFLMSMSFLAQMYPLMEIQNLTVKLQLAALY<sep><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLAYQSSLASMNYNTNITEENVELMNYLLGMSSLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad>', 'STVEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENEQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHLAEDLFYQSSLRSMNYNTNITEENVQNMNNAIDKMSAFLKEQSTLAQMYPLQEIQNPKVK<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMAYNTNITEENVQNMNLALDKMSAFLKEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHLAEDLFYQSSLAYMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYNLLEILELEVKLMLALF<sep>', 'STIEEQAKTFLDKFNHEAENLFYQSSLAFMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPPLKIKLQLQALQ<sep><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVMNMNKAYDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNIEAEDLFYQSSLASMAYNTNITEENVQNMNNALDKMSTFLKELSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAKDLMYQSSLASMRYNTNITEENVQNMNNALDKMSAFLKEDSLLAMMYPLLEYLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYASELASMNYNTNITEENVQNMNNARDKMSAFLKEISTLAQMYPLSEIQNLPVKL<sep><pad><pad><pad><pad><pad>', 'GTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMFNAGDKMSAFLKLISTLAQMYPLQEIQNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFAHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLPVKLQLAAL<sep>', 'STIEEQAKTFLAKFNHEAEDLFYQSELASMNYNTNITEENVQNMNNAGKKMSAFLKEQSTLAQMYPLMEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNNAGNKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTNITEENLQNMNNALDKMSAFLKEQSTLAGMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLTSMNYNTNITEENVQNMNNAGDLMSAFLKEVSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNAAGDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEALFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQTYPLQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'ETIEEQAKTFLDKFNHSAEDLFYQSSMASMNYNTNITEENEQNMNNAADKMSAFLKEISTLAQMYPLQEIQNLTVK<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLHELSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEQQALTFLDKFNHRAEDLFYQSSMFSMNYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMLPLQEIQNLTVLLNL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENLQLMNNAGDKMSAFLKEMSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNSEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLLVKMSL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQRTLAQQYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDIFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKSLLAQMYPLQEIQNLTVKLQLQALQ<sep><pad>', 'SVIEEQAKTFLDKFNHQAEKLFYQSSLASMNYNTNITEENVQNMNNAFDKMSAFLKTQSTLAQMYPLQEIQNLTVKQQLFAL<sep>', 'STIEEQAKTFLDKFNHFAEDLFYQSSLASMNYNTNITEENMQNMNVALDKMSAFLKEQSSLASMFPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKNFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLLMKMQL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFLQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSMLAQMYPLLEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'ETIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMNLFLKEQSTLALMYPLQEIQNLTVKLQLLMM<sep>', 'STIEEQAKTFLDKFNHEAEDLYYQSSLASMNYNTNITEENIQNMNNAIDKMSAFLKEQLTLAQMYPLQEIQNLPVKL<sep><pad><pad><pad><pad><pad>', 'SSIEEQAKLFLDKFNHNAEDLFYLSSLAAMNMNTNITEENVQNMNNAIDKMSFFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAMDLMYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQRYPLQEIQNLMVKLQLQTL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAKDKMSAFLRMQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNLEAEDLFYQSSLASQQYNTNITEENVQNMNNALDKMLAGLKLELLLYLLLTLLLLLLLLLLLTLLL<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSELANMNYNTNITEENRRNMNNALDKMSAFLKEQSTLASMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENLNNMNNALDKMSTFLKMQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFIHEAEDLFYQHLLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQDIQNLMIALQM<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQRSLASMNYNTNITEENVQMMNNAGDKMSAFLKEQSILAQMYPLQEIQNLTVKLMLASM<sep>', 'STIEEQAKTFLDPFNHEAEDLFYQSSLKSMNYNTNITEENVQNMNNAGDKMSAFLKEISTLAQMYPLQEIQNLTVKLKLQ<sep><pad><pad>', 'STIEEQAKYFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGKKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEMAKTFLDKFNHEARDLFYQYSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDIFNHEAEDLFYQSSLAMMNYNTNITEENVQNMNNAGDKMSGFLKEQLTLAQMYPLQMIQNLTLKLMLQ<sep><pad><pad>', 'STIEEQAKTFLDKFIHEAEILFYQSSLMSMNYNTNITEENVQEMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'QTIEEQAKTFLDKFNLEAEDLFYQSSLASMNYNTNITEENVQIMNNAMDKMSAFLLFMLTLMLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAMDKMSAFLKEQRTLAQMYPPLELLN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITEENIQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNKEAEDLFYQMSLASMNYNTNITEENVQNMNNAGDKMSFFLKEQSTLAQMYPLQEIQNLTLKLQL<sep><pad><pad><pad>', 'STIELQAKTFLDKFRHEAEDLFYQSSLALMNYNTNITEENMQNMDDKMDKFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIETQAKTFLDKFNHEAEDLFYQSSLAYMNYNTNITEENVQNMNNALKKMLAKMKPLQEIQNLTVKLQLQALI<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEMLFYQSSLAHYNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVENMNNALDKMSSFLKEQSILAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAQDLMYQSSLLSMNYNTNITEENVQNMNNALDKMSAFLKEISTLALTYPLLAMV<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKQFLDKFNHEAEDLFYQQMLASMNYNTNITEENVNINNALIKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'ETIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQTMNNAADKMSAFLKEQSLLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEELFYQSSLKSMNYNTNITEENVDNMNNALDKMSAFLKFLLELASMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFEHEAMFLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKRQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DTIEEQAKDFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQSLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEFLFYQSSLASMNYNTNITEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEELAKTFLMKFNHNAEDLFYQSRLLSMNYNTNITEENVNNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHFAEDLFYQSNLASMNYNTNITEENVINMNNAGDKMSAFLQEISTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHENEDLFYQSKLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLALIQNLTVKLQLMAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMQYNTNITEENVQNMNNAYDKMSAFLKEQSTLAQMYPLQEIQNMTVKLQL<sep><pad><pad><pad>', 'ETIEEQAKTFLDKFNNEAEDLFYQSSLASMNYNTNITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQASLASMNYNTNITEENVQNMNNAGAKMSAFLKEQSTLAQMYPLQEIQLLMVKLQLMA<sep><pad>', 'STIEEQAKTFLDKFNHRAEDLFYQSELASMNYNTNITEENVQNMNNALDKMSAFLKEQSMLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNAAQDKMSAFLKEQTTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEISTLAQMYPPQLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENEQNMNNALDKMSAFLKEQSTLAQMYPLPMTVKLQLQALQ<sep><pad><pad><pad>', 'STIEEQAKTFLDVFNHLARDLFYQSSLASMIYNTNITEENVQNMNSAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'MTIEMQAKTFLDKFNHMAEDLFYQSSLASMNYNTNITEENVQNMNYATDKMSAFLKEQSTLAMMYPLQEIQNLTVKLQLM<sep><pad><pad>', 'STIEEQAKTFLDKFNFEARDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENRQNFNNAHDKMSAFLKEQSTRAQMYPLQEIQNQLVKL<sep><pad><pad><pad><pad><pad>', 'STIEYQAKTFLDMFNHEAEDLFYQSSLASMHYNTNITEENVQNMNQALDKMSAFLKEQSTLAQHYPIQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAESLFYQSSLASMNYNTNITEENVQNMNNKMSRLLKMQSTLLQMYPLLEIQNLTVKLMLEALI<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITEENVQNMQLALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAQDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLAEQMTLAQMYPLQEIQNLTVKLQL<sep><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAHDKMDAFLKEQSTLASMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNLEAEDLFYQSSLASQNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNSTVKLQLQAL<sep>', 'STIEEIAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKKFLDKFNHEAEDLFYMSSLASMNYNTNITEENVQNMNNALDKMAAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAKMNYNTNITEENVQNMNNALDKMSAFLKEISTLAQMYPLQEIQILTVKGVLALM<sep>', 'SEIELQAKTFLDKFNHEAEDLFYQSSLASMAYNTNITEENVQNMRNALDKMSAFLKEQSTGAQMYPLQMIQNLTVKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLLKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQKIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFMQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIDNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SEIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLKVKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEATDLFYQSSLASMNYNTNITEENRQNMNNANDKMSAFLKEQSSLAQMYPLQEIQNLTVKLQL<sep><pad><pad><pad>', 'ETIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMYNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVMLQLQML<sep>', 'STIEEQAKTFLDVFNHKALDLFYQSSLASMIYNTNITEENVQNMNEAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIENQAKTFLDKFNHEAEDLFYQASLASMNMNTNITEENVNNMEDKMSFFLMEQSQVAQMYPLQEIQNLTVKLQLQALQ<sep><pad><pad>', 'STIEEQAKTFLDKFNLEAEDLFYQSSLASMNYNTNITEENQQNMNNALDKMSAFLKEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STVEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAMDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMKNALDKMSAFLKEQSALAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STLEEQAKEFLDKFNHMAEDLFYQSSLASMNLNTNITEENVQNMNNAYDKMSAFLKLQSVLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQDMNNALDKMSAFLKEQSTLAIMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDIFNHEAEDLFYGSMLASMNYNTNITEENVQNMNNLLDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTRAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQMSLASMDYNTNITEENVQRMNFAGTKMSAFLKEQGTLAQMYPLLEIQTLTVKLQLLSM<sep>', 'STIEEQAKTFLDKFNHEAADLFYLSSGASMNYNTNITEENVQNMNNALDKMSYFLKELSTLAQMYPLQEIQNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'YTIEEMAKTFLDKFNLEAEDLFYSSSLRSMNYNTNITEENVQNMNNAFDKMSAFLKEQSTLAQMYPLREIQNLTVKL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSRLASMNYNTNITEENVLLMNNLSLKLRLALMYALLSMLALMSLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'ATPEEEAKTFLDKFNHEAEDLFYQSSERYMNYNTNITEENVQLMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQA<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVYMMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLASMYPLQEIQNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKLFLDKFNHEAEDLFYQSLLASMNFNTNITEENVQNMNNAFDKMSAFLKEQSTLAQMYPLQTL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYTSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKNQLQ<sep><pad><pad>', 'STIEEQALTFLFKFNHEAEDLFYQSSLATMNYNTNITEENVYNMNNAGDKMSAFLKEVSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNLEALDLFYQSSLTSMNYNTNITEENVQMNNMGDKMSAFLQEISTLAQMYPLQEIQNLTVKLQLQALQ<sep>', 'STIEEQAKRFLDKFNHESEDLFYQSSLASMNYNTNITEENVQNMNSALDKMSAFLKEQSTLAQMYPLLEIL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEMAKTFLYKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYYSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SMIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNARTLAKMYPPQEIQNLTVKLQLQALQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLQSMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAETMLEKFNSEAEDLFYQSSLASMNYNTNITEENVQRMNNALAKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL<sep><pad><pad><pad>', 'SSIEESAKTFLDKFNHEAEDLSYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKRQSTLAQMYPLQEL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEQMFYQSSLASMNYNTNITEENVQNMNNAKDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAERLFYQFSKASMNYNTNITEENVQNMNSAGDKMSAFLKEQSTLAQAYPLQLIQNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'SLIEEQAKTFLDKFNHEAEDLYYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLTSMNYNTNITEENVQYMNNAYDKMSAFLKEQSTLAQMYPLQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMMYNTNITEENVQNMNDAMDKMSAFLKEQSTLAQTYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAMTLVYQSSLKFMNYNTNITEENVQNMNNALDKMSAFLKEQLTLAAMYPLQEIQMLTVKLQLQ<sep><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSHLASMNYNTNITEENQMNMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLDEQSTLAQMYPLQEIQILTVKLQLALM<sep>', 'LTIEEQAKTFLDKFNHEAENLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNLYPQLLL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLLKFNHEAEDLFYQSSLMSMNYNTNITEENVQNMNAAGDKMSAFLKEQSTLAQMYPLTEIQNLTVKLQLFAL<sep>', 'SNIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNKNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVK<sep><pad><pad><pad><pad><pad><pad>', 'YTIEEMAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNFFDKMSAFLKEQGTLAQMYPLQEIQNLTVKLQLQA<sep><pad>', 'STIEEQAKTFLDKFNHEAIDLFYQSSLASMNYNTNITEENVNLMVKLSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDVFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLS<sep><pad><pad>', 'STDEEQAKTFLDKFNHEAERLFYQSSLASMNYNTNITEENVQNMFNALDKMSAFLKEQSTLAQMYPLQEILNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFNHEAEDLFYQSSLASMRYNTNITEENVQNMNFAGDKMSAFLKEQSYLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHLAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEYLLLFVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'QTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKTQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNSEAEDLFYQVSLASMNYNTNITEENVQNMRNALDKMSAFLKEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNMLDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLALMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAMMYPLQEIQNLTVKL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAELLFYQSMLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTRAQMYPLQEIQNLTVKAQM<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNGEAEDLFYQFSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAAMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFIHEANDLFYQSSLASMNRNTNITEENVQNMNRALDKMSAFLKEQSTVAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYNTNITEENVENMNNALDKMSAFLKEQSTLALMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQNMNNAFDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAQDKMAAFLKEMSTLAQMYPLQEIQNLTVKL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVLNMNNAFDKMSLFMKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFVHEAEDLMYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKELSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASHNYNTNITEENVQNMNNALDKMSAFLKEMMDLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKKFIDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALAFLKEQSTLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad>', 
'FTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMYPLQEITN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNANDKMSAFLKEQSTLAQMYPPQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLKEIQNLTVKLQLQAM<sep>', 'STIEEQAKTFLDKFNHMAEDLFYQSSLASMNYNTNITEENVGNMNNALDKMSAFLKEQSVLAQMYPLQQLQNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STMEEQAMTFLDKFNHEAEDLFYQSSLMSMNYNTNITEENLQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVLMLI<sep><pad><pad><pad>', 'STIEELAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNMAGDKMSAFLKEQSVLAQMYPLQEIQMLTVKLQLQAL<sep>', 'STMEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLP<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLNKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNKMSMMLKEQSTLAQMYPLQEIQNLTVKLQLQMLQ<sep><pad><pad>', 'STIEELAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMYYNTNITEENVQNMNNAKDKMSAFLKEQTTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLKKFNHEAESMFYQSSLASMRYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEILMLLVMM<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNYLDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNY<cls>DKMSAFLKEQSTLAQMYLLQEIQNLTVKLQLQALQ', 'STIEVQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAQDKMSAFLKEISRLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSFASMNYNTNITEENVINMNNALDKMSAFLKESSTLAQMYPLNEIQNLTVKQ<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNMLDKMSAFIKEQSTLAQMYPLQEI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'QTIEEQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITEENVQNMNNALDKMYLLLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAIDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEMSTLAQMMPLQEIQNLTVLMSLASL<sep>', 'STIEELAKTFLFKFNHRAEDLFYQSSLFAMNYNTNITEENRQNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SSIEEQAKTFLDKFNAEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMQYNTNITEENVQDMNNALDKMSSFLYEQLTLAQMFPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKLFLDKFNHEAEDLFYQSSLTSMNYNTNITEENVQYMNNALDKMSAFLKEQSTTAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAELLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEISTLAQMYPLGLIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSQASMNYNTNITEENMDNMNNAGDNMSAFMKELSTLAQMYPLQEIQNLTVKLQLQAM<sep>', 'STIEEQAKTFSDKFNHEAEILFMQSSLASMNYNTNITEENVQNMNNALDKMSAFLKELLTLAQMYPPQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDMFNHEAEDLFYQSSLTSMRYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHLAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKELLDLMVKLQLQAMM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFTHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKLQSTLASMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNAEAEDLMYQSSLASMNYNTNITEENVQNMNNASDKMSAFLKEASTLAQAYPLQEIQNLTVKLQLQAL<sep>', 'STIEEQAKTFLDKFMHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLLVKMFL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMRYNTNITEENVQNMNNTTDKMSAFLKEISTLASMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLAFLLLQLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKNFLDKFFHEAEDLFYQSSLALMNYNTNITEENVQNMNNAGDKMSAFLKEQTTLAQMYPLQMIQNLTVK<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAFDAMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSAIAYMNYNTNITEENVQNMNNALDKMSIFQQEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVYNMNNAGDKMSAFLKEMSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLLFLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEQLFRYSALASMNYNTNITEENVQNMNNALDKMSAFLKEQSILAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'MTIEEQAKTFLDKFNHEMEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSMFLKEQSTLAQMYPLQEVQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNNALDKMSAFLKEQSQLAQMYMLARIK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNYGDKMSAFLKEQSTLAQMYPPQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAYDLFYQSLLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLIEIQNH<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSIASMNYNTNITEENVQNMNNALDKMSLFLKEQSLLAQMRLANDKLQLLLLLALAL<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLMSINYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENNQNMNNAGDKMSAFLKEQSTLAQMYPLPEVQNLPLKELL<sep><pad><pad><pad>', 'STYEEQAKTFLDMFNHEAEDLFYQSSLASMNYNTNITEENVQSMNHAMDKMSAFLKEQSLLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAMTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQSYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFIQSSLRKMNYNTNITEENVQNMNNAGDKMSAFLKEQMTLAQMYPLQEIQNLMVLL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKKFIDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAKAFLLEQSTLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad>', 'SDIEEKAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEISTMSFMLLG<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTAAQMYPLQEIQNLQLKRLL<sep><pad><pad><pad>', 
'STIEEQAKTFLDKFNHLAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKELLDLFVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAYDLFYQSSLASMNYNTNITEENVQNMNNVLDKMSQFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDFFYQSSLFSMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLAVKLLLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAADLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSSLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNITEENVYGMLTLALMYSLQEIQNLTVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEKLFYQASLASMNYNTNITEENVQNMNNALDKMSAFLKMQSTLAQMYPP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQKMNNAYDKMSAFLKEQSTLAQMYPLQEIQNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEETAKFFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNARDKMSAFLKALLDLTVKLLLQALI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLAFLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNEEAEDLFYESSLASMNYNTNITEENVQNMNMALDKMSMFLMEQRTLAQIYPLQEIQSLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDDFYQSSLASMNYNTNITEENLQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQKL<sep>', 'STIEEQAKTFLDKFNHEAEDLAYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKHISTLAQMYPLQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMMYNTNITEENVQNMNIAGDKMSAFLKAQSTLAQMYPLQEIQNLTVK<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAELLFHQSNLASMNYNTNITEENVQNMNNAFDKMIAFLKEQSTLSQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDQFNHKAEDLFYQSKLGRMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQTYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQTKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEISTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNMMDKMSAFLKEQSTLAQMYPLQEIQNLTVLMSL<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGMKMSAFLKEQSTLAQMYPLQEIQNLTVKALL<sep><pad><pad><pad>', 'STIEEEAKTFLDKFNHEAEDLFYYSSLASMNYNTNITEENVNAMVKMSAFLKEQKTLAQMYPLQEIQNLTVKLQLQAMQ<sep><pad><pad><pad>', 'STIEEQAKTFLEKFNHEAEDLFYQSSLASQNYNTNITEENVQNMNNALDKMSALLLQLFLDKMLLLLLF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNYIDKMSLFLKEQSTLAQMYPLQEIQNLTVKL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLKKFNHEFEDLFYQSSLASMNYNTNITEENVQNMNNAFDKMSAFLKLQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STMEEQAKLFLDKFNHIAEDLFYQSSLASMNYNTNITEENVQNMYNMGDKMSAFLKEMSTLAQMLPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLREISTLAQMYPLQELQNLTVKLQ<sep><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAVDMFYQSSLASMNYNTNITEENVQNMNMAFDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLDEIQNP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNESAQDLFYQSSLTKMAYNTNITEENVQNMSNAFDKMSAFLKEQSTLAQMYPLLEIQNLTVKLL<sep><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENIQNMNNAADKMSAFLKETSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSRASMNYNTNITEENVQNMNNALDRMVKLSLDSMLALMYPLQEIQNLTVKGQLQALM<sep>', 'STIEEQAKTFLDKFNHEAERMFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLQEQSYLAQMM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIALL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDVFYQSSLASMNYNTNITEENVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLTVK<sep><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAADKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSALASKNYNTNITEENVQRMNNARDKMSAFLKEQSTLAQMYPLQEIINLTVKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLEKFNHEAEVLFYQSSLRSMNYNTNITEENVQNMNNALDKMSAFLKTQSTLAQMYPLQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAFDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAFTFLDKFNHEREDLFYQASLASMNYNTNITEENVQNMNNAGDKMSAFLAELSTLAQMYPPQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'GTIEEQAKTFLDKFNHEAEDLFYQSSLMSMNYNTNITEENVAPMNAGLKMSAFLKRQQSLAQMYPLQEIQNLTVKLQLQLLQ<sep>', 'STIEEQAKTFLDKFMHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLMEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNLEAEDLFYQSNLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLLEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNMMDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQMSLASMNYNTNITEENVQNMNNAGSKMSAFLKEQSTLAQMYPLALML<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'YTIEMQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMDNMYDKMSAFLKEQTLLAQMYPLQEIQNLTVKLQLQAM<sep>', 'STIEEQAKTFLDKFNSEAELLFYQSSLASMNYNTNITEENFQNMNNALDKMSAFL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYNSSLASMNYNTNITEENNQNMNNALDKMSAFLKEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNVEAEDLFYQSSLASMNYNTNITEENVQEMNNALDKMSAFLKEISTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DTIEEQAKTFLDKFNHEAEDLFYQSSLAIMNYNTNITEENVQNMNNALDKMSAFLKEQSTLALMYPLQEIQNLTVKLPLQ<sep><pad><pad>', 'STIEEQAKQFLDKFNHEAERLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKERSTLAQLYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYNSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNNADDKMMAFLKELSAFAQMF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQALTFLDKFNFEAEDLFYQSSDASMNYNTNITEENVQNMNNAEDKMSAFLKEQSTLAQMYPLQEIQNPTVKLQLQAL<sep>', 'STIEEQAKTFLDKFNHEAELLFYQKSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNITEENVGAMLTLALMYSLADITKLAQMFALQEIQNLTVKLQLQALM<sep><pad>', 'STIEEQAKTFLDKFNDEAEDLFYQSSLALMNYNTNITEENVQNMNNALDKMLLFLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMMNALDKMYAFLKQQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKLFLDKFNHEAEDLFYQSSLASMQYNTNITEENVNLMLNLTLALMTPLLLLALLVLLSLLALMEPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHIAEDLFYQSSLAMMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYQL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSLLAQMYMLQELLI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEYAKTFLDKFNHKAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKLQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAMLDMSAFLKELSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMMNAGGKMTAFLKELSTLAQMYPLQEINNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFIHEAEDLFYQSSLASMNFNTNITEENVMNMNNAGYKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL<sep><pad><pad><pad>', 'STIEEQAKTFLDRFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNANDKMSAFLLELLLLLLK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEDQAKTFLDKFNHEAEDLFYQSLLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAFDKMSYFLKEQITLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEFAKTFLDKFNHEAEDLFEQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLASMNYNTNITEENVQNMNNALDKMSAFLKE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'TTIEEQAKTFLDKFNHEATKLFYQSSLASMKYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMLNALDKMLAFLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEMLFYQSSLASMNYNTNITEENVQNMNNAGLKMSAFLKESSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEILFYQSSLASMNYNTNITEENVINMNNALDKMSAFLKEQSTLAQMM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHDAEDLFYQSSLASMNYNTNITEENVQNMNNALTKMSAFLKDQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SMIEEQAKSFLDKFNHEAEDLFYQSSKASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLKYQSSQASMNYNTNITEENRQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQ<sep><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLMYQMSLASMNYNTNITEENVQNMNNASDKMSAFMKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNLAMDKMSAFLKIQSTLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSQASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'SQIEEQAKFFIDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALAFLAEQSTLAQMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDVFYQSSLASMMYNTNITEENVQNMNNAHDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAKDLFYQSSLASMNYNTNITEENVQNMTNALDKMSAFLKEQSTLAQSYPLQEIKVLTVFL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFKHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEMSTLAQMYPLQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENLQNMNNAGDKMSAFLKEQSTLAQNYPLQEIQNQMVALQLML<sep><pad>', 'STIEEQASTFLDKFLHEAEDLFYQSSLALMNMNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQ<sep><pad><pad><pad><pad>', 'STMLEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAFDKMSAFLKEQRTLAQSFPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQLTFASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPVQEIQNLTVLL<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQSMNLAGDKMSAFLKTQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STVEEQAKTFLDKFNHEAEDLFYQSSFASMNYNTNITEENVQNMNQAIDMMSAFLKEQSTLAQMYPLQEIQNLTVKLQLAMM<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNARDKMSAFLKEQSTLAQMYPLQEIQNNLVLMSLQ<sep><pad><pad>', 'STIEEQAKEFLDKFNHEAEVLFYYSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLMKFNHRALDLFYQSSLASMNYNTNITEENVQNMNNAGDKMAAFLKEQSTLAQMYPLQEIQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDVFYMSSNASMNYNTNITEENVQNMNNAYDKMSAFLKEISTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNNAGDKMSAFLKEQSTLANMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKDFLDKFNHEAEDLFYQKSLAKMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFLHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLALLYPLQEIQNLTVKTQLQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQAKTFLDKFNHEAEDLFYQSSLAQMNYNTNITEENVQNMNNASDIMSAFLKEQSTLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFMQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSKLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFYHEAEDLFYQSSLASMNYNTNITEENVQNMNNAFDKMSAFLKEQSTLAQMYPLQEIQNLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHLAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKELLQL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKPFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQNMNNAVDKMSAFLKEISTLAQMYPLQEIQNLTVMLMLQAL<sep>', 'STIEEQAKKFLDKFNHEAEDLFYMSSLASMNYNTNITEENVQNMNNAGQKMMAFLKEQSTLAQMYPL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNQEAEDIIYQSSLASSNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLNVKLLLQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALTKMSAFLKEMSMLSQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAIMSYNTNITEENVQNMNNALDKMSAFL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKRFLRKFNHEAEDLFYQSSLASMNYNTNITEENVQRMNNAGNKMSAFLKEQSLLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLFEISLLASMNFNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQ<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLMYQSSLASMNYNTNITEENSQNMQNALDKMSAFLKEQSTLAQMYPLQEIQNLTVKMSL<sep><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEISTLAFMYPGMEI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLLKFNHEAEDLFYTSSLASMNYNTNITEENVENMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SYIEEQAKTFLDKFNHEAEDLFYQSILASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'ATIEEQAKTFLDKFNHEAEDLFYQSSLASMRYNTNITEENVQNMNNAMIKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMMAFLMEQSTLAQQYPLQEIQQLTVKLQFQAL<sep>', 'STIEEQAKTFLDKFNHEAEDLFYQSLLASENYNTNITEENIQNMNNALDKMSAFLREQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'PTIEEQAKTFLDKFNHEAEDLFLQSSLASMNYNTNITEENVQNMNNALDKMLAFLQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNKQAEDLFYQSSLASMNYNTNITEENVQNMNTALDKMSAFLTEQSTLAQMYPLQEIQN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENGQNMNKAADKMSAFLKEQSTLAQMYPLQEIQNLTVKLQL<sep><pad><pad><pad>', 'SSIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENQNEMLTLALMYPLQEIQNLTVKLQLQALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKFFLDKFNHEAEDLFYQSFLASMNLNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKIFLDKFNNEADDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLQQL<sep>', 'RTIEEQAKTFLDKFNHEAEDLDYQSSSASFNYNTNITEENKNNELDKMSAFLKEQSTLAQMYPLQEIQNLTVKLQLLAFM<sep><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSMLKSMAYNTNITEENVQNMNNALDKMSAFLKEQSTLAQAY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKYFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEVSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLIYQSSLASMNYNTNITEENTQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNLDVKLMAKL<sep><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENLQNMNNAGDKMSAFLKEQSTLAQNYPLQEIQNLMVLAQMA<sep><pad><pad>', 'STIEEQAKTFLDKFNQELEDLVYQSSLASMNYNTNITEENNQNMNNALDKMSAFLKEQSTLAQMYPLQEIQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDDFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNADDKMSKFLRLMLLLYLLLR<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMIYNTNITEENVTYMNMAGDKMSAFLKEQSTLAQMYPLQEIQNLTV<sep><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFNHEAEDLFYQSSLASMNYNTNITEENQQNMNNALSKMLALLLLASMLALM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
[Cell output: a long list of generated protein-sequence strings, each a variant of the scaffold beginning 'STIEEQAKTFLDKFNHEAEDLFYQ…', terminated by a '<sep>' token and right-padded with '<pad>' tokens.]
'STIEEQAMTFLDKVNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQKLLAQM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSKLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEIQAKTFLYKFNHEAEDLFYQSSLASMYYNTNITEENVQN<cls>NNALDKMSAFLKEQSTLASMYPLQIIKNLTVKLQLQALP', 'STIEEQAKTFLDKFRHEAERLFYQSSLAYMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLAL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKYFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMFNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'VTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKRSSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNNEAEDLFYQSSLASQNYNTNITEENVQNMNNALDKMSAFLLEQLDLLLMTKLGLLLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKHFLDKFNHEAEFLFYQSSLASMRYNTNITEENVQSMNF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDYFNHEAEDLFYQSQLASMNYNTNITEENVQNMKNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDYFNHEAEDMEYQSSLASMNMNTNITEENVQMMNNMGDQMKAFLKEQSTLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQASLASMNYNTNITEENIQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQMSLASMNYNTNITEENVNNMHNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYAHSLASMNYNTNITEENVQNMNNAFDKMSAFLKETLLLM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SPIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTNITEENVQNMNE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHIAEDLFYQSSLASMNYNTNITEENIQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAETFLDKFNSEAEDLFYQVSLASMNYNTNITEENVLAMMNNLLDKMNLFLDKFNHEAEDLFMMMLLPLANML<sep><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQYMNRA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'SHIEEQAKTFLDKFNHEAEDRFYQSSLASMNYNTNITEENVQFMNNAGKKMSAFLKEQSTLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLIYQSSLASMNYNTNITEENVQNMNLALD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'KTIEEQAKTFLDAFTHIAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMKARLLKMQSSLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNNAYDKMSGFLKETL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEARDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFGHHAEDLFYQSMLASMNYNTNITEENVQNMNNAGDKMSAFLKMQSTLAQM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMYQALDKMS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDMFYQSLLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFIHEAEDLFYQSSLASMNYNTNITEENKQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDHFNHEAEDLFYQSSLASMNYNTNITEENVQNMN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENLQNMLNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHIAQDLFYQSSLASTNYNTNITEENV<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SEIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNLEATDLFYQSSLTSMNYNTNITEENVQIMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQNMNNAGRKMSAFLKELSA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'PTLEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'LTIEEQAKTFLDKFNHEAEFLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAELLFKQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SGIEKQAKTFLDKFNHEAERLFYQSSLASMSYNTNITEENVQNMRNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLKKFNHEAEDLFYQSSLASMNYNTNITEENVNNMNYA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEFLFYQSSLASMNYNTNITEENVQNMHNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKQFLDKFNHEAEFLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'ATIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNQA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAELLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLNKFNHEAEYLFYQSSLASMNYNTNITEENVQNMLNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLDEQSTLAQMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLQKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSSFLLLMLGLNEIQNLTVKLQLTALM<sep><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSMASMNMNTNITEENVQNMNNAGDKMSAFLKYQSTLAQKL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFIHEAENLFYQSSLASMNRNTNITEENVKNMNNAGDKMSAFLKEVSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'MTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENMQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDFFMHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYISSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'VTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKQSSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEMAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVFNMNNAGDKMSAFLKEQSTLAQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNKEAEDMMYQSSLASMRYNTNITEENVHNMNNALDKMSAFLKEQSLLLLMYNLPNMLYL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEMQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDSFNHEAFDLFYQSSLAYMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLALMYPELLYAKMY<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'SKIEEQARTFLQKFNHLAEDLFYQSSLASMNYNTNITEENVQNMFNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQKSLASMNYNTNITEENVTNMNKA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFEQSMLASMNFNTNITEENVQNMNNAGDKMSAFLKEQSTLAQM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLTSMNYNTNITEENVQNMNNAGSKMSAFLKEQSTLAQM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHQAEILFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEANDLFYQSSLASMRYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNIEAEDLFYQSSLASMHYNTNITEENVQNMMNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKFFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIELQAKTFLRKFNHEAEHLFYQSSLAIMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSYLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAKDKMAAF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLKKFNHEASDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEYAKYFLDKFNHEAEDLFYQSSLASMYYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNLNTNITEENVQNMNRA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEYAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKEFLMKYNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDLFNMEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DTIEEQAKTFLDKFNHEAEDLFYQYSLASMEYNTNITEENVKNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFMHMAEDLFYTSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNIAGDKMSAFLKEQSTLAQM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENLQNYNNAKDKMSAFGKEQSTLAKMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDMFYQSSLASMNYNTNITEENVQNMN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKMFLDKFFHEATDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNDEAEDLFYQSSLASQNYNTNITEENVQNMNNALDKMSALLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLMKFNAEAEDLFYQSSLASMNYNTNITEENVDNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'DTIEEQAKTFLDMFNHEAEDLFYQSSLLSMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQSKTFLDKFNHEAEDLFYQSSLAFMNYNTNITEENVK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKRFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDRFNHEAADLFYQSLLASMNLNTNITEENVQNMMNAGDKMSAF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLYKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDVFNMEAEDLFYQSMLASMNANTNITEENVQNMNNAGDKMFAFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDVFNHEAENLAYQSSLASMNYNTNITEENVKKMNRA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLMMQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENNQNMNNATDKMSAFLK<cls><sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'RTIEEQAKTFLDKFNHEAEDLLEQSSIASMNYNTNITEENVQNMNNAGDKMSKFLKE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSGASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLAQMYPP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLYSMNYNTNITEENVQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEARDLFYQSSLASMNYNTNITEENVQPMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQALTFLDKFNHEAEDLFNQSSLASMNYNTNITEENMMNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHQAEDLFYQSSLASMNYNTNITEENVQNMNNARDKMSAFLKEQSY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQVSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHTAEDLMYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DDFEFQALTFLDKFNHEAEDMFYQSSLASMNYNTNITEENVQNMNNAQDKMSAFLRM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEATDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHQAEDRFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLQEIPLPMVKL<sep><pad><pad><pad><pad><pad>', 'GTIEEQALTFLDKFNNEAEDLFYQSSLASSNYNTNITEENVQSMNNAGDKMSAFLKYQSTLALMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STMEEQAKTFLDKFNHEAETLMYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STMEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMENA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFFQSSLAHMNYNTNITEENVQNMNHA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STFEEQAKTFLDKFNHEATDLFYQSSLASMNYNTNITEENVQNMNNALDKMSSF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFNQSSLASMNYNTNITEENVQKMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDFFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAELLFYESSLASMNYNTNITEENVQNMNDA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKAFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMRNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEIAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENTQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEGLFYQSSLASMNYNTNITEENVQKMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKAFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDIFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SMIEEQAKTFLDKFNHEAEDLFYQSSLAMMNYNTNITEENLQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYLSSEASMNYNTNITEENVINMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENISLMLEMYNLLEI<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEELAKTFLDKFNHEAEPLFYQSSLASMVYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAGDLFYQSSLASMMFNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNMEAEDLFYQSSLFSMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMENAMD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQRSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEERAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLAFMNYNTNITEENVQNMNNAHDKMSAFLKELDQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNDEAEDLMHQSSLASMNYNTNITEENVQFMNY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIETMAKTFLDMFNHEAEDLMYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEMAKTFLDKFNHEAEDLFYQSYLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEVAKTFLAKFNHEAEPLFYQSYLASMNYNTNITEENVQNMNNALDKMSSFGKERSALLKMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLFQQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAETLFYQSSLASMNYNTNITEENVQNMNNAKD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLRYQSYLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLLKFNHEAEDLSYQSKLASMNYNTNITEENVQNMNNALD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKAFLQKFNHEAEDLFYQLSLVSMNYNTNITEENVQNMNNAGGKMSAFLKEQSTLA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDNFNHEAEMLFYQSSLASMNYNTNITEENVQNMINAGDKMSAF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEHQAKTFLDKFNHEAEMLMYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQKGNNAGDKMAAFLKEQSTLAQMIPLQEISN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKLFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'NTIEEQAKTFLDKFNHEAEDLGYQSSLASMAYNTNITEENFQLMNNAGDKMSAF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAYDK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVRKMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDVFNHEAEDLSYQSKLASMNYNTNITEENVQRMNEA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLREGSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSRLTSMNYNTNITEENVQNKNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMEYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQHMKNAGDKMIAFLKEQST<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAVDLFYQSSLASMNYNTNITEENVQNMNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDVYYQSSLASMNYNTNITEENMQNMGL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDYFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFGKLSLLLAKMSLLAELALLT<sep><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAQTFLDKFRQEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVGNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'QTIEEQAKTFLDEFNHEAEDLFYQSSLASMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQARTFLDKFNHDAEDLFYLSSLLSMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'ETIEEQAKTFLDKFNHEAEDLGYQSSLAHMNYNTNITEENVQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SFIEEQAKTFLDKFNHEGEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLIE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLERMNYNTNITEENVNNMNNASDKMSAFL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEQQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAQDLFYQSSQASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNARDKMS<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKNFLDKFNHEAEDLFYQSSLESMNYNTNITEENV<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYMSSLASMNYNTNITEENVQMMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STLEHQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENSQNMNNAF<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEHLLYQSSLASANYNTNITEENNQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEIQAKTFLDKFNHEMEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQSTLAQMYPLIEMQNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNEEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SYIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEQLTMALMM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SMIEEQAKTFLDKFNHEAEDLFYQSSLAQMMYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DTIEEQALTFLDLFNHEAEDLFYQSSLASMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEIAKLFLDKFNHEAEDLFYQSSLSSMNYNTNITEENSQNMNNAGDKMSAFLKETKLTLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHGANDLFYQSSFASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SLIEEQAKTFLDRFNHEAEDLFRQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLMSMNMNTNITEENVQNMNNASDKMSAFLKEQSYMAQM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNDEAEDLFEQISLASMNYNTNITEENVQYMNL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVELMKKLADMVKE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQMMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAATFLDIFNHEAEDLFYQSSLASMNYNTNITEENVQNMQNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQSMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNMEAEILFYQLSLAQMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHDAEDLFYQSSLASMNYNTNITEENVQNMNNAG<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLALLKDLLLM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLVSMNYNTNITEENVQMMNAA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLEKFNHEAEDLFYQSSLASMNYNTNITEENVNDMNDK<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMFYNTNITEENVQNMNNE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DTIEEQAKTFLDNFNHEAEDLFYQSSLASMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFDLSSLASMTYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STDEEQAKTFLDKFNHEAEDLFYQSQIRSMNYNTNITEENVQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEFLFYQSSLASMNYNTNITEENVQNMNNAVDKMSAFLKEMST<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLVKFNHEAEDLFYQSILASMYYNTNITEENVQNMNNAGDKMSRFLKEQSTLAQM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEDAKTFLDKFNHEAADLFYQSSLASMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SDIEEQTKTFLDKFNHLAEDLFYQSSYASMNYNTNITEENVQNMSNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKFFLDKFNHEAEDLMYQSSLASMNKNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAMTFLDKFNHMAEDLFYQDSLASMNYNTNITEENVQNMINA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SEIEEQAATFLDKFNEEAEDLFYQSSLASMNYNTNITEENVMNMNNAGNKMSAFLKEQST<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNQ<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFNHEAEDLNYQSSLASMNYNTNITEENVLNMNNASMKMSNFLKEQSTLDQQM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'ITIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNLNNAMD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEVLGYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHMAEDLFYQSSLAVDNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'DTIEEQAKTFLDKFNDEAEDLFYQSSLASMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAETFLDKFNSEAEDLFYQVSLASMNYNTNITEENVLAMMNPLALMYPLQEIQNLTVKLQLQALP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDMFYESSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAQD<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNE<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVDDMNNLLDKMYLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAQDTFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIETDAKTMLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'STIEEQAKTFLDKFNHEAEDRFYQSSLASMVYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDTFNHEAEDLFYQLVLNIMNYNTNITEENVTNMN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STVEEQAKGFLDMFNHRAEDLFYQSSLASMNYNTNITEENVQNMNNMADKMSAFLKEIST<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTYLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLALLKELLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEMEDLFYQSSLASMNYNTNITEENVANMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNITEENVLAMNYLLDVDITVEITLALMYALLTVQELLTLMVKLQLNA<sep><pad>', 'STIEEQAKTFLDKFNDEAEDLFYQSMLASMNYNTNITEENVQNMNNALDKMTHMKILTGMLYHMSIGALMYFLGLMQ<sep><pad><pad><pad><pad><pad>', 'STIEENAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNAGDKMSAFLKEIST<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEFEDLFYQSMLASMNYNTNITEENVQNMNNAGDQMSA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'LTIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMLALLLPGDQMLLDLMLYLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFMDKFNHEAEDLFFQSSLASMNYNTNITEENVQNMDNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'SNIEEQAKTFLDKFRHEAEDLFYSSSLARMNYNTNITEENVQNMNNAPDKMSVFLKEQSTLAQMYP<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQTSLASMNYNTNITEENVQNMNN<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLSKFNHEAFDLFYQSSLAIMNYNTNITEENVQNMNNALDKMFLLSLLYDMYQDLYEL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLAYQSSLASMNYNTNITEENVNDMLDLASMSSGLAQMSLLAEMY<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEFQAKTFLDKFNHEAEMGFYQSSLASMHYNTNITEENVQNMNNA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQFSLASMNYNTNITEENVSAMLYGLDLINAVLGTGLTLYDIQELTL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENVQNMNNALDKMSAFLKEQSTLRQMYLLSHLLLM<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'STIEEQAKTFLDKFNHEAEHLFYQSSLQSDNYNTNITEENVQNMNNALDLMGALLKMPLLL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 
'ETIEEQAKTFLDKFNHEAEDLFYQSSLASMNYNTNITEENQNLNITLAGMNLAYMLPLALMLAL<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', 'RTIEEQAKTFLDKFNHEAEDDFYQSSLISMNYNTNITEENVQRMNRA<sep><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>']\n" ], [ "gen250k_df_dropped = gen250k_df_dropped_nocon.drop(indices_to_drop)\nprint(len(gen250k_df_dropped))", "256920\n" ], [ "gen250k_df_dropped", "_____no_output_____" ] ], [ [ "# Filter out sequences that are repeat or in training set", "_____no_output_____" ] ], [ [ "input_data_file = 'data/gen_train_data/top_half_ddG/train_ddG.pkl'\ninput_data_df = pd.read_pickle(input_data_file)", "_____no_output_____" ], [ "input_data_df.iloc[0]['MT_seq']", "_____no_output_____" ], [ "input_data_df.iloc[0]['MT_seq'] in input_data_df['MT_seq']", "_____no_output_____" ], [ "input_data_df.iloc[0]['MT_seq'] in input_data_df['MT_seq'].tolist()", "_____no_output_____" ], [ "train_seq_list = input_data_df['MT_seq'].tolist()", "_____no_output_____" ], [ "train_seq_list", "_____no_output_____" ], [ "len(train_seq_list)", "_____no_output_____" ] ], [ [ "Filter out those that are repeat", "_____no_output_____" ] ], [ [ "gen250k_df_dropped_norepeat = gen250k_df_dropped[gen250k_df_dropped['repeated_gen'] == False]", "_____no_output_____" ], [ "gen250k_df_dropped_norepeat", "_____no_output_____" ], [ "gen250k_df_dropped_norepeat.iloc[0]", "_____no_output_____" ] ], [ [ "Filter out those from the training set", "_____no_output_____" ] ], [ [ "gen250k_df_filtered = gen250k_df_dropped_norepeat[gen250k_df_dropped_norepeat['in_train_data_gen'] == False]\ngen250k_df_filtered", "_____no_output_____" ], [ "gen250k_df_filtered.iloc[0]", "_____no_output_____" ], [ "np.sum(gen250k_df_filtered['repeated_gen'])", "_____no_output_____" ], [ "np.sum(gen250k_df_filtered['in_train_data_gen'])", "_____no_output_____" ], [ "topK_saved = 10000\n\ngen250k_df_filtered = gen250k_df_filtered[:250000]\n\ngen250k_df_filtered = gen250k_df_filtered.sort_values(by='latent_head_pred', ascending=True)\n# gen250k_df_filtered = gen250k_df_filtered.sort_values(by='disc_pred', ascending=True)\ngen250k_df_filtered_topK = gen250k_df_filtered.iloc[:topK_saved]", "_____no_output_____" ], [ "filtered_LHscored_gen250k_top10K_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Klatentheadfiltered.tsv'\n# filtered_LHscored_gen250k_top10K_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqs260000_top10Klatentheadfiltered.tsv'", "_____no_output_____" ], [ "disc_latenthead_cor = spearmanr(gen250k_df_filtered_topK['disc_pred'], gen250k_df_filtered_topK['latent_head_pred'])\nprint(\"disc_latenthead_cor: \", disc_latenthead_cor)", "disc_latenthead_cor: -0.2401010836368954\n" ], [ "gen250k_df_filtered_sorted_disc = gen250k_df_filtered.sort_values(by='disc_pred', ascending=True)\ngen250k_df_filtered_sorted_disc_topK = gen250k_df_filtered_sorted_disc.iloc[:topK_saved]", "_____no_output_____" ], [ "filtered_Dscored_gen250k_top10K_tsv_name = 
'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Kdiscfiltered.tsv'\n# filtered_Dscored_gen250k_top10K_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqs260000_top10Kdiscfiltered.tsv'", "_____no_output_____" ], [ "all250K_disc_latenthead_cor = spearmanr(gen250k_df_filtered['disc_pred'], gen250k_df_filtered['latent_head_pred'])\nprint(\"all250K_disc_latenthead_cor: \", all250K_disc_latenthead_cor)", "all250K_disc_latenthead_cor: 0.9027374908587169\n" ] ], [ [ "# Save top 10K seqs for FoldX Evaluation", "_____no_output_____" ] ], [ [ "if before_foldx:\n gen250k_df_filtered_topK.to_csv(filtered_LHscored_gen250k_top10K_tsv_name, sep=\"\\t\", index=False)\n gen250k_df_filtered_sorted_disc_topK.to_csv(filtered_Dscored_gen250k_top10K_tsv_name, sep=\"\\t\", index=False)", "_____no_output_____" ], [ "len(gen250k_df_filtered_topK)", "_____no_output_____" ], [ "df_toplot = gen250k_df_filtered", "_____no_output_____" ] ], [ [ "# Analyze hamming distance", "_____no_output_____" ] ], [ [ "# Compute hamming distance between MT and WT\ndef hamming_dist(str1, str2):\n i = 0\n count = 0\n \n while(i < len(str1)):\n if(str1[i] != str2[i]):\n count += 1\n i += 1\n return count", "_____no_output_____" ], [ "hamming_dist_list = []", "_____no_output_____" ], [ "wt_seq = df_toplot.iloc[0]['WT_seq']", "_____no_output_____" ], [ "for index, row in df_toplot.iterrows():\n gen_seq = row['MT_seq']\n h_dist = hamming_dist(gen_seq, wt_seq)\n hamming_dist_list.append(h_dist)", "_____no_output_____" ], [ "print(\"Hamming distance stats\")\nprint(\"max: \", np.max(hamming_dist_list))\nprint(\"min: \", np.min(hamming_dist_list))\nprint(\"median: \", np.median(hamming_dist_list))\nprint(\"mean: \", np.mean(hamming_dist_list))\nprint(\"std: \", np.std(hamming_dist_list))", "Hamming distance stats\nmax: 47\nmin: 2\nmedian: 7.0\nmean: 7.156048\n" ] ], [ [ "hamming distance for generator training data", "_____no_output_____" ] ], [ [ "gen_train_data = 'data/gen_train_data/top_half_ddG/train_ddG.pkl'", "_____no_output_____" ], [ "gen_train_df = pd.read_pickle(gen_train_data)\nwt_seq = gen_train_df.iloc[0]['WT_seq']\n\ngen_train_hamming_dist_list = []\nfor index, row in gen_train_df.iterrows():\n train_seq = row['MT_seq']\n h_dist = hamming_dist(train_seq, wt_seq)\n gen_train_hamming_dist_list.append(h_dist)", "_____no_output_____" ], [ "plt.figure(figsize=(8,6))\nplt.hist(hamming_dist_list, density=True, label='generated', bins=[i for i in range(46)], alpha=0.4)\n# plt.xlabel(\"Hamming Distance\", size=14)\n# plt.ylabel(\"Count\", size=14)\n# plt.title(\"Hamming Distance from WT seq\")\n\nplt.hist(gen_train_hamming_dist_list, density=True, label='train_data', bins=[i for i in range(46)], alpha=0.4)\nplt.xlabel(\"Hamming Distance\", size=14)\nplt.ylabel(\"Density\", size=14)\nplt.title(\"Top 5% Generator\")\nplt.legend(loc='upper left')", "_____no_output_____" ] ], [ [ "# Sample for E[min] FoldX Computation", "_____no_output_____" ] ], [ [ "gen250k_df_filtered", "_____no_output_____" ], [ "# Get topk seqs\nnum_rounds = 100 # N\nround_pool_size = 10000\ntopk = 10 # K\n\nround_topk = {}\ncols_to_sort = ['latent_head_pred']\n# cols_to_sort = ['disc_pred', 'latent_head_pred']\n\nfoldx_df = None\nin_count = 0 \nfor col_to_sort in cols_to_sort:\n print(\"col_to_sort: 
\", col_to_sort)\n round_topk[col_to_sort] = {}\n for round_ind in range(num_rounds):\n sampled_rows = gen250k_df_filtered.sample(n=round_pool_size)\n sorted_sampled_rows = sampled_rows.sort_values(by=col_to_sort, ascending=True)[:topk]\n topk_rows = sorted_sampled_rows[:topk]\n round_topk[col_to_sort][round_ind] = topk_rows\n \n for round_ind in round_topk[col_to_sort]:\n round_topk_df = round_topk[col_to_sort][round_ind]\n if foldx_df is None:\n foldx_df = round_topk_df\n else:\n all_mt = foldx_df['MT_seq'].tolist()\n\n for row_ind, row in round_topk_df.iterrows():\n if row['MT_seq'] not in all_mt:\n foldx_df = foldx_df.append(row)\n else:\n in_count += 1\n \n print(\"len(foldx_df)+in_count: \", len(foldx_df)+in_count)", "col_to_sort: latent_head_pred\nlen(foldx_df)+in_count: 1000\n" ], [ "foldx_df", "_____no_output_____" ], [ "in_count", "_____no_output_____" ] ], [ [ "# save E[min] seqs to do FoldX¶", "_____no_output_____" ] ], [ [ "seqsforEmin_dict_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqsforEmin_df.pkl'\n# seqsforEmin_dict_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqsforEmin_df.pkl'\n\nif before_foldx:\n with open(seqsforEmin_dict_name, 'wb') as f:\n pickle.dump(round_topk, f)\n\n# with open(seqsforEmin_dict_name, 'rb') as f:\n# b = pickle.load(f)", "_____no_output_____" ], [ "seqsforEmin_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqsforEmin_foldx.tsv'\n# seqsforEmin_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqsforEmin_foldx.tsv'\n\nif before_foldx:\n foldx_df.to_csv(seqsforEmin_tsv_name, sep=\"\\t\", index=False)", "_____no_output_____" ] ], [ [ "# <<===== After Foldx Computation =====>>", "_____no_output_____" ] ], [ [ "# foldx_results_name = \"path_to_foldx_results\"\n# # foldx_results_name = \"foldx_sim_results/tophalf-basegen_top10K-Dscore_250Kgen/results_full.tsv\"\n# foldx_results_df = pd.read_table(foldx_results_name)\n\n", "_____no_output_____" ], [ "foldx_results_names = [\n \"foldx_sim_results/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Klatentheadfiltered/results_full.tsv\", \n# \"foldx_sim_results/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Kdiscfiltered/results_full.tsv\",\n ]\n# foldx_results_name = \"foldx_sim_results/tophalf-basegen_top10K-Dscore_250Kgen/results_full.tsv\"\nfoldx_results_df = None\nfor foldx_results_name in foldx_results_names:\n if foldx_results_df is None:\n foldx_results_df = pd.read_table(foldx_results_name)\n else:\n next_foldx_results_df = pd.read_table(foldx_results_name)\n foldx_results_df = foldx_results_df.append(next_foldx_results_df, ignore_index=True)\n\n", "_____no_output_____" ], [ "foldx_results_df", "_____no_output_____" ], [ "# Compute Emin from foldx values\nrows_to_patch = None\nEmin_results_dict = {}\nmean_disc_ddG_cor_results_dict = {}\nmean_latent_ddG_cor_results_dict = {}\n\nfor col_to_sort in round_topk:\n print(col_to_sort)\n 
current_score_round_topk = round_topk[col_to_sort]\n \n round_min_list = []\n round_disc_ddG_cor_list = []\n round_latent_ddG_cor_list = []\n \n for round_ind in current_score_round_topk:\n round_topk_df = current_score_round_topk[round_ind]\n \n round_ddG = []\n round_disc_pred = []\n round_latent_head_pred = []\n for row_ind, row in round_topk_df.iterrows():\n row_seq = row['MT_seq']\n matched_row = foldx_results_df.loc[foldx_results_df['MT_seq'] == row_seq]\n if len(matched_row) != 1 :\n# print(\"matched_row: \", matched_row)\n if len(matched_row) == 0 :\n if rows_to_patch is None:\n rows_to_patch = row\n else:\n rows_to_patch.append(row)\n# raise\n else:\n# round_ddG.append(matched_row.iloc[0]['ddG'].to_numpy()[0])\n# round_disc_pred.append(matched_row.iloc[0]['disc_pred'].to_numpy()[0])\n# round_latent_head_pred.append(matched_row.iloc[0]['latent_head_pred'].to_numpy()[0])\n round_ddG.append(matched_row.iloc[0]['ddG'])\n round_disc_pred.append(matched_row.iloc[0]['disc_pred'])\n round_latent_head_pred.append(matched_row.iloc[0]['latent_head_pred'])\n else: \n# print(\"matched_row['ddG'] to_numpy: \", matched_row['ddG'].to_numpy())\n# print(\"matched_row['ddG'] to_numpy 0: \", matched_row['ddG'].to_numpy()[0])\n# print(\"matched_row['ddG']: \", matched_row['ddG'])\n# print(\"matched_row['disc_pred']: \", matched_row['disc_pred'])\n round_ddG.append(matched_row['ddG'].to_numpy()[0]) # ! changed to ddG\n round_disc_pred.append(matched_row['disc_pred'].to_numpy()[0])\n round_latent_head_pred.append(matched_row['latent_head_pred'].to_numpy()[0])\n# round_ddG.append(matched_row['ddG']) # ! changed to ddG\n# round_disc_pred.append(matched_row['disc_pred'])\n# round_latent_head_pred.append(matched_row['latent_head_pred'])\n \n# print(\"len(round_disc_pred): \", len(round_disc_pred))\n# print(\"len(round_ddG): \", len(round_ddG))\n# print(\"round_disc_pred: \", round_disc_pred)\n# print(\"round_ddG: \", round_ddG)\n# print(\"round_ddG.to_numpy(): \", round_ddG.to_numpy())\n round_disc_ddG_cor = spearmanr(round_disc_pred, round_ddG)\n round_disc_ddG_cor_list.append(round_disc_ddG_cor)\n round_latent_ddG_cor = spearmanr(round_latent_head_pred, round_ddG)\n round_latent_ddG_cor_list.append(round_latent_ddG_cor)\n \n round_min = np.min(round_ddG)\n# print(\"round_ddG: \", round_ddG)\n# print(\"round_min: \", round_min)\n round_min_list.append(round_min)\n \n Emin = np.mean(round_min_list)\n# print(\"round_min_list: \", round_min_list)\n# print(\"Emin: \", Emin)\n mean_disc_ddG_cor = np.mean(round_disc_ddG_cor_list)\n mean_latent_ddG_cor = np.mean(round_latent_ddG_cor_list)\n \n Emin_results_dict[col_to_sort] = Emin\n mean_disc_ddG_cor_results_dict[col_to_sort] = mean_disc_ddG_cor\n mean_latent_ddG_cor_results_dict[col_to_sort] = mean_latent_ddG_cor", "latent_head_pred\n" ], [ "print(rows_to_patch)", "None\n" ] ], [ [ "# Save Emin Results", "_____no_output_____" ] ], [ [ "Emin_results_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqsforEmin_results.txt'\n# Emin_results_name = 'generated_seqs/baseline_gen/Emin_results/tophalf-basegen_seqsforEmin_results.txt'", "_____no_output_____" ], [ "with open(Emin_results_name, \"w\") as writer:\n writer.write(\"***** E[min] results *****\\n\")\n writer.write(\"seqsforEmin_dict_name: {}\\n\".format(seqsforEmin_dict_name))\n for key in sorted(Emin_results_dict.keys()):\n 
writer.write(\"sorted by %s = %s\\n\" % (key, str(Emin_results_dict[key])))\n \n \n writer.write(\"***** mean_disc_ddG_cor results *****\\n\")\n for key in sorted(mean_disc_ddG_cor_results_dict.keys()):\n writer.write(\"sorted by %s = %s\\n\" % (key, str(mean_disc_ddG_cor_results_dict[key])))\n \n writer.write(\"***** mean_latent_ddG_cor results *****\\n\")\n for key in sorted(mean_latent_ddG_cor_results_dict.keys()):\n writer.write(\"sorted by %s = %s\\n\" % (key, str(mean_latent_ddG_cor_results_dict[key])))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb382c9836f36be67299dc0aadb76e8d6a33b40e
170,168
ipynb
Jupyter Notebook
Calibration_MJW.ipynb
mattwilkin/StrainRecon
5c8e0cf6792e1d58db764f6734b5028adc133b8b
[ "BSD-3-Clause" ]
null
null
null
Calibration_MJW.ipynb
mattwilkin/StrainRecon
5c8e0cf6792e1d58db764f6734b5028adc133b8b
[ "BSD-3-Clause" ]
null
null
null
Calibration_MJW.ipynb
mattwilkin/StrainRecon
5c8e0cf6792e1d58db764f6734b5028adc133b8b
[ "BSD-3-Clause" ]
null
null
null
187.20352
64,164
0.881241
[ [ [ "from util.MicFileTool import MicFile\nimport util.Simulation as Gsim\nimport util.RotRep as Rot\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy import ndimage\nfrom scipy import optimize\nimport json\nimport os", "_____no_output_____" ] ], [ [ "# Extract the windows around the Bragg Peaks from a small grain", "_____no_output_____" ] ], [ [ "a=MicFile(\"AuxData/Ti7_WithHRM_2ndLoad_z1_.mic.LBFS\")\n", "-0.556875 -0.00974279 0 2 7 1 281.746 88.7135 78.1765 0.145161 0.885753 0.14876 0 1 1 1 0 0 0\n\n<class 'str'>\nsw is 0.72 \n\nshape of snp is (32855, 19)\n" ], [ "# crystal_file = {'material':'Ti7LP15',\n# 'A':list(2.95*np.array([1,0,0])),\n# 'B':list(2.95*np.array([np.cos(np.pi*2/3),np.sin(np.pi*2/3),0])),\n# 'C':list(4.7152*np.array([0,0,1])),\n# 'atom_pos':[[[1/3.0,2/3.0,1/4.0],22],[[2/3.0,1/3.0,3/4.0],22]]}\n# with open('Ti7LP15.json','w') as file:\n# json.dump(crystal_file,file)\n \n \n# det_file = {'name':'test',\n# 'psizeJ':0.001454,\n# 'psizeK':0.001454,\n# 'pnJ':2048,\n# 'pnK':2048,\n# 'J':1182.19,\n# 'K':2026.27,\n# 'trans_vec':[7.14503,0,0],\n# 'tilt':Rot.EulerZXZ2Mat(np.array([89.1588,87.5647,0.278594])/180.0*np.pi).tolist()}\n\n# with open('det_params.json','w') as file:\n# json.dump(det_file,file)", "_____no_output_____" ], [ "############ Experimental Parameters ############\ncrystal_file = 'Ti7LP15.json'\ndetector_file = 'det_params.json'\nenergy = 51.9957\netalimit=81/180.0*np.pi\nomegaL,omegaU = 0,180\n\n\n################ grain ID and hexomap output file #############\ngrain_ID = '55_2nd'\npath = 'Calibration_Files/grain_%s/'%grain_ID\ngrain_pos=np.array([-0.345937, 0.238698, 0])\no_mat=Rot.EulerZXZ2Mat(np.array([97.9141, 90.0041, 259.313])/180.0*np.pi)\n\n################### raw data #######################\nraw_data='/mnt/data/sparrow_data/NF-DataSets/2013-07_NF-Strain1/V/Suter_Jul13/NF/Ti7_WithHRM_Under2ndLoad/Ti7_WithHRM_Under2ndLoad__'\n\nif 'grain_%s'%grain_ID not in os.listdir('Calibration_Files/'):\n os.mkdir(path)\n os.mkdir(path+'Ps_bf/')\n os.mkdir(path+'Ps_filtered/')\n \n ", "_____no_output_____" ], [ "\n\n# create detector object\nDet1=Gsim.Detector(param_file=detector_file)\n\n#create crystal object\ncrystal_str=Gsim.CrystalStr(cryst_file=crystal_file)\ncrystal_str.getRecipVec()\ncrystal_str.getGs(13)\n\n\nPs,Gs,Info=Gsim.GetProjectedVertex(Det1,crystal_str,o_mat,etalimit,\n grain_pos,getPeaksInfo=True,\n omegaL=omegaL,omegaU=omegaU,energy=energy)\n\n\n", "_____no_output_____" ], [ "# extract window around the Bragg peak on an omega frame\ndef fetch(ii,pks,fn,offset=0,dx=100,dy=50,verbo=False,more=False,pnx=2048,pny=2048,omega_step=20):\n omegid=int((180-pks[ii,2])*omega_step)+offset\n if omegid<0:\n omegid+=3600\n if omegid>=3600:\n omegid-=3600\n I=plt.imread(fn+'{0:06d}.tif'.format(omegid))\n x1=int((pny-1-pks[ii,0])-dx)\n y1=int(pks[ii,1]-dy)\n if verbo:\n print('y=',pks[ii,1])\n print('x=',pks[ii,0])\n x1=max(0,x1)\n y1=max(0,y1)\n x2=x1+2*dx\n y2=y1+2*dy\n x2=min(x2,pnx)\n y2=min(y2,pny)\n if more:\n return I[y1:y2,x1:x2],(x1,x2,y1,y2,omegid)\n return I[y1:y2,x1:x2]\n", "_____no_output_____" ], [ "pks=Ps", "_____no_output_____" ], [ "dx = 150\ndy = 80\n\n\n\nfor ii in range(len(pks)):\n allpks=[]\n alllims=[]\n totoffset=0\n f,axis=plt.subplots(9,5)\n i=0\n j=0\n for offset in range(totoffset-22,totoffset+23):\n Im,limits=fetch(ii,pks,raw_data,offset,dx=dx,dy=dy,more=True)\n \n if i==9:\n j+=1\n i=0\n axis[i,j].imshow(Im,vmin=0,vmax=30)\n i+=1\n \n allpks.append(Im)\n alllims.append(limits)\n \n 
f.subplots_adjust(wspace=0,hspace=0)\n f.savefig(path+'Ps_bf/{0:d}.png'.format(ii),dpi=200,bbox_inches='tight')\n plt.close(f)\n allpks=np.array(allpks)\n alllims=np.array(alllims)\n np.save(path+'Ps_bf/Im{0:d}'.format(ii),allpks)\n np.save(path+'Ps_bf/limit{0:d}'.format(ii),alllims)", "_____no_output_____" ], [ "Nfile=len(pks)\n\n\nIm=[]\nflucThresh=4\n\nfor ii in range(Nfile):\n Im.append(np.load(path+'Ps_bf/Im{:d}.npy'.format(ii)))\n Im[ii]=Im[ii]-np.median(Im[ii],axis=0) #substract the median\n mask=Im[ii]>flucThresh\n Im[ii]=mask*Im[ii] #make all pixel that below the fluctuation to be zero \n\nfrom scipy.signal import convolve2d\nmykernel=np.array([[1,1,1],[1,-1,1],[1,1,1]])\n# remove hot spot (whose value is higher than the sum of 8 neighbors)\nfor ii in range(Nfile):\n for jj in range(45):\n mask=convolve2d(Im[ii][jj],mykernel,mode='same')>0\n Im[ii][jj]*=mask\n \n\nmykernel2=np.array([[1,2,1],[2,4,2],[1,2,1]])/16.0\n# Smoothing\nfor ii in range(Nfile):\n for jj in range(45):\n Im[ii][jj]=convolve2d(Im[ii][jj],mykernel2,mode='same')\n\nfor ii in range(Nfile):\n np.save(path+'Ps_filtered/Im{:d}'.format(ii),Im[ii].astype('uint16'))", "_____no_output_____" ], [ "idx = 2\na = np.load(path+f'Ps_filtered/Im{idx}.npy')\n\nplt.imshow(a[41,:,:])\nplt.show()\n\nmaxes = a.sum(axis=(1,2))\nnp.where(maxes==maxes.max())\n \n ", "_____no_output_____" ], [ "# manually write down the IDs of \"good\" peaks for calibration\n\ngoodidx=np.array([2,4,5,7,8,9,10,12,15,18,\n 19,24,25,26,27,28,29,30,\n 31,32,33,34,35,37,38,39,\n 40,41,43,46,49,50,51,52,\n 53,54,55,56,57,60,63,64,\n 65,66,67,69,70,71,72,73,\n 74,76,77,78,79,81,82,83,\n 84,85,86,87,88,89,90,91,\n 92,93,94,95,\n 36,44,45,62,68,80])", "_____no_output_____" ] ], [ [ "# Find the Center of Mass of each Bragg Peak", "_____no_output_____" ] ], [ [ "\n# choose one of following two methods to find the center of mass of each \"good\" Bragg peak.\n\n\n# can be used even the peak persists on several omega frames\ndef getCenter1(Im,Omeg1,Omeg2,lower=100,upper=2000):\n blobs, _ = ndimage.label(Im[Omeg1:Omeg2+1])\n _,size=np.unique(blobs,return_counts=True)\n blobID = np.where((size>lower)*(size<upper))[0]\n if len(blobID)==1:\n blobID=blobID[0]\n else:\n print('need manual pick')\n return\n co,cy,cx = ndimage.measurements.center_of_mass(Im[Omeg1:Omeg2+1],blobs,blobID)\n\n return co,cy,cx\n\n# can be only used on single omega frame\ndef getCenter2(Im,Omeg,dx=15,dy=7):\n Py,Px=ndimage.measurements.maximum_position(Im[Omeg])\n labels=np.zeros(Im[Omeg].shape,dtype=int)\n labels[Py-dy:Py+dy+1,Px-dx:Px+dx+1]=1\n cy,cx = ndimage.measurements.center_of_mass(Im[Omeg],labels=labels,index=1)\n return Py,Px,cy,cx", "_____no_output_____" ], [ "# %matplotlib notebook\ntmp=np.load(path+'Ps_filtered/Im88.npy')\nbOmeg=21\neOmeg=22\n\nco,cy,cx = getCenter1(tmp,bOmeg,eOmeg,lower=100,upper=1000)\nprint(\"({:.2f}, {:.2f}, {:.2f})\".format(co+bOmeg,cx,cy))\nplt.imshow(np.sum(tmp[bOmeg:eOmeg+1],axis=0))\nplt.scatter(cx,cy,c='k')\nplt.show()", "(21.28, 8.54, 144.77)\n" ], [ "# %matplotlib notebook\ntmp=np.load(path+'Ps_filtered/Im94.npy')\nOmeg=21\nPy,Px,cy,cx = getCenter2(tmp,Omeg,dx=15,dy=5)\nprint(\"({:.2f}, {:.2f}, {:.2f})\".format(Omeg,cx,cy))\nplt.imshow(tmp[Omeg])\nplt.scatter(cx,cy,c='k')\nplt.show()", "(21.00, 125.61, 92.52)\n" ] ], [ [ "# Write down all the center of mass for the good Peaks", "_____no_output_____" ] ], [ [ "# Write down the center of mass of each Bragg Peak, in the order of goodidx\ncenter_of_mass = np.array([(28.79, 161.38, 36.47),\n (20, 141.56, 
51.82),\n (20.00, 136.09, 52.67),\n (28.00, 152.87, 54.98),\n (31.00, 150.29, 56.65),\n (19.86, 141.39, 56.96),\n (21.00, 135.42, 55.26),\n (27.47, 147.29, 57.41),\n (19.33, 139.94, 59.89),\n (26.00, 152.22, 58.66),\n (26.00, 152.82, 60.23),\n (21.00, 140.49, 63.34),\n (22.00, 77.09, 63.10),\n (26.61, 153.50, 67.04),\n (22.00, 141.62, 66.31),\n (22.00, 22.54, 66.19),\n (26.00, 149.67, 67.16),\n (26.55, 146.24, 67.16),\n (31.40, 144.80, 69.36),\n (20.42, 142.44, 68.36),\n (21.48, 140.41, 68.70),\n (21.91, 138.86, 68.54),\n (22.00, 68.60, 68.84),\n (25.00, 147.25, 68.22),\n (25.00, 146.93, 68.76),\n (19.00, 153.14, 71.91),\n (25.00, 148.87, 68.36),\n (19.68, 146.27, 72.12),\n (21.53, 78.33, 70.98),\n (26.00, 152.47, 72.68),\n (22.00, 68.98, 70.95),\n (25.00, 148.49, 73.62),\n (25.00, 148.11, 74.45),\n (25.67, 150.38, 75.97),\n (23.00, 144.00, 72.79),\n (22.93, 141.36, 73.26),\n (23.22, 140.70, 74.06),\n (23.00, 60.33, 73.96),\n (24.74, 144.38, 74.19),\n (23.00, 142.72, 75.71),\n (24.28, 146.32, 74.34),\n (21.00, 74.10, 78.07),\n (24.00, 149.06, 74.65),\n (22.00, 115.61, 77.83),\n (22.00, 53.23, 76.99),\n (23.00, 13.56, 74.23),\n (24.00, 146.26, 77.74),\n (23.00, 148.08, 75.22),\n (23.00, 143.69, 75.62),\n (25.00, 145.35, 77.68),\n (24.00, 143.52, 77.78),\n (24.00, 142.50, 77.13),\n (23.00, 142.20, 77.28),\n (23.00, 142.62, 77.41),\n (23.00, 66.54, 77.65),\n (24.00, 141.78, 77.94),\n (24.00, 142.46, 78.02),\n (23.00, 146.43, 77.95),\n (23.00, 148.30, 78.59),\n (23.00, 145.70, 78.51),\n (23.00, 145.49, 78.34),\n (24.00, 146.21, 78.10),\n (24.00, 146.66, 78.50),\n (24.48, 148.78, 79.36),\n (22.00, 150.51, 75.82),\n (23.00, 148.37, 76.01),\n (22.00, 103.28, 76.59),\n (24.00, 147.77, 78.74),\n (23.00, 148.90, 76.78),\n (23.00, 145.66, 76.77),\n (26.00, 148.64, 67.44),\n (24.37, 152.98, 69.57),\n (25.00, 152.38, 71.98),\n (25.00, 144.92, 74.90),\n (25.00, 151.22, 76.78),\n (24.81, 142.24, 77.70)])\n# np.save('center_of_mass.npy',center_of_mass)", "_____no_output_____" ], [ "imgN = len(goodidx)\n\nLimH = np.empty((imgN,5),dtype=np.int32)\ngood_Gs = Gs[goodidx]\nwhichOmega = np.empty(imgN,dtype=np.int32)\n\n\n\nfor ii in range(imgN):\n limit=np.load(path+'Ps_bf/limit{0:d}.npy'.format(goodidx[ii]))\n \n LimH[ii,:]=limit[0]\n\n if Info[goodidx[ii]]['WhichOmega']=='b':\n whichOmega[ii] = 2\n else:\n whichOmega[ii] = 1", "_____no_output_____" ], [ "absCOM=np.empty(center_of_mass.shape)\nfor ii in range(len(absCOM)):\n absCOM[ii,1]=LimH[ii,2]+center_of_mass[ii,2]\n absCOM[ii,0]=2047-(LimH[ii,0]+center_of_mass[ii,1])\n absCOM[ii,2]=(LimH[ii,4]+center_of_mass[ii,0])\n if absCOM[ii,2] >=3600:\n absCOM[ii,2] -= 3600\n absCOM[ii,2] = 180-absCOM[ii,2]*0.05", "_____no_output_____" ] ], [ [ "# Start Calibration", "_____no_output_____" ] ], [ [ "def GetVertex(Det1,Gs,Omegas,orien,etalimit,grainpos,bIdx=True,omegaL=-90,omegaU=90,energy=50):\n Peaks=[]\n rotatedG=orien.dot(Gs.T).T\n for ii in range(len(rotatedG)):\n g1=rotatedG[ii]\n res=Gsim.frankie_angles_from_g(g1,verbo=False,energy=energy)\n\n if Omegas[ii]==1:\n omega=res['omega_a']/180.0*np.pi\n newgrainx=np.cos(omega)*grainpos[0]-np.sin(omega)*grainpos[1]\n newgrainy=np.cos(omega)*grainpos[1]+np.sin(omega)*grainpos[0]\n idx=Det1.IntersectionIdx(np.array([newgrainx,newgrainy,0]),res['2Theta'],res['eta'],bIdx\n ,checkBoundary=False\n )\n\n Peaks.append([idx[0],idx[1],res['omega_a']])\n\n \n else:\n omega=res['omega_b']/180.0*np.pi\n newgrainx=np.cos(omega)*grainpos[0]-np.sin(omega)*grainpos[1]\n newgrainy=np.cos(omega)*grainpos[1]+np.sin(omega)*grainpos[0]\n 
idx=Det1.IntersectionIdx(np.array([newgrainx,newgrainy,0]),res['2Theta'],-res['eta'],bIdx\n ,checkBoundary=False\n )\n Peaks.append([idx[0],idx[1],res['omega_b']])\n\n Peaks=np.array(Peaks)\n return Peaks", "_____no_output_____" ], [ "pars={'J':0,'K':0,'L':0,'tilt':(0,0,0),'x':0,'y':0,'distortion':((0,0,0),(0,0,0),(0,0,0))}\nDetDefault=Gsim.Detector(psizeJ=0.001454, psizeK=0.001454)\n\ndef SimP(x):\n DetDefault.Reset()\n pars['J']=x[0]+1182.19\n pars['K']=x[1]+2026.27\n pars['L']=x[2]*10**(-3)+7.14503\n pars['tilt']=Rot.EulerZXZ2Mat((x[3:6]+np.array([89.1588,87.5647,0.278594]))/180.0*np.pi)\n pars['x']=x[6]*10**(-3)-0.345937\n pars['y']=x[7]*10**(-3)+0.238698\n pars['distortion']=x[8:17].reshape((3,3))*10**(-3)+np.eye(3)\n DetDefault.Move(pars['J'],pars['K'],np.array([pars['L'],0,0]),pars['tilt'])\n pos=np.array([pars['x'], pars['y'], 0])\n Ps=GetVertex(DetDefault,\n good_Gs,\n whichOmega,\n pars['distortion'],\n etalimit,\n pos,\n bIdx=False,\n omegaL=0,omegaU=180,energy=energy) \n return Ps\n\ndef CostFunc(x):\n Ps = SimP(x)\n weights=np.array((1,5,100))\n tmp=np.sum(((Ps-absCOM)*weights)**2,axis=0)\n return np.sum(tmp)", "_____no_output_____" ], [ "res=optimize.minimize(CostFunc,np.zeros(17)\n ,bounds=[(-5,5),(-5,2),(-100,50)]+3*[(-0.3,3)]+2*[(-10,20)]+9*[(-5,10)]\n )\nprint(res)", " fun: 497.5866126237406\n hess_inv: <17x17 LbfgsInvHessProduct with dtype=float64>\n jac: array([-0.05084075, -0.04588401, -0.02344224, -0.58892055, -0.30861429,\n -1.61728622, 0.18092122, 0.05128413, -0.08977281, -0.06505729,\n 0.10445547, 0.03230412, 0.01377884, -0.08586198, 0.17719799,\n 0.17075763, -0.19430216])\n message: 'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH'\n nfev: 11394\n nit: 554\n njev: 633\n status: 0\n success: True\n x: array([-1.94590765e+00, -3.50800883e+00, -8.91343618e+01, 5.34655893e-01,\n 2.47994791e+00, -8.82475546e-02, 1.49199694e+01, -2.63017925e+00,\n 7.91054683e+00, -5.48537993e-01, 3.54198905e-01, 1.10120828e+00,\n 7.49117461e+00, 6.50794125e-01, -6.65259832e-01, -6.26602199e-01,\n 8.60900622e+00])\n" ] ], [ [ "# Check the position of simulated Bragg peaks after calibration", "_____no_output_____" ] ], [ [ "newPs=SimP(res['x'])", "_____no_output_____" ], [ "oldPs=SimP(np.zeros(17))", "_____no_output_____" ], [ "fig,ax=plt.subplots(1,2,figsize=(15,4))\nax[0].hist(oldPs[:,2]-absCOM[:,2],label='before calibration',bins=np.arange(-0.2,0.35,0.05),alpha=0.5)\nax[0].hist(newPs[:,2]-absCOM[:,2],label='after calibration',bins=np.arange(-0.2,0.35,0.05),alpha=0.5)\nax[0].legend(loc='upper right', fontsize=15)\nax[0].set_xlabel(r'$\\Omega$ difference $(^\\circ)$', fontsize=20)\nax[1].scatter(oldPs[:,0]-absCOM[:,0],oldPs[:,1]-absCOM[:,1],label='before calibration')\nax[1].scatter(newPs[:,0]-absCOM[:,0],newPs[:,1]-absCOM[:,1],label='after calibration',alpha=0.5)\nax[1].set_xlabel('horizontal difference (pixels)', fontsize=20)\nax[1].set_ylabel('vertical difference (pixels)', fontsize=20)\nax[1].legend(loc='upper right', fontsize=15)\nax[0].tick_params(axis='both', which='major', labelsize=20)\nax[1].tick_params(axis='both', which='major', labelsize=20)\nplt.savefig('calibration.png',dpi=100,bbox_inches='tight')\nplt.show()", "_____no_output_____" ] ], [ [ "# Check the position of simulated Bragg peaks after calibration: Image", "_____no_output_____" ] ], [ [ "x= 
res['x']\n\npars={}\npars['J']=x[0]+1182.19\npars['K']=x[1]+2026.27\npars['L']=x[2]*10**(-3)+7.14503\npars['tilt']=Rot.EulerZXZ2Mat((x[3:6]+np.array([89.1588,87.5647,0.278594]))/180.0*np.pi)\npars['x']=x[6]*10**(-3)-0.345937\npars['y']=x[7]*10**(-3)+0.238698\npars['distortion']=x[8:17].reshape((3,3))*10**(-3)+np.eye(3)\nDetDefault=Gsim.Detector(psizeJ=0.001454, psizeK=0.001454)\nDetDefault.Reset()\nDetDefault.Move(pars['J'],pars['K'],np.array([pars['L'],0,0]),pars['tilt'])\npos=np.array([pars['x'], pars['y'], 0])\n\nPs_new,Gs_new,Info_new=Gsim.GetProjectedVertex(DetDefault,crystal_str,\n pars['distortion'].dot(o_mat),\n etalimit,pos,getPeaksInfo=True,\n omegaL=omegaL,omegaU=omegaU,energy=energy)\nprint(pars)", "{'J': 1180.2440923474867, 'K': 2022.7619911749573, 'L': 7.0558956382061915, 'tilt': array([[ 5.35273770e-03, 7.61463259e-04, 9.99985384e-01],\n [ 9.99980156e-01, -3.32628765e-03, -5.35017683e-03],\n [ 3.32216507e-03, 9.99994178e-01, -7.79252894e-04]]), 'x': -0.331017030558736, 'y': 0.23606782074559335, 'distortion': array([[ 1.00791055e+00, -5.48537993e-04, 3.54198905e-04],\n [ 1.10120828e-03, 1.00749117e+00, 6.50794125e-04],\n [-6.65259832e-04, -6.26602199e-04, 1.00860901e+00]])}\n" ], [ "# %matplotlib notebook\n\n# ii is the image ID\nii=86\n\nf,axis=plt.subplots()\n\nomegid=int(round((180-Ps_new[ii,2])*20))\nif omegid<0:\n omegid+=3600\nif omegid>=3600:\n omegid-=3600\nI=plt.imread(raw_data+'{0:06d}.tif'.format(omegid))\n\n\naxis.imshow(I,vmax=40)\naxis.scatter(2047-Ps_new[ii,0],Ps_new[ii,1],c='r')\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb382ebba0f9ecdadc4622a3b59dd59170269e83
9,967
ipynb
Jupyter Notebook
tasks/task_08_CSG_mesh_tally/1_example_2d_mesh_tallies.ipynb
pshriwise/openmc_workshop
7bf1fe7483305c63c9442a9e80266b56675489fb
[ "MIT" ]
1
2021-08-23T22:49:31.000Z
2021-08-23T22:49:31.000Z
tasks/task_08_CSG_mesh_tally/1_example_2d_mesh_tallies.ipynb
pshriwise/openmc_workshop
7bf1fe7483305c63c9442a9e80266b56675489fb
[ "MIT" ]
null
null
null
tasks/task_08_CSG_mesh_tally/1_example_2d_mesh_tallies.ipynb
pshriwise/openmc_workshop
7bf1fe7483305c63c9442a9e80266b56675489fb
[ "MIT" ]
null
null
null
33.003311
243
0.609612
[ [ [ "# Part 1 - 2D mesh tallies\n\nSo far we have seen that neutron and photon interactions can be tallied on surfaces or cells, but what if we want to tally neutron behaviour throughout a geometry? (rather than the integrated neutron behaviour over a surface or cell).\n\nA mesh tally allows a visual inspection of the neutron behaviour spatially throughout the geometry.\n\nThe geometry is subdivided into many rectangles and the neutron behaviour is recorded (tallied) by the simulation in each of the small rectangles.\n\nThis can form a 2D slice of the neutron interactions throughout the model.\n\nThis notebook allows users to create a simple geometry from a few different materials and plot the results of a 2D regular mesh tally applied to the geometry.", "_____no_output_____" ] ], [ [ "from IPython.display import HTML\nHTML('<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/KYIsDjip1nQ\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen></iframe>')", "/home/jshim/anaconda3/lib/python3.8/site-packages/IPython/core/display.py:717: UserWarning: Consider using IPython.display.IFrame instead\n warnings.warn(\"Consider using IPython.display.IFrame instead\")\n" ] ], [ [ "This code block defines the model geometry, materials, neutron source and regular mesh tally. Run the cell to see the model geometry.\n\nObserve how a 2D mesh is achieved by creating a 3D mesh with a thickness of one mesh cell in one dimension.", "_____no_output_____" ] ], [ [ "import openmc\nimport matplotlib.pyplot as plt\n\n# MATERIALS\n\n# creates two materials, one is a neutron multiplier (lead) and the other a tritium breeder (lithium)\nmats = openmc.Materials()\n \nbreeder_material = openmc.Material(name=\"breeder\") \nbreeder_material.add_element('Li', 1, percent_type='ao')\nbreeder_material.set_density('g/cm3', 2.0)\n\nmultiplier_material = openmc.Material(name=\"multiplier\") \nmultiplier_material.add_element('Pb', 1, percent_type='ao')\nmultiplier_material.set_density('g/cm3', 11.0)\n\nmats = [breeder_material, multiplier_material]\n\n\n\n# GEOMETRY\n\n# surfaces\nsph1 = openmc.Sphere(r=50)\nsph2 = openmc.Sphere(r=90, boundary_type='vacuum')\nplane1 = openmc.XPlane(20)\n\n# cells\nbreeder_cell = openmc.Cell(region=+sph1 & -sph2 & -plane1)\nbreeder_cell.fill = breeder_material\n\nmultiplier_cell = openmc.Cell(region=+sph1 & -sph2 & +plane1)\nmultiplier_cell.fill = multiplier_material\n\ninner_vacuum_cell = openmc.Cell(region=-sph1)\n\nuniverse = openmc.Universe(cells=[inner_vacuum_cell, breeder_cell, multiplier_cell])\n\ngeom = openmc.Geometry(universe)\n\n\n# SETTINGS\n\n# Instantiate a Settings object\nsett = openmc.Settings()\nsett.batches = 100\nsett.inactive = 0\nsett.particles = 50\nsett.particle = \"neutron\"\nsett.run_mode = 'fixed source'\n\n# creates a 14MeV point source\nsource = openmc.Source()\nsource.space = openmc.stats.Point((0, 0, 0))\nsource.angle = openmc.stats.Isotropic()\nsource.energy = openmc.stats.Discrete([14e6], [1])\nsett.source = source\n\n\n# Create mesh which will be used for tally\nmesh = openmc.RegularMesh()\nmesh_height = 100 # number of cells in the X and Z dimensions\nmesh_width = mesh_height\nmesh.dimension = [mesh_width, 1, mesh_height] # only 1 cell in the Y dimension\nmesh.lower_left = [-200, -200, -200] # physical limits (corners) of the mesh\nmesh.upper_right = [200, 200, 200]\n\n\ntallies = openmc.Tallies()\n# Create mesh filter for tally\nmesh_filter = 
openmc.MeshFilter(mesh)\nmesh_tally = openmc.Tally(name='tallies_on_mesh')\nmesh_tally.filters = [mesh_filter]\nmesh_tally.scores = ['flux', 'absorption', '(n,2n)'] # change flux to absorption\ntallies.append(mesh_tally)\n\n# combines the geometry, materials, settings and tallies to create a neutronics model\nmodel = openmc.model.Model(geom, mats, sett, tallies)\n\nplt.show(universe.plot(width=(180, 180), basis='xz'))", "_____no_output_____" ] ], [ [ "The next code block performs the simulation which tallies neutron flux on the mesh, and loads the results for inspection.", "_____no_output_____" ] ], [ [ "# deletes old files\n!rm summary.h5\n!rm statepoint.*.h5\n\n# runs the simulation\noutput_filename = model.run()\n\n# open the results file\nresults = openmc.StatePoint(output_filename)", "_____no_output_____" ] ], [ [ "This code block filters the results to show the neutron flux recorded by the mesh tally.", "_____no_output_____" ] ], [ [ "# access the flux tally\nmy_tally = results.get_tally(scores=['flux'])\nmy_slice = my_tally.get_slice(scores=['flux'])\nmy_slice.mean.shape = (mesh_width, mesh_height)\n\nfig = plt.subplot()\n\nplt.show(fig.imshow(my_slice.mean))\n\n# notice that neutrons are produced and emitted isotropically from a point source.\n# There is a slight increase in flux within the neutron multiplier.", "_____no_output_____" ] ], [ [ "This code block filters the results to show the neutron absorption recorded by the mesh tally.", "_____no_output_____" ] ], [ [ "# access the absorption tally\nmy_tally = results.get_tally(scores=['absorption'])\nmy_slice = my_tally.get_slice(scores=['absorption'])\nmy_slice.mean.shape = (mesh_width, mesh_height)\n\nfig = plt.subplot()\n\nplt.show(fig.imshow(my_slice.mean))\n\n# notice that neutrons are being absorpted on the left hand side of the model", "_____no_output_____" ] ], [ [ "This code block filters the results to show the neutron multiplication recorded by the mesh tally.", "_____no_output_____" ] ], [ [ "# access the neutron multiplication tally\nmy_tally = results.get_tally(scores=['(n,2n)'])\nmy_slice = my_tally.get_slice(scores=['(n,2n)'])\nmy_slice.mean.shape = (mesh_width, mesh_height)\n\nfig = plt.subplot()\n\nplt.show(fig.imshow(my_slice.mean))\n\n# notice that neutrons are being muliplied on the right hand side of the model", "_____no_output_____" ], [ "# Bonus information\n# The 2D mesh tally is currently recording all interactions in the 3rd dimention (z).\n# The diagrams are showing the xy plane and all interactions in the z direction.\n# However one can also change the mesh to take a central slice of with a 1cm thickness in the following way.\n# The tally takes a little longer to converge as less neutrons are interacting in the tally region.\n\n# Create mesh which will be used for tally\nmesh = openmc.RegularMesh()\nmesh_height = 100\nmesh_width = mesh_height\nmesh.dimension = [mesh_width, 1, mesh_height] # only one entry in the Y direction\nmesh.lower_left = [-200, -0.5, -200] # Y thickness is now smaller\nmesh.upper_right = [200, 0.5, 200] # Y thickness is now smaller", "_____no_output_____" ] ], [ [ "**Learning Outcomes for Part 1:**\n\n- Mesh tallies can be used to visualise neutron interactions spatially throughout geometry.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb382f1cafa098fdcbc57e6a5f628365243aadd6
200,722
ipynb
Jupyter Notebook
notebooks/usecase_optic_nerve_heads_analysis_in_kendall_shape_space.ipynb
Florent-Michel/geomstats
07fd2d0b77e46c10dcf623e906032271fbe06977
[ "MIT" ]
null
null
null
notebooks/usecase_optic_nerve_heads_analysis_in_kendall_shape_space.ipynb
Florent-Michel/geomstats
07fd2d0b77e46c10dcf623e906032271fbe06977
[ "MIT" ]
null
null
null
notebooks/usecase_optic_nerve_heads_analysis_in_kendall_shape_space.ipynb
Florent-Michel/geomstats
07fd2d0b77e46c10dcf623e906032271fbe06977
[ "MIT" ]
null
null
null
317.096367
77,088
0.934392
[ [ [ "# Tutorial: Computing with shapes of landmarks in Kendall shape spaces", "_____no_output_____" ], [ "In this tutorial, we show how to use geomstats to perform a shape data analysis. Specifically, we aim to study the difference between two groups of data:\n- optical nerve heads that correspond to normal eyes,\n- optical nerve heads that correspond to glaucoma eyes.\n\nWe wish to investigate if there is a difference in these two groups, and if this difference is a difference in sizes of the optical nerve heads, or a difference in shapes (where the size has been quotiented out).", "_____no_output_____" ], [ "<img src=\"figures/optic_nerves.png\" />", "_____no_output_____" ], [ "## Set up", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport warnings\n\nsys.path.append(os.path.dirname(os.getcwd()))\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "%matplotlib inline\nimport matplotlib.colors as colors\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\nimport geomstats.backend as gs\nimport geomstats.datasets.utils as data_utils\nfrom geomstats.geometry.pre_shape import PreShapeSpace, KendallShapeMetric", "INFO: Using numpy backend\n" ] ], [ [ "We import the dataset of the optical nerve heads from 22 images of Rhesus monkeys’ eyes (11 monkeys), available in [[PE2015]](#References). \n\nFor each monkey, an experimental glaucoma was introduced in one eye, while the second\neye was kept as control. One seeks to observe differences between the glaucoma and the\ncontrol eyes. On each image, 5 anatomical landmarks were recorded: \n- 1st landmark: superior aspect of the retina, \n- 2nd landmark: side of the retina closest to the temporal bone of the skull,\n- 3rd landmark: nose side of the retina, \n- 4th landmark: inferior point,\n- 5th landmark: optical nerve head deepest point.\n\nLabel 0 refers to a normal eye, and Label 1 to an eye with glaucoma.", "_____no_output_____" ] ], [ [ "nerves, labels, monkeys = data_utils.load_optical_nerves()\nprint(nerves.shape)\nprint(labels)\nprint(monkeys)", "(22, 5, 3)\n[0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]\n[ 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10]\n" ] ], [ [ "We extract the landmarks' sets corresponding to the two eyes' nerves of the first monkey, with their corresponding labels.", "_____no_output_____" ] ], [ [ "two_nerves = nerves[monkeys==0]\nprint(two_nerves.shape)\n\ntwo_labels = labels[monkeys==0]\nprint(two_labels)", "(2, 5, 3)\n[0 1]\n" ], [ "label_to_str = {0: 'Normal nerve', 1: 'Glaucoma nerve'}\nlabel_to_color = {0: (102/255, 178/255, 255/255, 1.), 1: (255/255, 178/255, 102/255, 1.)}\n\nfig = plt.figure()\nax = Axes3D(fig)\nax.set_xlim((2000, 4000))\nax.set_ylim((1000, 5000))\nax.set_zlim((-600, 200))\n\nfor nerve, label in zip(two_nerves, two_labels):\n x = nerve[:, 0]\n y = nerve[:, 1]\n z = nerve[:, 2]\n\n verts = [list(zip(x,y,z))]\n \n poly = Poly3DCollection(verts, alpha=0.5)\n color = label_to_color[int(label)]\n poly.set_color(colors.rgb2hex(color))\n poly.set_edgecolor('k')\n ax.add_collection3d(poly)\n\npatch_0 = mpatches.Patch(color=label_to_color[0], label=label_to_str[0], alpha=0.5)\npatch_1 = mpatches.Patch(color=label_to_color[1], label=label_to_str[1], alpha=0.5)\nplt.legend(handles=[patch_0, patch_1], prop={'size': 14})\nplt.show()", "_____no_output_____" ] ], [ [ "We first try to detect if there are two groups of optical nerve heads, based on the 3D coordinates of 
the landmarks sets.", "_____no_output_____" ] ], [ [ "from geomstats.geometry.euclidean import EuclideanMetric\n\nnerves_vec = nerves.reshape(22, -1)\n\neucl_metric = EuclideanMetric(nerves_vec.shape[-1])\n\neucl_dist = eucl_metric.dist_pairwise(nerves_vec)\n\nplt.figure()\nplt.imshow(eucl_dist);", "_____no_output_____" ] ], [ [ "We do not see any two clear clusters.", "_____no_output_____" ], [ "We want to investigate if there is a difference between these two groups of shapes - normal nerve versus glaucoma nerve - or if the main difference is merely relative to the global size of the landmarks' sets.", "_____no_output_____" ] ], [ [ "m_ambient = 3\nk_landmarks = 5\n\npreshape = PreShapeSpace(m_ambient=m_ambient, k_landmarks=k_landmarks)\nmatrices_metric = preshape.embedding_metric\n\nsizes = matrices_metric.norm(preshape.center(nerves))\n\nplt.figure(figsize=(6, 4))\nfor label, col in label_to_color.items():\n label_sizes = sizes[labels==label]\n plt.hist(label_sizes, color=col, label=label_to_str[label], alpha=0.5, bins=10)\n plt.axvline(gs.mean(label_sizes), color=col)\nplt.legend(fontsize=14)\nplt.title('Sizes of optical nerves', fontsize=14);", "_____no_output_____" ] ], [ [ "The vertical lines represent the sample mean of each group (normal/glaucoma).", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(6, 4))\nplt.hist(sizes[labels==1] - sizes[labels==0], alpha=0.5)\nplt.axvline(0, color='black')\nplt.title('Difference in size of optical nerve between glaucoma and normal eyes', fontsize=14);", "_____no_output_____" ] ], [ [ "We perform a hypothesis test, testing if the two samples of sizes have the same average. We use the t-test for related samples, since the sample elements are paired: two eyes for each monkey.", "_____no_output_____" ] ], [ [ "from scipy import stats\n\nsignif_level = 0.05\n\ntstat, pvalue = stats.ttest_rel(sizes[labels==0], sizes[labels==1])\nprint(pvalue < signif_level)", "True\n" ] ], [ [ "There is a significative difference, in optical nerve eyes' sizes, between the glaucoma and normal eye.", "_____no_output_____" ], [ "We want to investigate if there is a difference in shapes, where the size component has been quotiented out. \n\nWe project the data to the Kendall pre-shape space, which:\n- centers the nerve landmark sets so that they share the same barycenter,\n- normalizes the sizes of the landmarks' sets to 1.", "_____no_output_____" ] ], [ [ "nerves_preshape = preshape.projection(nerves)\nprint(nerves_preshape.shape)\nprint(preshape.belongs(nerves_preshape))\nprint(gs.isclose(matrices_metric.norm(nerves_preshape), 1.))", "(22, 5, 3)\n[ True True True True True True True True True True True True\n True True True True True True True True True True]\n[ True True True True True True True True True True True True\n True True True True True True True True True True]\n" ] ], [ [ "In order to quotient out the 3D orientation component, we align the landmark sets in the preshape space. ", "_____no_output_____" ] ], [ [ "base_point = nerves_preshape[0]\n\nnerves_shape = preshape.align(point=nerves_preshape, base_point=base_point)", "_____no_output_____" ] ], [ [ "The Kendall metric is a Riemannian metric that takes this alignment into account. 
It corresponds to the metric of the Kendall shape space, which is the manifold defined as the preshape space quotient by the action of the rotation in m_ambient dimensions, here in 3 dimensions.", "_____no_output_____" ] ], [ [ "kendall_metric = KendallShapeMetric(m_ambient=m_ambient, k_landmarks=k_landmarks)", "_____no_output_____" ] ], [ [ "We can use it to perform a tangent PCA in the Kendall shape space, and determine if we see a difference in the shapes of the optical nerves.", "_____no_output_____" ] ], [ [ "from geomstats.learning.pca import TangentPCA\n\ntpca = TangentPCA(kendall_metric)\ntpca.fit(nerves_shape)\n\nplt.plot(\n tpca.explained_variance_ratio_)\nplt.xlabel(\"Number of principal tangent components\", size=14)\nplt.ylabel(\"Fraction of explained variance\", size=14);", "_____no_output_____" ] ], [ [ "Two principal components already describe around 60% of the variance. We plot the data projected in the tangent space defined by these two principal components.", "_____no_output_____" ] ], [ [ "X = tpca.transform(nerves_shape)\n\nplt.figure(figsize=(12, 12))\n\nfor label, col in label_to_color.items():\n mask = labels == label\n plt.scatter(X[mask, 0], X[mask, 1], color=col, s=100, label=label_to_str[label]);\nplt.legend(fontsize=14);\n \nfor label, x, y in zip(monkeys, X[:, 0], X[:, 1]):\n plt.annotate(\n label,\n xy=(x, y), xytext=(-20, 20),\n textcoords='offset points', ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.5),\n arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))\n\nplt.show()", "_____no_output_____" ] ], [ [ "The indices represent the monkeys' indices. \n\nIn contrast to the above study focusing on the optical nerves' sizes, visual inspection does not reveal any clusters between the glaucoma and normal optical nerves' shapes. We also do not see any obvious pattern between the two optical nerves of the same monkey.\n\nThis shows that the difference between the optical nerve heads mainly resides in the over sizes of the optical nerves.", "_____no_output_____" ] ], [ [ "dist_pairwise = kendall_metric.dist_pairwise(nerves_shape)\nprint(dist_pairwise .shape)", "(22, 22)\n" ], [ "plt.figure()\nplt.imshow(dist_pairwise);", "_____no_output_____" ] ], [ [ "We try a agglomerative hierarchical clustering to investigate if we can cluster in the Kendall shape space.", "_____no_output_____" ] ], [ [ "from geomstats.learning.agglomerative_hierarchical_clustering import AgglomerativeHierarchicalClustering\n\nclustering = AgglomerativeHierarchicalClustering(distance='precomputed', n_clusters=2)\nclustering.fit(dist_pairwise)\npredicted_labels = clustering.labels_\n\nprint('True labels:', labels)\nprint('Predicted labels:', predicted_labels)\n\naccuracy = gs.sum(labels==predicted_labels) / len(labels)\nprint(f'Accuracy: {accuracy:.2f}')", "True labels: [0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]\nPredicted labels: [0 1 1 1 1 1 1 0 0 1 1 1 1 1 0 0 1 1 1 1 1 1]\nAccuracy: 0.55\n" ] ], [ [ "The accuracy is barely above the accuracy of a random classifier, that would assign 0 or 1 with probably 0.5 to each of the shapes. This confirms that the difference that exists between the two groups is mostly due to the landmarks' set size and not their shapes.", "_____no_output_____" ], [ "## References\n\n.. [PE2015] Patrangenaru and L. Ellingson. Nonparametric Statistics on Manifolds and Their Applications to Object Data, 2015. https://doi.org/10.1201/b18969", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb38324f121a30cd3e824c3970203cac335b19a5
126,461
ipynb
Jupyter Notebook
MNIST_PCA.ipynb
c-quilo/ROMS-tutorial
7cbfdaa349e54ef012c83f23f1af646b6f0a12f3
[ "MIT" ]
2
2021-03-23T12:44:07.000Z
2022-02-05T12:48:07.000Z
MNIST_PCA.ipynb
c-quilo/ROMS-tutorial
7cbfdaa349e54ef012c83f23f1af646b6f0a12f3
[ "MIT" ]
null
null
null
MNIST_PCA.ipynb
c-quilo/ROMS-tutorial
7cbfdaa349e54ef012c83f23f1af646b6f0a12f3
[ "MIT" ]
2
2021-03-11T12:18:38.000Z
2021-03-23T12:44:47.000Z
409.2589
58,134
0.894125
[ [ [ "<a href=\"https://colab.research.google.com/github/DL-WG/ROMS-tutorial/blob/main/MNIST_PCA.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "MNIST with PCA: example\nWednesday 3rd March 2021", "_____no_output_____" ] ], [ [ "from keras.datasets.mnist import load_data\nimport matplotlib.pyplot as plt\n!pip install eofs\nimport eofs\nfrom eofs.standard import Eof\nimport numpy as np", "Collecting eofs\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/a6/7a/90efd4238918d97c8f5870e514caa684b942870c14a65952ad7c88a9bcc5/eofs-1.4.0.tar.gz (1.0MB)\n\r\u001b[K |▎ | 10kB 15.9MB/s eta 0:00:01\r\u001b[K |▋ | 20kB 13.6MB/s eta 0:00:01\r\u001b[K |█ | 30kB 8.5MB/s eta 0:00:01\r\u001b[K |█▎ | 40kB 6.1MB/s eta 0:00:01\r\u001b[K |█▋ | 51kB 4.2MB/s eta 0:00:01\r\u001b[K |██ | 61kB 4.6MB/s eta 0:00:01\r\u001b[K |██▎ | 71kB 5.0MB/s eta 0:00:01\r\u001b[K |██▌ | 81kB 5.0MB/s eta 0:00:01\r\u001b[K |██▉ | 92kB 5.1MB/s eta 0:00:01\r\u001b[K |███▏ | 102kB 5.1MB/s eta 0:00:01\r\u001b[K |███▌ | 112kB 5.1MB/s eta 0:00:01\r\u001b[K |███▉ | 122kB 5.1MB/s eta 0:00:01\r\u001b[K |████▏ | 133kB 5.1MB/s eta 0:00:01\r\u001b[K |████▌ | 143kB 5.1MB/s eta 0:00:01\r\u001b[K |████▊ | 153kB 5.1MB/s eta 0:00:01\r\u001b[K |█████ | 163kB 5.1MB/s eta 0:00:01\r\u001b[K |█████▍ | 174kB 5.1MB/s eta 0:00:01\r\u001b[K |█████▊ | 184kB 5.1MB/s eta 0:00:01\r\u001b[K |██████ | 194kB 5.1MB/s eta 0:00:01\r\u001b[K |██████▍ | 204kB 5.1MB/s eta 0:00:01\r\u001b[K |██████▊ | 215kB 5.1MB/s eta 0:00:01\r\u001b[K |███████ | 225kB 5.1MB/s eta 0:00:01\r\u001b[K |███████▎ | 235kB 5.1MB/s eta 0:00:01\r\u001b[K |███████▋ | 245kB 5.1MB/s eta 0:00:01\r\u001b[K |████████ | 256kB 5.1MB/s eta 0:00:01\r\u001b[K |████████▎ | 266kB 5.1MB/s eta 0:00:01\r\u001b[K |████████▋ | 276kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████ | 286kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████▎ | 296kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████▌ | 307kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████▉ | 317kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████▏ | 327kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████▌ | 337kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████▉ | 348kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████▏ | 358kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████▌ | 368kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████▊ | 378kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████ | 389kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████▍ | 399kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████▊ | 409kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████ | 419kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████▍ | 430kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████▊ | 440kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████ | 450kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████▎ | 460kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████▋ | 471kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████ | 481kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████▎ | 491kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████▋ | 501kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████ | 512kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████▎ | 522kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████▌ | 532kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████▉ | 542kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████▏ | 552kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████▌ | 563kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████▉ | 573kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████▏ | 583kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████▌ | 593kB 5.1MB/s eta 
0:00:01\r\u001b[K |██████████████████▊ | 604kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████ | 614kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████▍ | 624kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████▊ | 634kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████████ | 645kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████████▍ | 655kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████████▊ | 665kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████████ | 675kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████████▎ | 686kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████████▋ | 696kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 706kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████████▎ | 716kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████████▋ | 727kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████████ | 737kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████████▎ | 747kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████████▌ | 757kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████████▉ | 768kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████████████▏ | 778kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████████████▌ | 788kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████████████▉ | 798kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████████████▏ | 808kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████████████▌ | 819kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████████████▊ | 829kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████████████ | 839kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████████████▍ | 849kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████████████▊ | 860kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████████████ | 870kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████████████▍ | 880kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████████████▊ | 890kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████████████████ | 901kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████████████████▎ | 911kB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████████████████▋ | 921kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████████████████ | 931kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▎ | 942kB 5.1MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▋ | 952kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 962kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▏ | 972kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▌ | 983kB 5.1MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▉ | 993kB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 1.0MB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▌| 1.0MB 5.1MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▉| 1.0MB 5.1MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 1.0MB 5.1MB/s \n\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from eofs) (1.19.5)\nBuilding wheels for collected packages: eofs\n Building wheel for eofs (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for eofs: filename=eofs-1.4.0-cp37-none-any.whl size=1100357 sha256=6dc5a7bde1bc11bac9c7ecf88919e14384db712c9b51f3fe71f58ffbcd4f4dd8\n Stored in directory: /root/.cache/pip/wheels/47/f8/64/72dfdafae89c380846799c1396957cddd04decf459242ea988\nSuccessfully built eofs\nInstalling collected packages: eofs\nSuccessfully installed eofs-1.4.0\n" ], [ "#Load data\n(Xtrain, ytrain), (Xtest, ytest) = load_data()\n# summarize the shape of the dataset\nprint('Train', Xtrain.shape, ytrain.shape)\nprint('Test', Xtest.shape, ytest.shape)", "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n11493376/11490434 [==============================] - 0s 0us/step\nTrain (60000, 28, 28) (60000,)\nTest (10000, 28, 28) (10000,)\n" ], [ "for i in range(36):\n plt.subplot(6,6,i+1)\n plt.imshow(Xtrain[np.random.random_integers(Xtrain.shape[0])], cmap='gray_r')\n plt.axis('off')\n", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:3: DeprecationWarning: This function is deprecated. Please call randint(1, 60000 + 1) instead\n This is separate from the ipykernel package so we can avoid doing imports until\n" ], [ "#Reshape data\nmodelData = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1]*Xtrain.shape[2]))\n", "_____no_output_____" ], [ "#Standardise data\nmeanData = np.mean(modelData, axis = 0)\nstdData = np.std(modelData)\nmodelDataScaled = (modelData - meanData)/stdData\n", "_____no_output_____" ], [ "#PCA analysis\n\nsolver = Eof(modelDataScaled)\n\nvarianceCumulative = np.cumsum(solver.varianceFraction())\neigenvalues = solver.eigenvalues()\npcs = solver.pcs()\neof = solver.eofs()", "_____no_output_____" ], [ "#set desired explained variance\ntargetVariance = 0.864\n\nminPCs = np.min(np.where(varianceCumulative>targetVariance))\nplt.plot(varianceCumulative)\nplt.axvline(minPCs, color='r')\nplt.axhline(targetVariance, color='r')\nprint('Number of PCs for ' + str(targetVariance*100) + '% variance is: ' + str(minPCs))", "Number of PCs for 86.4% variance is: 64\n" ], [ "pcsTruncated = pcs[:, :minPCs]\neofsTruncated= eof[:minPCs, :]\nreconTruncatedPCs = np.matmul(pcsTruncated, eofsTruncated)*stdData + meanData", "_____no_output_____" ], [ "#Reshape to full-space\nreconModelData = np.reshape(reconTruncatedPCs, (Xtrain.shape[0], Xtrain.shape[1], Xtrain.shape[2]))", "_____no_output_____" ], [ "#Reconstruction comparison\nrandomIndex = np.random.randint(0, Xtrain.shape[0], 36)\nprint(randomIndex)\nfor i in range(18):\n plt.subplot(6, 6, 2*i + 1)\n plt.imshow(Xtrain[randomIndex[i], :, :], cmap = 'gray_r')\n plt.axis('off')\n plt.subplot(6, 6, 2*i + 2)\n plt.imshow(reconModelData[randomIndex[i], :, :], cmap = 'gray_r')\n plt.axis('off')", "[ 7218 33559 47942 44522 47842 53467 1957 45710 30669 22253 33570 32165\n 59142 49781 33566 22630 28968 11795 16214 8775 41997 30194 34326 37428\n 40787 46234 5779 19652 31063 2442 13634 16131 58256 18657 9300 32562]\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb383f38fa4b666ca9d2298cf2428492391846fb
19,791
ipynb
Jupyter Notebook
g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb
rtg0795/fairness-indicators
038c976747043f722cd5a4f5fdc206ec0afe70e2
[ "Apache-2.0" ]
null
null
null
g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb
rtg0795/fairness-indicators
038c976747043f722cd5a4f5fdc206ec0afe70e2
[ "Apache-2.0" ]
null
null
null
g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb
rtg0795/fairness-indicators
038c976747043f722cd5a4f5fdc206ec0afe70e2
[ "Apache-2.0" ]
null
null
null
42.288462
703
0.592845
[ [ [ "##### Copyright 2020 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Pandas DataFrame to Fairness Indicators Case Study\n", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/responsible_ai/fairness_indicators/tutorials/Fairness_Indicators_Pandas_Case_Study\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/fairness-indicators/blob/master/g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/fairness-indicators/tree/master/g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/fairness-indicators/g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "## Case Study Overview\nIn this case study we will apply [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) and [Fairness Indicators](https://www.tensorflow.org/tfx/guide/fairness_indicators) to evaluate data stored as a Pandas DataFrame, where each row contains ground truth labels, various features, and a model prediction. We will show how this workflow can be used to spot potential fairness concerns, independent of the framework one used to construct and train the model. As in this case study, we can analyze the results from any machine learning framework (e.g. TensorFlow, JAX, etc) once they are converted to a Pandas DataFrame.\n \nFor this exercise, we will leverage the Deep Neural Network (DNN) model that was developed in the [Shape Constraints for Ethics with Tensorflow Lattice](https://colab.research.google.com/github/tensorflow/lattice/blob/master/docs/tutorials/shape_constraints_for_ethics.ipynb#scrollTo=uc0VwsT5nvQi) case study using the Law School Admissions dataset from the Law School Admissions Council (LSAC). This classifier attempts to predict whether or not a student will pass the bar, based on their Law School Admission Test (LSAT) score and undergraduate GPA.\n\n## LSAC Dataset\nThe dataset used within this case study was originally collected for a study called '[LSAC National Longitudinal Bar Passage Study. LSAC Research Report Series](https://eric.ed.gov/?id=ED469370)' by Linda Wightman in 1998. 
The dataset is currently hosted [here](http://www.seaphe.org/databases.php).\n\n* **dnn_bar_pass_prediction**: The LSAT prediction from the DNN model.\n* **gender**: Gender of the student.\n* **lsat**: LSAT score received by the student.\n* **pass_bar**: Ground truth label indicating whether or not the student eventually passed the bar.\n* **race**: Race of the student.\n* **ugpa**: A student's undergraduate GPA.\n", "_____no_output_____" ] ], [ [ "!pip install -q -U pip==20.2\n\n!pip install -q -U \\\n tensorflow-model-analysis==0.39.0 \\\n tensorflow-data-validation==1.8.0 \\\n tfx-bsl==1.8.0", "_____no_output_____" ] ], [ [ "## Importing required packages:", "_____no_output_____" ] ], [ [ "import os\nimport tempfile\nimport pandas as pd\nimport six.moves.urllib as urllib\nimport pprint\n\nimport tensorflow_model_analysis as tfma\nfrom google.protobuf import text_format\n\nimport tensorflow as tf\ntf.compat.v1.enable_v2_behavior()", "_____no_output_____" ] ], [ [ "## Download the data and explore the initial dataset.", "_____no_output_____" ] ], [ [ "# Download the LSAT dataset and setup the required filepaths.\n_DATA_ROOT = tempfile.mkdtemp(prefix='lsat-data')\n_DATA_PATH = 'https://storage.googleapis.com/lawschool_dataset/bar_pass_prediction.csv'\n_DATA_FILEPATH = os.path.join(_DATA_ROOT, 'bar_pass_prediction.csv')\n\ndata = urllib.request.urlopen(_DATA_PATH)\n\n_LSAT_DF = pd.read_csv(data)\n\n# To simplify the case study, we will only use the columns that will be used for\n# our model.\n_COLUMN_NAMES = [\n 'dnn_bar_pass_prediction',\n 'gender',\n 'lsat',\n 'pass_bar',\n 'race1',\n 'ugpa',\n]\n\n_LSAT_DF.dropna()\n_LSAT_DF['gender'] = _LSAT_DF['gender'].astype(str)\n_LSAT_DF['race1'] = _LSAT_DF['race1'].astype(str)\n_LSAT_DF = _LSAT_DF[_COLUMN_NAMES]\n\n_LSAT_DF.head()", "_____no_output_____" ] ], [ [ "## Configure Fairness Indicators.\nThere are several parameters that you’ll need to take into account when using Fairness Indicators with a DataFrame.\n\n* Your input DataFrame must contain a prediction column and label column from your model. By default Fairness Indicators will look for a prediction column called `prediction` and a label column called `label` within your DataFrame.\n * If either of these values are not found a KeyError will be raised.\n\n* In addition to a DataFrame, you’ll also need to include an `eval_config` that should include the metrics to compute, slices to compute the metrics on, and the column names for example labels and predictions. \n * `metrics_specs` will set the metrics to compute. The `FairnessIndicators` metric will be required to render the fairness metrics and you can see a list of additional optional metrics [here](https://www.tensorflow.org/tfx/model_analysis/metrics).\n\n * `slicing_specs` is an optional slicing parameter to specify what feature you’re interested in investigating. Within this case study race1 is used, however you can also set this value to another feature (for example gender in the context of this DataFrame). 
If `slicing_specs` is not provided all features will be included.\n * If your DataFrame includes a label or prediction column that is different from the default `prediction` or `label`, you can configure the `label_key` and `prediction_key` to a new value.\n\n* If `output_path` is not specified a temporary directory will be created.", "_____no_output_____" ] ], [ [ "# Specify Fairness Indicators in eval_config.\neval_config = text_format.Parse(\"\"\"\n model_specs {\n prediction_key: 'dnn_bar_pass_prediction',\n label_key: 'pass_bar'\n }\n metrics_specs {\n metrics {class_name: \"AUC\"}\n metrics {\n class_name: \"FairnessIndicators\"\n config: '{\"thresholds\": [0.50, 0.90]}'\n }\n }\n slicing_specs {\n feature_keys: 'race1'\n }\n slicing_specs {}\n \"\"\", tfma.EvalConfig())\n\n# Run TensorFlow Model Analysis.\neval_result = tfma.analyze_raw_data(\n data=_LSAT_DF,\n eval_config=eval_config,\n output_path=_DATA_ROOT)", "_____no_output_____" ] ], [ [ "## Explore model performance with Fairness Indicators.\n\nAfter running Fairness Indicators, we can visualize different metrics that we selected to analyze our model's performance. Within this case study we’ve included Fairness Indicators and arbitrarily picked AUC.\n\nWhen we first look at the overall AUC for each race slice we can see a slight discrepancy in model performance, but nothing that is arguably alarming.\n\n* **Asian**: 0.58\n* **Black**: 0.58\n* **Hispanic**: 0.58\n* **Other**: 0.64\n* **White**: 0.6\n\nHowever, when we look at the false negative rates split by race, our model again incorrectly predicts the likelihood of a user passing the bar at different rates and, this time, does so by a lot. \n\n* **Asian**: 0.01\n* **Black**: 0.05\n* **Hispanic**: 0.02\n* **Other**: 0.01\n* **White**: 0.01\n\nMost notably the difference between Black and White students is about 380%, meaning that our model is nearly 4x more likely to incorrectly predict that a black student will not pass the bar, than a white student. 
If we were to continue with this effort, a practitioner could use these results as a signal that they should spend more time ensuring that their model works well for people from all backgrounds.", "_____no_output_____" ] ], [ [ "# Render Fairness Indicators.\ntfma.addons.fairness.view.widget_view.render_fairness_indicator(eval_result)", "_____no_output_____" ] ], [ [ "# tfma.EvalResult", "_____no_output_____" ], [ "The [`eval_result`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult) object, rendered above in `render_fairness_indicator()`, has its own API that can be used to read TFMA results into your programs.", "_____no_output_____" ], [ "## [`get_slice_names()`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult#get_slice_names) and [`get_metric_names()`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult#get_metric_names)", "_____no_output_____" ], [ "To get the evaluated slices and metrics, you can use the respective functions.", "_____no_output_____" ] ], [ [ "pp = pprint.PrettyPrinter()\n\nprint(\"Slices:\")\npp.pprint(eval_result.get_slice_names())\nprint(\"\\nMetrics:\")\npp.pprint(eval_result.get_metric_names())", "_____no_output_____" ] ], [ [ "## [`get_metrics_for_slice()`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult#get_metrics_for_slice) and [`get_metrics_for_all_slices()`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult#get_metrics_for_all_slices)", "_____no_output_____" ], [ "If you want to get the metrics for a particular slice, you can use `get_metrics_for_slice()`. It returns a dictionary mapping metric names to [metric values](https://github.com/tensorflow/model-analysis/blob/cdb6790dcd7a37c82afb493859b3ef4898963fee/tensorflow_model_analysis/proto/metrics_for_slice.proto#L194).", "_____no_output_____" ] ], [ [ "baseline_slice = ()\nblack_slice = (('race1', 'black'),)\n\nprint(\"Baseline metric values:\")\npp.pprint(eval_result.get_metrics_for_slice(baseline_slice))\nprint(\"Black metric values:\")\npp.pprint(eval_result.get_metrics_for_slice(black_slice))", "_____no_output_____" ] ], [ [ "If you want to get the metrics for all slices, `get_metrics_for_all_slices()` returns a dictionary mapping each slice to the corresponding `get_metrics_for_slice(slice)`.", "_____no_output_____" ] ], [ [ "pp.pprint(eval_result.get_metrics_for_all_slices())", "_____no_output_____" ] ], [ [ "## Conclusion\nWithin this case study we imported a dataset into a Pandas DataFrame that we then analyzed with Fairness Indicators. Understanding the results of your model and underlying data is an important step in ensuring your model doesn't reflect harmful bias. In the context of this case study we examined the LSAC dataset and how predictions from this data could be impacted by a student's race. The concept of “what is unfair and what is fair have been introduced in multiple disciplines for well over 50 years, including in education, hiring, and machine learning.”<sup>1</sup> Fairness Indicators is a tool to help mitigate fairness concerns in your machine learning model.\n\nFor more information on using Fairness Indicators and resources to learn more about fairness concerns see [here](https://www.tensorflow.org/responsible_ai/fairness_indicators/guide).\n\n---\n\n1. Hutchinson, B., Mitchell, M. (2018). 50 Years of Test (Un)fairness: Lessons for Machine Learning. 
https://arxiv.org/abs/1811.10104\n", "_____no_output_____" ], [ "## Appendix\n\nBelow are a few functions to help convert ML models to Pandas DataFrame.\n", "_____no_output_____" ] ], [ [ "# TensorFlow Estimator to Pandas DataFrame:\n\n# _X_VALUE = # X value of binary estimator.\n# _Y_VALUE = # Y value of binary estimator.\n# _GROUND_TRUTH_LABEL = # Ground truth value of binary estimator.\n\ndef _get_predicted_probabilities(estimator, input_df, get_input_fn):\n predictions = estimator.predict(\n input_fn=get_input_fn(input_df=input_df, num_epochs=1))\n return [prediction['probabilities'][1] for prediction in predictions]\n\ndef _get_input_fn_law(input_df, num_epochs, batch_size=None):\n return tf.compat.v1.estimator.inputs.pandas_input_fn(\n x=input_df[[_X_VALUE, _Y_VALUE]],\n y=input_df[_GROUND_TRUTH_LABEL],\n num_epochs=num_epochs,\n batch_size=batch_size or len(input_df),\n shuffle=False)\n\ndef estimator_to_dataframe(estimator, input_df, num_keypoints=20):\n x = np.linspace(min(input_df[_X_VALUE]), max(input_df[_X_VALUE]), num_keypoints)\n y = np.linspace(min(input_df[_Y_VALUE]), max(input_df[_Y_VALUE]), num_keypoints)\n\n x_grid, y_grid = np.meshgrid(x, y)\n\n positions = np.vstack([x_grid.ravel(), y_grid.ravel()])\n plot_df = pd.DataFrame(positions.T, columns=[_X_VALUE, _Y_VALUE])\n plot_df[_GROUND_TRUTH_LABEL] = np.ones(len(plot_df))\n predictions = _get_predicted_probabilities(\n estimator=estimator, input_df=plot_df, get_input_fn=_get_input_fn_law)\n return pd.DataFrame(\n data=np.array(np.reshape(predictions, x_grid.shape)).flatten())", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb384658547418b47829268f52121356fb1bf76f
4,576
ipynb
Jupyter Notebook
07ClassificationPerformanceMeasures/02F1-Score.ipynb
violet-Bin/MachineLearning
886af9fb22442cdc0d684e7a19132410ccb92572
[ "Apache-2.0" ]
1
2019-04-10T12:46:05.000Z
2019-04-10T12:46:05.000Z
07ClassificationPerformanceMeasures/02F1-Score.ipynb
violet-Bin/MachineLearning
886af9fb22442cdc0d684e7a19132410ccb92572
[ "Apache-2.0" ]
null
null
null
07ClassificationPerformanceMeasures/02F1-Score.ipynb
violet-Bin/MachineLearning
886af9fb22442cdc0d684e7a19132410ccb92572
[ "Apache-2.0" ]
null
null
null
18.451613
81
0.490603
[ [ [ "# F1-Score", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "def f1_score(precision, recall):\n try:\n return 2 * precision * recall / (precision + recall)\n except:\n return 0.0", "_____no_output_____" ], [ "precision = 0.5\nrecall = 0.5\nf1_score(precision, recall)", "_____no_output_____" ], [ "precision = 0.1\nrecall = 0.9\nf1_score(precision, recall)", "_____no_output_____" ], [ "from sklearn import datasets\n\ndigits = datasets.load_digits()\nX = digits.data\ny = digits.target.copy()\n\ny[digits.target==9] = 1\ny[digits.target!=9] = 0", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\n\nlog_reg = LogisticRegression()\nlog_reg.fit(X_train, y_train)\nlog_reg.score(X_test, y_test)", "_____no_output_____" ], [ "y_predict = log_reg.predict(X_test)", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix\n\nconfusion_matrix(y_test, y_predict)", "_____no_output_____" ], [ "from sklearn.metrics import precision_score\n\nprecision_score(y_test, y_predict)", "_____no_output_____" ], [ "from sklearn.metrics import recall_score\n\nrecall_score(y_test, y_predict)", "_____no_output_____" ], [ "from sklearn.metrics import f1_score\n\nf1_score(y_test, y_predict)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb385bcb5fb0e6de40e05548266b2bfd3ea7ed46
29,970
ipynb
Jupyter Notebook
PY0101EN-1-2-Strings.ipynb
R-Unix/Python-For-Datascience
6d60334c5d67a0864da0da7f22f80c793dd66bb1
[ "MIT" ]
null
null
null
PY0101EN-1-2-Strings.ipynb
R-Unix/Python-For-Datascience
6d60334c5d67a0864da0da7f22f80c793dd66bb1
[ "MIT" ]
null
null
null
PY0101EN-1-2-Strings.ipynb
R-Unix/Python-For-Datascience
6d60334c5d67a0864da0da7f22f80c793dd66bb1
[ "MIT" ]
null
null
null
22.249443
716
0.529897
[ [ [ "<a href=\"https://cognitiveclass.ai/\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/CCLog.png\" width=\"200\" align=\"center\">\n</a>", "_____no_output_____" ], [ "<h1>String Operations</h1>", "_____no_output_____" ], [ "<p><strong>Welcome!</strong> This notebook will teach you about the string operations in the Python Programming Language. By the end of this notebook, you'll know the basics string operations in Python, including indexing, escape sequences and operations.</p> ", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <a href=\"https://cocl.us/topNotebooksPython101Coursera\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/TopAd.png\" width=\"750\" align=\"center\">\n </a>\n</div>", "_____no_output_____" ], [ "<h2>Table of Contents</h2>\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ul>\n <li>\n <a href=\"#strings\">What are Strings?</a>\n </li>\n <li>\n <a href=\"#index\">Indexing</a>\n <ul>\n <li><a href=\"neg\">Negative Indexing</a></li>\n <li><a href=\"slice\">Slicing</a></li>\n <li><a href=\"stride\">Stride</a></li>\n <li><a href=\"concat\">Concatenate Strings</a></li>\n </ul>\n </li>\n <li>\n <a href=\"#escape\">Escape Sequences</a>\n </li>\n <li>\n <a href=\"#operations\">String Operations</a>\n </li>\n <li>\n <a href=\"#quiz\">Quiz on Strings</a>\n </li>\n </ul>\n <p>\n Estimated time needed: <strong>15 min</strong>\n </p>\n</div>\n\n<hr>", "_____no_output_____" ], [ "<h2 id=\"strings\">What are Strings?</h2>", "_____no_output_____" ], [ "The following example shows a string contained within 2 quotation marks:", "_____no_output_____" ] ], [ [ "# Use quotation marks for defining string\n\n\"Michael Jackson\"", "_____no_output_____" ] ], [ [ "We can also use single quotation marks:", "_____no_output_____" ] ], [ [ "# Use single quotation marks for defining string\n\n'Michael Jackson'", "_____no_output_____" ] ], [ [ "A string can be a combination of spaces and digits: ", "_____no_output_____" ] ], [ [ "# Digitals and spaces in string\n\n'1 2 3 4 5 6 '", "_____no_output_____" ] ], [ [ "A string can also be a combination of special characters : ", "_____no_output_____" ] ], [ [ "# Special characters in string\n\n'@#2_#]&*^%$'", "_____no_output_____" ] ], [ [ "We can print our string using the print statement:", "_____no_output_____" ] ], [ [ "# Print the string\n\nprint(\"hello!\")", "_____no_output_____" ] ], [ [ "We can bind or assign a string to another variable:\n", "_____no_output_____" ] ], [ [ "# Assign string to variable\n\nName = \"Michael Jack\"\nName ", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"index\">Indexing</h2>", "_____no_output_____" ], [ "It is helpful to think of a string as an ordered sequence. 
Each element in the sequence can be accessed using an index represented by the array of numbers: ", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsIndex.png\" width=\"600\" align=\"center\" />", "_____no_output_____" ], [ " The first index can be accessed as follows:", "_____no_output_____" ], [ "<hr/>\n<div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n[Tip]: Because indexing starts at 0, it means the first index is on the index 0.\n</div>\n<hr/>", "_____no_output_____" ] ], [ [ "# Print the first element in the string\n\nprint(Name[0])", "M\n" ] ], [ [ " We can access index 6:", "_____no_output_____" ] ], [ [ "# Print the element on index 6 in the string\n\nprint(Name[6])", "_____no_output_____" ] ], [ [ "Moreover, we can access the 13th index:", "_____no_output_____" ] ], [ [ "# Print the element on the 13th index in the string\n\nprint(Name[13])", "_____no_output_____" ] ], [ [ "<h3 id=\"neg\">Negative Indexing</h3>", "_____no_output_____" ], [ " We can also use negative indexing with strings:", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsNeg.png\" width=\"600\" align=\"center\" />", "_____no_output_____" ], [ "Negative index can help us to count the element from the end of the string.", "_____no_output_____" ], [ "The last element is given by the index -1: ", "_____no_output_____" ] ], [ [ "# Print the last element in the string\n\nprint(Name[-1])", "k\n" ] ], [ [ " The first element can be obtained by index -15:", "_____no_output_____" ] ], [ [ "# Print the first element in the string\n\nprint(Name[-15])", "_____no_output_____" ] ], [ [ "We can find the number of characters in a string by using <code>len</code>, short for length:", "_____no_output_____" ] ], [ [ "# Find the length of string\n\nlen(\"Michael Jackson\")", "_____no_output_____" ] ], [ [ "<h3 id=\"slice\">Slicing</h3>", "_____no_output_____" ], [ "We can obtain multiple characters from a string using slicing, we can obtain the 0 to 4th and 8th to the 12th element: ", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsSlice.png\" width=\"600\" align=\"center\" />", "_____no_output_____" ], [ "<hr/>\n<div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n[Tip]: When taking the slice, the first number means the index (start at 0), and the second number means the length from the index to the last element you want (start at 1)\n</div>\n<hr/>", "_____no_output_____" ] ], [ [ "# Take the slice on variable Name with only index 0 to index 3\n\nName[0:4] and Name [8:13]", "_____no_output_____" ], [ "# Take the slice on variable Name with only index 8 to index 11\n\nName[8:12]", "_____no_output_____" ] ], [ [ "<h3 id=\"stride\">Stride</h3>", "_____no_output_____" ], [ " We can also input a stride value as follows, with the '2' indicating that we are selecting every second variable:", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsStride.png\" width=\"600\" align=\"center\" />", "_____no_output_____" ] ], [ [ "# Get every second element. 
The elments on index 1, 3, 5 ...\n\nName[::2]", "_____no_output_____" ] ], [ [ "We can also incorporate slicing with the stride. In this case, we select the first five elements and then use the stride: ", "_____no_output_____" ] ], [ [ "# Get every second element in the range from index 0 to index 4\n\nName[0:5:2]", "_____no_output_____" ] ], [ [ "<h3 id=\"concat\">Concatenate Strings</h3>", "_____no_output_____" ], [ "We can concatenate or combine strings by using the addition symbols, and the result is a new string that is a combination of both:\n", "_____no_output_____" ] ], [ [ "# Concatenate two strings\n\nStatement = Name + \"is the best\"\nStatement", "_____no_output_____" ] ], [ [ "To replicate values of a string we simply multiply the string by the number of times we would like to replicate it. In this case, the number is three. The result is a new string, and this new string consists of three copies of the original string:", "_____no_output_____" ] ], [ [ "# Print the string for 3 times\n\n3 * \"Michael Jackson\"", "_____no_output_____" ] ], [ [ "You can create a new string by setting it to the original variable. Concatenated with a new string, the result is a new string that changes from Michael Jackson to “Michael Jackson is the best\".\n", "_____no_output_____" ] ], [ [ "# Concatenate strings\n\nName = \"Michael Jackson\"\nName = Name + \" is the best\"\nName", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"escape\">Escape Sequences</h2>", "_____no_output_____" ], [ "Back slashes represent the beginning of escape sequences. Escape sequences represent strings that may be difficult to input. For example, back slash \"n\" represents a new line. The output is given by a new line after the back slash \"n\" is encountered:", "_____no_output_____" ] ], [ [ "# New line escape sequence\n\nprint(\" Michael Jackson \\n is the best\" )", "_____no_output_____" ] ], [ [ "Similarly, back slash \"t\" represents a tab: ", "_____no_output_____" ] ], [ [ "# Tab escape sequence\n\nprint(\" Michael Jackson \\t is the best\" )", "_____no_output_____" ] ], [ [ " If you want to place a back slash in your string, use a double back slash:", "_____no_output_____" ] ], [ [ "# Include back slash in string\n\nprint(\" Michael Jackson \\\\ is the best\" )", "_____no_output_____" ] ], [ [ " We can also place an \"r\" before the string to display the backslash:", "_____no_output_____" ] ], [ [ "# r will tell python that string will be display as raw string\n\nprint(r\" Michael Jackson \\ is the best\" )", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"operations\">String Operations</h2>", "_____no_output_____" ], [ "There are many string operation methods in Python that can be used to manipulate the data. We are going to use some basic string operations on the data. ", "_____no_output_____" ], [ "Let's try with the method <code>upper</code>; this method converts lower case characters to upper case characters:", "_____no_output_____" ] ], [ [ "# Convert all the characters in string to upper case\n\nA = \"Thriller is the sixth studio album\"\nprint(\"before upper:\", A)\nB = A.upper()\nprint(\"After upper:\", B)", "_____no_output_____" ] ], [ [ "The method <code>replace</code> replaces a segment of the string, i.e. a substring with a new string. We input the part of the string we would like to change. 
The second argument is what we would like to exchange the segment with, and the result is a new string with the segment changed: \n", "_____no_output_____" ] ], [ [ "# Replace the old substring with the new target substring is the segment has been found in the string\n\nA = \"Michael Jackson is the best\"\nB = A.replace('Michael', 'Janet')\nB", "_____no_output_____" ] ], [ [ "The method <code>find</code> finds a sub-string. The argument is the substring you would like to find, and the output is the first index of the sequence. We can find the sub-string <code>jack</code> or <code>el<code>. ", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsFind.png\" width=\"600\" align=\"center\" />", "_____no_output_____" ] ], [ [ "# Find the substring in the string. Only the index of the first elment of substring in string will be the output\n\nName = \"Michael Jackson\"\nName.find('el')", "_____no_output_____" ], [ "# Find the substring in the string.\n\nName.find('Jack')", "_____no_output_____" ] ], [ [ "If the sub-string is not in the string then the output is a negative one. For example, the string 'Jasdfasdasdf' is not a substring:", "_____no_output_____" ] ], [ [ "# If cannot find the substring in the string\n\nName.find('Jasdfasdasdf')", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"quiz\">Quiz on Strings</h2>", "_____no_output_____" ], [ "What is the value of the variable <code>A</code> after the following code is executed? ", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \n\nA = \"1\"", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- Your answer is below:\n\"1\"\n-->", "_____no_output_____" ], [ "What is the value of the variable <code>B</code> after the following code is executed?", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\n\nB = \"2\"", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- Your answer is below:\n\"2\"\n-->", "_____no_output_____" ], [ "What is the value of the variable <code>C</code> after the following code is executed?", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \n\nC = A + B", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- Your answer is below:\n\"12\"\n-->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Consider the variable <code>D</code> use slicing to print out the first three elements:", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\n\nD = \"ABCDEFG\"", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- Your answer is below:\nprint(D[:3]) \n# or \nprint(D[0:3])\n-->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Use a stride value of 2 to print out every second character of the string <code>E</code>: ", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\n\nE = 'clocrkr1e1c1t'", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- Your answer is below:\nprint(E[::2])\n-->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Print out a backslash:", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the 
solution.\n<!-- Your answer is below:\nprint(\"\\\\\")\nor\nprint(r\" \\ \")\n-->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Convert the variable <code>F</code> to uppercase:", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\n\nF = \"You are wrong\"", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- Your answer is below:\nF.upper()\n-->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Consider the variable <code>G</code>, and find the first index of the sub-string <code>snow</code>:", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\n\nG = \"Mary had a little lamb Little lamb, little lamb Mary had a little lamb \\\nIts fleece was white as snow And everywhere that Mary went Mary went, Mary went \\\nEverywhere that Mary went The lamb was sure to go\"", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\nG.find(\"snow\")\n-->", "_____no_output_____" ], [ "In the variable <code>G</code>, replace the sub-string <code>Mary</code> with <code>Bob</code>:", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\nG.replace(\"Mary\", \"Bob\")\n-->", "_____no_output_____" ], [ "<hr>\n<h2>The last exercise!</h2>\n<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href=\"https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/\" target=\"_blank\">this article</a> to learn how to share your work.\n<hr>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<h2>Get IBM Watson Studio free of charge!</h2>\n <p><a href=\"https://cocl.us/bottemNotebooksPython101Coursera\"><img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/BottomAd.png\" width=\"750\" align=\"center\"></a></p>\n</div>", "_____no_output_____" ], [ "<h3>About the Authors:</h3> \n<p><a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\" target=\"_blank\">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>", "_____no_output_____" ], [ "Other contributors: <a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>", "_____no_output_____" ], [ "<hr>\n<p>Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href=\"https://cognitiveclass.ai/mit-license/\">MIT License</a>.</p>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb385fb2a4833d31cbd0142d7089a492f0b71e8d
89,898
ipynb
Jupyter Notebook
NLP Data Prep.ipynb
fakecoinbase/sweetpandslashAlgorithms
9641e31320f17c6393b7746312c4b030a7faf015
[ "MIT" ]
3
2021-04-21T07:11:33.000Z
2022-01-09T00:05:55.000Z
NLP Data Prep.ipynb
sweetpand/Algorithms
2e4dcf2d42de25531fae5b4ec0d96ce100043117
[ "MIT" ]
null
null
null
NLP Data Prep.ipynb
sweetpand/Algorithms
2e4dcf2d42de25531fae5b4ec0d96ce100043117
[ "MIT" ]
null
null
null
96.353698
5,978
0.701061
[ [ [ "from fastai.text import *", "_____no_output_____" ], [ "from fastai.tabular import *", "_____no_output_____" ], [ "path = Path('')", "_____no_output_____" ], [ "data = pd.read_csv('good_small_dataset.csv', engine='python')", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "df = data.dropna()", "_____no_output_____" ], [ "df.to_csv('good_small_dataset_drop_missing.csv')", "_____no_output_____" ], [ "data_lm = TextLMDataBunch.from_csv(path, 'good_small_dataset_drop_missing.csv', text_cols = 'content', label_cols = 'type')\ndata_lm.save('data_lm_export.pkl')", "_____no_output_____" ], [ "data_clas = TextClasDataBunch.from_csv(path, 'good_small_dataset_drop_missing.csv', vocab=data_lm.train_ds.vocab, text_cols = 'content', label_cols = 'type',bs=16)\ndata_clas.save('data_clas_export.pkl')", "_____no_output_____" ], [ "from fastai.text import *", "_____no_output_____" ], [ "data_lm = load_data('NLP/', 'data_lm_export.pkl')", "_____no_output_____" ], [ "data_clas = load_data('', 'data_clas_export.pkl')", "_____no_output_____" ], [ "learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.5)", "_____no_output_____" ], [ "learn.save('initial')", "_____no_output_____" ], [ "learn.fit_one_cycle(1, 1e-2)", "_____no_output_____" ], [ "learn.save('initial')", "_____no_output_____" ], [ "learn.unfreeze()", "_____no_output_____" ], [ "learn.fit_one_cycle(1, 1e-3)", "_____no_output_____" ], [ "learn.save_encoder('ft_enc')", "_____no_output_____" ], [ "learn.save('ft_encoder_model')", "_____no_output_____" ], [ "learn.predict(\"The President today spoke on\", n_words=10)", "_____no_output_____" ], [ "learn.predict(\"Kim Kardashian released a new photo depicting her doing\", n_words=6)", "_____no_output_____" ], [ "learn.predict(\"World War Three has begun between\", n_words=10)", "_____no_output_____" ], [ "learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5);", "_____no_output_____" ], [ "learn.load_encoder('ft_enc')\nlearn.load('good_model_epoc_2');", "_____no_output_____" ], [ "learn.summary()", "_____no_output_____" ], [ "data_clas.show_batch()", "_____no_output_____" ], [ "learn.fit_one_cycle(1, 1e-2)\nlearn.save('good_model')", "_____no_output_____" ], [ "learn.freeze_to(-2)\nlearn.fit_one_cycle(1, slice(5e-3/2., 5e-3))\nlearn.save('good_model_epoc_2')", "_____no_output_____" ], [ "learn.unfreeze()\nlearn.fit_one_cycle(1, slice(2e-3/100, 2e-3))\nlearn.save('good_model_epoc_3')", "_____no_output_____" ], [ "# BBC\nlearn.predict(\"Israeli PM Benjamin Netanyahu has said he will annex Jewish settlements in the occupied West Bank if he is re-elected.Israelis go to the polls on Tuesday and Mr Netanyahu is competing for votes with right-wing parties who support annexing part of the West Bank.The settlements are illegal under international law, though Israel disputes this.Last month the US recognised the occupied Golan Heights, seized from Syria in 1967, as Israeli territory.Can Jewish settlement issue be resolved?What Trump’s Golan Heights move really meansIsrael's Benjamin Netanyahu: Commando turned PMIsrael has settled about 400,000 Jews in West Bank settlements, with another 200,000 living in East Jerusalem. 
There are about 2.5 million Palestinians living in the West Bank.Palestinians want to establish a state in the occupied West Bank, East Jerusalem and the Gaza Strip.What happens to the settlements is one of the most contentious issues between Israel and the Palestinians - Palestinians say the presence of settlements make a future independent state impossible.Israel says the Palestinians are using the issue of settlements as a pretext to avoid direct peace talks. It says settlements are not a genuine obstacle to peace and are negotiable.What exactly did Netanyahu say?He was asked during an interview on Israeli TV why he had not extended Israeli sovereignty to large settlements in the West Bank.'You are asking whether we are moving on to the next stage - the answer is yes, we will move to the next stage,' he said.Image copyrightREUTERSImage captionMr Netanyahu is seeking re-election'I am going to extend [Israeli] sovereignty and I don't distinguish between settlement blocs and the isolated settlements.'A spokesman for Palestinian leader Mahmoud Abbas told Reuters: 'Any measures and any announcements will not change the facts. Settlements are illegal and they will be removed.'Potentially explosive commentsBy Sebastian Usher, BBC Arab affairs editorThese comments by Benjamin Netanyahu are potentially explosive over an issue that has helped stall peace efforts for years.They will resonate with several parties with which he'll try to form a coalition government if he wins the biggest share of votes.But the very idea of annexation will rouse new Palestinian fury, as well as international condemnation.Mr Netanyahu may have been emboldened by the Trump administration, which just last month recognised Israeli sovereignty over the Golan Heights.What is the political background?Mr Netanyahu's right-wing Likud party is in a tight race with the new centre-right Blue and White alliance.However other parties, some of which support annexation, could end up being kingmakers when they try to form a governing coalition.Israel election: Who are the key candidates?The ex-military chief trying to unseat NetanyahuIn Mr Netanyahu's own Likud party, 28 out of the 29 lawmakers running for re-election are on record as supporting this approach. 
Until now the prime minister was the only exception.What is the situation of peace negotiations?Mr Trump's administration is preparing to unveil a long-awaited Middle East peace plan, which US officials say will be fair.However the Trump administration has carried out a series of actions that have inflamed Palestinian opinion and generally pleased Israel.In 2017 Mr Trump announced that the US recognised Jerusalem as Israel's capital, overturning decades of official US policy.In response Mr Abbas cut off relations with the US, saying the US could no longer be a peace broker.Last year the US stopped contributing to the UN Relief and Works Agency (Unrwa), which has been looking after Palestinian refugees since 1949.Last month President Trump officially recognised Israeli sovereignty over the occupied Golan Heights.Peace negotiations between Israel and the Palestinians have been at a standstill since 2014, when a US-brokered attempt to reach a deal collapsed.\")", "_____no_output_____" ], [ "# Fox News:\nlearn.predict(\"Former President Barack Obama said on Saturday that he is worried that progressives are creating a “circular firing squad” as prospective Democratic presidential candidates race to the left on a number of hot topic issues ahead of the 2020 election.“The way we structure democracy requires you to take into account people who don’t agree with you,” he said at an Obama Foundation town hall event in Berlin, according to The New York Post. “And that by definition means you’re not going to get 100 percent of what you want.”BARACK OBAMA STILL BELIEVES BIDEN WOULD BE 'AN EXCELLENT PRESIDENT' AMID INAPPROPRIATE TOUCHING ALLEGATIONS: REPORT“One of the things I do worry about sometimes among progressives … we start sometimes creating what’s called a ‘circular firing squad’ where you start shooting at your allies because one of them has strayed from purity on the issues,” he said.Obama’s remarks come as freshman House Democrats such as Rep. Alexandria Ocasio-Cortez, D-N.Y., have pushed once-fringe positions on Medicare-for-all, the Green New Deal and reparations for slavery. In turn, 2020 presidential hopefuls have also taken some of those positions.In that climate, candidates have come under criticism for their past stances from activists. South Bend Mayor Pete Buttigieg was forced this week to address remarks he made in 2015 when he said that “all lives matter” -- which some activists say is a counterslogan to the “black lives matter” sloganSen. Kamala Harris, D-Calif., meanwhile has been hit by controversy over her past as a prosecutor. 
A scathing op-ed published in January in The New York Times, written by law professor Lara Bazelon, has kickstarted renewed scrutiny.Obama reportedly warns freshmen House Democrats about pricey policy proposalsVideoBazelon says Harris previously 'fought tooth and nail to uphold wrongful convictions that had been secured through official misconduct that included evidence tampering, false testimony and the suppression of crucial information by prosecutors.'Bazelon further suggested that Harris should 'apologize to the wrongfully convicted people she has fought to keep in prison and to do what she can to make sure they get justice' or otherwise make clear she has 'radically broken from her past.'Former vice president under Obama, Joe Biden, meanwhile has faced criticism for inappropriate past physical contact with women, as well a a 1993 speech on crime in which he warned of “predators on our streets”'They are beyond the pale many of those people, beyond the pale,' Biden continued. 'And it's a sad commentary on society. We have no choice but to take them out of society.'The latter was reminiscent of heat 2016 presidential nominee Hillary Clinton took from activists for her description of some gang members as “superpredators” in 1996.Obama himself may not escape criticism in the election cycle. His signature health care legislation, the Affordable Care Act, is quickly being eclipsed by calls from Democrats for single-payer and Medicare-for-all plans. Meanwhile, a number of Democrats have said they are open to reparations for black Americans for slavery -- something that Obama opposed when he was in office.\")", "_____no_output_____" ], [ "# BrightBert again\nlearn.predict(\"The border agencies need tougher leadership, President Donald Trump declared Friday as he dropped plans to appoint a long-time agency staffer to run the Immigration and Customs Enforcement agency (ICE).'Ron [Vitiello is] a good man,” Trump told reporters. 'But we’re going in a tougher direction. We want to go in a tougher direction.” Trump’s 'tougher direction” statement suggests he may pressure Department of Homeland Secretary (DHS) Secretary Kirstjen Nielsen to implement policies that top agency staffers oppose, such as rejecting legal interpretations and bureaucratic practices set by former President Barack Obama. Immigration reformers blame those Obama policies for encouraging the wave of economic migrants from Central America.Breitbart TVDonald Trump Says Everything Jared Kushner Touched ‘Turned To Gold’The shift comes amid the growing wave of Central American economic migrants who are using Obama-era legal loopholes to walk through the border wall and into jobs, neighborhoods, and blue-collar schools throughout the United States. That wave is expected to deliver one million migrants into the United States by October, and it is made possible because Democrats are blocking any reform the border loopholes.Immigration reformers fear that Obama-appointed staffers and former business lobbyists are keeping Trump in the dark about ways to improve operation at the DHS. 'I don’t now know if the President is getting the information he needs about what powers he has,” according to Rosemary Jenks, policy director at the Center for Immigration Studies. 
'Secretary Nielsen and some of the attorneys in DHS are blocking the information because they are afraid of implementing some of the things they can do,” partly because they are afraid of lawsuits, she said.For example, many so-called 'Unaccompanied Alien Children” are being smuggled up the border because Trump’s agencies will pass them to their illegal immigrant parents living throughout the United States, under policies set by Obama. But those youths and children should be sent home, said Jenks, because the 2008 law only protects trafficked victims, such as forced prostitutes, not youths and children who have parents in the United States or who are willingly smuggled up to the border. According to the Washington Post, Vitiello’s exit was prompted by Steve Miller, one of Trump’s first aides who earlier played a key role in derailing the 2013 'Gang of Eight” amnesty and cheap labor bill. The Post said:Six administration officials said Friday that the decision to jettison Vitiello was a sign of the expanding influence that Miller now wields over immigration matters in the White House, particularly as Trump lashes out at Mexico and Central American nations — as well as Homeland Security officials and aides who express doubts about the legality of his ideas.The New York Times reported:One person familiar with the president’s thinking said that Mr. Trump believed that Mr. Vitiello did not favor closing the border, as the president had proposed before backing off that threat this week.Another person said that Stephen Miller, the president’s chief policy adviser and a supporter of curtailing legal and illegal immigration, did not support Mr. Vitiello’s nomination.Vitiello’s defenders lashed out at Miller. The Washington Post highlighted the complaints:'Ron Vitiello has spent as much time defending our nation’s borders as Stephen Miller has been alive,” one official said of Miller, who is 33.One senior official said: 'This is part of an increasingly desperate effort by Stephen to throw people under the bus when the policies he has advocated are not effective. Once it becomes clear that Stephen’s policies aren’t working, he tells the president, ‘They’re not the right people.’” But Vitiello’s appointment was opposed by the ICE officers’ union, the National ICE Council. Vitiello 'lacks the judgment and professionalism to effectively lead a federal agency,” said a February letter from union President Chris Crane.\")", "_____no_output_____" ], [ "# BBC\nlearn.predict(\"Israeli PM Benjamin Netanyahu has said he will annex Jewish settlements in the occupied West Bank if he is re-elected.Israelis go to the polls on Tuesday and Mr Netanyahu is competing for votes with right-wing parties who support annexing part of the West Bank.The settlements are illegal under international law, though Israel disputes this.Last month the US recognised the occupied Golan Heights, seized from Syria in 1967, as Israeli territory.Can Jewish settlement issue be resolved?What Trump’s Golan Heights move really meansIsrael's Benjamin Netanyahu: Commando turned PMIsrael has settled about 400,000 Jews in West Bank settlements, with another 200,000 living in East Jerusalem. 
There are about 2.5 million Palestinians living in the West Bank.Palestinians want to establish a state in the occupied West Bank, East Jerusalem and the Gaza Strip.What happens to the settlements is one of the most contentious issues between Israel and the Palestinians - Palestinians say the presence of settlements make a future independent state impossible.Israel says the Palestinians are using the issue of settlements as a pretext to avoid direct peace talks. It says settlements are not a genuine obstacle to peace and are negotiable.What exactly did Netanyahu say?He was asked during an interview on Israeli TV why he had not extended Israeli sovereignty to large settlements in the West Bank.'You are asking whether we are moving on to the next stage - the answer is yes, we will move to the next stage,' he said.Image copyrightREUTERSImage captionMr Netanyahu is seeking re-election'I am going to extend [Israeli] sovereignty and I don't distinguish between settlement blocs and the isolated settlements.'A spokesman for Palestinian leader Mahmoud Abbas told Reuters: 'Any measures and any announcements will not change the facts. Settlements are illegal and they will be removed.'Potentially explosive commentsBy Sebastian Usher, BBC Arab affairs editorThese comments by Benjamin Netanyahu are potentially explosive over an issue that has helped stall peace efforts for years.They will resonate with several parties with which he'll try to form a coalition government if he wins the biggest share of votes.But the very idea of annexation will rouse new Palestinian fury, as well as international condemnation.Mr Netanyahu may have been emboldened by the Trump administration, which just last month recognised Israeli sovereignty over the Golan Heights.What is the political background?Mr Netanyahu's right-wing Likud party is in a tight race with the new centre-right Blue and White alliance.However other parties, some of which support annexation, could end up being kingmakers when they try to form a governing coalition.Israel election: Who are the key candidates?The ex-military chief trying to unseat NetanyahuIn Mr Netanyahu's own Likud party, 28 out of the 29 lawmakers running for re-election are on record as supporting this approach. 
Until now the prime minister was the only exception.What is the situation of peace negotiations?Mr Trump's administration is preparing to unveil a long-awaited Middle East peace plan, which US officials say will be fair.However the Trump administration has carried out a series of actions that have inflamed Palestinian opinion and generally pleased Israel.In 2017 Mr Trump announced that the US recognised Jerusalem as Israel's capital, overturning decades of official US policy.In response Mr Abbas cut off relations with the US, saying the US could no longer be a peace broker.Last year the US stopped contributing to the UN Relief and Works Agency (Unrwa), which has been looking after Palestinian refugees since 1949.Last month President Trump officially recognised Israeli sovereignty over the occupied Golan Heights.Peace negotiations between Israel and the Palestinians have been at a standstill since 2014, when a US-brokered attempt to reach a deal collapsed.\")", "_____no_output_____" ], [ "# Fox News:\nlearn.predict(\"Former President Barack Obama said on Saturday that he is worried that progressives are creating a “circular firing squad” as prospective Democratic presidential candidates race to the left on a number of hot topic issues ahead of the 2020 election.“The way we structure democracy requires you to take into account people who don’t agree with you,” he said at an Obama Foundation town hall event in Berlin, according to The New York Post. “And that by definition means you’re not going to get 100 percent of what you want.”BARACK OBAMA STILL BELIEVES BIDEN WOULD BE 'AN EXCELLENT PRESIDENT' AMID INAPPROPRIATE TOUCHING ALLEGATIONS: REPORT“One of the things I do worry about sometimes among progressives … we start sometimes creating what’s called a ‘circular firing squad’ where you start shooting at your allies because one of them has strayed from purity on the issues,” he said.Obama’s remarks come as freshman House Democrats such as Rep. Alexandria Ocasio-Cortez, D-N.Y., have pushed once-fringe positions on Medicare-for-all, the Green New Deal and reparations for slavery. In turn, 2020 presidential hopefuls have also taken some of those positions.In that climate, candidates have come under criticism for their past stances from activists. South Bend Mayor Pete Buttigieg was forced this week to address remarks he made in 2015 when he said that “all lives matter” -- which some activists say is a counterslogan to the “black lives matter” sloganSen. Kamala Harris, D-Calif., meanwhile has been hit by controversy over her past as a prosecutor. 
A scathing op-ed published in January in The New York Times, written by law professor Lara Bazelon, has kickstarted renewed scrutiny.Obama reportedly warns freshmen House Democrats about pricey policy proposalsVideoBazelon says Harris previously 'fought tooth and nail to uphold wrongful convictions that had been secured through official misconduct that included evidence tampering, false testimony and the suppression of crucial information by prosecutors.'Bazelon further suggested that Harris should 'apologize to the wrongfully convicted people she has fought to keep in prison and to do what she can to make sure they get justice' or otherwise make clear she has 'radically broken from her past.'Former vice president under Obama, Joe Biden, meanwhile has faced criticism for inappropriate past physical contact with women, as well a a 1993 speech on crime in which he warned of “predators on our streets”'They are beyond the pale many of those people, beyond the pale,' Biden continued. 'And it's a sad commentary on society. We have no choice but to take them out of society.'The latter was reminiscent of heat 2016 presidential nominee Hillary Clinton took from activists for her description of some gang members as “superpredators” in 1996.Obama himself may not escape criticism in the election cycle. His signature health care legislation, the Affordable Care Act, is quickly being eclipsed by calls from Democrats for single-payer and Medicare-for-all plans. Meanwhile, a number of Democrats have said they are open to reparations for black Americans for slavery -- something that Obama opposed when he was in office.\")", "_____no_output_____" ], [ "# BrightBert again\nlearn.predict(\"The border agencies need tougher leadership, President Donald Trump declared Friday as he dropped plans to appoint a long-time agency staffer to run the Immigration and Customs Enforcement agency (ICE).'Ron [Vitiello is] a good man,' Trump told reporters. 'But we’re going in a tougher direction. We want to go in a tougher direction.' Trump’s 'tougher direction' statement suggests he may pressure Department of Homeland Secretary (DHS) Secretary Kirstjen Nielsen to implement policies that top agency staffers oppose, such as rejecting legal interpretations and bureaucratic practices set by former President Barack Obama. Immigration reformers blame those Obama policies for encouraging the wave of economic migrants from Central America.Breitbart TVDonald Trump Says Everything Jared Kushner Touched ‘Turned To Gold’The shift comes amid the growing wave of Central American economic migrants who are using Obama-era legal loopholes to walk through the border wall and into jobs, neighborhoods, and blue-collar schools throughout the United States. That wave is expected to deliver one million migrants into the United States by October, and it is made possible because Democrats are blocking any reform the border loopholes.Immigration reformers fear that Obama-appointed staffers and former business lobbyists are keeping Trump in the dark about ways to improve operation at the DHS. 'I don’t now know if the President is getting the information he needs about what powers he has,' according to Rosemary Jenks, policy director at the Center for Immigration Studies. 
'Secretary Nielsen and some of the attorneys in DHS are blocking the information because they are afraid of implementing some of the things they can do,' partly because they are afraid of lawsuits, she said.For example, many so-called 'Unaccompanied Alien Children' are being smuggled up the border because Trump’s agencies will pass them to their illegal immigrant parents living throughout the United States, under policies set by Obama. But those youths and children should be sent home, said Jenks, because the 2008 law only protects trafficked victims, such as forced prostitutes, not youths and children who have parents in the United States or who are willingly smuggled up to the border. According to the Washington Post, Vitiello’s exit was prompted by Steve Miller, one of Trump’s first aides who earlier played a key role in derailing the 2013 'Gang of Eight' amnesty and cheap labor bill. The Post said:Six administration officials said Friday that the decision to jettison Vitiello was a sign of the expanding influence that Miller now wields over immigration matters in the White House, particularly as Trump lashes out at Mexico and Central American nations — as well as Homeland Security officials and aides who express doubts about the legality of his ideas.The New York Times reported:One person familiar with the president’s thinking said that Mr. Trump believed that Mr. Vitiello did not favor closing the border, as the president had proposed before backing off that threat this week.Another person said that Stephen Miller, the president’s chief policy adviser and a supporter of curtailing legal and illegal immigration, did not support Mr. Vitiello’s nomination.Vitiello’s defenders lashed out at Miller. The Washington Post highlighted the complaints:'Ron Vitiello has spent as much time defending our nation’s borders as Stephen Miller has been alive,' one official said of Miller, who is 33.One senior official said: 'This is part of an increasingly desperate effort by Stephen to throw people under the bus when the policies he has advocated are not effective. Once it becomes clear that Stephen’s policies aren’t working, he tells the president, ‘They’re not the right people.’' But Vitiello’s appointment was opposed by the ICE officers’ union, the National ICE Council. Vitiello 'lacks the judgment and professionalism to effectively lead a federal agency,' said a February letter from union President Chris Crane.\")", "_____no_output_____" ], [ "# BBC\nlearn.predict(\"Israeli PM Benjamin Netanyahu has said he will annex Jewish settlements in the occupied West Bank if he is re-elected.Israelis go to the polls on Tuesday and Mr Netanyahu is competing for votes with right-wing parties who support annexing part of the West Bank.The settlements are illegal under international law, though Israel disputes this.Last month the US recognised the occupied Golan Heights, seized from Syria in 1967, as Israeli territory.Can Jewish settlement issue be resolved?What Trump’s Golan Heights move really meansIsrael's Benjamin Netanyahu: Commando turned PMIsrael has settled about 400,000 Jews in West Bank settlements, with another 200,000 living in East Jerusalem. 
There are about 2.5 million Palestinians living in the West Bank.Palestinians want to establish a state in the occupied West Bank, East Jerusalem and the Gaza Strip.What happens to the settlements is one of the most contentious issues between Israel and the Palestinians - Palestinians say the presence of settlements make a future independent state impossible.Israel says the Palestinians are using the issue of settlements as a pretext to avoid direct peace talks. It says settlements are not a genuine obstacle to peace and are negotiable.What exactly did Netanyahu say?He was asked during an interview on Israeli TV why he had not extended Israeli sovereignty to large settlements in the West Bank.'You are asking whether we are moving on to the next stage - the answer is yes, we will move to the next stage,' he said.Image copyrightREUTERSImage captionMr Netanyahu is seeking re-election'I am going to extend [Israeli] sovereignty and I don't distinguish between settlement blocs and the isolated settlements.'A spokesman for Palestinian leader Mahmoud Abbas told Reuters: 'Any measures and any announcements will not change the facts. Settlements are illegal and they will be removed.'Potentially explosive commentsBy Sebastian Usher, BBC Arab affairs editorThese comments by Benjamin Netanyahu are potentially explosive over an issue that has helped stall peace efforts for years.They will resonate with several parties with which he'll try to form a coalition government if he wins the biggest share of votes.But the very idea of annexation will rouse new Palestinian fury, as well as international condemnation.Mr Netanyahu may have been emboldened by the Trump administration, which just last month recognised Israeli sovereignty over the Golan Heights.What is the political background?Mr Netanyahu's right-wing Likud party is in a tight race with the new centre-right Blue and White alliance.However other parties, some of which support annexation, could end up being kingmakers when they try to form a governing coalition.Israel election: Who are the key candidates?The ex-military chief trying to unseat NetanyahuIn Mr Netanyahu's own Likud party, 28 out of the 29 lawmakers running for re-election are on record as supporting this approach. 
Until now the prime minister was the only exception.What is the situation of peace negotiations?Mr Trump's administration is preparing to unveil a long-awaited Middle East peace plan, which US officials say will be fair.However the Trump administration has carried out a series of actions that have inflamed Palestinian opinion and generally pleased Israel.In 2017 Mr Trump announced that the US recognised Jerusalem as Israel's capital, overturning decades of official US policy.In response Mr Abbas cut off relations with the US, saying the US could no longer be a peace broker.Last year the US stopped contributing to the UN Relief and Works Agency (Unrwa), which has been looking after Palestinian refugees since 1949.Last month President Trump officially recognised Israeli sovereignty over the occupied Golan Heights.Peace negotiations between Israel and the Palestinians have been at a standstill since 2014, when a US-brokered attempt to reach a deal collapsed.\")", "_____no_output_____" ], [ "# Fox News:\nlearn.predict(\"Former President Barack Obama said on Saturday that he is worried that progressives are creating a “circular firing squad” as prospective Democratic presidential candidates race to the left on a number of hot topic issues ahead of the 2020 election.“The way we structure democracy requires you to take into account people who don’t agree with you,” he said at an Obama Foundation town hall event in Berlin, according to The New York Post. “And that by definition means you’re not going to get 100 percent of what you want.”BARACK OBAMA STILL BELIEVES BIDEN WOULD BE 'AN EXCELLENT PRESIDENT' AMID INAPPROPRIATE TOUCHING ALLEGATIONS: REPORT“One of the things I do worry about sometimes among progressives … we start sometimes creating what’s called a ‘circular firing squad’ where you start shooting at your allies because one of them has strayed from purity on the issues,” he said.Obama’s remarks come as freshman House Democrats such as Rep. Alexandria Ocasio-Cortez, D-N.Y., have pushed once-fringe positions on Medicare-for-all, the Green New Deal and reparations for slavery. In turn, 2020 presidential hopefuls have also taken some of those positions.In that climate, candidates have come under criticism for their past stances from activists. South Bend Mayor Pete Buttigieg was forced this week to address remarks he made in 2015 when he said that “all lives matter” -- which some activists say is a counterslogan to the “black lives matter” sloganSen. Kamala Harris, D-Calif., meanwhile has been hit by controversy over her past as a prosecutor. 
A scathing op-ed published in January in The New York Times, written by law professor Lara Bazelon, has kickstarted renewed scrutiny.Obama reportedly warns freshmen House Democrats about pricey policy proposalsVideoBazelon says Harris previously 'fought tooth and nail to uphold wrongful convictions that had been secured through official misconduct that included evidence tampering, false testimony and the suppression of crucial information by prosecutors.'Bazelon further suggested that Harris should 'apologize to the wrongfully convicted people she has fought to keep in prison and to do what she can to make sure they get justice' or otherwise make clear she has 'radically broken from her past.'Former vice president under Obama, Joe Biden, meanwhile has faced criticism for inappropriate past physical contact with women, as well a a 1993 speech on crime in which he warned of “predators on our streets”'They are beyond the pale many of those people, beyond the pale,' Biden continued. 'And it's a sad commentary on society. We have no choice but to take them out of society.'The latter was reminiscent of heat 2016 presidential nominee Hillary Clinton took from activists for her description of some gang members as “superpredators” in 1996.Obama himself may not escape criticism in the election cycle. His signature health care legislation, the Affordable Care Act, is quickly being eclipsed by calls from Democrats for single-payer and Medicare-for-all plans. Meanwhile, a number of Democrats have said they are open to reparations for black Americans for slavery -- something that Obama opposed when he was in office.\")", "_____no_output_____" ], [ "#Pseudoscience\nlearn.predict(\"Have you ever clicked on a link like 'What does your favorite animal say about you?' wondering what your love of hedgehogs reveals about your psyche? Or filled out a personality assessment to gain new understanding into whether you’re an introverted or extroverted 'type'? People love turning to these kinds of personality quizzes and tests on the hunt for deep insights into themselves. People tend to believe they have a 'true' and revealing self hidden somewhere deep within, so it’s natural that assessments claiming to unveil it will be appealing.As psychologists, we noticed something striking about assessments that claim to uncover people’s 'true type.' Many of the questions are poorly constructed – their wording can be ambiguous and they often contain forced choices between options that are not opposites. This can be true of BuzzFeed-type quizzes as well as more seemingly sober assessments.On the other hand, assessments created by trained personality psychologists use questions that are more straightforward to interpret. The most notable example is probably the well-respected Big Five Inventory. Rather than sorting people into 'types,' it scores people on the established psychological dimensions of openness to new experience, conscientiousness, extroversion, agreeableness and neuroticism. This simplicity is by design; psychology researchers know that the more respondents struggle to understand the question, the worse the question is.But the lack of rigor in 'type' assessments turns out to be a feature, not a bug, for the general public. What makes tests less valid can ironically make them more interesting. Since most people aren’t trained to think about psychology in a scientifically rigorous way, it stands to reason they also won’t be great at evaluating those assessments. 
We recently conducted series of studies to investigate how consumers view these tests. When people try to answer these harder questions, do they think to themselves 'This question is poorly written'? Or instead do they focus on its difficulty and think 'This question’s deep'? Our results suggest that a desire for deep insight can lead to deep confusion.Confusing difficult for deepIn our first study, we showed people items from both the Big Five and from the Keirsey Temperament Sorter (KTS), a popular 'type' assessment that contains many questions we suspected people find comparatively difficult. Our participants rated each item in two ways. First, they rated difficulty. That is, how confusing and ambiguous did they find it? Second, what was its perceived 'depth'? In other words, to what extent did they feel the item seemed to be getting at something hidden deep in the unconscious?Sure enough, not only were these perceptions correlated, the KTS was seen as both more difficult and deeper. In follow-up studies, we experimentally manipulated difficulty. In one study, we modified Big Five items to make them harder to answer like the KTS items, and again we found that participants rated the more difficult versions as 'deeper.'We also noticed that some personality assessments seem to derive their intrigue from having seemingly nothing to do with personality at all. Take one BuzzFeed quiz, for example, that asks about which colors people associate with abstract concepts like letters and days of the week and then outputs 'the true age of your soul.' Even if people trust BuzzFeed more for entertainment than psychological truths, perhaps they are actually on board with the idea that these difficult, abstract decisions do reveal some deep insights. In fact, that is the entire idea behind classically problematic measures such as the Rorschach, or 'ink blot,' test.In two studies inspired by that BuzzFeed quiz, we found exactly that. We gave people items from purported 'personality assessment' checklists. In one study, we assigned half the participants to the 'difficult' condition, wherein the assessment items required them to choose which of two colors they associated with abstract concepts, like the letter 'M.' In the 'easier' condition, respondents were still required to rate colors on how much they associated them with those abstract concepts, but they more simply rated one color at a time instead of choosing between two.Again, participants rated the difficult version as deeper. Seemingly, the sillier the assessment, the better people think it can read the hidden self.Intuition may steer you wrongOne of the implications of this research is that people are going to have a hard time leaving behind the bad ideas baked into popular yet unscientific personality assessments. The most notable example is the Myers-Briggs Type Indicator, which infamously remains quite popular while doing a fairly poor job of assessing personality, due to longstanding issues with the assessment itself and the long-discredited Jungian theory behind it. Our findings suggest that Myers-Briggs-like assessments that have largely been debunked by experts might persist in part because their formats overlap quite well with people’s intuitions about what will best access the “true self.”People’s intuitions do them no favors here. Intuitions often undermine scientific thinking on topics like physics and biology. Psychology is no different. 
People arbitrarily divide parts of themselves into “true” and superficial components and seem all too willing to believe in tests that claim to definitively make those distinctions. But the idea of a “true self” doesn’t really work as a scientific concept.Some people might be stuck in a self-reinforcing yet unproductive line of thought: Personality assessments can cause confusion. That confusion in turn overlaps with intuitions of how they think their deep psychology works, and then they tell themselves the confusion is profound. So intuitions about psychology might be especially pernicious. Following them too closely could lead you to know less about yourself, not more.\", thresh=0.5)", "_____no_output_____" ], [ "learn.predict(\"PETALUMA, CA — An incident in which a white man was reportedly beaten in downtown Petaluma by a group of suspects the victim described as four or five black men is being investigated as a hate crime and an assault, the Petaluma Police Department said Tuesday in a news release.Petaluma police Lt. Ed Crosby said officers immediately responded at 9:03 p.m. Saturday, March 9 to the intersection of Mary Street at Petaluma Boulevard North to a woman's report that her domestic partner, a 60-year-old white man, had just been attacked.The lieutenant said when officers arrived they found the victim on the ground suffering from numerous facial injuries.The man was rushed to Santa Rosa Memorial Hospital where according to police, he stayed two days. Injuries to the victim were confirmed as a fractured left eye socket, a broken nose and other abrasions to his face including facial swelling, Crosby said.The couple told police that the night of the incident they had just finished eating dinner at a restaurant on Petaluma Boulevard North and were walking westbound toward their car, which was parked on Mary Street, when they passed a group of several African-American men who looked to be in their 20s, standing around a four-door, emerald green Honda Civic.The couple said they did not interact with the group and were continuing on their way when one of the men by the green Honda 'hurled profanity at the victim and referred to his [the victim's] race,' Crosby said.'The victim turned around and saw one of the males rushing at him, swinging his arms,' Crosby said.'The victim grabbed the advancing male, brought him to the ground, and pinned him,' Crosby said. 'In response, the other males by the green Honda repeatedly kicked the victim in the face before getting into the green Honda and fleeing the scene.'Petaluma police are asking anyone with information about the incident to contact or leave a message for Petaluma Police Department Officer Ron Flores by calling 707-778-4372.The victim and his female companion were not able to give many descriptive details about the suspects, the lieutenant said, and thus far, officers' efforts in canvassing the downtown area for any witnesses or video footage that would help identify the suspects have not been successful.The green Honda was missing a front license plate; the back license plate may possibly include the numbers 611, according to police.\", thresh=.5)", "_____no_output_____" ], [ "learn.data.classes", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb38630fd54a0ac6f6a618fcbfcb086c0d448f0e
15,057
ipynb
Jupyter Notebook
test/notebooks/hanford_RF2_1x.ipynb
yeungyh/pickle
90e467aef3b8cdfd1dac009b88a4d38f35622c6f
[ "MIT" ]
null
null
null
test/notebooks/hanford_RF2_1x.ipynb
yeungyh/pickle
90e467aef3b8cdfd1dac009b88a4d38f35622c6f
[ "MIT" ]
null
null
null
test/notebooks/hanford_RF2_1x.ipynb
yeungyh/pickle
90e467aef3b8cdfd1dac009b88a4d38f35622c6f
[ "MIT" ]
null
null
null
34.220455
174
0.565053
[ [ [ "import paths\nimport yaml\nimport os\nimport copy\nimport numpy as np\nimport numpy.random as npr\nimport scipy.optimize as spo\nimport scipy.linalg as spl\nfrom matplotlib import pyplot as plt, collections as mc, patches as mpatches, cm, ticker\nfrom sdfs.geom_mrst import GeomMRST\nfrom sdfs.bc_mrst import BCMRST\nfrom sdfs.darcy import DarcyExp\nfrom sdfs.tpfa import TPFA\nfrom sdfs.dasa import DASAExpLM, DASAExpLMWithFlux\nfrom time import perf_counter\nimport ckli.mapest as mapest\nimport ckli.ckliest_l2reg as ckliest\nimport h5py\nimport GPy", "_____no_output_____" ], [ "plt.rc('text', usetex=True)\nplt.rc('image', cmap='plasma')\n\ndef plot_patch(patches, values, fig, ax, points, title, cmin=None, cmax=None, cb=False):\n p = mc.PatchCollection(patches, cmap=cm.jet)\n p.set_array(values)\n p.set_clim([cmin, cmax])\n ax.add_collection(p)\n if points is not None:\n ax.plot(*points, 'ko', markersize=0.5)\n ax.set_aspect('equal')\n ax.axis('off')\n ax.autoscale(tight=True)\n #ax.set_title(title)\n if cb:\n fig.colorbar(p, ax=ax)\n return p", "_____no_output_____" ], [ "# Parameters\nseed = 0\nnum_trials = 1\nres_fac = 1\nresolution = '1x'\nresolution_fine = '16x'\nNYobs = 100\nNYlearn = NYobs\nNYrefobs = 50\nNYxi = 1000\nNuxi = 1000\nNens = 5000\nbeta_ckli = 1e1\nYgamma_ckli = 1e-4\nugamma_ckli = 1e-4\ngamma_map = 1e-6\nstd_dev_ref = 1.0\ncor_len_ref = 0.1\nNeumann_sd = 0\nlsq_method = 'trf'\ndata_path = '../data/'\nresults_path = '../results/'\nfigures_path = '../figures/'\ngeom_filename = data_path + f'geom/geom_{resolution}.mat'\ngeom_fine_filename = data_path + f'geom/geom_{resolution_fine}.mat'\nbc_filename = data_path + f'bc/bc_{resolution}.mat'\nconduct_filename = data_path + f'RF2/conduct_log_RF2_{NYrefobs}_{resolution}.mat'\nwell_cells_filename = data_path + f'well_cells/well_cells_{resolution}.mat'\nyobs_filename = data_path + f'yobs/yobs_{NYobs}_{resolution}.npy'\nyobs_fine_filename = data_path + f'yobs/yobs_{NYobs}_{resolution_fine}.npy'\nref = f\"Yref=RF2_{NYrefobs}_{resolution}\"", "_____no_output_____" ], [ "Yfac = 7.0 # Rescaling factor for log-conductivity. 
Must be applied to Yref and the BCs\n\ngeom = GeomMRST(geom_filename)\nbc = BCMRST(geom, bc_filename)\nbc.rescale('N', Yfac)\nprob = DarcyExp(TPFA(geom, bc), None)\n\nNc = geom.cells.num\nNinf = geom.faces.num_interior\nprint(f'Ninf = {Ninf}, Nc = {Nc}')", "_____no_output_____" ], [ "patches = [mpatches.Polygon(v, closed=True) for v in geom.nodes.coords.T[geom.cells.nodes.T, :]]", "_____no_output_____" ], [ "# Observations\nrs = npr.RandomState(seed)\n\n# Read stochastic model from GPML output\nwith h5py.File(conduct_filename, 'r') as f:\n Yref = f.get('mu')[:].ravel() - Yfac\n xrefYobs = f.get('xYobs')[:]\n\nuref = prob.randomize_bc('N', Neumann_sd).solve(Yref)\n\n# u observations\nwith h5py.File(well_cells_filename, 'r') as f:\n iuobs = f.get('well_cells')[:].ravel() - 1\nuobs = uref[iuobs]\nNuobs = iuobs.size", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(4, 4))\np = plot_patch(patches, Yref + Yfac, fig, ax, xrefYobs, 'Yref', 0, 12)\ncbar = fig.colorbar(p, ax=ax)\ncbar.ax.tick_params(labelsize='30')\ncbar.locator = ticker.MaxNLocator(nbins=7)\ncbar.update_ticks()\nfig.tight_layout()\nfig.savefig(figures_path + f'ref/Yref_{ref}.pdf', dpi=300)", "_____no_output_____" ], [ "rl2e = lambda yest, yref : spl.norm(yest - yref, 2) / spl.norm(yref, 2)\ninfe = lambda yest, yref : spl.norm(yest - yref, np.inf)", "_____no_output_____" ], [ "if os.path.exists(yobs_filename):\n print(f\"iYobs set read from file {yobs_filename}\")\n iYobs = np.load(yobs_filename)\nelif os.path.exists(yobs_fine_filename):\n print(f\"iYobs set read from file {yobs_fine_filename} and randomly selected nearby cell\")\n iYobs_fine = np.load(yobs_fine_filename)\n geom_fine = GeomMRST(geom_fine_filename)\n iYobs = np.array([geom.anyCellsWithin(geom_fine.nodes.coords.T[geom_fine.cells.nodes.T[iYobs_fine[t]]]) for t in range(num_trials)])\n np.save(yobs_filename, iYobs)\nelse:\n print(f\"iYobs set randomly generated and saved to {yobs_filename}\")\n iYobs = np.array([np.sort(rs.choice(Nc, NYobs, replace=False)) for _ in range(num_trials)])\n np.save(yobs_filename, iYobs)\nprint(f\"{iYobs.shape=}\")\nprint(iYobs)", "_____no_output_____" ], [ "exp = f'NY={NYobs}_Nu={iuobs.size}_{NYlearn=}_{Nuxi=}_{NYxi=}_beta={beta_ckli}_gamma={ugamma_ckli}_Neumann_sd={Neumann_sd}_{lsq_method=}_h1reg_{ref}'\nprint(exp)", "_____no_output_____" ], [ "timings = np.zeros((num_trials, 6))\nnfevs = np.zeros((num_trials, 3), dtype=int)\nrel_errors = np.zeros((num_trials, 4))\nabs_errors = np.zeros((num_trials, 4))\n\nYobs = np.zeros((num_trials, NYobs))\nYpred = np.zeros((num_trials, Nc))\nCYpred = np.zeros((num_trials, Nc, Nc))\numean = np.zeros((num_trials, Nc))\nCu = np.zeros((num_trials, Nc, Nc))\nupred = np.zeros((num_trials, Nc))\nCupred = np.zeros((num_trials, Nc, Nc))\n\nPsiY = np.zeros((num_trials, Nc, NYxi))\nLambdaY = np.zeros((num_trials, NYxi))\nPsiu = np.zeros((num_trials, Nc, Nuxi))\nLambdau = np.zeros((num_trials, Nuxi))\n\nYxi = np.zeros((num_trials, NYxi))\nuxi = np.zeros((num_trials, Nuxi))\nYest = np.zeros((num_trials, Nc))\nuest = np.zeros((num_trials, Nc))\nYest_MAPH1 = np.zeros((num_trials, Nc))\nif Neumann_sd != 0:\n Nq = np.count_nonzero(bc.kind == 'N')\n q_MAPH1 = np.zeros((num_trials, Nq))", "_____no_output_____" ], [ "for t in range(num_trials):\n Yobs[t] = Yref[iYobs[t]]\n\n ts = perf_counter()\n klearn = GPy.kern.sde_Matern52(input_dim=2, variance=std_dev_ref**2, lengthscale=cor_len_ref)\n mYlearn = GPy.models.GPRegression(geom.cells.centroids[:, iYobs[t]].T, Yobs[t, :,None], klearn, 
noise_var=np.sqrt(np.finfo(float).eps))\n mYlearn.optimize(messages=True, ipython_notebook=False)\n print(f\"{klearn.lengthscale.values[0]=}\")\n print(f\"{np.sqrt(klearn.variance.values[0])=}\")\n\n mYref = GPy.models.GPRegression(geom.cells.centroids[:, iYobs[t]].T, Yobs[t, :, None], mYlearn.kern, noise_var=np.sqrt(np.finfo(float).eps))\n Ypred[t], CYpred[t] = (lambda x, y : (x.ravel(), y))(*mYref.predict_noiseless(geom.cells.centroids.T, full_cov=True))\n timings[t, 0] = perf_counter() - ts\n\nprint(f\"GPR: {timings[:, 0]} s\")", "_____no_output_____" ], [ "for t in range(num_trials):\n # Compute GP model for u\n ts = perf_counter()\n umean[t], Cu[t] = ckliest.smc_gp(Ypred[t], CYpred[t], Nens, copy.deepcopy(prob), rs, randomize_bc=True, randomize_scale=Neumann_sd)\n upred[t], Cupred[t] = ckliest.gpr(umean[t], Cu[t], uobs, iuobs)\n timings[t, 1] = perf_counter() - ts\n\nprint(f\"Monte Carlo: {timings[:, 1]} s\")", "_____no_output_____" ], [ "# PICKLE models\nYm = Ypred\nCYm = CYpred\num = umean #or change to upred\nCum = Cu #or change to Cupred\n\nrel_errors[:, 0] = np.array([rl2e(Ym[t], Yref) for t in range(num_trials)])\nabs_errors[:, 0] = np.array([infe(Ym[t], Yref) for t in range(num_trials)])\n\nprint(f\"GPR\\tRelative error: {rel_errors[:, 0]}\")\nprint(f\"GPR\\tInfinity error: {abs_errors[:, 0]}\")", "_____no_output_____" ], [ "for t in range(num_trials):\n ts = perf_counter()\n PsiY[t], LambdaY[t] = ckliest.KL_via_eigh(CYm[t], NYxi)\n Psiu[t], Lambdau[t] = ckliest.KL_via_eigh(Cum[t], Nuxi)\n timings[t, 2] = perf_counter() - ts\n\nprint(f\"eigendecomposition: {timings[:, 2]} s\")", "_____no_output_____" ], [ "# PICKLE estimate\nssv = None if Neumann_sd == 0 else np.delete(np.arange(Nc), np.unique(geom.cells.to_hf[2*geom.faces.num_interior:][bc.kind == 'N']))\n\nfor t in range(num_trials):\n res = ckliest.LeastSqRes(NYxi, Ym[t], PsiY[t], Nuxi, um[t], Psiu[t], prob, ugamma_ckli, Ygamma_ckli, res_fac, iuobs, uobs, iYobs[t], Yobs[t], beta_ckli, ssv=ssv)\n x0 = np.zeros(Nuxi + NYxi)\n \n ts = perf_counter()\n sol = spo.least_squares(res.val, x0, jac=res.jac, method=lsq_method, verbose=2)\n ckli_status = sol.status\n timings[t, 3] = perf_counter() - ts\n nfevs[t, 0] = sol.nfev\n print(f'CKLI optimality: {sol.optimality : g}')\n\n uxi[t] = sol.x[:Nuxi]\n Yxi[t] = sol.x[Nuxi:]\n uest[t] = um[t] + Psiu[t] @ uxi[t]\n Yest[t] = Ym[t] + PsiY[t] @ Yxi[t]\n\nrel_errors[:, 1] = np.array([rl2e(Yest[t], Yref) for t in range(num_trials)])\nabs_errors[:, 1] = np.array([infe(Yest[t], Yref) for t in range(num_trials)])\n\nprint(f\"PICKLE: {timings[:, 3]} s\")\nprint(f\"PICKLE\\trelative L2 error: {rel_errors[:, 1]}\")\nprint(f\"PICKLE\\tabsolute Infinity error: {abs_errors[:, 1]}\")", "_____no_output_____" ], [ "# MAP H1 estimate\nLreg = mapest.compute_Lreg(geom)\nfor t in range(num_trials):\n if Neumann_sd == 0:\n loss = mapest.LossVec(Nc, Nc, iuobs, uobs, iYobs[t], Yobs[t], gamma_map, Lreg) # H1 regularization\n dasa = DASAExpLM(loss.val, loss.grad_u, loss.grad_Y, prob.solve, prob.residual_sens_u, prob.residual_sens_Y)\n ts = perf_counter()\n sol = spo.least_squares(dasa.obj, np.zeros(Nc), jac=dasa.grad, method=lsq_method, verbose=2)\n Yest_MAPH1[t] = sol.x\n else:\n loss = mapest.LossVecWithFlux(Nc, Nc, Nq, iuobs, uobs, iYobs[t], Yobs[t], gamma_map, Lreg) # H1 regularization\n dasa = DASAExpLMWithFlux(Nc, loss.val, loss.grad_u, loss.grad_p, prob.solve, prob.residual_sens_u, prob.residual_sens_p)\n ts = perf_counter()\n sol = spo.least_squares(dasa.obj, np.zeros(Nc + Nq), jac=dasa.grad, 
method=lsq_method, verbose=2)\n Yest_MAPH1[t] = sol.x[:Nc]\n q_MAPH1[t] = sol.x[Nc:]\n MAP_status = sol.status\n timings[t, 4] = perf_counter() - ts\n nfevs[t, 1] = sol.nfev\n print(f'MAP status: {MAP_status}, message: {sol.message}')\n\nrel_errors[:, 2] = np.array([rl2e(Yest_MAPH1[t], Yref) for t in range(num_trials)])\nabs_errors[:, 2] = np.array([infe(Yest_MAPH1[t], Yref) for t in range(num_trials)])\n\nprint(f\"MAP: {timings[:, 4]} s\")\nprint(f\"MAP\\trelative L2 error: {rel_errors[:, 2]}\")\nprint(f\"MAP\\tabsolute infinity error: {abs_errors[:, 2]}\")", "_____no_output_____" ], [ "np.savetxt(results_path + f'iYobs/iYobs_{exp}.txt', iYobs.astype(int), fmt='%i')\nnp.savetxt(results_path + f'timings/timings_{exp}.txt', timings)\nnp.savetxt(results_path + f'nfevs/nfevs_{exp}.txt', nfevs.astype(int), fmt='%i')\nnp.savetxt(results_path + f'rel_errors/rel_errors_{exp}.txt', rel_errors)\nnp.savetxt(results_path + f'abs_errors/abs_errors_{exp}.txt', abs_errors)\nnp.savetxt(results_path + f'YGPR/YGPR_{exp}.txt', Yref)\nnp.savetxt(results_path + f'YPICKLE/YPICKLE_{exp}.txt', Yest)\nnp.savetxt(results_path + f'YMAP/YMAP_{exp}.txt', Yest_MAPH1)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3882211062670ca663898143866f6f9fecc5b3
198,208
ipynb
Jupyter Notebook
sentiment-analysis-network/Sentiment_Classification_Solutions.ipynb
bfMendonca/deep-learning-v2-pytorch
23589b10221c4a23601d338ee32d4fed78ae052a
[ "MIT" ]
null
null
null
sentiment-analysis-network/Sentiment_Classification_Solutions.ipynb
bfMendonca/deep-learning-v2-pytorch
23589b10221c4a23601d338ee32d4fed78ae052a
[ "MIT" ]
null
null
null
sentiment-analysis-network/Sentiment_Classification_Solutions.ipynb
bfMendonca/deep-learning-v2-pytorch
23589b10221c4a23601d338ee32d4fed78ae052a
[ "MIT" ]
null
null
null
71.349172
42,260
0.721939
[ [ [ "# Sentiment Classification & How To \"Frame Problems\" for a Neural Network\n\nby Andrew Trask\n\n- **Twitter**: @iamtrask\n- **Blog**: http://iamtrask.github.io", "_____no_output_____" ], [ "### What You Should Already Know\n\n- neural networks, forward and back-propagation\n- stochastic gradient descent\n- mean squared error\n- and train/test splits\n\n### Where to Get Help if You Need it\n- Re-watch previous Udacity Lectures\n- Leverage the recommended Course Reading Material - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) (Check inside your classroom for a discount code)\n- Shoot me a tweet @iamtrask\n\n\n### Tutorial Outline:\n\n- Intro: The Importance of \"Framing a Problem\" (this lesson)\n\n\n- [Curate a Dataset](#lesson_1)\n- [Developing a \"Predictive Theory\"](#lesson_2)\n- [**PROJECT 1**: Quick Theory Validation](#project_1)\n\n\n- [Transforming Text to Numbers](#lesson_3)\n- [**PROJECT 2**: Creating the Input/Output Data](#project_2)\n\n\n- Putting it all together in a Neural Network (video only - nothing in notebook)\n- [**PROJECT 3**: Building our Neural Network](#project_3)\n\n\n- [Understanding Neural Noise](#lesson_4)\n- [**PROJECT 4**: Making Learning Faster by Reducing Noise](#project_4)\n\n\n- [Analyzing Inefficiencies in our Network](#lesson_5)\n- [**PROJECT 5**: Making our Network Train and Run Faster](#project_5)\n\n\n- [Further Noise Reduction](#lesson_6)\n- [**PROJECT 6**: Reducing Noise by Strategically Reducing the Vocabulary](#project_6)\n\n\n- [Analysis: What's going on in the weights?](#lesson_7)", "_____no_output_____" ], [ "# Lesson: Curate a Dataset<a id='lesson_1'></a>", "_____no_output_____" ] ], [ [ "def pretty_print_review_and_label(i):\n print(labels[i] + \"\\t:\\t\" + reviews[i][:80] + \"...\")\n\ng = open('reviews.txt','r') # What we know!\nreviews = list(map(lambda x:x[:-1],g.readlines()))\ng.close()\n\ng = open('labels.txt','r') # What we WANT to know!\nlabels = list(map(lambda x:x[:-1].upper(),g.readlines()))\ng.close()", "_____no_output_____" ] ], [ [ "**Note:** The data in `reviews.txt` we're using has already been preprocessed a bit and contains only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like `The`, `the`, and `THE`, all the same way.", "_____no_output_____" ] ], [ [ "len(reviews)", "_____no_output_____" ], [ "reviews[0]", "_____no_output_____" ], [ "labels[0]", "_____no_output_____" ] ], [ [ "# Lesson: Develop a Predictive Theory<a id='lesson_2'></a>", "_____no_output_____" ] ], [ [ "print(\"labels.txt \\t : \\t reviews.txt\\n\")\npretty_print_review_and_label(2137)\npretty_print_review_and_label(12816)\npretty_print_review_and_label(6267)\npretty_print_review_and_label(21934)\npretty_print_review_and_label(5297)\npretty_print_review_and_label(4998)", "labels.txt \t : \t reviews.txt\n\nNEGATIVE\t:\tthis movie is terrible but it has some good effects . ...\nPOSITIVE\t:\tadrian pasdar is excellent is this film . he makes a fascinating woman . ...\nNEGATIVE\t:\tcomment this movie is impossible . is terrible very improbable bad interpretat...\nPOSITIVE\t:\texcellent episode movie ala pulp fiction . days suicides . it doesnt get more...\nNEGATIVE\t:\tif you haven t seen this it s terrible . it is pure trash . 
i saw this about ...\nPOSITIVE\t:\tthis schiffer guy is a real genius the movie is of excellent quality and both e...\n" ] ], [ [ "# Project 1: Quick Theory Validation<a id='project_1'></a>\n\nThere are multiple ways to implement these projects, but in order to get your code closer to what Andrew shows in his solutions, we've provided some hints and starter code throughout this notebook.\n\nYou'll find the [Counter](https://docs.python.org/2/library/collections.html#collections.Counter) class to be useful in this exercise, as well as the [numpy](https://docs.scipy.org/doc/numpy/reference/) library.", "_____no_output_____" ] ], [ [ "from collections import Counter\nimport numpy as np", "_____no_output_____" ] ], [ [ "We'll create three `Counter` objects, one for words from postive reviews, one for words from negative reviews, and one for all the words.", "_____no_output_____" ] ], [ [ "# Create three Counter objects to store positive, negative and total counts\npositive_counts = Counter()\nnegative_counts = Counter()\ntotal_counts = Counter()", "_____no_output_____" ] ], [ [ "**TODO:** Examine all the reviews. For each word in a positive review, increase the count for that word in both your positive counter and the total words counter; likewise, for each word in a negative review, increase the count for that word in both your negative counter and the total words counter.\n\n**Note:** Throughout these projects, you should use `split(' ')` to divide a piece of text (such as a review) into individual words. If you use `split()` instead, you'll get slightly different results than what the videos and solutions show.", "_____no_output_____" ] ], [ [ "# Loop over all the words in all the reviews and increment the counts in the appropriate counter objects\nfor i in range(len(reviews)):\n if(labels[i] == 'POSITIVE'):\n for word in reviews[i].split(\" \"):\n positive_counts[word] += 1\n total_counts[word] += 1\n else:\n for word in reviews[i].split(\" \"):\n negative_counts[word] += 1\n total_counts[word] += 1", "_____no_output_____" ] ], [ [ "Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used. ", "_____no_output_____" ] ], [ [ "# Examine the counts of the most common words in positive reviews\npositive_counts.most_common()", "_____no_output_____" ], [ "# Examine the counts of the most common words in negative reviews\nnegative_counts.most_common()", "_____no_output_____" ] ], [ [ "As you can see, common words like \"the\" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the **ratios** of word usage between positive and negative reviews.\n\n**TODO:** Check all the words you've seen and calculate the ratio of postive to negative uses and store that ratio in `pos_neg_ratios`. \n>Hint: the positive-to-negative ratio for a given word can be calculated with `positive_counts[word] / float(negative_counts[word]+1)`. 
Notice the `+1` in the denominator – that ensures we don't divide by zero for words that are only seen in positive reviews.", "_____no_output_____" ] ], [ [ "pos_neg_ratios = Counter()\n\n# Calculate the ratios of positive and negative uses of the most common words\n# Consider words to be \"common\" if they've been used at least 100 times\nfor term,cnt in list(total_counts.most_common()):\n if(cnt > 100):\n pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)\n pos_neg_ratios[term] = pos_neg_ratio", "_____no_output_____" ] ], [ [ "Examine the ratios you've calculated for a few words:", "_____no_output_____" ] ], [ [ "print(\"Pos-to-neg ratio for 'the' = {}\".format(pos_neg_ratios[\"the\"]))\nprint(\"Pos-to-neg ratio for 'amazing' = {}\".format(pos_neg_ratios[\"amazing\"]))\nprint(\"Pos-to-neg ratio for 'terrible' = {}\".format(pos_neg_ratios[\"terrible\"]))", "_____no_output_____" ] ], [ [ "Looking closely at the values you just calculated, we see the following: \n\n* Words that you would expect to see more often in positive reviews – like \"amazing\" – have a ratio greater than 1. The more skewed a word is toward postive, the farther from 1 its positive-to-negative ratio will be.\n* Words that you would expect to see more often in negative reviews – like \"terrible\" – have positive values that are less than 1. The more skewed a word is toward negative, the closer to zero its positive-to-negative ratio will be.\n* Neutral words, which don't really convey any sentiment because you would expect to see them in all sorts of reviews – like \"the\" – have values very close to 1. A perfectly neutral word – one that was used in exactly the same number of positive reviews as negative reviews – would be almost exactly 1. The `+1` we suggested you add to the denominator slightly biases words toward negative, but it won't matter because it will be a tiny bias and later we'll be ignoring words that are too close to neutral anyway.\n\nOk, the ratios tell us which words are used more often in postive or negative reviews, but the specific values we've calculated are a bit difficult to work with. A very positive word like \"amazing\" has a value above 4, whereas a very negative word like \"terrible\" has a value around 0.18. Those values aren't easy to compare for a couple of reasons:\n\n* Right now, 1 is considered neutral, but the absolute value of the postive-to-negative rations of very postive words is larger than the absolute value of the ratios for the very negative words. So there is no way to directly compare two numbers and see if one word conveys the same magnitude of positive sentiment as another word conveys negative sentiment. So we should center all the values around netural so the absolute value fro neutral of the postive-to-negative ratio for a word would indicate how much sentiment (positive or negative) that word conveys.\n* When comparing absolute values it's easier to do that around zero than one. \n\nTo fix these issues, we'll convert all of our ratios to new values using logarithms.\n\n**TODO:** Go through all the ratios you calculated and convert them to logarithms. (i.e. 
use `np.log(ratio)`)\n\nIn the end, extremely positive and extremely negative words will have positive-to-negative ratios with similar magnitudes but opposite signs.", "_____no_output_____" ] ], [ [ "# Convert ratios to logs\nfor word,ratio in pos_neg_ratios.most_common():\n pos_neg_ratios[word] = np.log(ratio)", "_____no_output_____" ] ], [ [ "**NOTE:** In the video, Andrew uses the following formulas for the previous cell:\n> * For any postive words, convert the ratio using `np.log(ratio)`\n> * For any negative words, convert the ratio using `-np.log(1/(ratio + 0.01))`\n\nThese won't give you the exact same results as the simpler code we show in this notebook, but the values will be similar. In case that second equation looks strange, here's what it's doing: First, it divides one by a very small number, which will produce a larger positive number. Then, it takes the `log` of that, which produces numbers similar to the ones for the postive words. Finally, it negates the values by adding that minus sign up front. The results are extremely positive and extremely negative words having positive-to-negative ratios with similar magnitudes but oppositite signs, just like when we use `np.log(ratio)`.", "_____no_output_____" ], [ "Examine the new ratios you've calculated for the same words from before:", "_____no_output_____" ] ], [ [ "print(\"Pos-to-neg ratio for 'the' = {}\".format(pos_neg_ratios[\"the\"]))\nprint(\"Pos-to-neg ratio for 'amazing' = {}\".format(pos_neg_ratios[\"amazing\"]))\nprint(\"Pos-to-neg ratio for 'terrible' = {}\".format(pos_neg_ratios[\"terrible\"]))", "_____no_output_____" ] ], [ [ "If everything worked, now you should see neutral words with values close to zero. In this case, \"the\" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at \"amazing\"'s ratio - it's above `1`, showing it is clearly a word with positive sentiment. And \"terrible\" has a similar score, but in the opposite direction, so it's below `-1`. It's now clear that both of these words are associated with specific, opposing sentiments.\n\nNow run the following cells to see more ratios. \n\nThe first cell displays all the words, ordered by how associated they are with postive reviews. (Your notebook will most likely truncate the output so you won't actually see *all* the words in the list.)\n\nThe second cell displays the 30 words most associated with negative reviews by reversing the order of the first list and then looking at the first 30 words. (If you want the second cell to display all the words, ordered by how associated they are with negative reviews, you could just write `reversed(pos_neg_ratios.most_common())`.)\n\nYou should continue to see values similar to the earlier ones we checked – neutral words will be close to `0`, words will get more positive as their ratios approach and go above `1`, and words will get more negative as their ratios approach and go below `-1`. 
That's why we decided to use the logs instead of the raw ratios.", "_____no_output_____" ] ], [ [ "# words most frequently seen in a review with a \"POSITIVE\" label\npos_neg_ratios.most_common()", "_____no_output_____" ], [ "# words most frequently seen in a review with a \"NEGATIVE\" label\nlist(reversed(pos_neg_ratios.most_common()))[0:30]\n\n# Note: Above is the code Andrew uses in his solution video, \n# so we've included it here to avoid confusion.\n# If you explore the documentation for the Counter class, \n# you will see you could also find the 30 least common\n# words like this: pos_neg_ratios.most_common()[:-31:-1]", "_____no_output_____" ] ], [ [ "# End of Project 1. \n## Watch the next video to continue with Andrew's next lesson.\n\n# Transforming Text into Numbers<a id='lesson_3'></a>", "_____no_output_____" ] ], [ [ "from IPython.display import Image\n\nreview = \"This was a horrible, terrible movie.\"\n\nImage(filename='sentiment_network.png')", "_____no_output_____" ], [ "review = \"The movie was excellent\"\n\nImage(filename='sentiment_network_pos.png')", "_____no_output_____" ] ], [ [ "# Project 2: Creating the Input/Output Data<a id='project_2'></a>\n\n**TODO:** Create a [set](https://docs.python.org/3/tutorial/datastructures.html#sets) named `vocab` that contains every word in the vocabulary.", "_____no_output_____" ] ], [ [ "vocab = set(total_counts.keys())", "_____no_output_____" ] ], [ [ "Run the following cell to check your vocabulary size. If everything worked correctly, it should print **74074**", "_____no_output_____" ] ], [ [ "vocab_size = len(vocab)\nprint(vocab_size)", "_____no_output_____" ] ], [ [ "Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. `layer_0` is the input layer, `layer_1` is a hidden layer, and `layer_2` is the output layer.", "_____no_output_____" ] ], [ [ "from IPython.display import Image\nImage(filename='sentiment_network_2.png')", "_____no_output_____" ] ], [ [ "**TODO:** Create a numpy array called `layer_0` and initialize it to all zeros. You will find the [zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html) function particularly helpful here. Be sure you create `layer_0` as a 2-dimensional matrix with 1 row and `vocab_size` columns. ", "_____no_output_____" ] ], [ [ "layer_0 = np.zeros((1,vocab_size))", "_____no_output_____" ] ], [ [ "Run the following cell. It should display `(1, 74074)`", "_____no_output_____" ] ], [ [ "layer_0.shape", "_____no_output_____" ], [ "from IPython.display import Image\nImage(filename='sentiment_network.png')", "_____no_output_____" ] ], [ [ "`layer_0` contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.", "_____no_output_____" ] ], [ [ "# Create a dictionary of words in the vocabulary mapped to index positions \n# (to be used in layer_0)\nword2index = {}\nfor i,word in enumerate(vocab):\n word2index[word] = i\n \n# display the map of words to indices\nword2index", "_____no_output_____" ] ], [ [ "**TODO:** Complete the implementation of `update_input_layer`. 
It should count \n how many times each word is used in the given review, and then store\n those counts at the appropriate indices inside `layer_0`.", "_____no_output_____" ] ], [ [ "def update_input_layer(review):\n \"\"\" Modify the global layer_0 to represent the vector form of review.\n The element at a given index of layer_0 should represent\n how many times the given word occurs in the review.\n Args:\n review(string) - the string of the review\n Returns:\n None\n \"\"\"\n \n global layer_0\n \n # clear out previous state, reset the layer to be all 0s\n layer_0 *= 0\n \n # count how many times each word is used in the given review and store the results in layer_0 \n for word in review.split(\" \"):\n layer_0[0][word2index[word]] += 1", "_____no_output_____" ] ], [ [ "Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in `layer_0`. ", "_____no_output_____" ] ], [ [ "update_input_layer(reviews[0])\nlayer_0", "_____no_output_____" ] ], [ [ "**TODO:** Complete the implementation of `get_target_for_labels`. It should return `0` or `1`, \n depending on whether the given label is `NEGATIVE` or `POSITIVE`, respectively.", "_____no_output_____" ] ], [ [ "def get_target_for_label(label):\n \"\"\"Convert a label to `0` or `1`.\n Args:\n label(string) - Either \"POSITIVE\" or \"NEGATIVE\".\n Returns:\n `0` or `1`.\n \"\"\"\n if(label == 'POSITIVE'):\n return 1\n else:\n return 0", "_____no_output_____" ] ], [ [ "Run the following two cells. They should print out`'POSITIVE'` and `1`, respectively.", "_____no_output_____" ] ], [ [ "labels[0]", "_____no_output_____" ], [ "get_target_for_label(labels[0])", "_____no_output_____" ] ], [ [ "Run the following two cells. They should print out `'NEGATIVE'` and `0`, respectively.", "_____no_output_____" ] ], [ [ "labels[1]", "_____no_output_____" ], [ "get_target_for_label(labels[1])", "_____no_output_____" ] ], [ [ "# End of Project 2 solution. \n## Watch the next video to continue with Andrew's next lesson.", "_____no_output_____" ], [ "# Project 3: Building a Neural Network<a id='project_3'></a>", "_____no_output_____" ], [ "**TODO:** We've included the framework of a class called `SentimentNetork`. Implement all of the items marked `TODO` in the code. These include doing the following:\n- Create a basic neural network much like the networks you've seen in earlier lessons and in Project 1, with an input layer, a hidden layer, and an output layer. \n- Do **not** add a non-linearity in the hidden layer. 
That is, do not use an activation function when calculating the hidden layer outputs.\n- Re-use the code from earlier in this notebook to create the training data (see `TODO`s in the code)\n- Implement the `pre_process_data` function to create the vocabulary for our training data generating functions\n- Ensure `train` trains over the entire corpus", "_____no_output_____" ], [ "### Where to Get Help if You Need it\n- Re-watch previous week's Udacity Lectures\n- Chapters 3-5 - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) - (Check inside your classroom for a discount code)", "_____no_output_____" ] ], [ [ "import time\nimport sys\nimport numpy as np\n\n# Encapsulate our neural network in a class\nclass SentimentNetwork:\n def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):\n \"\"\"Create a SentimenNetwork with the given settings\n Args:\n reviews(list) - List of reviews used for training\n labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews\n hidden_nodes(int) - Number of nodes to create in the hidden layer\n learning_rate(float) - Learning rate to use while training\n \n \"\"\"\n # Assign a seed to our random number generator to ensure we get\n # reproducable results during development \n np.random.seed(1)\n\n # process the reviews and their associated labels so that everything\n # is ready for training\n self.pre_process_data(reviews, labels)\n \n # Build the network to have the number of hidden nodes and the learning rate that\n # were passed into this initializer. Make the same number of input nodes as\n # there are vocabulary words and create a single output node.\n self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)\n\n def pre_process_data(self, reviews, labels):\n \n # populate review_vocab with all of the words in the given reviews\n review_vocab = set()\n for review in reviews:\n for word in review.split(\" \"):\n review_vocab.add(word)\n\n # Convert the vocabulary set to a list so we can access words via indices\n self.review_vocab = list(review_vocab)\n \n # populate label_vocab with all of the words in the given labels.\n label_vocab = set()\n for label in labels:\n label_vocab.add(label)\n \n # Convert the label vocabulary set to a list so we can access labels via indices\n self.label_vocab = list(label_vocab)\n \n # Store the sizes of the review and label vocabularies.\n self.review_vocab_size = len(self.review_vocab)\n self.label_vocab_size = len(self.label_vocab)\n \n # Create a dictionary of words in the vocabulary mapped to index positions\n self.word2index = {}\n for i, word in enumerate(self.review_vocab):\n self.word2index[word] = i\n \n # Create a dictionary of labels mapped to index positions\n self.label2index = {}\n for i, label in enumerate(self.label_vocab):\n self.label2index[label] = i\n \n def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Store the learning rate\n self.learning_rate = learning_rate\n\n # Initialize weights\n\n # These are the weights between the input layer and the hidden layer.\n self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))\n \n # These are the weights between the hidden layer and the output layer.\n self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n \n # The 
input layer, a two-dimensional matrix with shape 1 x input_nodes\n self.layer_0 = np.zeros((1,input_nodes))\n \n def update_input_layer(self,review):\n\n # clear out previous state, reset the layer to be all 0s\n self.layer_0 *= 0\n \n for word in review.split(\" \"):\n # NOTE: This if-check was not in the version of this method created in Project 2,\n # and it appears in Andrew's Project 3 solution without explanation. \n # It simply ensures the word is actually a key in word2index before\n # accessing it, which is important because accessing an invalid key\n # with raise an exception in Python. This allows us to ignore unknown\n # words encountered in new reviews.\n if(word in self.word2index.keys()):\n self.layer_0[0][self.word2index[word]] += 1\n \n def get_target_for_label(self,label):\n if(label == 'POSITIVE'):\n return 1\n else:\n return 0\n \n def sigmoid(self,x):\n return 1 / (1 + np.exp(-x))\n \n def sigmoid_output_2_derivative(self,output):\n return output * (1 - output)\n \n def train(self, training_reviews, training_labels):\n \n # make sure out we have a matching number of reviews and labels\n assert(len(training_reviews) == len(training_labels))\n \n # Keep track of correct predictions to display accuracy during training \n correct_so_far = 0\n\n # Remember when we started for printing time statistics\n start = time.time()\n \n # loop through all the given reviews and run a forward and backward pass,\n # updating weights for every item\n for i in range(len(training_reviews)):\n \n # Get the next review and its correct label\n review = training_reviews[i]\n label = training_labels[i]\n \n #### Implement the forward pass here ####\n ### Forward pass ###\n\n # Input Layer\n self.update_input_layer(review)\n\n # Hidden layer\n layer_1 = self.layer_0.dot(self.weights_0_1)\n\n # Output layer\n layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))\n \n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # Output error\n layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.\n layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)\n\n # Backpropagated error\n layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer\n layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error\n\n # Update the weights\n self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step\n self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step\n\n # Keep track of correct predictions.\n if(layer_2 >= 0.5 and label == 'POSITIVE'):\n correct_so_far += 1\n elif(layer_2 < 0.5 and label == 'NEGATIVE'):\n correct_so_far += 1\n \n # For debug purposes, print out our prediction accuracy and speed \n # throughout the training process. 
\n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(training_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct_so_far) + \" #Trained:\" + str(i+1) \\\n + \" Training Accuracy:\" + str(correct_so_far * 100 / float(i+1))[:4] + \"%\")\n if(i % 2500 == 0):\n print(\"\")\n \n def test(self, testing_reviews, testing_labels):\n \"\"\"\n Attempts to predict the labels for the given testing_reviews,\n and uses the test_labels to calculate the accuracy of those predictions.\n \"\"\"\n \n # keep track of how many correct predictions we make\n correct = 0\n\n # we'll time how many predictions per second we make\n start = time.time()\n\n # Loop through each of the given reviews and call run to predict\n # its label. \n for i in range(len(testing_reviews)):\n pred = self.run(testing_reviews[i])\n if(pred == testing_labels[i]):\n correct += 1\n \n # For debug purposes, print out our prediction accuracy and speed \n # throughout the prediction process. \n\n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(testing_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct) + \" #Tested:\" + str(i+1) \\\n + \" Testing Accuracy:\" + str(correct * 100 / float(i+1))[:4] + \"%\")\n \n def run(self, review):\n \"\"\"\n Returns a POSITIVE or NEGATIVE prediction for the given review.\n \"\"\"\n # Run a forward pass through the network, like in the \"train\" function.\n \n # Input Layer\n self.update_input_layer(review.lower())\n\n # Hidden layer\n layer_1 = self.layer_0.dot(self.weights_0_1)\n\n # Output layer\n layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))\n \n # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer;\n # return NEGATIVE for other values\n if(layer_2[0] >= 0.5):\n return \"POSITIVE\"\n else:\n return \"NEGATIVE\"\n ", "_____no_output_____" ] ], [ [ "Run the following cell to create a `SentimentNetwork` that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of `0.1`.", "_____no_output_____" ] ], [ [ "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)", "_____no_output_____" ] ], [ [ "Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set). \n\n**We have not trained the model yet, so the results should be about 50% as it will just be guessing and there are only two possible values to choose from.**", "_____no_output_____" ] ], [ [ "mlp.test(reviews[-1000:],labels[-1000:])", "_____no_output_____" ] ], [ [ "Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing.", "_____no_output_____" ] ], [ [ "mlp.train(reviews[:-1000],labels[:-1000])", "_____no_output_____" ] ], [ [ "That most likely didn't train very well. Part of the reason may be because the learning rate is too high. 
Run the following cell to recreate the network with a smaller learning rate, `0.01`, and then train the new network.", "_____no_output_____" ] ], [ [ "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)\nmlp.train(reviews[:-1000],labels[:-1000])", "_____no_output_____" ] ], [ [ "That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, `0.001`, and then train the new network.", "_____no_output_____" ] ], [ [ "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)\nmlp.train(reviews[:-1000],labels[:-1000])", "_____no_output_____" ] ], [ [ "With a learning rate of `0.001`, the network should finally have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson.", "_____no_output_____" ], [ "# End of Project 3. \n## Watch the next video to continue with Andrew's next lesson.", "_____no_output_____" ], [ "# Understanding Neural Noise<a id='lesson_4'></a>", "_____no_output_____" ] ], [ [ "from IPython.display import Image\nImage(filename='sentiment_network.png')", "_____no_output_____" ], [ "def update_input_layer(review):\n \n global layer_0\n \n # clear out previous state, reset the layer to be all 0s\n layer_0 *= 0\n for word in review.split(\" \"):\n layer_0[0][word2index[word]] += 1\n\nupdate_input_layer(reviews[0])", "_____no_output_____" ], [ "layer_0", "_____no_output_____" ], [ "review_counter = Counter()", "_____no_output_____" ], [ "for word in reviews[0].split(\" \"):\n review_counter[word] += 1", "_____no_output_____" ], [ "review_counter.most_common()", "_____no_output_____" ] ], [ [ "# Project 4: Reducing Noise in Our Input Data<a id='project_4'></a>\n\n**TODO:** Attempt to reduce the noise in the input data like Andrew did in the previous video. Specifically, do the following:\n* Copy the `SentimentNetwork` class you created earlier into the following cell.\n* Modify `update_input_layer` so it does not count how many times each word is used, but rather just stores whether or not a word was used. ", "_____no_output_____" ], [ "The following code is the same as the previous project, with project-specific changes marked with `\"New for Project 4\"`", "_____no_output_____" ] ], [ [ "import time\nimport sys\nimport numpy as np\n\n# Encapsulate our neural network in a class\nclass SentimentNetwork:\n def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):\n \"\"\"Create a SentimenNetwork with the given settings\n Args:\n reviews(list) - List of reviews used for training\n labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews\n hidden_nodes(int) - Number of nodes to create in the hidden layer\n learning_rate(float) - Learning rate to use while training\n \n \"\"\"\n # Assign a seed to our random number generator to ensure we get\n # reproducable results during development \n np.random.seed(1)\n\n # process the reviews and their associated labels so that everything\n # is ready for training\n self.pre_process_data(reviews, labels)\n \n # Build the network to have the number of hidden nodes and the learning rate that\n # were passed into this initializer. 
Make the same number of input nodes as\n # there are vocabulary words and create a single output node.\n self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)\n\n def pre_process_data(self, reviews, labels):\n \n # populate review_vocab with all of the words in the given reviews\n review_vocab = set()\n for review in reviews:\n for word in review.split(\" \"):\n review_vocab.add(word)\n\n # Convert the vocabulary set to a list so we can access words via indices\n self.review_vocab = list(review_vocab)\n \n # populate label_vocab with all of the words in the given labels.\n label_vocab = set()\n for label in labels:\n label_vocab.add(label)\n \n # Convert the label vocabulary set to a list so we can access labels via indices\n self.label_vocab = list(label_vocab)\n \n # Store the sizes of the review and label vocabularies.\n self.review_vocab_size = len(self.review_vocab)\n self.label_vocab_size = len(self.label_vocab)\n \n # Create a dictionary of words in the vocabulary mapped to index positions\n self.word2index = {}\n for i, word in enumerate(self.review_vocab):\n self.word2index[word] = i\n \n # Create a dictionary of labels mapped to index positions\n self.label2index = {}\n for i, label in enumerate(self.label_vocab):\n self.label2index[label] = i\n \n def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Store the learning rate\n self.learning_rate = learning_rate\n\n # Initialize weights\n\n # These are the weights between the input layer and the hidden layer.\n self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))\n \n # These are the weights between the hidden layer and the output layer.\n self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n \n # The input layer, a two-dimensional matrix with shape 1 x input_nodes\n self.layer_0 = np.zeros((1,input_nodes))\n \n \n def update_input_layer(self,review):\n\n # clear out previous state, reset the layer to be all 0s\n self.layer_0 *= 0\n \n for word in review.split(\" \"):\n # NOTE: This if-check was not in the version of this method created in Project 2,\n # and it appears in Andrew's Project 3 solution without explanation. \n # It simply ensures the word is actually a key in word2index before\n # accessing it, which is important because accessing an invalid key\n # with raise an exception in Python. 
This allows us to ignore unknown\n # words encountered in new reviews.\n if(word in self.word2index.keys()):\n ## New for Project 4: changed to set to 1 instead of add 1\n self.layer_0[0][self.word2index[word]] = 1\n \n def get_target_for_label(self,label):\n if(label == 'POSITIVE'):\n return 1\n else:\n return 0\n \n def sigmoid(self,x):\n return 1 / (1 + np.exp(-x))\n \n def sigmoid_output_2_derivative(self,output):\n return output * (1 - output)\n \n def train(self, training_reviews, training_labels):\n \n # make sure out we have a matching number of reviews and labels\n assert(len(training_reviews) == len(training_labels))\n \n # Keep track of correct predictions to display accuracy during training \n correct_so_far = 0\n\n # Remember when we started for printing time statistics\n start = time.time()\n \n # loop through all the given reviews and run a forward and backward pass,\n # updating weights for every item\n for i in range(len(training_reviews)):\n \n # Get the next review and its correct label\n review = training_reviews[i]\n label = training_labels[i]\n \n #### Implement the forward pass here ####\n ### Forward pass ###\n\n # Input Layer\n self.update_input_layer(review)\n\n # Hidden layer\n layer_1 = self.layer_0.dot(self.weights_0_1)\n\n # Output layer\n layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))\n \n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # Output error\n layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.\n layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)\n\n # Backpropagated error\n layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer\n layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error\n\n # Update the weights\n self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step\n self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step\n\n # Keep track of correct predictions.\n if(layer_2 >= 0.5 and label == 'POSITIVE'):\n correct_so_far += 1\n elif(layer_2 < 0.5 and label == 'NEGATIVE'):\n correct_so_far += 1\n \n # For debug purposes, print out our prediction accuracy and speed \n # throughout the training process. \n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(training_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct_so_far) + \" #Trained:\" + str(i+1) \\\n + \" Training Accuracy:\" + str(correct_so_far * 100 / float(i+1))[:4] + \"%\")\n if(i % 2500 == 0):\n print(\"\")\n \n def test(self, testing_reviews, testing_labels):\n \"\"\"\n Attempts to predict the labels for the given testing_reviews,\n and uses the test_labels to calculate the accuracy of those predictions.\n \"\"\"\n \n # keep track of how many correct predictions we make\n correct = 0\n\n # we'll time how many predictions per second we make\n start = time.time()\n\n # Loop through each of the given reviews and call run to predict\n # its label. 
\n for i in range(len(testing_reviews)):\n pred = self.run(testing_reviews[i])\n if(pred == testing_labels[i]):\n correct += 1\n \n # For debug purposes, print out our prediction accuracy and speed \n # throughout the prediction process. \n\n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(testing_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct) + \" #Tested:\" + str(i+1) \\\n + \" Testing Accuracy:\" + str(correct * 100 / float(i+1))[:4] + \"%\")\n \n def run(self, review):\n \"\"\"\n Returns a POSITIVE or NEGATIVE prediction for the given review.\n \"\"\"\n # Run a forward pass through the network, like in the \"train\" function.\n \n # Input Layer\n self.update_input_layer(review.lower())\n\n # Hidden layer\n layer_1 = self.layer_0.dot(self.weights_0_1)\n\n # Output layer\n layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))\n \n # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer;\n # return NEGATIVE for other values\n if(layer_2[0] >= 0.5):\n return \"POSITIVE\"\n else:\n return \"NEGATIVE\"\n ", "_____no_output_____" ] ], [ [ "Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of `0.1`.", "_____no_output_____" ] ], [ [ "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)\nmlp.train(reviews[:-1000],labels[:-1000])", "_____no_output_____" ], [ "mlp.test(reviews[-1000:],labels[-1000:])", "_____no_output_____" ] ], [ [ "# End of Project 4 solution. \n## Watch the next video to continue with Andrew's next lesson.\n# Analyzing Inefficiencies in our Network<a id='lesson_5'></a>", "_____no_output_____" ] ], [ [ "Image(filename='sentiment_network_sparse.png')", "_____no_output_____" ], [ "layer_0 = np.zeros(10)", "_____no_output_____" ], [ "layer_0", "_____no_output_____" ], [ "layer_0[4] = 1\nlayer_0[9] = 1", "_____no_output_____" ], [ "layer_0", "_____no_output_____" ], [ "weights_0_1 = np.random.randn(10,5)", "_____no_output_____" ], [ "layer_0.dot(weights_0_1)", "_____no_output_____" ], [ "indices = [4,9]", "_____no_output_____" ], [ "layer_1 = np.zeros(5)", "_____no_output_____" ], [ "for index in indices:\n layer_1 += (1 * weights_0_1[index])", "_____no_output_____" ], [ "layer_1", "_____no_output_____" ], [ "Image(filename='sentiment_network_sparse_2.png')", "_____no_output_____" ], [ "layer_1 = np.zeros(5)", "_____no_output_____" ], [ "for index in indices:\n layer_1 += (weights_0_1[index])", "_____no_output_____" ], [ "layer_1", "_____no_output_____" ] ], [ [ "# Project 5: Making our Network More Efficient<a id='project_5'></a>\n**TODO:** Make the `SentimentNetwork` class more efficient by eliminating unnecessary multiplications and additions that occur during forward and backward propagation. 
To do that, you can do the following:\n* Copy the `SentimentNetwork` class from the previous project into the following cell.\n* Remove the `update_input_layer` function - you will not need it in this version.\n* Modify `init_network`:\n>* You no longer need a separate input layer, so remove any mention of `self.layer_0`\n>* You will be dealing with the old hidden layer more directly, so create `self.layer_1`, a two-dimensional matrix with shape 1 x hidden_nodes, with all values initialized to zero\n* Modify `train`:\n>* Change the name of the input parameter `training_reviews` to `training_reviews_raw`. This will help with the next step.\n>* At the beginning of the function, you'll want to preprocess your reviews to convert them to a list of indices (from `word2index`) that are actually used in the review. This is equivalent to what you saw in the video when Andrew set specific indices to 1. Your code should create a local `list` variable named `training_reviews` that should contain a `list` for each review in `training_reviews_raw`. Those lists should contain the indices for words found in the review.\n>* Remove call to `update_input_layer`\n>* Use `self`'s `layer_1` instead of a local `layer_1` object.\n>* In the forward pass, replace the code that updates `layer_1` with new logic that only adds the weights for the indices used in the review.\n>* When updating `weights_0_1`, only update the individual weights that were used in the forward pass.\n* Modify `run`:\n>* Remove call to `update_input_layer` \n>* Use `self`'s `layer_1` instead of a local `layer_1` object.\n>* Much like you did in `train`, you will need to pre-process the `review` so you can work with word indices, then update `layer_1` by adding weights for the indices used in the review.", "_____no_output_____" ], [ "The following code is the same as the previous project, with project-specific changes marked with `\"New for Project 5\"`", "_____no_output_____" ] ], [ [ "import time\nimport sys\nimport numpy as np\n\n# Encapsulate our neural network in a class\nclass SentimentNetwork:\n def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):\n \"\"\"Create a SentimenNetwork with the given settings\n Args:\n reviews(list) - List of reviews used for training\n labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews\n hidden_nodes(int) - Number of nodes to create in the hidden layer\n learning_rate(float) - Learning rate to use while training\n \n \"\"\"\n # Assign a seed to our random number generator to ensure we get\n # reproducable results during development \n np.random.seed(1)\n\n # process the reviews and their associated labels so that everything\n # is ready for training\n self.pre_process_data(reviews, labels)\n \n # Build the network to have the number of hidden nodes and the learning rate that\n # were passed into this initializer. 
Make the same number of input nodes as\n # there are vocabulary words and create a single output node.\n self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)\n\n def pre_process_data(self, reviews, labels):\n \n # populate review_vocab with all of the words in the given reviews\n review_vocab = set()\n for review in reviews:\n for word in review.split(\" \"):\n review_vocab.add(word)\n\n # Convert the vocabulary set to a list so we can access words via indices\n self.review_vocab = list(review_vocab)\n \n # populate label_vocab with all of the words in the given labels.\n label_vocab = set()\n for label in labels:\n label_vocab.add(label)\n \n # Convert the label vocabulary set to a list so we can access labels via indices\n self.label_vocab = list(label_vocab)\n \n # Store the sizes of the review and label vocabularies.\n self.review_vocab_size = len(self.review_vocab)\n self.label_vocab_size = len(self.label_vocab)\n \n # Create a dictionary of words in the vocabulary mapped to index positions\n self.word2index = {}\n for i, word in enumerate(self.review_vocab):\n self.word2index[word] = i\n \n # Create a dictionary of labels mapped to index positions\n self.label2index = {}\n for i, label in enumerate(self.label_vocab):\n self.label2index[label] = i\n\n def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Store the learning rate\n self.learning_rate = learning_rate\n\n # Initialize weights\n\n # These are the weights between the input layer and the hidden layer.\n self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))\n\n # These are the weights between the hidden layer and the output layer.\n self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n \n ## New for Project 5: Removed self.layer_0; added self.layer_1\n # The input layer, a two-dimensional matrix with shape 1 x hidden_nodes\n self.layer_1 = np.zeros((1,hidden_nodes))\n \n ## New for Project 5: Removed update_input_layer function\n \n def get_target_for_label(self,label):\n if(label == 'POSITIVE'):\n return 1\n else:\n return 0\n \n def sigmoid(self,x):\n return 1 / (1 + np.exp(-x))\n \n def sigmoid_output_2_derivative(self,output):\n return output * (1 - output)\n \n ## New for Project 5: changed name of first parameter form 'training_reviews' \n # to 'training_reviews_raw'\n def train(self, training_reviews_raw, training_labels):\n\n ## New for Project 5: pre-process training reviews so we can deal \n # directly with the indices of non-zero inputs\n training_reviews = list()\n for review in training_reviews_raw:\n indices = set()\n for word in review.split(\" \"):\n if(word in self.word2index.keys()):\n indices.add(self.word2index[word])\n training_reviews.append(list(indices))\n\n # make sure out we have a matching number of reviews and labels\n assert(len(training_reviews) == len(training_labels))\n \n # Keep track of correct predictions to display accuracy during training \n correct_so_far = 0\n\n # Remember when we started for printing time statistics\n start = time.time()\n \n # loop through all the given reviews and run a forward and backward pass,\n # updating weights for every item\n for i in range(len(training_reviews)):\n \n # Get the next review and its correct label\n review = training_reviews[i]\n label = training_labels[i]\n \n #### 
Implement the forward pass here ####\n ### Forward pass ###\n\n ## New for Project 5: Removed call to 'update_input_layer' function\n # because 'layer_0' is no longer used\n\n # Hidden layer\n ## New for Project 5: Add in only the weights for non-zero items\n self.layer_1 *= 0\n for index in review:\n self.layer_1 += self.weights_0_1[index]\n\n # Output layer\n ## New for Project 5: changed to use 'self.layer_1' instead of 'local layer_1'\n layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) \n \n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # Output error\n layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.\n layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)\n\n # Backpropagated error\n layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer\n layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error\n\n # Update the weights\n ## New for Project 5: changed to use 'self.layer_1' instead of local 'layer_1'\n self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step\n \n ## New for Project 5: Only update the weights that were used in the forward pass\n for index in review:\n self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step\n\n # Keep track of correct predictions.\n if(layer_2 >= 0.5 and label == 'POSITIVE'):\n correct_so_far += 1\n elif(layer_2 < 0.5 and label == 'NEGATIVE'):\n correct_so_far += 1\n \n # For debug purposes, print out our prediction accuracy and speed \n # throughout the training process. \n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(training_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct_so_far) + \" #Trained:\" + str(i+1) \\\n + \" Training Accuracy:\" + str(correct_so_far * 100 / float(i+1))[:4] + \"%\")\n if(i % 2500 == 0):\n print(\"\")\n \n def test(self, testing_reviews, testing_labels):\n \"\"\"\n Attempts to predict the labels for the given testing_reviews,\n and uses the test_labels to calculate the accuracy of those predictions.\n \"\"\"\n \n # keep track of how many correct predictions we make\n correct = 0\n\n # we'll time how many predictions per second we make\n start = time.time()\n\n # Loop through each of the given reviews and call run to predict\n # its label. \n for i in range(len(testing_reviews)):\n pred = self.run(testing_reviews[i])\n if(pred == testing_labels[i]):\n correct += 1\n \n # For debug purposes, print out our prediction accuracy and speed \n # throughout the prediction process. 
\n\n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(testing_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct) + \" #Tested:\" + str(i+1) \\\n + \" Testing Accuracy:\" + str(correct * 100 / float(i+1))[:4] + \"%\")\n \n def run(self, review):\n \"\"\"\n Returns a POSITIVE or NEGATIVE prediction for the given review.\n \"\"\"\n # Run a forward pass through the network, like in the \"train\" function.\n \n ## New for Project 5: Removed call to update_input_layer function\n # because layer_0 is no longer used\n\n # Hidden layer\n ## New for Project 5: Identify the indices used in the review and then add\n # just those weights to layer_1 \n self.layer_1 *= 0\n unique_indices = set()\n for word in review.lower().split(\" \"):\n if word in self.word2index.keys():\n unique_indices.add(self.word2index[word])\n for index in unique_indices:\n self.layer_1 += self.weights_0_1[index]\n \n # Output layer\n ## New for Project 5: changed to use self.layer_1 instead of local layer_1\n layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))\n \n # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer;\n # return NEGATIVE for other values\n if(layer_2[0] >= 0.5):\n return \"POSITIVE\"\n else:\n return \"NEGATIVE\"\n", "_____no_output_____" ] ], [ [ "Run the following cell to recreate the network and train it once again.", "_____no_output_____" ] ], [ [ "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)\nmlp.train(reviews[:-1000],labels[:-1000])", "_____no_output_____" ] ], [ [ "That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.", "_____no_output_____" ] ], [ [ "mlp.test(reviews[-1000:],labels[-1000:])", "_____no_output_____" ] ], [ [ "# End of Project 5 solution. 
\n## Watch the next video to continue with Andrew's next lesson.\n# Further Noise Reduction<a id='lesson_6'></a>", "_____no_output_____" ] ], [ [ "Image(filename='sentiment_network_sparse_2.png')", "_____no_output_____" ], [ "# words most frequently seen in a review with a \"POSITIVE\" label\npos_neg_ratios.most_common()", "_____no_output_____" ], [ "# words most frequently seen in a review with a \"NEGATIVE\" label\nlist(reversed(pos_neg_ratios.most_common()))[0:30]", "_____no_output_____" ], [ "from bokeh.models import ColumnDataSource, LabelSet\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.io import output_notebook\noutput_notebook()", "_____no_output_____" ], [ "hist, edges = np.histogram(list(map(lambda x:x[1],pos_neg_ratios.most_common())), density=True, bins=100, normed=True)\n\np = figure(tools=\"pan,wheel_zoom,reset,save\",\n toolbar_location=\"above\",\n title=\"Word Positive/Negative Affinity Distribution\")\np.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color=\"#555555\")\nshow(p)", "_____no_output_____" ], [ "frequency_frequency = Counter()\n\nfor word, cnt in total_counts.most_common():\n frequency_frequency[cnt] += 1", "_____no_output_____" ], [ "hist, edges = np.histogram(list(map(lambda x:x[1],frequency_frequency.most_common())), density=True, bins=100, normed=True)\n\np = figure(tools=\"pan,wheel_zoom,reset,save\",\n toolbar_location=\"above\",\n title=\"The frequency distribution of the words in our corpus\")\np.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color=\"#555555\")\nshow(p)", "_____no_output_____" ] ], [ [ "# Project 6: Reducing Noise by Strategically Reducing the Vocabulary<a id='project_6'></a>\n\n**TODO:** Improve `SentimentNetwork`'s performance by reducing more noise in the vocabulary. Specifically, do the following:\n* Copy the `SentimentNetwork` class from the previous project into the following cell.\n* Modify `pre_process_data`:\n>* Add two additional parameters: `min_count` and `polarity_cutoff`\n>* Calculate the positive-to-negative ratios of words used in the reviews. (You can use code you've written elsewhere in the notebook, but we are moving it into the class like we did with other helper code earlier.)\n>* Andrew's solution only calculates a postive-to-negative ratio for words that occur at least 50 times. This keeps the network from attributing too much sentiment to rarer words. You can choose to add this to your solution if you would like. 
\n>* Change so words are only added to the vocabulary if they occur in the vocabulary more than `min_count` times.\n>* Change so words are only added to the vocabulary if the absolute value of their postive-to-negative ratio is at least `polarity_cutoff`\n* Modify `__init__`:\n>* Add the same two parameters (`min_count` and `polarity_cutoff`) and use them when you call `pre_process_data`", "_____no_output_____" ], [ "The following code is the same as the previous project, with project-specific changes marked with `\"New for Project 6\"`", "_____no_output_____" ] ], [ [ "import time\nimport sys\nimport numpy as np\n\n# Encapsulate our neural network in a class\nclass SentimentNetwork:\n ## New for Project 6: added min_count and polarity_cutoff parameters\n def __init__(self, reviews,labels,min_count = 10,polarity_cutoff = 0.1,hidden_nodes = 10, learning_rate = 0.1):\n \"\"\"Create a SentimenNetwork with the given settings\n Args:\n reviews(list) - List of reviews used for training\n labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews\n min_count(int) - Words should only be added to the vocabulary \n if they occur more than this many times\n polarity_cutoff(float) - The absolute value of a word's positive-to-negative\n ratio must be at least this big to be considered.\n hidden_nodes(int) - Number of nodes to create in the hidden layer\n learning_rate(float) - Learning rate to use while training\n \n \"\"\"\n # Assign a seed to our random number generator to ensure we get\n # reproducable results during development \n np.random.seed(1)\n\n # process the reviews and their associated labels so that everything\n # is ready for training\n ## New for Project 6: added min_count and polarity_cutoff arguments to pre_process_data call\n self.pre_process_data(reviews, labels, polarity_cutoff, min_count)\n \n # Build the network to have the number of hidden nodes and the learning rate that\n # were passed into this initializer. 
Make the same number of input nodes as\n # there are vocabulary words and create a single output node.\n self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)\n\n ## New for Project 6: added min_count and polarity_cutoff parameters\n def pre_process_data(self, reviews, labels, polarity_cutoff, min_count):\n \n ## ----------------------------------------\n ## New for Project 6: Calculate positive-to-negative ratios for words before\n # building vocabulary\n #\n positive_counts = Counter()\n negative_counts = Counter()\n total_counts = Counter()\n\n for i in range(len(reviews)):\n if(labels[i] == 'POSITIVE'):\n for word in reviews[i].split(\" \"):\n positive_counts[word] += 1\n total_counts[word] += 1\n else:\n for word in reviews[i].split(\" \"):\n negative_counts[word] += 1\n total_counts[word] += 1\n\n pos_neg_ratios = Counter()\n\n for term,cnt in list(total_counts.most_common()):\n if(cnt >= 50):\n pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)\n pos_neg_ratios[term] = pos_neg_ratio\n\n for word,ratio in pos_neg_ratios.most_common():\n if(ratio > 1):\n pos_neg_ratios[word] = np.log(ratio)\n else:\n pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01)))\n #\n ## end New for Project 6\n ## ----------------------------------------\n\n # populate review_vocab with all of the words in the given reviews\n review_vocab = set()\n for review in reviews:\n for word in review.split(\" \"):\n ## New for Project 6: only add words that occur at least min_count times\n # and for words with pos/neg ratios, only add words\n # that meet the polarity_cutoff\n if(total_counts[word] > min_count):\n if(word in pos_neg_ratios.keys()):\n if((pos_neg_ratios[word] >= polarity_cutoff) or (pos_neg_ratios[word] <= -polarity_cutoff)):\n review_vocab.add(word)\n else:\n review_vocab.add(word)\n\n # Convert the vocabulary set to a list so we can access words via indices\n self.review_vocab = list(review_vocab)\n \n # populate label_vocab with all of the words in the given labels.\n label_vocab = set()\n for label in labels:\n label_vocab.add(label)\n \n # Convert the label vocabulary set to a list so we can access labels via indices\n self.label_vocab = list(label_vocab)\n \n # Store the sizes of the review and label vocabularies.\n self.review_vocab_size = len(self.review_vocab)\n self.label_vocab_size = len(self.label_vocab)\n \n # Create a dictionary of words in the vocabulary mapped to index positions\n self.word2index = {}\n for i, word in enumerate(self.review_vocab):\n self.word2index[word] = i\n \n # Create a dictionary of labels mapped to index positions\n self.label2index = {}\n for i, label in enumerate(self.label_vocab):\n self.label2index[label] = i\n\n def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Store the learning rate\n self.learning_rate = learning_rate\n\n # Initialize weights\n\n # These are the weights between the input layer and the hidden layer.\n self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))\n\n # These are the weights between the hidden layer and the output layer.\n self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n \n ## New for Project 5: Removed self.layer_0; added self.layer_1\n # The input layer, a two-dimensional matrix with shape 1 x hidden_nodes\n self.layer_1 = 
np.zeros((1,hidden_nodes))\n \n ## New for Project 5: Removed update_input_layer function\n \n def get_target_for_label(self,label):\n if(label == 'POSITIVE'):\n return 1\n else:\n return 0\n \n def sigmoid(self,x):\n return 1 / (1 + np.exp(-x))\n \n def sigmoid_output_2_derivative(self,output):\n return output * (1 - output)\n \n ## New for Project 5: changed name of first parameter form 'training_reviews' \n # to 'training_reviews_raw'\n def train(self, training_reviews_raw, training_labels):\n\n ## New for Project 5: pre-process training reviews so we can deal \n # directly with the indices of non-zero inputs\n training_reviews = list()\n for review in training_reviews_raw:\n indices = set()\n for word in review.split(\" \"):\n if(word in self.word2index.keys()):\n indices.add(self.word2index[word])\n training_reviews.append(list(indices))\n\n # make sure out we have a matching number of reviews and labels\n assert(len(training_reviews) == len(training_labels))\n \n # Keep track of correct predictions to display accuracy during training \n correct_so_far = 0\n\n # Remember when we started for printing time statistics\n start = time.time()\n \n # loop through all the given reviews and run a forward and backward pass,\n # updating weights for every item\n for i in range(len(training_reviews)):\n \n # Get the next review and its correct label\n review = training_reviews[i]\n label = training_labels[i]\n \n #### Implement the forward pass here ####\n ### Forward pass ###\n\n ## New for Project 5: Removed call to 'update_input_layer' function\n # because 'layer_0' is no longer used\n\n # Hidden layer\n ## New for Project 5: Add in only the weights for non-zero items\n self.layer_1 *= 0\n for index in review:\n self.layer_1 += self.weights_0_1[index]\n\n # Output layer\n ## New for Project 5: changed to use 'self.layer_1' instead of 'local layer_1'\n layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) \n \n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # Output error\n layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.\n layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)\n\n # Backpropagated error\n layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer\n layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error\n\n # Update the weights\n ## New for Project 5: changed to use 'self.layer_1' instead of local 'layer_1'\n self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step\n \n ## New for Project 5: Only update the weights that were used in the forward pass\n for index in review:\n self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step\n\n # Keep track of correct predictions.\n if(layer_2 >= 0.5 and label == 'POSITIVE'):\n correct_so_far += 1\n elif(layer_2 < 0.5 and label == 'NEGATIVE'):\n correct_so_far += 1\n \n # For debug purposes, print out our prediction accuracy and speed \n # throughout the training process. 
\n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(training_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct_so_far) + \" #Trained:\" + str(i+1) \\\n + \" Training Accuracy:\" + str(correct_so_far * 100 / float(i+1))[:4] + \"%\")\n if(i % 2500 == 0):\n print(\"\")\n \n def test(self, testing_reviews, testing_labels):\n \"\"\"\n Attempts to predict the labels for the given testing_reviews,\n and uses the test_labels to calculate the accuracy of those predictions.\n \"\"\"\n \n # keep track of how many correct predictions we make\n correct = 0\n\n # we'll time how many predictions per second we make\n start = time.time()\n\n # Loop through each of the given reviews and call run to predict\n # its label. \n for i in range(len(testing_reviews)):\n pred = self.run(testing_reviews[i])\n if(pred == testing_labels[i]):\n correct += 1\n \n # For debug purposes, print out our prediction accuracy and speed \n # throughout the prediction process. \n\n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(testing_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct) + \" #Tested:\" + str(i+1) \\\n + \" Testing Accuracy:\" + str(correct * 100 / float(i+1))[:4] + \"%\")\n \n def run(self, review):\n \"\"\"\n Returns a POSITIVE or NEGATIVE prediction for the given review.\n \"\"\"\n # Run a forward pass through the network, like in the \"train\" function.\n \n ## New for Project 5: Removed call to update_input_layer function\n # because layer_0 is no longer used\n\n # Hidden layer\n ## New for Project 5: Identify the indices used in the review and then add\n # just those weights to layer_1 \n self.layer_1 *= 0\n unique_indices = set()\n for word in review.lower().split(\" \"):\n if word in self.word2index.keys():\n unique_indices.add(self.word2index[word])\n for index in unique_indices:\n self.layer_1 += self.weights_0_1[index]\n \n # Output layer\n ## New for Project 5: changed to use self.layer_1 instead of local layer_1\n layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))\n \n # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer;\n # return NEGATIVE for other values\n if(layer_2[0] >= 0.5):\n return \"POSITIVE\"\n else:\n return \"NEGATIVE\"\n", "_____no_output_____" ] ], [ [ "Run the following cell to train your network with a small polarity cutoff.", "_____no_output_____" ] ], [ [ "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)\nmlp.train(reviews[:-1000],labels[:-1000])", "_____no_output_____" ] ], [ [ "And run the following cell to test it's performance.", "_____no_output_____" ] ], [ [ "mlp.test(reviews[-1000:],labels[-1000:])", "_____no_output_____" ] ], [ [ "Run the following cell to train your network with a much larger polarity cutoff.", "_____no_output_____" ] ], [ [ "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)\nmlp.train(reviews[:-1000],labels[:-1000])", "_____no_output_____" ] ], [ [ "And run the following cell to test it's performance.", "_____no_output_____" ] ], [ [ "mlp.test(reviews[-1000:],labels[-1000:])", "_____no_output_____" ] ], [ [ "# End of Project 6 
solution. \n## Watch the next video to continue with Andrew's next lesson.\n# Analysis: What's Going on in the Weights?<a id='lesson_7'></a>", "_____no_output_____" ] ], [ [ "mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)", "_____no_output_____" ], [ "mlp_full.train(reviews[:-1000],labels[:-1000])", "_____no_output_____" ], [ "Image(filename='sentiment_network_sparse.png')", "_____no_output_____" ], [ "def get_most_similar_words(focus = \"horrible\"):\n most_similar = Counter()\n\n for word in mlp_full.word2index.keys():\n most_similar[word] = np.dot(mlp_full.weights_0_1[mlp_full.word2index[word]],mlp_full.weights_0_1[mlp_full.word2index[focus]])\n \n return most_similar.most_common()", "_____no_output_____" ], [ "get_most_similar_words(\"excellent\")", "_____no_output_____" ], [ "get_most_similar_words(\"terrible\")", "_____no_output_____" ], [ "import matplotlib.colors as colors\n\nwords_to_visualize = list()\nfor word, ratio in pos_neg_ratios.most_common(500):\n if(word in mlp_full.word2index.keys()):\n words_to_visualize.append(word)\n \nfor word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:\n if(word in mlp_full.word2index.keys()):\n words_to_visualize.append(word)", "_____no_output_____" ], [ "pos = 0\nneg = 0\n\ncolors_list = list()\nvectors_list = list()\nfor word in words_to_visualize:\n if word in pos_neg_ratios.keys():\n vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])\n if(pos_neg_ratios[word] > 0):\n pos+=1\n colors_list.append(\"#00ff00\")\n else:\n neg+=1\n colors_list.append(\"#000000\")\n ", "_____no_output_____" ], [ "from sklearn.manifold import TSNE\ntsne = TSNE(n_components=2, random_state=0)\nwords_top_ted_tsne = tsne.fit_transform(vectors_list)", "_____no_output_____" ], [ "p = figure(tools=\"pan,wheel_zoom,reset,save\",\n toolbar_location=\"above\",\n title=\"vector T-SNE for most polarized words\")\n\nsource = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],\n x2=words_top_ted_tsne[:,1],\n names=words_to_visualize,\n color=colors_list))\n\np.scatter(x=\"x1\", y=\"x2\", size=8, source=source, fill_color=\"color\")\n\nword_labels = LabelSet(x=\"x1\", y=\"x2\", text=\"names\", y_offset=6,\n text_font_size=\"8pt\", text_color=\"#555555\",\n source=source, text_align='center')\np.add_layout(word_labels)\n\nshow(p)\n\n# green indicates positive words, black indicates negative words", "_____no_output_____" ] ] ]
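The backward-pass cells in the notebook above repeatedly call `sigmoid_output_2_derivative(output) = output * (1 - output)`, which leans on the identity that the sigmoid's derivative can be written from its output alone: if y = 1/(1 + e^-x) then dy/dx = y(1 - y). A minimal standalone NumPy check of that identity is sketched below; it is not taken from the notebook, and the variable names, grid, and step size are illustrative choices only.

    import numpy as np

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(output):
        # derivative expressed in terms of the sigmoid's output, as the class above does
        return output * (1 - output)

    x = np.linspace(-5, 5, 11)
    analytic = sigmoid_output_2_derivative(sigmoid(x))
    # independent central finite-difference estimate of the derivative
    h = 1e-5
    numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)
    print(np.max(np.abs(analytic - numeric)))  # tiny (~1e-11): the two agree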
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3884cec82d1c1de8fc8af54eda3660ea4dc79f
6,325
ipynb
Jupyter Notebook
2_Curso/Laboratorio/SAGE-noteb/IPYNB/PROBA/112-PROBA-pi-paralelo.ipynb
AlejandroSantorum/Apuntes_Mat_IngInf
68ee7851769c27a7d04203a35b14449d7f80eb73
[ "Apache-2.0" ]
18
2019-10-03T13:07:35.000Z
2022-03-29T12:58:34.000Z
2_Curso/Laboratorio/SAGE-noteb/IPYNB/PROBA/112-PROBA-pi-paralelo.ipynb
AlejandroSantorum/Apuntes_Mat_IngInf
68ee7851769c27a7d04203a35b14449d7f80eb73
[ "Apache-2.0" ]
null
null
null
2_Curso/Laboratorio/SAGE-noteb/IPYNB/PROBA/112-PROBA-pi-paralelo.ipynb
AlejandroSantorum/Apuntes_Mat_IngInf
68ee7851769c27a7d04203a35b14449d7f80eb73
[ "Apache-2.0" ]
7
2020-01-23T09:59:08.000Z
2021-10-04T18:03:03.000Z
22.349823
347
0.49834
[ [ [ "<p>C&aacute;lculo aproximado de $\\pi$ contando cuantos puntos aleatorios del cuadrado $[0,1]\\times[0,1]$, de entre un total de $N$,&nbsp; caen dentro de la circunferencia de radio unidad,&nbsp; digamos que han caido dentro $N_0$. El &aacute;rea de un cuarto de circunferencia, $\\pi/4$ es aproximadamente igual al cociente $N_0/N$:</p>", "_____no_output_____" ] ], [ [ "def pi(N,rs):\n cont = 0\n set_random_seed(rs) #Inicializamos el generador de numeros aleatorios\n for muda in xsrange(N):\n x,y = random(),random() #Coordenadas de un punto aleatorio del cuadrado\n if x^2+y^2 <= 1:\n cont += 1\n return cont", "_____no_output_____" ] ], [ [ "<p>Usando un &uacute;nico n&uacute;cleo calcula la aproximaci&oacute;n de $\\pi$ usando $4*10^7$ puntos aleatorios en unos 90 segundos, y el valor de $\\pi$ obtenido tiene 3 cifras decimales correctas.</p>", "_____no_output_____" ] ], [ [ "time pi(4*10^7,9873223)", "CPU times: user 1min 10s, sys: 1.24 s, total: 1min 12s\nWall time: 1min 10s\n" ], [ "((4*31418544)/4*10^7).n()", "_____no_output_____" ] ], [ [ "<p>Usando los cuatro n&uacute;cleos (dos n&uacute;cleos reales y cada uno de ellos 2 virtuales), mandamos a cada n&uacute;cleo $10^7$ puntos y obtenemos un tiempo de 54 segundos.</p>", "_____no_output_____" ] ], [ [ "@parallel(4)\ndef pi_paralelo(N,rs):\n cont = 0\n set_random_seed(rs)\n for muda in xsrange(N):\n x,y = random(),random()\n if x^2+y^2 <= 1:\n cont += 1\n return cont", "_____no_output_____" ], [ "time list(pi_paralelo([(10^7,23459),(10^7,29954),(10^7,7654),(10^7,67543)]))", "CPU times: user 4 ms, sys: 16 ms, total: 20 ms\nWall time: 18.1 s\n" ], [ "pi1 = 4*(7854510+7854508+7855063+7853909)/(4*10^7);(pi1).n()", "_____no_output_____" ] ], [ [ "<p>Usando dos n&uacute;cleos el tiempo total es 58 s, y vemos que la&nbsp; ventaja obtenida por usar los n&uacute;cleos virtuales es m&iacute;nima.</p>", "_____no_output_____" ] ], [ [ "@parallel(2)\ndef pi_paralelo(N,rs):\n cont = 0\n set_random_seed(rs)\n for muda in xsrange(N):\n x,y = random(),random()\n if x^2+y^2 <= 1:\n cont += 1\n return cont", "_____no_output_____" ], [ "time list(pi_paralelo([(2*10^7,23459),(2*10^7,29954)]))", "CPU times: user 0 ns, sys: 12 ms, total: 12 ms\nWall time: 36.2 s\n" ] ], [ [ "<p>&iquest;Mejora la aproximaci&oacute;n de $\\pi$ al aumentar $N$ (el n&uacute;mero de puntos aleatorios que consideramos)?</p>", "_____no_output_____" ] ], [ [ "[pi(10^k,965452) for k in srange(3,9)]", "_____no_output_____" ] ], [ [ "<p>&iquest;Qu&eacute; opinas?</p>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb38853106c1f05af0bde4fc57aefb90cc1ab40f
36,053
ipynb
Jupyter Notebook
Statistics/Lesson_10/KC_lecture_3_aa.ipynb
LubovFedoseeva/Learning
2d0e87c31298186b4e8f0b7301207aea574e0d03
[ "MIT" ]
1
2021-12-08T19:41:57.000Z
2021-12-08T19:41:57.000Z
Statistics/Lesson_10/KC_lecture_3_aa.ipynb
LubovFedoseeva/Learning
2d0e87c31298186b4e8f0b7301207aea574e0d03
[ "MIT" ]
null
null
null
Statistics/Lesson_10/KC_lecture_3_aa.ipynb
LubovFedoseeva/Learning
2d0e87c31298186b4e8f0b7301207aea574e0d03
[ "MIT" ]
null
null
null
156.752174
16,284
0.900757
[ [ [ "import numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\nfrom tqdm.auto import tqdm", "_____no_output_____" ] ], [ [ "### A/A-тест, который сходится", "_____no_output_____" ] ], [ [ "n = 100000\nsimulations = 1000\nn_s = 1000\nres = []\n\ndf = pd.DataFrame({\n \"s1\": np.random.exponential(scale=1/0.001, size=n),\n \"s2\": np.random.exponential(scale=1/0.001, size=n)\n})\n\n# Запуск симуляций A/A теста\nfor i in tqdm(range(simulations)):\n s1 = df['s1'].sample(n_s, replace = False).values\n s2 = df['s2'].sample(n_s, replace = False).values\n res.append(stats.ttest_ind(s1, s2, equal_var = False)[1]) # сохраняем pvalue\n\nplt.hist(res, bins = 50)\nplt.style.use('ggplot')\nplt.xlabel('pvalues')\nplt.ylabel('frequency')\nplt.title(\"Histogram of ttest A/A simulations \")\nplt.show()\n\n# Проверяем, что количество ложноположительных случаев не превышает альфа\nsum(np.array(res) <0.05) / simulations", "_____no_output_____" ], [ "sum(np.array(res) <0.1) / simulations", "_____no_output_____" ] ], [ [ "### Второй случай, когда A/A-тест не сходится", "_____no_output_____" ] ], [ [ "n = 100000\nsimulations = 1000\nn_s = 1000\nres = []\n\ndf = pd.DataFrame({\n \"s1\": np.random.exponential(scale=1/0.001, size=n),\n \"s2\": np.random.exponential(scale=1/0.001, size=n) * 1.1 # добавляем эффект 10%\n})\n\nfor i in tqdm(range(simulations)):\n s1 = df['s1'].sample(n_s, replace = False).values\n s2 = df['s2'].sample(n_s, replace = False).values\n res.append(stats.ttest_ind(s1, s2, equal_var = False)[1])\n\nplt.hist(res, bins = 50)\nplt.style.use('ggplot')\nplt.xlabel('pvalues')\nplt.ylabel('frequency')\nplt.title(\"Histogram of ttest A/A simulations \")\nplt.show()\n\n# FPR не сойдется\nsum(np.array(res) <0.05) / simulations", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb38a426c537693793ffdd57865a2b0b6a6bd9aa
48,292
ipynb
Jupyter Notebook
old_utils/creating_training_dataset.ipynb
Zumo09/Feedback-Prize
e7e7343a81bfec2f5b187f2266154da0bbe48fb9
[ "MIT" ]
null
null
null
old_utils/creating_training_dataset.ipynb
Zumo09/Feedback-Prize
e7e7343a81bfec2f5b187f2266154da0bbe48fb9
[ "MIT" ]
null
null
null
old_utils/creating_training_dataset.ipynb
Zumo09/Feedback-Prize
e7e7343a81bfec2f5b187f2266154da0bbe48fb9
[ "MIT" ]
null
null
null
57.082742
6,238
0.563323
[ [ [ "%matplotlib inline\n\nimport re\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom typing import Set, Any\n\nfrom utils import data, text, visualization", "_____no_output_____" ], [ "train_texts, train_data = data.load_dataset(preprocess=True)\ntest_texts = data.load_texts('test', preprocess=True)", "100%|██████████| 15594/15594 [00:02<00:00, 7140.11it/s]\n100%|██████████| 5/5 [00:00<00:00, 5000.36it/s]\n" ], [ "train_data", "_____no_output_____" ], [ "END_OF_SENTENCES = re.compile('[.,;!?]')\nEND_OF_SENTENCES = re.compile('[.,;!?]')\ndataset_complete = pd.DataFrame({'id':train_texts.index, 'text':train_texts.values})\ndataset_complete['sentences'] = dataset_complete['text'].apply(lambda x: END_OF_SENTENCES.split(x))\n\ndef compute_positions(sentences):\n ret = []\n initial = 0\n for s in sentences:\n end = initial + len(s)\n ret.append((initial, end))\n initial = end + 1\n return ret\n\ndataset_complete['positions'] = dataset_complete['sentences'].apply(compute_positions)\n\nTHRESHOLD = 0.5\ndef is_intersected(s1: Set[Any], s2: Set[Any]):\n if len(s1) == 0 or len(s2) == 0:\n return False\n intersection = len(s1.intersection(s2)) \n return intersection / len(s1) > THRESHOLD or intersection / len(s2) > THRESHOLD\n\ngb = train_data.groupby(by='id')\n\ndef retrieve_labels(text_id, positions):\n tags = gb.get_group(text_id)\n labels = []\n for ps, pe in positions:\n label = 'NO LABEL'\n pos = set(range(ps, pe))\n for ds, de, l in zip(tags['discourse_start'], tags['discourse_end'], tags['discourse_type']):\n dis = set(range(ds, de))\n if is_intersected(pos, dis):\n label = l\n break\n labels.append(label)\n return labels\n\n\ndataset_complete['labels'] = dataset_complete.apply(lambda s: retrieve_labels(s['id'], s['positions']), axis=1)", "_____no_output_____" ], [ "dataset_complete", "_____no_output_____" ], [ "example_id = dataset_complete['id'][15591]", "_____no_output_____" ], [ "visualization.highlight_segments(example_id, train_data, texts=train_texts)", "_____no_output_____" ], [ "visualization.highlight_segments(example_id, dataset_complete)", "_____no_output_____" ], [ "train_data.groupby(by='id').count()['discourse_id'].describe()", "_____no_output_____" ], [ "train_data.groupby(by='id').count()['discourse_id'].hist(bins=50)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb38a81f9b289e6bd49f67f617e524b3099cabce
326,348
ipynb
Jupyter Notebook
Project3/Project3.final(modified1).ipynb
wiggs555/cse7324project
6bc6e51ccbdbf0b80abbb0e7f0a64ae150831abd
[ "Unlicense" ]
null
null
null
Project3/Project3.final(modified1).ipynb
wiggs555/cse7324project
6bc6e51ccbdbf0b80abbb0e7f0a64ae150831abd
[ "Unlicense" ]
null
null
null
Project3/Project3.final(modified1).ipynb
wiggs555/cse7324project
6bc6e51ccbdbf0b80abbb0e7f0a64ae150831abd
[ "Unlicense" ]
1
2019-02-05T07:45:51.000Z
2019-02-05T07:45:51.000Z
93.778161
42,756
0.735681
[ [ [ "# **CSE 7324 Lab 3: Extending Logistic Regression**\n### *Thomas Adams, Suleiman Hijazeen, Nancy Le and Andrew Whigham*\n------", "_____no_output_____" ], [ "### **1. Preparation and Overview**\n------", "_____no_output_____" ], [ "#### 1.1 Business Understanding\n---", "_____no_output_____" ], [ "Austin Animal Center is the largest no-kill shelter in the United States and provides shelter to more than 16,000 animals each year. As a no-kill shelter they refuse to euthanize any animal unless the animal has a terminal medical issue and is in pain or if the animal is a danger to the public or to the shelter staff. Although the shelter’s primary goal is to find ‘forever homes’ for each and every animal that comes through their doors, many animals end up staying in the shelter for a long time if they are not considered as desirable for adoption as other animals. In addition to adopting out animals, the Austin Animal Center partners with various other rescues and animal sanctuaries to try to find homes for their animals. \n\nThe average annual cost per animal at the Austin Animal Center is approximately $715 [3] and with many animals staying at the facility for long periods of time, some for several years, the cost can add up quickly. The shelter has fixed financial support via legislation to cover costs for staffing the shelters and a few grants to cover veterinary staff and services, but the shelter primarily relies on donations to provide for food, bedding and toys for the animals. The shelter must try to minimize costs associated with each animal and try to have the animals leave the shelter through adoption or transfer to a sanctuary as quickly as possible.\n\nThe Austin Animal Center keeps track of each animal that comes through their doors and keeps a record of the animal’s outcome; that is whether they were adopted, transferred to a partner shelter or sanctuary or one of many other outcomes. If the shelter could predict an animal’s outcome based on the animal’s characteristics, they could be much more efficient with having animals leave the shelter by knowing which animals they should be able to adopt out and which animals they should transfer to other shelters or sanctuaries. This added efficiency would result in the shelter’s ability to take in more animals which in return would lower the average cost per animal.\n\nThis lab examines the Austin Animal Center animal outcome data set to specifically look at cats and the outcome of each cat and attempts to build an accurate model of predicting the outcome. 
If accurate, this model could serve the Austin Animal Center as well as other cities that are looking at issuing a no-kill ordinance for their shelters.", "_____no_output_____" ], [ "#### 1.2 Data Preparation\n---", "_____no_output_____" ] ], [ [ "# dependencies\nimport pandas as pd\nimport numpy as np\nimport missingno as msno \nimport matplotlib.pyplot as plt\nimport re\nfrom sklearn.model_selection import train_test_split\n\nfrom textwrap import wrap\nfrom sklearn.preprocessing import StandardScaler\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport math\n%matplotlib inline", "_____no_output_____" ], [ "# import data\nshelter_outcomes = pd.read_csv(\"C:/Users/w47518657u/OneDrive/SMU Spring 2019/CSE 7318/Labs/Lab Three/aac_shelter_outcomes.csv\")\n# filter animal type for just cats\ncats = shelter_outcomes[shelter_outcomes['animal_type'] == 'Cat']\n#print(cats.head())\n\n# remove age_upon_outcome and recalculate to standard units (days)\nage = cats.loc[:,['datetime', 'date_of_birth']]\n# convert to datetime\nage.loc[:,'datetime'] = pd.to_datetime(age['datetime'])\nage.loc[:,'date_of_birth'] = pd.to_datetime(age['date_of_birth'])\n# calculate cat age in days\ncats.loc[:,'age'] = (age.loc[:,'datetime'] - age.loc[:,'date_of_birth']).dt.days\n# get dob info\ncats['dob_month'] = age.loc[:, 'date_of_birth'].dt.month\ncats['dob_day'] = age.loc[:, 'date_of_birth'].dt.day\ncats['dob_dayofweek'] = age.loc[:, 'date_of_birth'].dt.dayofweek\n# get month from datetime\ncats['month'] = age.loc[:,'datetime'].dt.month\n# get day of month\ncats['day'] = age.loc[:,'datetime'].dt.day\n# get day of week\ncats['dayofweek'] = age.loc[:, 'datetime'].dt.dayofweek\n# get hour of day\ncats['hour'] = age.loc[:, 'datetime'].dt.hour\n# get quarter\ncats['quarter'] = age.loc[:, 'datetime'].dt.quarter\n\n# clean up breed attribute\n# get breed attribute for processing\n# convert to lowercase, remove mix and strip whitespace\n# remove space in 'medium hair' to match 'longhair' and 'shorthair'\n# split on either space or '/'\nbreed = cats.loc[:, 'breed'].str.lower().str.replace('mix', '').str.replace('medium hair', 'mediumhair').str.strip().str.split('/', expand=True)\ncats['breed'] = breed[0]\ncats['breed1'] = breed[1]\n\n# clean up color attribute\n# convert to lowercase\n# strip spaces\n# split on '/'\ncolor = cats.loc[:, 'color'].str.lower().str.strip().str.split('/', expand=True)\ncats['color'] = color[0]\ncats['color1'] = color[1]\n\n# clean up sex_upon_outcome\nsex = cats['sex_upon_outcome'].str.lower().str.strip().str.split(' ', expand=True)\nsex[0].replace('spayed', True, inplace=True)\nsex[0].replace('neutered', True, inplace=True)\nsex[0].replace('intact', False, inplace=True)\nsex[1].replace(np.nan, 'unknown', inplace=True)\ncats['spayed_neutered'] = sex[0]\ncats['sex'] = sex[1]\n\n# add in domesticated attribute\ncats['domestic'] = np.where(cats['breed'].str.contains('domestic'), 1, 0)\n\n# combine outcome and outcome subtype into a single attribute\ncats['outcome_subtype'] = cats['outcome_subtype'].str.lower().str.replace(' ', '-').fillna('unknown')\ncats['outcome_type'] = cats['outcome_type'].str.lower().str.replace(' ', '-').fillna('unknown')\ncats['outcome'] = cats['outcome_type'] + '_' + cats['outcome_subtype']\n\n# drop unnecessary columns\ncats.drop(columns=['animal_id', 'name', 'animal_type', 'age_upon_outcome', 'date_of_birth', 'datetime', 'monthyear', 'sex_upon_outcome', 'outcome_subtype', 'outcome_type'], inplace=True)\n#print(cats['outcome'].value_counts())\n\ncats.head()\n", 
"_____no_output_____" ] ], [ [ "Not all information included in this data set is necessary to the targeted prediction of outcome type. Some animals that were adopted were returned to the shelter as runaways before being returned to their owners. These instances have no impact on trying to predict outcome and will be removed from the data set.", "_____no_output_____" ], [ "#### 1.3 Data Description\n---", "_____no_output_____" ] ], [ [ "print(\"Default datatypes of shelter cat outcomes:\\n\")\nprint(cats.dtypes)\n\nprint(\"\\nBelow is a description of the attributes in the cats dataframe:\\n\")", "Default datatypes of shelter cat outcomes:\n\nbreed object\ncolor object\nage int64\ndob_month int64\ndob_day int64\ndob_dayofweek int64\nmonth int64\nday int64\ndayofweek int64\nhour int64\nquarter int64\nbreed1 object\ncolor1 object\nspayed_neutered object\nsex object\ndomestic int32\noutcome object\ndtype: object\n\nBelow is a description of the attributes in the cats dataframe:\n\n" ] ], [ [ "Attribute | Description | Scale | Datatype\n--- | --- | --- | ---\nBreed | Primary breed of the cat | Nominal | Object\nColor | Primary color of the cat | Nominal | Object\nAge | Age of cat in days | Ordinal | int64\nDOB_Month | Date of birth month (1-12) for the cat | Ordinal | int64\nDOB_Day | Date of birth day (1-31) for the cat | Ordinal | int64\nDOB_DayOfWeek | Date of birth day of week (1-7) for the cat | Ordinal | int64\nMonth | Month (1-12) of the outcome | Ordinal | int64\nDay | Day of month (1-31) of the outcome | Ordinal | int64\nDayOfWeek | Day of week (1-7) of the outcome | Ordinal | int64\nHour | Hour during the day (0-23) of the outcome | Ordinal | int64\nQuarter | Quarter during the year (1-4) of the outcome | Ordinal | int64\nBreed1 | Secondary breed of the cat | Nominal | Object\nColor1 | Secondary color of the cat | Nominal | Object\nSpayed_Neutered | Is the cat spayed/netured or not | Nominal | bool\nSex | Sex of the cat | Nominal | bool\nDomestic | Is the cat domesticated | Nominal | bool\nOutcome | The outcome of the animal | nominal | object", "_____no_output_____" ] ], [ [ "print('Below is a listing of the target classes and their distributions:')\ncats['outcome'].value_counts()", "Below is a listing of the target classes and their distributions:\n" ] ], [ [ "Each feature has a different count, a low count per feature decrease the accuracy and the efficiency of the logistic regression method used, so all features with low count was not taken into account in traning the classfier", "_____no_output_____" ] ], [ [ "# examine missing data\nmsno.matrix(cats)", "_____no_output_____" ] ], [ [ "Since the missing data shows that breed1 will have little impact on the prediction since there are only two records that have a value, it will be removed from the data set. 
The missing data in color1 should be handled when one hot encoding is performed on it.", "_____no_output_____" ], [ "#### 1.4 One hot encoding of data and splitting into training and testing sets", "_____no_output_____" ] ], [ [ "cats.drop(columns=['breed1'], inplace=True)\n# Breed, Color, Color1, Spayed_Netured and Sex attributes need to be one hot encoded\ncats_ohe = pd.get_dummies(cats, columns=['breed', 'color', 'color1', 'spayed_neutered', 'sex'])\ncats_ohe.head()\nout_t={'euthanasia_suffering' : 0, 'died_in-kennel' : 0, 'return-to-owner_unknown' : 0, 'transfer_partner' : 1, 'euthanasia_at-vet' : 2, 'adoption_foster' : 3, 'died_in-foster' : 0, 'transfer_scrp' : 4, 'euthanasia_medical' : 0, 'transfer_snr' : 0, 'died_enroute' : 0, 'rto-adopt_unknown' : 0, 'missing_in-foster' : 0, 'adoption_offsite' : 0, 'adoption_unknown' :5,'euthanasia_rabies-risk' : 0, 'unknown_unknown' : 0, 'adoption_barn' : 0, 'died_unknown' : 0, 'died_in-surgery' : 0, 'euthanasia_aggressive' : 0, 'euthanasia_unknown' : 0, 'missing_unknown' : 0, 'missing_in-kennel' : 0, 'missing_possible-theft' : 0, 'died_at-vet' : 0, 'disposal_unknown' : 0, 'euthanasia_underage' : 0, 'transfer_barn' : 0}\n#output is converted from string to catogries 0 to 5 represent each output\n# separate outcome from data\noutcome = cats_ohe['outcome']\ncats_ohe.drop(columns=['outcome'])\n\nprint(cats_ohe.head())\n\n# split the data\nX_train, X_test, y_train, y_test = train_test_split(cats_ohe, outcome, test_size=0.2, random_state=0)\nX_train.drop(columns=['outcome'], inplace=True)\ny_train = [out_t[item] for item in y_train]\n#print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)", " age dob_month dob_day dob_dayofweek month day dayofweek hour \\\n0 15 7 7 0 7 22 1 16 \n8 59 6 16 0 8 14 3 18 \n9 95 3 26 2 6 29 6 17 \n10 366 3 27 2 3 28 4 14 \n17 24 12 16 0 1 9 3 19 \n\n quarter domestic ... color1_tortie point color1_tricolor \\\n0 3 1 ... 0 0 \n8 3 1 ... 0 0 \n9 2 1 ... 0 0 \n10 1 1 ... 0 0 \n17 1 1 ... 0 0 \n\n color1_white color1_yellow spayed_neutered_False spayed_neutered_True \\\n0 0 0 1 0 \n8 1 0 1 0 \n9 0 0 0 1 \n10 1 0 0 1 \n17 1 0 1 0 \n\n spayed_neutered_unknown sex_female sex_male sex_unknown \n0 0 0 1 0 \n8 0 1 0 0 \n9 0 1 0 0 \n10 0 1 0 0 \n17 0 0 1 0 \n\n[5 rows x 141 columns]\n" ] ], [ [ "One hot encoding is used for the cat breed, color, spayed/neutered and sex attributes to convert the categorical variables into a form that should play nicer with logistic regression. Although spayed_neutered and sex are essentially boolean attributes, they had to be converted because there were many unknown values in each.\n\nThe data is split with an 80/20 train/test ratio using the train_test_split function in the cross validation functions in Skikit Learn's cross validation package. Although this was an easy method to split the data into training and test sets, it was not a good way to split the data for this dataset. As shown above, the target distribution is skewed and some targets have very few instances. It would have been better to select an 80/20 ratio for each class.", "_____no_output_____" ], [ "### **2. 
Modeling**\n------", "_____no_output_____" ] ], [ [ "import numpy as np\nclass BinaryLogisticRegressionBase:\n # private:\n def __init__(self, eta, iterations, C,reg):\n self.eta = eta\n self.iters = iterations\n self.C=C\n self.reg=reg\n # internally we will store the weights as self.w_ to keep with sklearn conventions\n \n def __str__(self):\n return 'Base Binary Logistic Regression Object, Not Trainable'\n \n # convenience, private and static:\n @staticmethod\n def _sigmoid(theta):\n return 1/(1+np.exp(-theta)) \n \n @staticmethod\n def _add_bias(X):\n return np.hstack((np.ones((X.shape[0],1)),X)) # add bias term\n \n # public:\n def predict_proba(self,X,add_bias=True):\n # add bias term if requested\n Xb = self._add_bias(X) if add_bias else X\n return self._sigmoid(Xb @ self.w_) # return the probability y=1\n \n def predict(self,X):\n return (self.predict_proba(X)>0.5) #return the actual prediction\n \n # inherit from base class\nclass BinaryLogisticRegression(BinaryLogisticRegressionBase):\n #private:\n def __str__(self):\n if(hasattr(self,'w_')):\n return 'Binary Logistic Regression Object with coefficients:\\n'+ str(self.w_) # is we have trained the object\n else:\n return 'Untrained Binary Logistic Regression Object'\n \n def _get_gradient(self,X,y):\n # programming \\sum_i (yi-g(xi))xi\n gradient = np.zeros(self.w_.shape) # set gradient to zero\n for (xi,yi) in zip(X,y):\n # the actual update inside of sum\n gradi = (yi - self.predict_proba(xi,add_bias=False))*xi \n # reshape to be column vector and add to gradient\n gradient += gradi.reshape(self.w_.shape) \n \n return gradient/float(len(y))\n \n # public:\n def fit(self, X, y):\n Xb = self._add_bias(X) # add bias term\n num_samples, num_features = Xb.shape\n \n self.w_ = np.zeros((num_features,1)) # init weight vector to zeros\n \n # for as many as the max iterations\n for _ in range(self.iters):\n gradient = self._get_gradient(Xb,y)\n self.w_ += gradient*self.eta # multiply by learning rate \nimport numpy as np\nfrom scipy.special import expit\n\nclass VectorBinaryLogisticRegression(BinaryLogisticRegression):\n # inherit from our previous class to get same functionality\n @staticmethod\n def _sigmoid(theta):\n # increase stability, redefine sigmoid operation\n return expit(theta) #1/(1+np.exp(-theta))\n \n # but overwrite the gradient calculation\n def _get_gradient(self,X,y):\n ydiff = y-self.predict_proba(X,add_bias=False).ravel() # get y difference\n gradient = np.mean(X * ydiff[:,np.newaxis], axis=0) # make ydiff a column vector and multiply through\n gradient = gradient.reshape(self.w_.shape)\n if self.reg=='L2':\n gradient[1:] += -2 * self.w_[1:] * self.C\n if self.reg=='L1':\n gradient[1:] += -self.C # the deravtiv of C*abs(W), which should be dx(abs(w))= w/abs(w)\n if self.reg=='L1L2':\n gradient[1:] += -self.C-2 * self.w_[1:] * self.C\n if self.reg=='none':\n gradient[1:]\n return gradient\n \n \nfrom scipy.optimize import minimize_scalar\nimport copy\nclass LineSearchLogisticRegression(VectorBinaryLogisticRegression):\n \n # define custom line search for problem\n \n @staticmethod\n def objective_function(eta,X,y,w,grad,C=0.001):\n wnew = w - grad*eta\n g = expit(X @ wnew)\n return -np.sum(np.log(g[y==1]))-np.sum(np.log(1-g[y==0])) + C*sum(wnew**2)\n \n \n def fit(self, X, y):\n Xb = self._add_bias(X) # add bias term\n num_samples, num_features = Xb.shape\n \n self.w_ = np.zeros((num_features,1)) # init weight vector to zeros\n \n # for as many as the max iterations\n for _ in range(self.iters):\n gradient = 
-self._get_gradient(Xb,y)\n # minimization inopposite direction\n \n # do line search in gradient direction, using scipy function\n opts = {'maxiter':self.iters/50} # unclear exactly what this should be\n res = minimize_scalar(self.objective_function, # objective function to optimize\n bounds=(self.eta/1000,self.eta*10), #bounds to optimize\n args=(Xb,y,self.w_,gradient,0.001), # additional argument for objective function\n method='bounded', # bounded optimization for speed\n options=opts) # set max iterations\n \n eta = res.x # get optimal learning rate\n self.w_ -= gradient*eta # set new function values\n # subtract to minimize\nclass StochasticLogisticRegression(BinaryLogisticRegression):\n # stochastic gradient calculation \n def _get_gradient(self,X,y):\n idx = int(np.random.rand()*len(y)) # grab random instance\n ydiff = y[idx]-self.predict_proba(X[idx],add_bias=False) # get y difference (now scalar)\n gradient = X[idx] * ydiff[:,np.newaxis] # make ydiff a column vector and multiply through\n \n\n gradient = gradient.reshape(self.w_.shape)\n if self.reg=='L2':\n gradient[1:] += -2 * self.w_[1:] * self.C\n if self.reg=='L1':\n gradient[1:] += -self.C # the deravtiv of C*abs(W), which should be dx(abs(w))= w/abs(w)\n if self.reg=='L1L2':\n gradient[1:] += -self.C-(2 * self.w_[1:] * self.C)\n if self.reg=='none':\n gradient[1:]\n return gradient\n \nfrom scipy.optimize import fmin_bfgs\nclass BFGSBinaryLogisticRegression(BinaryLogisticRegression):\n \n @staticmethod\n def objective_function(w,X,y,C,reg):\n g = expit(X @ w)\n return -np.sum(np.log(g[y==1]))-np.sum(np.log(1-g[y==0])) + C*sum(w**2) #-np.sum(y*np.log(g)+(1-y)*np.log(1-g))\n\n @staticmethod\n def objective_gradient(w,X,y,C,reg):\n g = expit(X @ w)\n ydiff = y-g # get y difference\n gradient = np.mean(X * ydiff[:,np.newaxis], axis=0)\n gradient = gradient.reshape(w.shape)\n if reg=='L2':\n gradient[1:] += -2 * w[1:] * C\n if reg=='L1':\n gradient[1:] += - C # the deravtiv of C*abs(W), which should be dx(abs(w))= w/abs(w)\n if reg=='L1L2':\n gradient[1:] +=(-2 * w[1:] * C) - C\n if reg=='none':\n gradient[1:]\n return -gradient\n \n # just overwrite fit function\n def fit(self, X, y):\n \n Xb = self._add_bias(X) # add bias term\n num_samples, num_features = Xb.shape\n \n self.w_ = fmin_bfgs(self.objective_function, # what to optimize\n np.zeros((num_features,1)), # starting point\n fprime=self.objective_gradient, # gradient function\n args=(Xb,y,self.C,self.reg), # extra args for gradient and objective function\n gtol=1e-03, # stopping criteria for gradient, |v_k|\n maxiter=self.iters, # stopping criteria iterations\n disp=False)\n \n self.w_ = self.w_.reshape((num_features,1)) \n \n \nfrom numpy.linalg import pinv\nclass HessianBinaryLogisticRegression(BinaryLogisticRegression):\n # just overwrite gradient function\n def _get_gradient(self,X,y):\n g = self.predict_proba(X,add_bias=False).ravel() # get sigmoid value for all classes\n hessian = X.T @ np.diag(g*(1-g)) @ X - 2 * self.C # calculate the hessian\n\n ydiff = y-g # get y difference\n gradient = np.sum(X * ydiff[:,np.newaxis], axis=0) # make ydiff a column vector and multiply through\n gradient = gradient.reshape(self.w_.shape)\n if self.reg=='L2':\n gradient[1:] += -2 * self.w_[1:] * self.C\n if self.reg=='L1': # the deravtiv of C*abs(W), which should be dx(abs(w))= w/abs(w)\n gradient[1:] += -self.C\n if self.reg=='L1L2':\n gradient[1:] += -self.C-2 * self.w_[1:] * self.C\n if self.reg=='none':\n gradient[1:]\n return pinv(hessian) @ gradient", "_____no_output_____" 
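], [ "# NOTE: illustrative addition, not part of the original lab -- a minimal sanity check of the binary\n# solver classes defined above on a tiny, well-separated synthetic problem, before they are wrapped by\n# the multi-class (one-vs-rest) LogisticRegression class in the next cell. The names Xtoy, ytoy and\n# toy_clf are placeholders introduced here for illustration only.\nnp.random.seed(42)\nXtoy = np.vstack((np.random.randn(60, 2) - 2, np.random.randn(60, 2) + 2))\nytoy = np.hstack((np.zeros(60), np.ones(60)))\n\ntoy_clf = VectorBinaryLogisticRegression(eta=0.1, iterations=200, C=0.001, reg='L2')\ntoy_clf.fit(Xtoy, ytoy)\ntoy_acc = np.mean(toy_clf.predict(Xtoy).ravel() == ytoy)\nprint('sanity-check accuracy on the separable toy data:', toy_acc)  # should be close to 1.0", "_____no_output_____"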
], [ "from scipy.optimize import minimize_scalar\nimport copy\nclass LogisticRegression:\n def __init__(self, eta, iterations,solver='leaner', C=0.001,reg='L2'):\n self.eta = eta\n self.iters = iterations\n self.slv = solver\n self.C=C\n self.reg=reg\n # internally we will store the weights as self.w_ to keep with sklearn conventions\n \n def __str__(self):\n if(hasattr(self,'w_')):\n return 'MultiClass Logistic Regression Object with coefficients:\\n'+ str(self.w_) # is we have trained the object\n else:\n return 'Untrained MultiClass Logistic Regression Object'\n \n def fit(self,X,y):\n num_samples, num_features = X.shape\n self.unique_ = np.sort(np.unique(y)) # get each unique class value\n num_unique_classes = len(self.unique_)\n self.classifiers_ = [] # will fill this array with binary classifiers\n \n for i,yval in enumerate(self.unique_): # for each unique value\n y_binary = (y==yval) # create a binary problem\n # train the binary classifier for this class\n if self.slv=='stochastic':\n slr = StochasticLogisticRegression(self.eta,self.iters,self.C,self.reg)\n slr.fit(X,y_binary)\n self.classifiers_.append(slr)\n if self.slv=='steepest':\n mls=LineSearchLogisticRegression(self.eta,self.iters,self.C,self.reg)\n mls.fit(X,y_binary)\n self.classifiers_.append(mls)\n if self.slv=='leaner':\n blr = VectorBinaryLogisticRegression(self.eta,self.iters,self.reg)\n blr.fit(X,y_binary)\n self.classifiers_.append(blr)\n if self.slv=='BFGS':\n bfgslr = BFGSBinaryLogisticRegression(self.eta,self.iters,self.C,self.reg)\n bfgslr.fit(X,y_binary)\n self.classifiers_.append(bfgslr)\n if self.slv=='newton':\n newt = HessianBinaryLogisticRegression(self.eta,self.iters,self.C,self.reg)\n newt.fit(X,y_binary)\n self.classifiers_.append(newt)\n \n # add the trained classifier to the list \n # save all the weights into one matrix, separate column for each class\n self.w_ = np.hstack([x.w_ for x in self.classifiers_]).T\n \n \n \n def predict_proba(self,X):\n probs = []\n for blr in self.classifiers_:\n probs.append(blr.predict_proba(X)) # get probability for each classifier\n \n return np.hstack(probs) # make into single matrix\n \n def predict(self,X):\n return np.argmax(self.predict_proba(X),axis=1) # take argmax along row \n\n", "_____no_output_____" ] ], [ [ "We chose some initial parameters to try on based on professor's suggestions. After several trials, we decided to use these parameters because they did not require much time consumption and they provided the best results among trials.\n\nUsing trial and error to obtain optimized parameters for classification does not seem to be \"data snooping\" of the negative kind in the typical sense. Data snooping is the \"misuse of data analysis to find patterns in data that can be presented as statistically significant when in fact there is no real underlying effect.\" (https://en.wikipedia.org/wiki/Data_dredging) In our case, we are pursuing optimal parameters that enable the highest accuracy classification possible. No matter what methods we use to obtain a model that accurately predicts classes for training data, it does not matter if the same is not also true for new, untrained data. 
If our classification algorithm is negatively impacted by our tweaking of parameters for new instances of data, then the tweaking of these parameters would be unjustified.\n\nHowever, since our goal is only to build the best classification tool possible, any means that improve that capability would be permissible, including adjusting parameters such as the regularization term - assuming they contribute to an increase in overall classification performance for untrained data.", "_____no_output_____" ] ], [ [ "%%time\nfrom sklearn.metrics import accuracy_score\nx_train_ar=X_train.values\ny_target_ar=np.asarray(y_train)\nx_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar)\nlr = LogisticRegression(.01,1000,'stochastic',1,'L1')\nlr.fit(x_train_ar,y_target_ar)\nprint(lr)\n\nyhat = lr.predict(x_train_ar)\nstoc1=accuracy_score(y_target_ar,yhat)\nprint('Accuracy of: ',accuracy_score(y_target_ar,yhat))", "MultiClass Logistic Regression Object with coefficients:\n[[ -4.63861077 -8.36827515 -8.38249855 -9.17086857 -9.21844644\n -8.46572034 -9.40844502 -9.47614569 -9.46076256 -8.37357738\n -11.29398837 -9.94762782 -9.91999337 -9.26727903 -9.96976419\n -9.9144675 -9.90927715 -9.47668857 -9.9572392 -9.93238493\n -9.17237068 -9.9572392 -9.9572392 -9.96976419 -9.92592983\n -9.92592983 -9.57788123 -9.61910524 -11.30051312 -9.96976419\n -9.96976419 -9.83428953 -9.92592983 -9.9572392 -9.55670492\n -9.75228074 -9.96976419 -9.9572392 -9.96976419 -9.96976419\n -9.94762782 -9.85492633 -9.94762782 -9.85811817 -9.79736305\n -9.96976419 -9.37543473 -9.64768733 -9.06181368 -9.92592983\n -9.94762782 -9.9572392 -9.94762782 -9.96976419 -10.84229452\n -9.96976419 -9.74422605 -9.46815748 -10. -9.82525703\n -9.81102481 -9.79272203 -9.89969785 -9.97580066 -9.83707824\n -9.96976419 -9.96976419 -10.82800997 -9.93952453 -9.43701786\n -10.0545102 -9.79550886 -9.86472674 -9.78172986 -9.78597763\n -9.73913839 -9.96976419 -9.28209439 -9.52265813 -9.38896235\n -9.77145259 -9.668967 -9.7110157 -10.16726764 -10.\n -9.96976419 -9.9572392 -9.98605573 -9.92592983 -9.8868367\n -9.73081182 -9.93952453 -9.98983053 -9.89738377 -9.56581246\n -9.94762782 -8.9100678 -9.92592983 -10. -10.\n -9.02767904 -9.96976419 -9.91999337 -9.8952356 -9.49620345\n -9.87167397 -9.96976419 -9.91284205 -9.56352626 -9.9572392\n -9.44691634 -9.94762782 -9.8790182 -10. -9.94762782\n -9.94762782 -9.85180296 -9.88286237 -9.96976419 -9.74847889\n -9.8952356 -9.96976419 -9.9572392 -9.73598934 -9.72074082\n -9.9572392 -9.93238493 -9.9572392 -9.9572392 -9.88286237\n -9.87529217 -9.9572392 -9.93952453 -9.59653638 -9.96976419\n -9.83856701 -10.50985964 -9.37224158 -10.21537656 -10.13567615\n -9.37224158]\n [ -2.79350215 -9.61549449 -8.82013647 -9.14375082 -9.1817953\n -8.71063241 -9.16932486 -9.99677248 -9.69492215 -8.71213194\n -10.8812729 -9.96845999 -9.95181775 -9.45193431 -9.98179114\n -9.94848991 -9.94536414 -9.93184987 -9.97424824 -9.9592803\n -9.96357997 -9.97424824 -8.88937497 -9.98179114 -9.95539285\n -9.95539285 -9.94058641 -9.87134367 -10.67592343 -9.98179114\n -9.98179114 -9.90020448 -9.95539285 -9.97424824 -9.8283954\n -9.85074466 -9.98179114 -9.97424824 -9.98179114 -9.98179114\n -9.96845999 -9.91263255 -9.96845999 -9.91455476 -9.65631771\n -9.98179114 -9.57121525 -9.65541101 -9.96845999 -9.95539285\n -9.96845999 -9.97424824 -9.96845999 -9.98179114 -10.80667819\n -8.44758125 -9.8042572 -9.41314725 -10. 
-9.94778559\n -9.64032505 -9.65103767 -9.93959522 -10.06403445 -9.33175344\n -9.98179114 -9.98179114 -10.71750409 -9.96357997 -9.93959522\n -9.71537354 -9.65205363 -9.91853463 -9.86855157 -9.43671719\n -9.92760766 -9.98179114 -9.52545802 -8.97280507 -9.62437033\n -9.66161063 -10.15149773 -9.50762099 -10.32750189 -10.\n -9.98179114 -9.97424824 -9.87743785 -9.95539285 -9.93184987\n -9.66498845 -9.96357997 -10.16928493 -10.18331285 -9.47007442\n -9.96845999 -8.60272629 -9.95539285 -10. -10.\n -8.70071561 -9.98179114 -9.95181775 -9.93690792 -9.40347784\n -9.92271845 -9.98179114 -9.89375052 -9.88915504 -9.97424824\n -9.41797847 -9.96845999 -9.92714135 -10. -9.96845999\n -9.96845999 -9.59742915 -9.53320726 -9.98179114 -9.66356234\n -9.93690792 -9.98179114 -9.97424824 -9.48847131 -9.49841258\n -9.97424824 -9.27310246 -9.97424824 -9.97424824 -9.92945642\n -9.92489743 -9.97424824 -9.96357997 -10.00075151 -9.98179114\n -9.23784806 -11.48081055 -8.66731209 -10.43636881 -10.30895356\n -8.66731209]\n [ -5.87902468 -9.39160916 -8.26446936 -9.16406006 -9.41532353\n -8.36817339 -8.92184366 -9.32314841 -9.94556489 -8.40982062\n -10.97873618 -9.93362294 -9.89859874 -9.52520679 -9.96167881\n -9.89159519 -9.88501689 -9.85657563 -9.94580451 -9.91430394\n -9.92335274 -9.94580451 -9.94580451 -9.96167881 -9.90612267\n -9.90612267 -9.65026793 -9.72562899 -10.9899245 -9.96167881\n -9.96167881 -9.78997678 -9.90612267 -9.94580451 -9.7898768\n -9.68588683 -9.96167881 -9.94580451 -9.96167881 -9.96167881\n -9.93362294 -9.81613209 -9.93362294 -9.82017746 -9.72070884\n -9.96167881 -9.56695381 -9.55347511 -9.93362294 -9.90612267\n -9.93362294 -9.94580451 -9.93362294 -9.96167881 -10.47857944\n -9.96167881 -9.58805217 -9.471451 -10. -10.22781799\n -9.76049083 -9.73702607 -9.87287599 -10.15107431 -9.79351123\n -9.96167881 -9.96167881 -10.27962043 -9.92335274 -9.87287599\n -10.33463654 -9.73423754 -9.82855322 -9.7233621 -9.72874577\n -9.90121908 -9.96167881 -9.67847495 -9.56869889 -9.63419962\n -9.71033657 -9.98967933 -9.63373822 -10.47219512 -10.\n -9.96167881 -9.94580451 -9.61105737 -9.90612267 -9.85657563\n -9.65882803 -9.92335274 -9.94326993 -10.14190427 -9.69301937\n -9.93362294 -8.86435865 -9.90612267 -10. -10.\n -9.25876071 -9.96167881 -9.89859874 -9.86722048 -9.51700237\n -9.83735822 -9.96167881 -9.77639419 -9.81279377 -9.94580451\n -9.39829994 -9.93362294 -9.84666638 -10. -9.93362294\n -9.93362294 -9.81217349 -9.85153852 -9.96167881 -9.68121948\n -9.86722048 -9.96167881 -9.94580451 -9.66539007 -9.64610004\n -9.94580451 -9.96285192 -9.94580451 -9.94580451 -9.85153852\n -9.84194396 -9.94580451 -9.92335274 -9.41696696 -9.96167881\n -10.14894804 -10.01758091 -9.71992421 -10.19590216 -9.96042598\n -9.71992421]\n [ -5.19463794 -8.99250285 -8.56780083 -9.0988067 -9.26108645\n -8.51619826 -9.23134445 -8.9390623 -9.44556762 -8.43494145\n -11.31867891 -9.94135 -9.91040303 -9.5618949 -9.96613984\n -9.90421477 -9.89840226 -9.87327189 -9.95211349 -9.92427995\n -9.93227537 -9.95211349 -9.95211349 -9.96613984 -9.91705108\n -9.29064598 -9.388838 -9.6586971 -11.38671856 -9.96613984\n -9.96613984 -9.81442592 -9.91705108 -9.95211349 -9.6670475\n -9.34706027 -9.96613984 -9.95211349 -9.96613984 -9.96613984\n -9.94135 -9.51748195 -9.94135 -9.84111089 -9.75322158\n -9.96613984 -9.39355519 -9.47303658 -9.05553586 -9.91705108\n -9.94135 -9.95211349 -9.94135 -8.43192995 -11.0447596\n -9.96613984 -9.63600768 -9.41559058 -10. 
-10.09133391\n -9.78837248 -9.76764277 -9.8876747 -9.67109395 -9.81754892\n -9.96613984 -9.96613984 -10.5813794 -9.93227537 -9.8876747\n -10.28891623 -9.85752186 -9.84851162 -9.75571705 -9.76032355\n -9.51575244 -9.96613984 -9.69368048 -9.61890762 -9.55584214\n -9.74405676 -9.62477791 -9.67637535 -10.04714292 -10.\n -9.96613984 -9.95211349 -9.41175091 -9.91705108 -9.87327189\n -9.61209499 -9.16512153 -10.00803526 -10.20651781 -9.72875548\n -9.94135 -9.00277495 -9.91705108 -10. -10.\n -9.34107183 -9.96613984 -9.91040303 -9.88267756 -9.45049349\n -9.85629161 -9.96613984 -9.8024245 -9.79387901 -9.95211349\n -9.46842904 -9.94135 -9.8645162 -10. -9.94135\n -9.94135 -9.52071626 -9.86882116 -9.96613984 -9.71832922\n -9.88267756 -9.96613984 -9.95211349 -9.70434255 -9.68726621\n -9.95211349 -9.92427995 -9.95211349 -9.95211349 -9.86882116\n -9.86034352 -8.86724022 -9.93227537 -9.45681273 -9.96613984\n -11.22683265 -9.14196167 -9.45892238 -10.43966162 -9.86216751\n -9.45892238]\n [ -4.76152118 -9.05132234 -8.91957558 -9.24789199 -9.41894245\n -8.75341693 -9.16585449 -9.47657197 -9.70318668 -8.80836634\n -11.00386739 -9.9462401 -9.91787341 -9.59756493 -9.96896302\n -9.91220112 -9.93410117 -9.88383819 -9.95610616 -9.93059331\n -9.93792209 -9.95610616 -9.95610616 -9.96896302 -9.92396717\n -9.92396717 -9.3853652 -9.95116133 -10.98610476 -9.96896302\n -9.96896302 -9.82989866 -9.92396717 -9.95610616 -9.71068652\n -9.37026025 -9.96896357 -9.95610616 -9.96896302 -9.96896302\n -9.9462401 -9.85108228 -9.9462401 -9.85435869 -9.58720164\n -9.96896302 -9.56819998 -9.63835196 -9.9462401 -9.92396717\n -9.9462401 -9.95610616 -9.9462401 -9.96896302 -10.95469692\n -9.96896302 -9.66857414 -9.44348208 -10. -9.70639517\n -9.83292761 -9.78701298 -9.89704012 -10.00034292 -9.83276126\n -9.96896302 -9.96896302 -10.6742734 -9.93792209 -9.89704012\n -10.1106019 -9.7847545 -9.86114237 -9.79244874 -9.56311038\n -9.84832131 -9.96896302 -9.60738233 -9.65068196 -9.48837705\n -9.76539671 -9.82842705 -9.70335841 -10.16430792 -10.\n -9.96896302 -9.95610616 -9.62515282 -9.92396717 -9.88383819\n -9.55078024 -9.93792209 -9.80279108 -9.75234298 -9.55934334\n -9.9462401 -9.2251951 -9.92396717 -10. -10.\n -9.3049325 -9.96896302 -9.91787341 -9.89245963 -9.48626216\n -9.86827368 -9.96896302 -9.81889788 -9.8110649 -9.95610616\n -9.45357572 -9.9462401 -9.87581252 -10. -9.9462401\n -9.9462401 -9.84787615 -9.87975854 -9.96896302 -9.74181428\n -9.89245963 -9.96896302 -9.95610616 -9.73393503 -9.7133427\n -9.95610616 -9.93059331 -9.95610616 -9.95610616 -9.87975854\n -9.87198775 -9.95610616 -9.93792209 -9.54354249 -9.96896302\n -9.94913273 -10.32529558 -9.51229741 -10.08682546 -10.18614352\n -9.51229741]\n [ -3.34150576 -9.25061454 -8.10843756 -9.11263903 -8.96398267\n -7.94375578 -9.29858104 -9.09428814 -8.15948356 -8.02975817\n -11.34596584 -9.96227277 -9.9423658 -9.82242912 -9.9782191\n -9.93838514 -9.42315595 -9.91848081 -9.9691965 -9.95129228\n -9.95643542 -9.9691965 -9.9691965 -9.9782191 -9.94664223\n -9.94664223 -9.46784153 -9.84880127 -11.23019847 -9.9782191\n -9.9782191 -9.60034766 -9.32023713 -9.9691965 -9.64980944\n -9.4461484 -9.9782191 -9.9691965 -9.9782191 -9.9782191\n -9.96227277 -9.89550073 -9.96227277 -9.89789214 -9.63028405\n -9.9782191 -9.22351004 -9.3489428 -9.96227277 -9.94664223\n -9.96227277 -9.9691965 -9.96227277 -9.9782191 -10.52186983\n -9.9782191 -9.76992538 -9.51967952 -10. 
-9.78992557\n -9.37213084 -9.8505321 -9.46506557 -10.14887575 -9.88263641\n -9.9782191 -9.9782191 -10.8982677 -9.95643542 -9.92774556\n -10.17041151 -9.84894658 -9.9025535 -9.6297846 -9.84582518\n -9.72976018 -9.9782191 -9.48027207 -9.4807728 -9.36023033\n -9.83536181 -9.55472625 -9.63089766 -10.38953663 -10.\n -9.9782191 -9.9691965 -9.6166843 -9.94664223 -9.91848081\n -9.63318667 -9.95643542 -10.16675402 -10.09335541 -9.24941883\n -9.96227277 -8.82093159 -9.94664223 -10. -10.\n -9.26418807 -9.9782191 -9.9423658 -9.9245311 -9.60283622\n -9.90755806 -9.9782191 -9.87290747 -9.8674105 -9.9691965\n -9.26308648 -9.96227277 -9.91284861 -10. -9.96227277\n -9.96227277 -9.57992138 -9.91561782 -9.9782191 -9.29255277\n -9.9245311 -9.9782191 -9.9691965 -9.80981522 -9.29871301\n -9.9691965 -9.95129228 -7.79944996 -9.9691965 -9.91561782\n -9.91016449 -9.9691965 -9.95643542 -9.42505579 -9.9782191\n -11.62557264 -8.26511348 -10.33597105 -9.79399013 -10.01839741\n -10.33597105]]\nAccuracy of: 0.42163402302757363\nWall time: 816 ms\n" ], [ "%%time\nfrom sklearn.metrics import accuracy_score\nx_train_ar=X_train.values\ny_target_ar=np.asarray(y_train)\n#y_target_ar=y_train_b.transfer_partner.values\n#y_target_ar=y_train_b_v\nx_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar)\nlr = LogisticRegression(.01,1000,'stochastic',.0001,'L1L2')\nlr.fit(x_train_ar,y_target_ar)\nprint(lr)\n\nyhat = lr.predict(x_train_ar)\nstoc2=accuracy_score(y_target_ar,yhat)\nprint('Accuracy of: ',accuracy_score(y_target_ar,yhat))", "MultiClass Logistic Regression Object with coefficients:\n[[-1.53434694e+00 3.05400388e-01 5.12894362e-02 -8.65622721e-02\n 1.74550422e-02 -4.90222007e-02 8.51124884e-02 -3.65424297e-02\n -5.56120706e-02 -7.65107534e-02 8.04992458e-02 1.63022402e-02\n 2.54313280e-02 -7.22964319e-03 8.98945055e-03 -2.90092600e-01\n 2.89714490e-02 3.63846901e-02 1.31271030e-02 2.13377546e-02\n 1.89791761e-02 1.31271030e-02 1.31271030e-02 8.98945055e-03\n 2.34702088e-02 -2.96207163e-01 4.77404938e-02 5.58020697e-02\n -1.46183122e-02 8.98945055e-03 -7.59838440e-01 5.37437404e-02\n 2.34702088e-02 1.31271030e-02 -4.54738036e-02 -1.96538502e-02\n 8.98945055e-03 -5.95405252e-01 8.98945055e-03 8.98945055e-03\n 1.63022402e-02 -1.19661189e-01 1.63022402e-02 -3.03626023e-02\n -6.09369662e-02 8.98945055e-03 7.83280020e-02 -1.07001583e-01\n 1.63022402e-02 2.85530031e-01 1.63022402e-02 -7.29922456e-01\n -4.48616045e-01 -7.73973812e-01 1.17683464e-02 8.98945055e-03\n -7.75099413e-02 1.98991365e-02 -9.99001664e-04 -7.02844618e-02\n 6.14292795e-02 -5.71353225e-02 3.21359865e-02 -2.61230274e-02\n 1.72581706e-01 8.98945055e-03 8.98945055e-03 1.07530436e-01\n 1.89791761e-02 3.21359865e-02 8.74778300e-02 -7.63004777e-02\n -1.28295820e-01 1.63800323e-01 1.66120815e-01 -5.70862986e-03\n 8.98945055e-03 9.39121445e-02 4.58749235e-02 -1.04889332e-03\n -2.12617992e-02 3.09440501e-02 -3.50752278e-02 -3.17129853e-02\n -9.99001664e-04 8.98945055e-03 1.31271030e-02 -4.25097635e-02\n 2.34702088e-02 -1.56596368e-01 -7.01943589e-02 1.89791761e-02\n -2.23573006e-02 -7.39170895e-02 -1.07204072e-01 1.63022402e-02\n -6.97022865e-02 2.34702088e-02 -9.99001664e-04 -9.99001664e-04\n 3.41673318e-02 8.98945055e-03 2.54313280e-02 3.36101003e-02\n 5.31148302e-02 -1.30621611e-01 8.98945055e-03 5.72840566e-02\n -5.96586942e-02 1.31271030e-02 -9.88281004e-02 1.63022402e-02\n 3.89675450e-02 -9.99001664e-04 1.63022402e-02 -5.25683875e-01\n 4.79581478e-02 -1.79178060e-01 8.98945055e-03 -8.55062381e-02\n 2.55468030e-01 8.98945055e-03 1.31271030e-02 
1.34171239e-01\n -1.00203377e-01 1.31271030e-02 2.13377546e-02 1.31271030e-02\n 1.31271030e-02 3.76976181e-02 4.01984474e-02 1.31271030e-02\n 1.89791761e-02 -9.63069253e-02 8.98945055e-03 1.05043565e-01\n -7.37311212e-02 -5.02914043e-02 -5.45095565e-02 8.02846606e-02\n -5.02914043e-02]\n [-7.61299002e-01 -1.72238158e-02 1.06603412e-01 1.50532269e-01\n -2.32123864e-02 1.40852454e-02 9.13477771e-03 -1.94629962e-01\n -1.24282474e-01 -6.83010033e-02 4.14614857e-02 7.58462016e-03\n 1.21138117e-02 -1.15419725e-02 3.95654414e-03 1.30194833e-02\n 1.38701630e-02 1.75480758e-02 6.00934731e-03 1.00828774e-02\n 8.91272166e-03 6.00934731e-03 6.00934731e-03 3.95654414e-03\n -1.55303907e-01 1.11408465e-02 3.88020909e-02 -8.67544934e-02\n 6.48989229e-02 3.95654414e-03 3.95654414e-03 2.61603780e-02\n 1.11408465e-02 6.00934731e-03 -8.24734089e-02 -2.14443150e-02\n 3.95654414e-03 6.00934731e-03 3.95654414e-03 3.95654414e-03\n 7.58462016e-03 2.27780764e-02 7.58462016e-03 2.22549450e-02\n 3.51178364e-02 3.95654414e-03 -3.82763536e-02 -1.94858446e-02\n 7.58462016e-03 1.11408465e-02 7.58462016e-03 6.00934731e-03\n 7.58462016e-03 3.95654414e-03 5.20551213e-02 6.07657563e-01\n 1.36479629e-01 -5.22622502e-02 -9.99001664e-04 -1.02716205e-02\n 2.99733853e-02 3.30077573e-02 1.54401771e-02 -2.41202278e-02\n -2.00936738e-01 3.95654414e-03 3.95654414e-03 2.89444240e-02\n 8.91272166e-03 -2.58069086e-01 -4.71023592e-02 -2.79188808e-02\n 2.11718237e-02 -2.48159065e-02 -6.53597777e-02 7.04009893e-02\n 3.95654414e-03 7.25829562e-03 -1.21633755e-01 -8.86383071e-02\n -1.20268918e-01 -1.31553072e-02 -1.05146565e-01 1.04979546e-01\n -9.99001664e-04 3.95654414e-03 6.00934731e-03 -2.47619616e-02\n 1.11408465e-02 -1.88769000e-01 -3.95573284e-02 8.91272166e-03\n -9.00623151e-02 -1.88239185e-02 6.41597269e-03 -3.57045893e-01\n 1.98622170e-02 1.11408465e-02 -9.99001664e-04 -9.99001664e-04\n 3.34453123e-04 3.95654414e-03 1.44874148e-01 1.61715255e-02\n -1.06103075e-01 -2.10592920e-01 3.95654414e-03 -1.91500263e-01\n 1.39714033e-01 -4.88642617e-01 -8.95223653e-02 7.58462016e-03\n 1.88295011e-02 -9.99001664e-04 7.58462016e-03 7.58462016e-03\n -1.88060562e-01 1.81994555e-02 3.95654414e-03 -4.28596785e-02\n 3.91870158e-01 3.95654414e-03 6.00934731e-03 -5.45957196e-02\n -6.93753588e-02 6.00934731e-03 1.00828774e-02 6.00934731e-03\n 6.00934731e-03 2.35998274e-01 1.94401857e-02 6.00934731e-03\n 6.67116833e-01 1.80148280e-02 3.95654414e-03 4.55354265e-01\n -5.23251836e-01 1.55319671e-01 -2.78999147e-02 -6.15929812e-02\n 1.55319671e-01]\n [-2.24732593e+00 -3.74004195e-02 8.19365899e-03 1.17439633e-02\n -4.89865223e-02 -4.06783941e-02 -2.09278683e-02 1.76488592e-02\n -1.58527634e-04 -2.09351144e-02 9.25354371e-02 2.43425414e-02\n -1.85848704e-01 6.57351853e-03 1.36313234e-02 4.03879616e-02\n -2.10172891e-01 -9.89936999e-02 1.96918420e-02 3.17181800e-02\n -3.32388387e-01 1.96918420e-02 1.96918420e-02 1.36313234e-02\n 3.48416367e-02 3.48416367e-02 1.31960386e-03 7.99738416e-02\n 2.31501203e-04 1.36313234e-02 1.36313234e-02 -7.16077607e-02\n -2.18498125e-01 1.96918420e-02 -2.83064897e-02 6.38301894e-03\n 1.36313234e-02 1.96918420e-02 1.36313234e-02 1.36313234e-02\n 2.43425414e-02 -7.04824000e-02 -3.45660639e-01 -3.96176288e-02\n 1.05629229e-01 1.36313234e-02 -5.02473505e-02 3.65942574e-02\n 2.43425414e-02 3.48416367e-02 2.43425414e-02 1.96918420e-02\n 2.43425414e-02 1.36313234e-02 5.04018589e-02 1.36313234e-02\n -2.10755476e-02 -2.00423302e-02 -9.99001664e-04 5.27869904e-02\n 9.04411961e-02 -1.21279598e-02 -1.07039262e-01 8.32863198e-03\n 
-1.04599984e-01 -7.29644050e-01 1.36313234e-02 -2.09504984e-02\n 2.82635135e-02 4.75346087e-02 7.50191958e-03 -8.38150784e-03\n 6.44562265e-02 -2.89725331e-02 -7.31105272e-03 -5.05971989e-03\n 1.36313234e-02 -2.55174124e-02 -3.34325884e-02 -2.34998767e-02\n 3.36833047e-03 2.69668461e-02 5.02254328e-02 -1.34344983e-02\n -9.99001664e-04 -8.20189538e-01 1.96918420e-02 2.09349530e-02\n 3.48416367e-02 5.37577865e-02 -2.09804541e-02 -5.95050123e-01\n 9.84661232e-03 -5.86968397e-02 -1.06166092e-02 2.43425414e-02\n 6.65334247e-03 3.48416367e-02 -9.99001664e-04 -9.99001664e-04\n -1.18507567e-02 1.36313234e-02 -2.69496310e-01 4.96937785e-02\n -3.49242361e-02 -1.21405792e-01 1.36313234e-02 -6.18519630e-02\n -1.17026178e-01 1.96918420e-02 -4.14287749e-02 2.43425414e-02\n 5.75409559e-02 -9.99001664e-04 2.43425414e-02 2.43425414e-02\n 7.07097070e-02 -1.42563647e-01 1.36313234e-02 -9.36637243e-02\n -2.60001378e-01 1.36313234e-02 1.96918420e-02 -6.31075223e-02\n 4.22582969e-02 1.96918420e-02 3.17181800e-02 1.96918420e-02\n 1.96918420e-02 -1.86762108e-01 -8.59791411e-02 1.96918420e-02\n 2.82635135e-02 8.86291101e-04 1.36313234e-02 -2.85494820e-03\n -1.09872938e-02 1.96924679e-02 2.69692164e-02 -4.06417981e-02\n 1.96924679e-02]\n [-1.69149341e+00 -2.21782588e-01 -1.11606795e-01 -4.32133929e-02\n -1.14374228e-02 9.84766980e-02 -1.58495474e-01 6.70216669e-02\n -2.08730610e-01 7.74355701e-02 3.95948928e-02 1.80749879e-02\n 2.81394750e-02 1.34370156e-01 1.00129034e-02 -2.74577755e-01\n 3.20423295e-02 4.02151582e-02 1.45745147e-02 -3.36527120e-01\n 2.10262123e-02 1.45745147e-02 5.39325666e-01 1.00129034e-02\n 2.59774125e-02 2.59774125e-02 2.61119190e-02 -5.36344311e-02\n 4.72105731e-02 1.00129034e-02 1.00129034e-02 -2.30183989e-02\n 2.59774125e-02 1.45745147e-02 -1.70000581e-02 -8.48193549e-02\n 1.00129034e-02 1.45745147e-02 -7.48553751e-01 1.00129034e-02\n 1.80749879e-02 -1.67446623e-01 1.80749879e-02 5.06744695e-02\n 2.98360969e-02 1.00129034e-02 -9.18650444e-03 -1.08718835e-01\n 1.80749879e-02 2.59774125e-02 1.80749879e-02 1.45745147e-02\n -4.68892394e-01 1.00129034e-02 1.33474112e-02 1.00129034e-02\n 1.22192624e-02 -2.75345059e-02 -9.99001664e-04 4.55351565e-02\n -2.40087755e-02 7.45686990e-02 3.55311170e-02 -5.71155861e-03\n -3.08497187e-02 1.00129034e-02 1.00129034e-02 1.33438047e-01\n 2.10262123e-02 3.55311170e-02 -5.24673206e-02 -6.05417698e-03\n 2.28850960e-01 -1.26184369e-02 -9.26593477e-02 -5.77089370e-03\n 1.00129034e-02 3.45103517e-03 -1.73805865e-02 2.36444321e-02\n -2.85497813e-03 -8.84941103e-02 2.04870047e-02 -3.28917440e-02\n -9.99001664e-04 1.00129034e-02 1.45745147e-02 6.71602303e-02\n -4.03200573e-01 -2.53090537e-01 -8.42085528e-02 2.10262123e-02\n -1.83708407e-02 -2.20500059e-02 -6.85626290e-02 1.80749879e-02\n -7.91034901e-02 -1.58237644e-01 -9.99001664e-04 -9.99001664e-04\n -1.51661164e-02 1.00129034e-02 2.81394750e-02 -2.29078463e-01\n -1.58140123e-02 -7.67721962e-02 1.00129034e-02 6.32559492e-02\n -9.19400501e-02 1.45745147e-02 5.07390269e-02 1.80749879e-02\n 4.30626617e-02 -9.99001664e-04 1.80749879e-02 1.80749879e-02\n 5.29744741e-02 4.16626135e-02 1.00129034e-02 -2.42415462e-02\n -2.26956452e-01 1.00129034e-02 1.45745147e-02 -7.64975706e-02\n -7.31784405e-03 1.45745147e-02 2.36264593e-02 1.45745147e-02\n 1.45745147e-02 4.16626135e-02 4.44196868e-02 5.31523995e-01\n 2.10262123e-02 -5.56167924e-03 1.00129034e-02 -2.61668238e-01\n 3.00907819e-01 -9.66196373e-02 3.73619525e-02 1.40962530e-02\n -9.66196373e-02]\n [-1.61345798e+00 5.84133610e-02 5.55852812e-02 3.30261664e-02\n 
7.42191252e-02 -1.58376967e-02 -8.65968698e-02 3.76087162e-03\n -4.00999851e-01 -4.60171672e-02 3.84440226e-02 1.71930184e-02\n 2.67921299e-02 2.51395545e-03 9.50371975e-03 -1.42923413e-01\n 3.05145188e-02 3.83094409e-02 1.38544049e-02 2.24877933e-02\n 2.00077800e-02 1.38544049e-02 1.38544049e-02 9.50371975e-03\n -9.65274507e-02 -2.49712243e-01 -6.31059857e-02 -1.81560283e-02\n 6.81102999e-02 9.50371975e-03 9.50371975e-03 -1.04308833e-01\n 2.47300398e-02 1.38544049e-02 -4.56437847e-02 -5.11487282e-03\n 9.50371975e-03 1.38544049e-02 9.50371975e-03 9.50371975e-03\n 1.71930184e-02 4.93938383e-02 1.71930184e-02 -1.36491827e-01\n 1.27389536e-02 9.50371975e-03 2.21530067e-02 -1.35332192e-02\n 1.71930184e-02 2.47300398e-02 1.71930184e-02 -2.74690615e-01\n 1.71930184e-02 -6.62668427e-01 4.66528593e-02 9.50371975e-03\n -6.22313470e-03 -4.50287842e-02 -9.99001664e-04 -7.96218580e-03\n 9.58465354e-02 -3.14307512e-02 3.38419870e-02 -3.84381193e-02\n -1.48941239e-01 9.50371975e-03 9.50371975e-03 -2.27944262e-02\n 2.00077800e-02 3.38419870e-02 3.89252676e-02 -1.19370752e-01\n 4.59895665e-02 1.79516295e-01 -7.44336843e-02 -2.94842553e-02\n 9.50371975e-03 5.79862006e-05 -6.89997201e-02 3.19823069e-02\n -6.66585346e-02 5.00734586e-02 -1.03854678e-01 -4.94488197e-02\n -9.99001664e-04 9.50371975e-03 1.38544049e-02 -3.23314656e-02\n -2.51698750e-01 3.83094409e-02 1.49686541e-01 2.00077800e-02\n 4.24630929e-02 4.48526421e-02 8.31354485e-02 1.71930184e-02\n -9.23571262e-03 2.47300398e-02 -9.99001664e-04 -9.99001664e-04\n 3.63521513e-02 9.50371975e-03 2.67921299e-02 -1.28037713e-01\n -7.77433982e-02 4.35763728e-02 9.50371975e-03 6.02848400e-02\n 6.29354735e-02 1.38544049e-02 -9.91719513e-02 1.71930184e-02\n -1.96220487e-01 -9.99001664e-04 1.71930184e-02 3.29257692e-01\n -8.52122456e-02 -1.30515992e-01 9.50371975e-03 1.49472983e-01\n 3.53919975e-02 9.50371975e-03 1.38544049e-02 -6.98430679e-02\n -3.23633391e-02 -4.26581019e-01 2.24877933e-02 1.38544049e-02\n 1.38544049e-02 -9.79838024e-02 8.95586619e-02 1.38544049e-02\n 2.00077800e-02 -1.06955436e-02 9.50371975e-03 2.02357681e-01\n -1.85742529e-01 -1.58224102e-02 2.54175619e-02 -1.91890754e-02\n -1.58224102e-02]\n [-8.46154867e-01 -2.01908205e-01 1.85778254e-02 7.54535851e-02\n -4.62296511e-02 5.69587169e-02 -7.68538063e-03 2.82135424e-01\n 5.32230243e-01 -2.61369149e-03 -6.63635122e-03 8.54255535e-03\n -1.94542160e-01 -8.04436664e-03 4.50958478e-03 3.08417844e-01\n -2.25382109e-01 1.78236969e-01 6.79148150e-03 1.13196189e-02\n -4.69231085e-01 6.79148150e-03 6.79148150e-03 4.50958478e-03\n 1.24956578e-02 1.24956578e-02 -8.81368179e-02 -1.05052555e-02\n 4.73801654e-02 4.50958478e-03 4.50958478e-03 1.88311207e-01\n 1.71223185e-01 6.79148150e-03 1.37862118e-01 1.39618405e-02\n 4.50958478e-03 6.79148150e-03 4.50958478e-03 4.50958478e-03\n 8.54255535e-03 2.54316066e-02 8.54255535e-03 -8.64301128e-02\n 1.51106161e-01 4.50958478e-03 -1.01005738e-01 1.44264562e-01\n -6.91049695e-01 -3.02918485e-01 8.54255535e-03 6.79148150e-03\n 8.54255535e-03 4.50958478e-03 2.39619329e-02 4.50958478e-03\n 1.14798596e-01 -3.16174731e-03 -9.99001664e-04 -9.78082911e-03\n -7.60021362e-02 -6.40263766e-02 1.11032104e-01 7.35017940e-02\n -9.34986509e-02 4.50958478e-03 4.50958478e-03 -4.53212893e-02\n 1.00188734e-02 -1.41313404e-01 -2.88905654e-02 7.22141466e-02\n 2.36460955e-02 -1.03629556e-02 -5.83247115e-02 1.09202475e-01\n 4.50958478e-03 6.32653771e-02 9.45525884e-02 7.22096495e-02\n -7.83086651e-02 1.01814297e-03 -8.28428737e-02 -1.01478211e-01\n -9.99001664e-04 4.50958478e-03 
6.79148150e-03 -7.05849368e-02\n 1.24956578e-02 -2.85422639e-02 2.32901917e-02 -5.95858660e-01\n 8.14746537e-03 3.39026345e-02 -3.14028706e-02 -2.52561480e-01\n 5.90923641e-02 1.24956578e-02 -9.99001664e-04 -9.99001664e-04\n 1.12698241e-01 4.50958478e-03 1.35772062e-02 1.80877623e-02\n 7.43105589e-02 1.83081796e-01 4.50958478e-03 -1.86725232e-01\n 3.25340770e-02 6.79148150e-03 -2.72458163e-03 8.54255535e-03\n 2.10423690e-02 -9.99001664e-04 -3.08087543e-01 -1.59912333e-01\n 2.60006459e-02 2.03420100e-02 4.50958478e-03 -2.15508202e-02\n -1.67382480e-01 4.50958478e-03 6.79148150e-03 -9.08142586e-02\n 1.48712005e-01 -1.91325267e-01 -4.68412715e-01 6.79148150e-03\n 6.79148150e-03 2.03420100e-02 1.73316613e-01 6.79148150e-03\n 1.00188734e-02 1.26648589e-01 4.50958478e-03 -4.74632158e-01\n 5.61827222e-01 -1.99858114e-01 2.19020347e-02 8.74390811e-02\n -1.99858114e-01]]\nAccuracy of: 0.5271699876789735\nWall time: 775 ms\n" ], [ "%%time\nfrom sklearn.metrics import accuracy_score\nx_train_ar=X_train.values\ny_target_ar=np.asarray(y_train)\nx_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar)\nlr = LogisticRegression(.1,50,'steepest',10,'L2')\nlr.fit(x_train_ar,y_target_ar)\nprint(lr)\n\nyhat = lr.predict(x_train_ar)\nsteep=accuracy_score(y_target_ar,yhat)\nprint('Accuracy of: ',accuracy_score(y_target_ar,yhat))", "MultiClass Logistic Regression Object with coefficients:\n[[-1.08775915e+01 -8.61005274e+49 -6.62810348e+48 2.32944053e+48\n -5.19314015e+48 1.09458477e+49 2.41521839e+48 1.19072508e+49\n 1.54189677e+49 1.10754533e+49 1.26718882e+49 1.65632622e+48\n -1.53886390e+47 -1.60409384e+49 9.49262371e+47 1.86190870e+47\n 5.06119821e+47 -5.61356108e+48 1.27745844e+48 -3.98952472e+48\n -1.57326720e+48 1.29521165e+48 1.28210290e+48 -5.84627953e+48\n 2.24422309e+48 -3.32593631e+48 -9.73111463e+48 1.45856164e+48\n 1.20252352e+49 9.12937152e+47 9.67481317e+47 -4.97424142e+48\n -5.13528023e+47 1.32570335e+48 -4.01400713e+48 4.28187233e+48\n 9.20664967e+47 1.27420338e+48 9.38318469e+47 9.00164502e+47\n -2.33759589e+48 -1.07304739e+44 1.54334548e+48 -1.51384633e+48\n -5.43686687e+48 8.97124992e+47 -5.16372967e+48 1.28812460e+48\n -6.23811447e+48 -8.78475324e+48 -2.32588454e+48 -3.48374528e+48\n 1.65312239e+48 8.99810186e+47 1.05881053e+48 9.34522716e+47\n 5.09886405e+48 4.78205441e+48 0.00000000e+00 -1.00077394e+48\n -1.74921832e+48 3.53613816e+47 -9.98501236e+47 4.10936746e+48\n -3.83986585e+48 9.35108864e+47 9.35808341e+47 6.20356340e+48\n 1.82997356e+48 -9.16981908e+48 3.18181672e+48 5.29216734e+47\n 1.16846635e+48 -3.75440560e+48 -7.86085800e+48 6.06888442e+48\n 8.56123355e+47 1.31623949e+48 -1.62423729e+49 -1.43862901e+49\n -2.09241087e+48 -2.15445825e+48 -7.52104774e+48 -1.12422761e+48\n 0.00000000e+00 9.47484200e+47 1.28909927e+48 -3.59974793e+48\n -3.30754689e+48 -5.60606423e+48 3.03365429e+48 -4.99214766e+48\n 3.42143752e+48 1.78968360e+48 -1.97733243e+48 -2.33813809e+48\n -9.58495762e+48 -3.29386373e+48 0.00000000e+00 0.00000000e+00\n -5.38058311e+48 9.58140237e+47 -4.20899877e+46 3.27692576e+48\n -2.94237805e+48 2.42388567e+48 8.70218976e+47 -1.52046348e+48\n -4.45267318e+48 1.30012226e+48 -3.27049692e+48 1.61768760e+48\n -6.47318141e+48 0.00000000e+00 -2.31080335e+48 1.61754505e+48\n 4.19468831e+47 3.64778638e+48 9.54009145e+47 -6.26300883e+48\n -6.56909785e+48 9.40629429e+47 -3.53343594e+48 -3.61002425e+48\n 1.24266342e+48 -8.23637827e+48 -4.04030610e+48 -3.50531763e+48\n -3.47312857e+48 -6.94464299e+48 5.15029694e+47 1.33286760e+48\n 1.89414250e+48 -1.74947543e+48 
-5.84459194e+48 -2.33876851e+49\n 2.54290369e+49 -5.65886866e+48 1.40691497e+49 -1.09404377e+49\n -5.65886866e+48]\n [-5.32244964e+00 5.63239367e+49 -1.11194874e+49 -3.46966629e+48\n 2.64132330e+48 1.00676763e+49 -1.61231997e+48 5.72397291e+49\n 4.14981217e+49 1.11340531e+49 -1.06984531e+49 3.01600762e+46\n -1.91534700e+48 -3.83656996e+46 2.24935595e+48 -8.14656263e+47\n 4.27537906e+48 6.57623023e+48 -6.51395170e+48 2.01141133e+48\n 1.15822349e+48 3.17885401e+48 -1.56350446e+48 2.25106510e+48\n 4.43509695e+46 -2.57277466e+46 1.92831436e+49 9.75586422e+48\n -2.35721559e+49 -4.53358677e+48 2.25659288e+48 -4.75884231e+46\n 2.72076301e+48 3.20976555e+48 9.65002657e+48 2.30262938e+48\n -4.50972598e+48 3.20202601e+48 2.25858937e+48 2.26115890e+48\n 3.89605568e+48 3.85612032e+48 3.93559185e+48 1.78511396e+48\n 7.87722331e+48 2.26719406e+48 6.67133856e+48 -4.62793009e+48\n 3.80587876e+48 2.72878157e+48 -3.97705842e+48 3.17305601e+48\n -2.49527344e+46 2.24980638e+48 -1.35355821e+49 -4.62935683e+48\n 3.47837277e+48 -1.00849708e+49 0.00000000e+00 -1.89667948e+47\n 3.94132482e+48 -4.58405966e+48 7.41948274e+48 2.51950613e+48\n -3.33990963e+48 -4.63063270e+48 -4.66313377e+48 3.32898398e+48\n -2.28727361e+48 -3.22528496e+48 6.77055987e+48 -2.68757787e+47\n 8.76147881e+47 2.70217213e+48 -2.89619368e+48 -2.63417377e+47\n 2.26779744e+48 4.99814974e+47 -6.20297730e+48 -7.74137909e+48\n -4.06654350e+48 8.97068295e+48 -1.14753238e+49 2.04110145e+48\n 0.00000000e+00 -4.66885193e+48 -1.68146052e+48 4.76471360e+47\n 2.69645817e+48 1.04709699e+48 1.97693386e+48 9.74425175e+47\n 1.16137583e+49 4.10191034e+48 6.77693354e+48 -1.64078539e+47\n -1.06654565e+48 -3.10284998e+47 0.00000000e+00 0.00000000e+00\n -7.31742900e+48 2.22220016e+48 -1.77501390e+48 -4.18318372e+48\n 1.12633295e+48 6.21447427e+48 2.26360394e+48 5.87691514e+48\n -4.62135098e+46 3.17725693e+48 5.07213203e+48 -8.03265701e+48\n 2.07282160e+48 0.00000000e+00 -4.03764336e+48 3.61936557e+48\n 3.95468901e+48 -2.54473254e+47 2.23308880e+48 -4.76746134e+48\n -2.96018090e+47 2.25329135e+48 -1.76744423e+48 4.42116010e+48\n 1.86725043e+48 2.91022078e+48 1.86341735e+48 3.07958589e+48\n -1.71093081e+48 3.34813185e+48 7.91877890e+47 -1.68691209e+48\n -5.76138220e+48 1.56680318e+49 2.26392058e+48 -1.66223351e+50\n 2.23470969e+50 -1.15474183e+50 3.54090310e+49 2.91784411e+49\n -1.15474183e+50]\n [-1.48192077e+01 -1.46555401e+47 2.95338781e+47 8.29013666e+46\n 3.26246748e+46 1.84718023e+47 1.02966064e+47 -6.09483210e+46\n 4.66071928e+47 1.62764238e+47 -1.40605781e+47 6.91342586e+45\n 8.53803169e+45 4.02437684e+46 3.40151815e+45 9.46376494e+45\n 1.06861551e+46 1.31731101e+46 4.66599246e+45 7.83310808e+45\n 5.99730213e+45 5.05809493e+45 5.32809812e+45 2.49831801e+45\n 8.49756955e+45 8.49015837e+45 1.03452203e+47 1.43948273e+47\n -2.42809971e+47 3.71088641e+45 3.90540397e+45 1.89806411e+46\n 8.90056715e+45 5.63649078e+45 3.31842428e+46 2.74862504e+46\n 3.80246990e+45 5.25262233e+45 3.46828799e+45 3.23464396e+45\n 6.09688665e+45 1.72104456e+46 6.03479652e+45 1.63227473e+46\n 2.59888524e+46 3.81056596e+45 1.04099710e+47 4.11592445e+46\n 6.13684690e+45 8.85448475e+45 6.61028476e+45 5.38702190e+45\n 5.32999767e+45 3.80316737e+45 -7.89363888e+46 3.40506257e+45\n 3.50723040e+46 3.75018707e+46 0.00000000e+00 1.19793863e+47\n 1.88770603e+46 2.39316673e+46 1.13106206e+46 1.11455616e+47\n 1.51674535e+46 2.60469810e+45 1.63652197e+45 3.54025323e+46\n 6.57305991e+45 7.87986939e+45 1.03450542e+47 2.39061559e+46\n 1.52244295e+46 2.46447105e+46 2.04919719e+46 -2.18316713e+47\n 
3.67027900e+45 4.34094221e+46 2.85338890e+46 3.34520235e+46\n 2.58911776e+46 7.04006194e+46 2.47115658e+46 -2.68636187e+47\n 0.00000000e+00 1.26588185e+45 3.09984363e+45 5.94580161e+46\n 8.41683310e+45 1.32499198e+46 2.62775142e+46 5.62864044e+45\n 8.93124447e+46 -1.07401186e+47 2.76559845e+46 5.76668603e+45\n 1.01510291e+47 5.38301380e+45 0.00000000e+00 0.00000000e+00\n -5.27512456e+47 3.08486130e+45 9.05430988e+45 1.08558240e+46\n 3.95569149e+46 1.35366736e+46 4.03227374e+45 1.92381505e+46\n 1.93379055e+46 4.94497947e+45 4.85140457e+46 4.33026263e+45\n 1.30282908e+46 0.00000000e+00 5.74416839e+45 4.62517550e+45\n 1.63549102e+46 1.19393207e+46 3.66826405e+45 2.65928170e+46\n 1.06032672e+46 3.84734483e+45 4.76760144e+45 2.80166169e+46\n 2.89857054e+46 3.11334397e+45 8.42212366e+45 4.10875650e+45\n 4.44628932e+45 1.29167773e+46 1.22956349e+46 4.39823124e+45\n 6.00175905e+45 9.26796614e+46 3.08668677e+45 -8.84327980e+46\n 4.92020330e+47 -7.18441685e+47 1.65224702e+47 2.36779589e+47\n -7.18441685e+47]\n [-1.17249278e+01 2.02457437e+49 1.41654217e+49 2.99946376e+48\n -1.29651627e+48 -9.75249418e+48 8.69170348e+48 -1.24608200e+49\n 1.57253941e+49 -1.24905289e+49 5.01474736e+48 1.26659823e+48\n 2.08611726e+48 1.66276318e+48 -5.98860309e+48 2.11331533e+48\n -4.46687016e+48 2.80886150e+48 1.13250833e+48 -1.38381089e+48\n 1.48931489e+48 -3.76011240e+48 -3.72358346e+48 7.77697462e+47\n -6.46640705e+48 -9.16002885e+47 -6.33478508e+47 -4.47415284e+48\n 6.61077775e+48 7.50104973e+47 -5.99331512e+48 1.66119577e+48\n 1.84496346e+48 1.05092117e+48 -4.14586475e+48 -3.37087450e+47\n 7.37452869e+47 1.05539768e+48 7.21714742e+47 7.33632863e+47\n 1.28726645e+48 -2.08237662e+48 -6.49277775e+48 6.52330402e+47\n -1.24605776e+47 7.45390060e+47 -4.77683959e+48 -4.66855234e+47\n -2.58924294e+48 -9.52621970e+47 1.33284941e+48 1.05915622e+48\n 1.27601969e+48 -5.99493623e+48 2.75295204e+48 8.14815842e+47\n -6.99492104e+48 9.81918005e+46 0.00000000e+00 1.61295263e+48\n 2.67574362e+48 -1.69638646e+48 -3.65150104e+48 -6.70168183e+47\n 4.21085830e+48 8.17718807e+47 8.31400453e+47 -4.23126574e+48\n 1.53492208e+48 2.70037830e+48 4.82347273e+47 3.61116156e+47\n -1.17269897e+48 -2.92900471e+48 2.70245767e+48 7.34776002e+47\n 7.35650736e+47 1.02099669e+48 8.42270899e+48 8.90660111e+48\n -2.25313853e+48 -8.74569227e+48 4.76761435e+48 -2.91462161e+48\n 0.00000000e+00 8.34925988e+47 -3.70380319e+48 1.21180222e+48\n -9.04223927e+47 2.82901689e+48 -1.54636794e+48 -1.82022477e+48\n -3.72590994e+48 1.92650212e+48 1.81054137e+48 1.40425623e+48\n 4.37970196e+48 2.00144343e+48 0.00000000e+00 0.00000000e+00\n 6.76295520e+48 -5.98429413e+48 1.99170250e+48 -1.19800635e+48\n -1.03006479e+48 -1.55985338e+48 7.53699716e+47 -1.34131375e+48\n 2.53309015e+47 1.08457150e+48 -1.83331061e+44 1.43492488e+48\n 3.09901279e+48 0.00000000e+00 1.37699793e+48 1.41003700e+48\n -1.82726114e+48 -4.01503589e+48 7.56516878e+47 2.42821716e+48\n 2.72756455e+48 -5.98672167e+48 1.16207516e+48 4.43804294e+48\n 3.45034751e+48 1.18446585e+48 -1.31252187e+48 1.13146521e+48\n 1.10251537e+48 2.98064016e+48 1.58963966e+48 -3.70754321e+48\n 1.56842336e+48 -5.12494141e+47 7.60099778e+47 7.93561989e+49\n -9.71139428e+49 3.82726580e+49 -1.24326868e+49 -8.97206819e+48\n 3.82726580e+49]\n [-1.18078403e+01 2.83548325e+48 4.07245913e+48 1.84462379e+48\n -3.10296669e+48 1.91695500e+49 -1.14971047e+48 8.44053183e+48\n 7.97029090e+49 1.87932088e+49 -1.17193657e+49 1.37300542e+48\n 2.00146412e+48 9.26732451e+48 7.56324949e+47 2.13455440e+48\n 2.25187931e+48 1.07805529e+48 
...(remaining coefficient rows omitted; magnitudes around 1e+48 indicate this run diverged)... ]]\nAccuracy of: 0.08582232230105791\nWall time: 16.2 s\n"
], [ "%%time\nfrom sklearn.metrics import accuracy_score\n# Steepest-descent run. The positional args of this custom LogisticRegression are assumed to be\n# (learning rate, iterations, solver, regularization strength, penalty), judging from how they vary below.\nx_train_ar=X_train.values\ny_target_ar=np.asarray(y_train)\nx_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar)\nlr = LogisticRegression(.1,50,'steepest',.0001,'L2')\nlr.fit(x_train_ar,y_target_ar)\nprint(lr)\n\nyhat = lr.predict(x_train_ar)\nsteep1=accuracy_score(y_target_ar,yhat)\nprint('Accuracy of: ',accuracy_score(y_target_ar,yhat))", "MultiClass Logistic Regression Object with coefficients:\n[[ ...coefficient matrix omitted for brevity... ]]\nAccuracy of: 0.5500701023919786\nWall time: 12.6 s\n"
], [ "%%time\nfrom sklearn.metrics import accuracy_score\n# Same pipeline with the BFGS solver.\nx_train_ar=X_train.values\ny_target_ar=np.asarray(y_train)\nx_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar)\nlr = LogisticRegression(.1,10,'BFGS',.0001,'L2')\nlr.fit(x_train_ar,y_target_ar)\nprint(lr)\n\nyhat = lr.predict(x_train_ar)\nBFGS1=accuracy_score(y_target_ar,yhat)\nprint('Accuracy of: ',accuracy_score(y_target_ar,yhat))", "MultiClass Logistic Regression Object with coefficients:\n[[ ...coefficient matrix omitted for brevity... ]]\nAccuracy of: 0.5515996091260569\nWall time: 2.99 s\n"
], [ "%%time\nfrom sklearn.metrics import accuracy_score\n# BFGS run with the fourth argument raised from .0001 to 10 (assumed to be the regularization strength).\nx_train_ar=X_train.values\ny_target_ar=np.asarray(y_train)\nx_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar)\nlr = LogisticRegression(.1,10,'BFGS',10,'L2')\nlr.fit(x_train_ar,y_target_ar)\nprint(lr)\n\nyhat = lr.predict(x_train_ar)\nBFGS2=accuracy_score(y_target_ar,yhat)\nprint('Accuracy of: ',accuracy_score(y_target_ar,yhat))", "MultiClass Logistic Regression Object with coefficients:\n[[ ...coefficient matrix omitted for brevity... ]]\nAccuracy of: 0.5231337893529336\nWall time: 19 s\n"
], [ "%%time\nfrom sklearn.metrics import accuracy_score\n# Same pipeline with the Newton solver and an L1 penalty.\nx_train_ar=X_train.values\ny_target_ar=np.asarray(y_train)\nx_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar)\nlr = LogisticRegression(.1,3,'newton',.0001,'L1')\nlr.fit(x_train_ar,y_target_ar)\nprint(lr)\n\nyhat = lr.predict(x_train_ar)\nnewton1=accuracy_score(y_target_ar,yhat)\nprint('Accuracy of: ',accuracy_score(y_target_ar,yhat))", "MultiClass Logistic Regression Object with coefficients:\n[[ ...coefficient matrix omitted for brevity... ]]\nAccuracy of: 0.5344776309640141\nWall time: 2min 26s\n"
], [ "%%time\nfrom sklearn.metrics import accuracy_score\n# Newton run with the first argument lowered from .1 to .01 (assumed to be the step size / learning rate).\nx_train_ar=X_train.values\ny_target_ar=np.asarray(y_train)\nx_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar)\nlr = LogisticRegression(.01,3,'newton',.0001,'L1')\nlr.fit(x_train_ar,y_target_ar)\nprint(lr)\n\nyhat = lr.predict(x_train_ar)\nnewton2=accuracy_score(y_target_ar,yhat)\nprint('Accuracy of: ',accuracy_score(y_target_ar,yhat))", "MultiClass Logistic Regression Object with coefficients:\n[[ ...coefficient matrix omitted for brevity... ]]\nAccuracy of: 
0.5340527679823257\nWall time: 2min 25s\n" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom textwrap import wrap\npara=['1-ata=.01,iter=1000, type= stochastic, C=.0001,L1L2 ','2-ata=.1,iter=1000, type= stochastic, C=.01,L1L2 ','ata=.1,iter=50, type= steepest, C=10,L2 ','ata=.1,iter=50, type= steepest, C=.0001,L2 ','ata=.1,iter=10,type=BFGS,C=.001,L2','ata=.1,iter=10,type=BFGS,C=10,L2','ata=.01, iter=3, type=newton, C=.0001, L2','ata=.01,iter=10,type=newton,C=.0001,L1' ]\nacc=[stoc1,stoc2,steep,steep1,BFGS1, BFGS2,newton1,newton2]\n\nplt.subplots(figsize=(17, 7))\nx=[0,1,2,3,4,5,6,7]\nz=np.polyfit(x, acc, 1)\nlabels = [ '\\n'.join(wrap(l, 18)) for l in para ]\nplt.xlabel('Optimization', fontweight='bold')\nplt.ylabel('Accuracy', fontweight='bold')\np = np.poly1d(z)\n\nplt.bar(labels,acc) ", "_____no_output_____" ] ], [ [ "Since the Newton technique takes a long time (over 2 minutes) and is not the best optimization technique for logistic regression here, we decided not to include it in the following graph.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom textwrap import wrap\n# set width of bar\nbarWidth = 0.45\n\n# set height of bar\nbars1 = [stoc1,stoc2,steep,steep1,BFGS1, BFGS2]\nbars2 = [.013,.0129,.27,.21,.05,.32]\n\n# Set position of bar on X axis\nr1 = np.arange(len(bars1))\nr2 = [x + barWidth for x in r1]\n#r3 = [x + barWidth for x in r2]\nplt.subplots(figsize=(17, 7)) \n# Make the plot\nplt.bar(r1, bars1, color='#7f6d5f', width=barWidth, edgecolor='white', label='accuracy')\nplt.bar(r2, bars2, color='#557f2d', width=barWidth, edgecolor='white', label='time (m)')\n\nplt.plot(x,p(x),color='black')\n# Add xticks on the middle of the group bars\nplt.xlabel('Optimization', fontweight='bold')\n\nplt.xticks([r + barWidth for r in range(len(bars1))], labels)\n \n# Create legend & Show graphic\nplt.legend()\n\nplt.show()", "_____no_output_____" ] ], [ [ "### Observations:\n\n1- The figures above show the accuracy and time consumption of the various optimization techniques; BFGS with 10 iterations and L2 regularization has the highest accuracy with low delay.\n\n2- Stochastic gradient descent gives different accuracy values across runs with the same parameters, which suggests the logistic regression gets stuck in different local minima depending on the initialization and the direction of the gradient steps.\n\n3- Steepest descent and most of the other optimization techniques returned better accuracy for a lower C, which means stronger regularization prevents overfitting the data and in return gives better performance.\n\n4- BFGS did better than the Newton (Hessian) method in terms of accuracy and time consumption, which reflects how computationally heavy the Hessian approach is, especially in calculating the second-order derivatives and inverting the Hessian matrix (see the brief note below).\n\n5- The output classes had the counts per class shown in the figure in the data understanding section above; the low-count classes were all merged into one class because there is not enough data to train a classifier on them.\n\n6- The data is preprocessed so that its distribution has mean 0 and standard deviation 1: each value in the dataset has the sample mean subtracted and is then divided by the standard deviation of the whole dataset.", "_____no_output_____" ], [ "Based on the observations above, the best logistic regression optimization technique is BFGS. 
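As a quick reminder of why (a textbook quasi-Newton sketch in our own notation, not a result computed in this notebook): instead of forming and inverting the Hessian the way Newton's method does, BFGS maintains an approximation $H_k$ to the inverse Hessian that is updated from gradient differences only,\n\n$$H_{k+1} = (I - \\rho_k s_k y_k^\\top)\\, H_k\\, (I - \\rho_k y_k s_k^\\top) + \\rho_k s_k s_k^\\top, \\qquad \\rho_k = \\frac{1}{y_k^\\top s_k}, \\quad s_k = w_{k+1} - w_k, \\quad y_k = \\nabla f(w_{k+1}) - \\nabla f(w_k),$$\n\nwhich avoids the expensive second-derivative and matrix-inversion work noted in observation 4.\n\n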
So we will use BFGS to compare with LBFGS from sklearn. ", "_____no_output_____" ] ], [ [ "%%time\nfrom sklearn.linear_model import LogisticRegression\n\nlr_sk = LogisticRegression(solver='lbfgs',n_jobs=2,C=.0001, max_iter=10) \nx_train_ar=X_train.values\ny_target_ar=np.asarray(y_train)\nx_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar)\nlr_sk.fit(x_train_ar,y_target_ar)\nprint(np.hstack((lr_sk.intercept_[:,np.newaxis],lr_sk.coef_)))\nyhat = lr_sk.predict(x_train_ar)\nnewtsk=accuracy_score(y_target_ar,yhat)\nprint('Accuracy of: ',accuracy_score(y_target_ar,yhat))", "[[-1.84196744e+00 1.49039888e-01 1.13463874e-02 -4.12187102e-03\n 8.50494284e-03 -1.36103300e-02 -4.89494661e-03 -1.87956451e-02\n -2.24026834e-02 -1.37318867e-02 -1.23513054e-02 -2.62926188e-03\n -5.18760770e-04 2.38940333e-02 -1.55745575e-03 -9.18403089e-04\n -1.17856958e-03 9.07337599e-03 -2.73376765e-03 6.72040617e-03\n 2.04240348e-03 -2.62304251e-03 -2.80724425e-03 7.83106434e-03\n -4.75150469e-03 4.11670744e-03 1.12704571e-02 -3.25379392e-03\n -1.13708593e-02 -1.85296118e-03 -1.39241837e-03 5.82135287e-03\n 4.16825363e-04 -2.36762190e-03 3.68362410e-03 -8.58637115e-03\n -1.86334601e-03 -2.84549026e-03 -1.68656741e-03 -1.98402432e-03\n 3.43241403e-03 -3.75857571e-03 -3.82977017e-03 1.40594044e-03\n 8.17483940e-03 -2.07782338e-03 3.16467373e-03 -4.61575098e-03\n 9.08503036e-03 1.38607691e-02 3.67857558e-03 5.72953227e-03\n -2.73462521e-03 -1.89812110e-03 -1.14834710e-03 -1.75799907e-03\n -8.31136797e-03 -7.92720278e-03 0.00000000e+00 7.89029571e-04\n 3.03008401e-03 -1.79420707e-03 1.47830771e-03 -5.88256179e-03\n 5.92976561e-03 -1.76479674e-03 -1.72974951e-03 -8.39550949e-03\n -3.82599177e-03 1.48910213e-02 -3.52083501e-03 -1.59185909e-03\n -2.16198184e-03 4.72300757e-03 1.26651994e-02 -9.72600147e-03\n -2.43712021e-03 -5.08367392e-03 2.59496201e-02 2.33329702e-02\n 1.74168017e-03 7.69852290e-04 1.22208949e-02 1.81122380e-03\n 0.00000000e+00 -1.64485383e-03 -2.73580064e-03 3.14892441e-03\n 4.82261071e-03 9.27216497e-03 -5.33186578e-03 5.98407309e-03\n -3.39340015e-03 -1.18506527e-03 2.12122259e-03 3.23596921e-03\n 1.25430797e-02 4.80078210e-03 0.00000000e+00 0.00000000e+00\n 6.22141358e-03 -1.53959038e-03 5.76687137e-04 -5.70524206e-03\n 3.45665694e-03 -4.07478117e-03 -2.30899846e-03 2.74262376e-03\n 6.40568933e-03 -2.72926843e-03 4.61927369e-03 -3.05340849e-03\n 1.01875380e-02 0.00000000e+00 3.82743328e-03 -3.06276984e-03\n -9.55630654e-04 -6.55145558e-03 -1.55913725e-03 8.98399922e-03\n 1.07309208e-02 -1.61336241e-03 4.10728093e-03 4.94316660e-03\n -2.84195897e-03 1.22578597e-02 5.74968145e-03 4.77938163e-03\n 5.71324923e-03 1.06181631e-02 -1.84473047e-03 -2.34188277e-03\n -3.32680121e-03 4.06868946e-03 7.88676350e-03 3.97878864e-02\n -4.28071667e-02 8.82847036e-03 -2.08631570e-02 1.59788067e-02\n 8.82847036e-03]\n [-7.84733550e-01 -4.27030053e-02 2.08316273e-02 3.46800391e-03\n -4.49017286e-03 -6.12383842e-03 -6.37660843e-04 -7.69003591e-02\n -4.10993218e-02 -8.86035313e-03 3.57037236e-03 2.09306674e-03\n 2.20372401e-03 -8.37499537e-04 -2.49615030e-03 3.80232676e-03\n -6.92770996e-03 -7.35293717e-03 8.56959070e-03 -9.88471153e-04\n 6.05550138e-04 -3.88783241e-03 4.00850477e-03 -2.65620197e-03\n 2.86842058e-03 2.01426603e-03 -1.75050812e-02 -7.50190574e-03\n 1.65839285e-02 7.53203438e-03 -2.53705959e-03 4.38728710e-03\n -2.41313263e-03 -3.23305635e-03 -5.41666783e-03 -1.91529215e-03\n 7.82100189e-03 -3.34928985e-03 -2.36306361e-03 -2.47028803e-03\n -4.41914954e-03 1.95631926e-04 -3.90223062e-03 
-2.97095781e-04\n -7.75590566e-03 -2.35373242e-03 -3.39679793e-03 1.00190394e-02\n -5.59224958e-03 -2.18926819e-03 6.39546899e-03 -3.73123821e-03\n 1.50092641e-03 -2.73599307e-03 1.55038061e-02 5.33585052e-03\n -1.73812154e-03 1.05802583e-02 0.00000000e+00 1.84622237e-03\n -5.17222316e-03 8.27212873e-03 -8.91141589e-03 -4.17367550e-03\n 4.28787220e-03 5.47304242e-03 4.94028288e-03 -5.61216492e-03\n 4.94008714e-03 7.07592317e-04 -5.93079855e-03 2.98219847e-03\n 1.01916187e-03 -3.61604467e-03 1.78610998e-03 1.38152710e-03\n -2.46710665e-03 2.81537801e-03 1.10234018e-04 2.46009086e-03\n 6.00569590e-03 -9.02294046e-03 1.11372408e-02 -2.58559962e-03\n 0.00000000e+00 4.82751537e-03 2.60626320e-03 1.38708120e-03\n -2.84726457e-03 -3.81394330e-04 -1.94333450e-03 -1.75357877e-03\n -1.33126558e-02 -2.79820972e-03 -6.65393909e-03 -3.90428024e-04\n 8.85172909e-04 -1.60924655e-03 0.00000000e+00 0.00000000e+00\n 7.82455931e-03 -2.88575118e-03 4.17193277e-03 5.58975779e-03\n -1.47624969e-03 -7.46429248e-03 -2.34439544e-03 -7.31226468e-03\n -3.34921951e-05 -3.64275526e-03 -8.36462847e-03 9.28553255e-03\n -1.65982171e-03 0.00000000e+00 5.80968295e-03 -7.32144566e-03\n -3.77174590e-03 6.12721831e-04 -2.72676309e-03 5.32940829e-03\n -4.27451681e-04 -2.50288851e-03 1.44031551e-03 -4.73173895e-03\n -1.80460772e-03 -6.61476723e-03 -3.09645430e-03 -4.80112259e-03\n 2.26345129e-03 -3.17682229e-03 -1.27249632e-03 2.43895532e-03\n 8.79144839e-03 -2.08844410e-02 -2.33539427e-03 1.86239300e-01\n -2.34582705e-01 1.01562509e-01 -3.25484032e-02 -2.42537862e-02\n 1.01562509e-01]\n [-6.67088374e+00 2.26173424e-03 -3.96237589e-04 -6.30273502e-04\n -2.48617299e-04 4.43197862e-04 -5.96867946e-04 3.68892777e-04\n -2.18402212e-03 4.80666980e-04 -5.16796165e-03 2.56891359e-04\n 2.01153046e-04 9.55333342e-04 6.06935214e-05 1.86679088e-04\n 4.39405678e-04 3.20423024e-04 1.14801270e-04 2.05925125e-04\n 2.13272128e-04 1.56214701e-04 2.47835117e-04 1.25123601e-04\n 3.89062801e-04 4.30695462e-04 1.65894433e-04 -8.02338681e-05\n -3.28646557e-03 7.77354906e-05 2.12551763e-04 9.11093021e-04\n 2.95621495e-04 3.50494179e-04 9.48502278e-04 5.44843255e-04\n 1.44440754e-04 2.34917363e-04 9.07347351e-05 1.06956164e-04\n 1.10459379e-04 7.42663794e-04 2.42701290e-04 7.07582835e-04\n 6.84631386e-04 1.80027608e-04 4.30770826e-03 1.55588225e-03\n 8.74500910e-05 3.83543924e-04 1.32204713e-04 7.86523079e-05\n -1.32671923e-06 2.18503289e-04 -4.89830182e-04 5.13309630e-06\n -9.82541363e-05 -4.11481430e-04 0.00000000e+00 -8.23603644e-04\n -1.21229535e-04 1.02272110e-03 1.60850403e-04 -1.08148437e-03\n 9.70788597e-05 -6.47712808e-05 -3.17429301e-06 -1.10827946e-03\n 1.17091703e-04 -3.46938054e-05 -6.41127933e-04 9.08944692e-04\n 2.11576374e-04 8.81582037e-04 7.03950508e-06 8.77733799e-04\n 1.99880529e-04 1.54535950e-03 -1.48603563e-04 -2.60395763e-04\n 1.02223994e-03 2.63658850e-03 -3.28265758e-04 8.95386239e-04\n 0.00000000e+00 4.25969627e-05 6.91750953e-05 2.28299350e-03\n 4.00577513e-04 5.19794139e-04 -2.19037725e-04 1.17234259e-04\n -6.04762491e-04 5.64251246e-04 1.13208342e-03 -7.85472575e-06\n -2.43461615e-04 -2.53906368e-05 0.00000000e+00 0.00000000e+00\n 2.76678553e-03 -1.56101155e-05 7.31746945e-05 -1.43238415e-04\n -3.45426670e-04 1.73765648e-05 2.43769111e-04 -1.63621602e-04\n 1.80553762e-04 -1.61917464e-05 -5.32305314e-04 -2.37759135e-05\n -2.76827417e-05 0.00000000e+00 -5.84650953e-06 -9.95069130e-05\n 7.60930451e-05 -1.61556315e-04 -4.50758347e-05 -3.17126761e-05\n -1.00531237e-04 1.49706428e-04 1.58313873e-04 -2.39761683e-04\n 
-3.64917758e-04 -6.84508208e-05 3.84330067e-04 -3.70182062e-05\n -1.89200179e-05 -8.58204727e-05 -9.05339476e-05 1.80847231e-04\n -8.73259759e-05 -1.34083262e-03 9.13491505e-06 -1.40775825e-03\n -1.08537178e-03 4.26563040e-03 -6.42081450e-04 -1.74575449e-03\n 4.26563040e-03]\n [-2.08306037e+00 -5.18508712e-02 -2.76028135e-02 -4.52982278e-03\n 2.76676622e-03 1.19511069e-02 -1.44646504e-02 1.95279259e-02\n -3.76075474e-02 1.84065223e-02 -4.47805291e-03 -2.99146655e-03\n -3.46932328e-03 -2.24375067e-03 9.00948388e-03 -4.61332577e-03\n 7.95312283e-03 -5.94751673e-03 -1.73728438e-03 1.59703707e-03\n -3.23920834e-03 5.04043872e-03 6.04400960e-03 -1.34644879e-03\n 9.97121218e-03 5.30009678e-04 -1.36200371e-04 5.89863537e-03\n -6.86642335e-03 -1.62365029e-03 8.96520295e-03 -3.70701428e-03\n -3.82397778e-03 -2.25031449e-03 5.35603372e-03 2.78723527e-04\n -1.66352202e-03 -2.23149326e-03 -1.84448976e-03 -1.66085476e-03\n -2.86005216e-03 2.76166980e-03 1.06520047e-02 -1.94417389e-03\n -1.31056354e-03 -1.56370354e-03 5.42458360e-03 -2.14353941e-04\n 4.23579229e-03 8.34674459e-04 -2.51587661e-03 -2.32724459e-03\n -3.00646491e-03 8.95177761e-03 -3.67903046e-03 -1.11163641e-03\n 1.13768772e-02 1.07664432e-03 0.00000000e+00 -2.73004249e-03\n -4.81188276e-03 2.39452290e-03 5.54894619e-03 1.26079920e-03\n -7.43651413e-03 -1.11159582e-03 -1.03167601e-03 7.62125881e-03\n -2.93842311e-03 -3.78546120e-03 -1.90528444e-03 -1.62171146e-03\n 8.21228298e-04 4.95507610e-03 -3.96593388e-03 -1.91821743e-03\n -1.58206677e-03 -2.96577198e-03 -1.27336299e-02 -1.36965529e-02\n 4.15196170e-03 1.34665307e-02 -6.76119633e-03 4.75813776e-03\n 0.00000000e+00 -1.00615148e-03 6.04990706e-03 -3.35524329e-03\n 1.45681929e-03 -5.84685132e-03 2.52747550e-03 3.37890982e-03\n 5.47642428e-03 -4.37677453e-03 -4.55053025e-03 -1.96867589e-03\n -5.82941591e-03 -2.74861940e-03 0.00000000e+00 0.00000000e+00\n -1.06388152e-02 9.14965388e-03 -4.18247423e-03 2.51032048e-03\n 2.37581018e-03 2.43376962e-03 -1.52276914e-03 2.04458071e-03\n -3.74261206e-04 -2.07688669e-03 7.08020315e-04 -1.77557803e-03\n -5.72335083e-03 0.00000000e+00 -2.19215019e-03 -1.99809243e-03\n 2.46339686e-03 6.94835502e-03 -1.58669166e-03 -3.66958268e-03\n -4.66606291e-03 9.14609069e-03 -1.44223939e-03 -7.98465361e-03\n -6.52370993e-03 -1.34065406e-03 2.29262842e-03 -1.69050985e-03\n -1.97611363e-03 -5.72852681e-03 -2.51970995e-03 6.06673409e-03\n -2.72513737e-03 -9.51901723e-04 -1.52496751e-03 -1.25057481e-01\n 1.48452463e-01 -5.22328031e-02 1.82599388e-02 1.09484915e-02\n -5.22328031e-02]\n [-2.13366657e+00 5.12100786e-03 -2.32586337e-03 -4.21834497e-03\n 5.14194351e-03 -2.72986647e-02 1.07377662e-03 -1.11486959e-02\n -1.38260225e-01 -2.63557979e-02 1.47434050e-02 -1.68524916e-03\n -3.31109515e-03 -1.55193471e-02 -1.26088253e-03 -3.64412537e-03\n -3.94575577e-03 -8.13534097e-04 -1.97438089e-03 -2.59990717e-03\n -2.37242415e-03 -1.37609576e-03 -1.61840965e-03 -1.07289386e-03\n -2.82394865e-03 -2.95053928e-03 4.05465304e-03 6.66355260e-03\n 2.53224417e-03 -1.09805846e-03 -8.84573552e-04 -6.75653933e-03\n 2.31802767e-03 -1.30212169e-03 -3.30286874e-03 1.80274314e-03\n -1.17295076e-03 -1.51278684e-03 -1.17713240e-03 -1.03464691e-03\n -2.03602189e-03 -5.13609716e-03 -1.97347128e-03 -7.33149758e-04\n 3.36445772e-03 -1.07204297e-03 -3.38302578e-03 -9.33935401e-03\n -2.13632276e-03 -2.83005378e-03 -2.04289540e-03 -1.54967113e-03\n -2.30807403e-03 -8.47490221e-04 -3.56601864e-03 -1.66683278e-03\n -1.15832513e-02 -2.33696061e-03 0.00000000e+00 2.44679201e-03\n -2.56275792e-03 
-2.61989365e-03 -4.10163803e-03 -2.47595229e-03\n 4.22079803e-03 -1.67527649e-03 -1.56434813e-03 9.56058010e-03\n -2.58572343e-03 -5.21480881e-03 4.05348794e-03 3.83119083e-04\n -5.30804250e-03 -3.74953489e-03 -4.50068523e-03 -2.07453043e-03\n -8.36853811e-04 -6.78934484e-03 -1.00140733e-02 -8.88677431e-03\n -6.57105728e-03 -3.77515041e-03 -5.20431723e-03 2.17979341e-03\n 0.00000000e+00 -1.58513960e-03 -1.91002916e-03 2.51215619e-03\n -2.91056109e-03 -4.47638895e-03 6.39997796e-03 -2.73058476e-03\n 3.18955738e-03 8.56817752e-03 8.44662267e-04 4.78674534e-03\n -6.91749272e-03 -3.71812777e-03 0.00000000e+00 0.00000000e+00\n -5.12333996e-03 -1.43483607e-03 -2.87995386e-03 -4.58621036e-03\n -4.73320141e-04 -5.44026321e-03 -8.93991639e-04 3.48763543e-03\n -4.05335663e-03 -1.55934006e-03 -1.30688661e-03 -2.88648902e-03\n -5.56006271e-03 0.00000000e+00 -2.23103519e-03 1.56345439e-02\n -6.14857786e-03 -5.10035532e-03 -1.00086714e-03 -3.54554222e-03\n -4.82037060e-03 -1.00327798e-03 -2.12479978e-03 -5.21218158e-03\n -2.14128247e-03 -2.18169239e-03 -2.73454634e-03 -1.85796189e-03\n -1.97494090e-03 -4.75148058e-03 5.43036655e-04 -2.04047664e-03\n -2.89591504e-03 2.08003485e-03 -1.37323373e-03 9.97209801e-02\n -1.08582889e-01 2.44065290e-02 3.14278105e-03 -1.68253230e-02\n 2.44065290e-02]\n [-8.43965263e-01 -4.67726338e-02 -4.83972824e-03 7.79682334e-03\n -8.97666592e-03 2.81470238e-02 1.66476931e-02 8.41326584e-02\n 2.12827643e-01 2.33469327e-02 -9.62244279e-04 3.29672032e-03\n 3.39002533e-03 -6.69641460e-03 -3.90071946e-03 4.37953522e-03\n 4.37580368e-03 4.31134549e-03 -3.63500473e-03 -3.85591508e-03\n 2.51347766e-03 2.22380237e-03 -5.26938981e-03 -3.47819978e-03\n -4.83080708e-03 -4.47209326e-03 5.86073374e-03 1.00710754e-03\n -4.35566425e-03 -3.72163405e-03 -4.77104528e-03 -3.58254533e-04\n 3.32535237e-03 6.73449963e-03 1.03161793e-03 7.95620414e-03\n -3.76976293e-03 8.13144251e-03 5.92060454e-03 5.98016708e-03\n 5.13165050e-03 4.92123936e-03 -1.07503090e-03 1.80635737e-03\n -7.64265298e-04 5.81768695e-03 -5.95590790e-04 2.22818719e-03\n -5.13905074e-03 -8.99495410e-03 -6.39664078e-03 1.43740551e-03\n 5.44541661e-03 -3.77198298e-03 -8.50515804e-03 -2.13292997e-03\n 8.37038694e-03 -3.57343102e-03 0.00000000e+00 -1.41901420e-03\n 8.34581498e-03 -6.91276889e-03 5.38887333e-03 1.04793287e-02\n -7.31585807e-03 -2.21147087e-03 -1.92823793e-03 -8.24706503e-04\n 2.97696359e-03 -8.14770669e-03 8.14242831e-03 -6.97220975e-05\n 3.81652965e-03 -1.86398251e-03 -6.90302899e-03 1.03166164e-02\n 6.04103835e-03 9.95037294e-03 -6.31548450e-03 -5.96405190e-03\n -5.94552364e-03 4.34681845e-04 -1.29013539e-02 -4.12960254e-03\n 0.00000000e+00 -1.94462680e-03 -3.98154140e-03 -2.86198277e-03\n -3.87512890e-04 3.86691136e-04 -8.14494619e-04 -5.19185214e-03\n 9.21705313e-03 6.61604836e-04 8.19979921e-03 -5.48813944e-03\n -2.17981467e-03 2.00851418e-03 0.00000000e+00 0.00000000e+00\n -1.43010347e-03 -3.27423351e-03 7.57279731e-04 3.27847124e-04\n -3.34167919e-03 1.22187846e-02 5.68726283e-03 3.22415545e-04\n -1.83001044e-03 8.10079969e-03 4.55313299e-03 -3.87677377e-03\n 1.81455465e-03 0.00000000e+00 -5.95789096e-03 -3.64092839e-03\n 7.50861816e-03 3.33306288e-03 5.42864611e-03 -7.60801621e-03\n -1.62891361e-03 -4.40356457e-03 -3.13335201e-03 1.10053425e-02\n 1.13264752e-02 -2.95070741e-03 -1.93215535e-03 2.84256565e-03\n -4.13562702e-03 2.23110853e-03 4.29635318e-03 -4.34880884e-03\n -1.35511000e-03 1.75828595e-02 -3.19278204e-03 -2.02084763e-01\n 2.51773952e-01 -1.05330861e-01 3.80709950e-02 2.08260260e-02\n 
-1.05330861e-01]]\nAccuracy of: 0.533967795385988\nWall time: 13.9 s\n" ], [ "para=['SK learn, iter=10,type=lbfgs,C=.0001, L2','ata=.1,iter=10,type=BFGS,C=.0001,L2' ]\nx=[0,1]\nacc=[ newtsk,BFGS1]\ntime=[1.5, 1.6]\nplt.subplots(figsize=(10, 7))\nz=np.polyfit(x, acc, 1)\nlabels = [ '\\n'.join(wrap(l, 18)) for l in para ]\n\n\nplt.bar(labels,acc)\n#ax.bar(labels,time,width=0.2,color='g')\nplt.xlabel('Optimization', fontweight='bold')\nplt.ylabel('Accuracy', fontweight='bold')\np = np.poly1d(z)\nplt.plot(x,1.01*p(x),color='black')\n", "_____no_output_____" ] ], [ [ "Our BFGS and steepest descent optimization techniques resulted in better accuracy than scikit-learn's LBFGS for the same number of iterations, as shown by the black trend line.\n\nIn the next plot, time is introduced as well.\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n \n# set width of bar\nbarWidth = 0.25\n \n# set height of bar\nbars1 = [newtsk, BFGS1]\nbars2 = [.23,.05]\n#bars3 = [29, 3, 24, 25, 17]\n \n# Set position of bar on X axis\nr1 = np.arange(len(bars1))\nr2 = [x + barWidth for x in r1]\n#r3 = [x + barWidth for x in r2]\nplt.subplots(figsize=(10, 7)) \n# Make the plot\nplt.bar(r1, bars1, color='#7f6d5f', width=barWidth, edgecolor='white', label='accuracy')\nplt.bar(r2, bars2, color='#557f2d', width=barWidth, edgecolor='white', label='time (m)')\n\nplt.plot(x,p(x),color='black')\n# Add xticks on the middle of the group bars\nplt.xlabel('Optimization', fontweight='bold')\n\nplt.xticks([r + barWidth for r in range(len(bars1))], labels)\n \n# Create legend & Show graphic\nplt.legend()\n\nplt.show()\n", "_____no_output_____" ] ], [ [ "### **3. Deployment**\n------", "_____no_output_____" ], [ "\nAmong all the techniques we tested above, BFGS is the best optimization technique for logistic regression. Comparing our BFGS to LBFGS from sklearn, we see that BFGS has higher accuracy with lower time consumption (2.99s vs. 13.9s).\n\nIn our opinion, the best method to use is BFGS, since it produces the highest accuracy with low time consumption.\n", "_____no_output_____" ], [ "### **4. Optimization Using Mean Squared Error**\n------", "_____no_output_____" ], 
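[ "For reference before the code (a summary in our own notation, read off the objective that the next cell implements, not an additional result): the line search evaluates candidate weights $w' = w - \\eta\\, g$, where $g$ is the direction passed in, against the objective\n\n$$J(w') = \\operatorname{mean}\\big((y - \\hat{y})^2\\big) + C \\cdot \\operatorname{mean}\\big(w'^2\\big), \\qquad \\hat{y}_i = \\mathbb{1}\\big[\\sigma(x_i^\\top w') > 0.5\\big],$$\n\nso with hard 0/1 predictions the squared-error term reduces to the misclassification rate, and the second term is an L2 penalty on the weights; this is exactly what `np.mean((y-yhat)**2) + C*np.mean(wnew**2)` computes.", "_____no_output_____" ] ], 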
[ [ "%%time\nfrom scipy.special import expit # needed by _sigmoid below (harmless if already imported earlier in the notebook)\nfrom scipy.optimize import minimize_scalar # needed by the line search in LineSearchLogisticRegressionWithMSE.fit\n\n# from last time, our logistic regression algorithm is given by (including everything we previously had):\nclass BinaryLogisticRegressionForMSE:\n def __init__(self, eta, iterations=1, C=0.001):\n self.eta = eta\n self.iters = iterations\n self.C = C\n # internally we will store the weights as self.w_ to keep with sklearn conventions\n\n def __str__(self):\n if(hasattr(self,'w_')):\n return 'Binary Logistic Regression Object with coefficients:\\n'+ str(self.w_) # if we have trained the object\n else:\n return 'Untrained Binary Logistic Regression Object'\n\n # convenience, private:\n @staticmethod\n def _add_bias(X):\n return np.hstack((np.ones((X.shape[0],1)),X)) # add bias term\n\n @staticmethod\n def _sigmoid(theta):\n # increase stability, redefine sigmoid operation\n return expit(theta) #1/(1+np.exp(-theta))\n\n # vectorized gradient calculation with regularization using L2 Norm\n def _get_gradient(self,X,y):\n ydiff = y-self.predict_proba(X,add_bias=False).ravel() # get y difference\n gradient = np.mean(X * ydiff[:,np.newaxis], axis=0) # make ydiff a column vector and multiply through\n\n gradient = gradient.reshape(self.w_.shape)\n gradient[1:] += -2 * self.w_[1:] * self.C\n\n return gradient\n\n # public:\n def predict_proba(self,X,add_bias=True):\n # add bias term if requested\n Xb = self._add_bias(X) if add_bias else X\n return self._sigmoid(Xb @ self.w_) # return the probability y=1\n\n def predict(self,X):\n return (self.predict_proba(X)>0.5) #return the actual prediction\n\n\n def fit(self, X, y):\n Xb = self._add_bias(X) # add bias term\n num_samples, num_features = Xb.shape\n\n self.w_ = np.zeros((num_features,1)) # init weight vector to zeros\n\n # for as many as the max iterations\n for _ in range(self.iters):\n gradient = self._get_gradient(Xb,y)\n self.w_ += gradient*self.eta # multiply by learning rate\n\nblr = BinaryLogisticRegressionForMSE(eta=0.1,iterations=500,C=0.001)\n\nx_train_ar=X_train.values\ny_target_ar=np.asarray(y_train)\n\nblr.fit(x_train_ar,y_target_ar)\nprint(blr)\n\nyhat = blr.predict(x_train_ar)\nprint('Accuracy of: ',accuracy_score(y_target_ar,yhat))\n\nclass LineSearchLogisticRegressionWithMSE(BinaryLogisticRegressionForMSE):\n\n # define custom line search for problem\n\n @staticmethod\n def objective_function(eta,X,y,w,grad,C=0.001):\n wnew = w - grad * eta # subtract grad*eta.. from class # 02.21.19 - 10.m4v timestamp: 23:00\n yhat = (1/(1+np.exp(-X @ wnew))) >0.5\n return np.mean((y-yhat)**2) + C*np.mean(wnew**2) # add regularization term, don't subtract.. 
from class\n # 02.21.19 - 10.m4v timestamp: 17:40\n\n\n def fit(self, X, y):\n Xb = self._add_bias(X) # add bias term\n num_samples, num_features = Xb.shape\n\n self.w_ = np.zeros((num_features,1)) # init weight vector to zeros\n\n # for as many as the max iterations\n for _ in range(self.iters):\n gradient = -self._get_gradient(Xb,y)\n # negated so that we step in the direction that minimizes the objective\n\n # do line search in gradient direction, using scipy function\n opts = {'maxiter':self.iters} # unclear exactly what this should be\n res = minimize_scalar(self.objective_function, # objective function to optimize\n bounds=(self.eta/1000,self.eta*10), #bounds to optimize\n args=(Xb,y,self.w_,gradient,0.001), # additional argument for objective function\n method='bounded', # bounded optimization for speed\n options=opts) # set max iterations\n\n eta = res.x # get optimal learning rate\n self.w_ -= gradient*eta # update the weights with the optimal step: subtract to minimize", "_____no_output_____" ] ], [ [ "### **5. References**\n------", "_____no_output_____" ], [ "1. Austin Animal Center Shelter Outcomes (Kaggle) https://www.kaggle.com/aaronschlegel/austin-animal-center-shelter-outcomes-and/version/1#aac_shelter_outcomes.csv\n2. Austin Animal Center. (n.d.). Retrieved March 10, 2019, from http://www.austintexas.gov/department/aac\n3. Hawes, Sloane; Ikizler, Devrim; Loughney, Katy; Tedeschi, Philip; and Morris, Kevin, \"Legislating Components of a Humane City: The Economic Impacts of the Austin, Texas \"No Kill\" Resolution (City of Austin Resolution 20091105-040)\" (2017). Animal Law and Legislation. 1. \nhttps://animalstudiesrepository.org/anilleg/1", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb38af48901e498bb6f0730d54941d67fd56aa95
19,754
ipynb
Jupyter Notebook
Model/RPSProject copy.ipynb
Millie-Jackson/AiCore
5f3dfd882295f39218de4f454a8e6346f172976a
[ "MIT" ]
null
null
null
Model/RPSProject copy.ipynb
Millie-Jackson/AiCore
5f3dfd882295f39218de4f454a8e6346f172976a
[ "MIT" ]
null
null
null
Model/RPSProject copy.ipynb
Millie-Jackson/AiCore
5f3dfd882295f39218de4f454a8e6346f172976a
[ "MIT" ]
null
null
null
26.766938
1,931
0.465627
[ [ [ "import time\nimport cv2\nimport numpy as np\nfrom random import *\nfrom keras.models import load_model\nmodel = load_model('keras_model.h5')\ncap = cv2.VideoCapture(0)\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n\nscore = [a for a in range(2)] #2 scores in one variable\nscore[1] = 0\nplayerScore = score[0]\npcScore = score[1]\nplayerGuess = \"Blank\"\nplaying = True\n\ndef countdown(seconds):\n while seconds:\n print(seconds)\n time.sleep(1)\n seconds -= 1\n\ndef randomGuess():\n number = random()\n if number < 0.3:\n pcGuess = \"Rock\"\n elif number > 0.6:\n pcGuess = \"Paper\"\n else: pcGuess = \"Scissors\" \n\ndef winCondition():\n if playerGuess== 'Rock' and pcGuess== 'Scissors' or playerGuess== 'Scissors' and pcGuess== 'Paper' or playerGuess== 'Paper' and pcGuess== 'Rock':\n print(\"You Win!\")\n score[playerScore] +1\n score[pcScore] -1\n else: \n print(\"You Lose!\")\n score[playerScore] -1\n score[pcScore] +1 \n\ndef game():\n name = input(\"Whats your name? \")\n print(name, \"vs computer\")\n time.sleep(1)\n print(\"Ready? Ok\")\n time.sleep(1)\n countdown(3)\n\n number = random()\n if number < 0.3:\n pcGuess = \"Rock\"\n elif number > 0.6:\n pcGuess = \"Paper\"\n else: pcGuess = \"Scissors\"\n\n ret, frame = cap.read()\n resized_frame = cv2.resize(frame, (224, 224), interpolation = cv2.INTER_AREA)\n image_np = np.array(resized_frame)\n normalized_image = (image_np.astype(np.float32) / 127.0) - 1 # Normalize the image\n data[0] = normalized_image\n prediction = model.predict(data)\n cv2.imshow('frame', frame)\n\n while True: \n ret, frame = cap.read()\n resized_frame = cv2.resize(frame, (224, 224), interpolation = cv2.INTER_AREA)\n image_np = np.array(resized_frame)\n normalized_image = (image_np.astype(np.float32) / 127.0) - 1 # Normalize the image\n data[0] = normalized_image\n prediction = model.predict(data)\n cv2.imshow('frame', frame)\n\n if prediction[0][0] > 0.5:\n playerGuess = \"Rock\"\n elif prediction[0][1] > 0.5:\n playerGuess = \"Paper\"\n elif prediction[0][2] > 0.5:\n playerGuess = \"Scissors\"\n else: \n prediction[0][3] > 0.5\n playerGuess = \"Waiting...\"\n\n print(\"I played \", pcGuess)\n time.sleep(1)\n print(\"You played \" + playerGuess)\n time.sleep(1)\n winCondition()\n time.sleep(2)\n print(name, \"Score: \", score[playerScore])\n print(\"Computer Score: \", score[pcScore])\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n\n # After the loop release the cap object\n cap.release()\n # Destroy all the windows\n cv2.destroyAllWindows()\n\ngame()", "WARNING:tensorflow:No training configuration found in the save file, so the model was *not* compiled. Compile it manually.\nMillie vs computer\nReady? Ok\n3\n2\n1\nI played Rock\nYou played Scissors\nYou Lose!\nMillie Score: 1\nComputer Score: 1\nI played Rock\nYou played Rock\nYou Lose!\nMillie Score: 1\nComputer Score: 1\nI played Rock\nYou played Rock\nYou Lose!\nMillie Score: 1\nComputer Score: 1\nI played Rock\nYou played Rock\nYou Lose!\nMillie Score: 1\nComputer Score: 1\n" ], [ "#GAME FUNCTION\ndef game():\n\n name = input(\"Hi, want to play? Whats your name? \")\n print(name, \"vs Computer\")\n print(\"Ready? Ok\")\n print(\"GO!\")\n\ngame()\nprint(\"Game Off\") \n", "Millie vs Computer\nReady? 
Ok\nRock\nPaper\nScissors\nGO!\nGame Off\n" ], [ "#COUNTDOWN FUNCTION\nimport time\n\ndef countdown(seconds):\n while seconds:\n print(seconds)\n time.sleep(1)\n seconds -= 1\n\ncountdown(3)\nprint(\"GO!\")\n", "3\n2\n1\nGO!\n" ], [ "# USER INPUT + PREDICTION\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\nmodel = load_model('keras_model.h5')\ncap = cv2.VideoCapture(0)\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n\nwhile True: \n ret, frame = cap.read()\n resized_frame = cv2.resize(frame, (224, 224), interpolation = cv2.INTER_AREA)\n image_np = np.array(resized_frame)\n normalized_image = (image_np.astype(np.float32) / 127.0) - 1 # Normalize the image\n data[0] = normalized_image\n prediction = model.predict(data)\n cv2.imshow('frame', frame)\n\n #playerGuess = input(cv2.VideoCapture(0))\n if prediction[0][0] > 0.5:\n playerGuess = \"Rock\"\n elif prediction[0][1] > 0.5:\n playerGuess = \"Paper\"\n elif prediction[0][2] > 0.5:\n playerGuess = \"Scissors\"\n else: \n prediction[0][3] > 0.5\n playerGuess = \"Waiting...\"\n print(playerGuess)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# After the loop release the cap object\ncap.release()\n# Destroy all the windows\ncv2.destroyAllWindows()", "WARNING:tensorflow:No training configuration found in the save file, so the model was *not* compiled. Compile it manually.\nWaiting...\nRock\nRock\nRock\nPaper\nPaper\nRock\nPaper\nRock\nRock\nRock\nRock\nWaiting...\nRock\nWaiting...\nRock\nRock\nPaper\nPaper\nPaper\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nPaper\nRock\nWaiting...\nRock\nRock\nRock\nPaper\nPaper\nPaper\nPaper\nRock\nRock\nRock\nPaper\nRock\nPaper\nPaper\nPaper\nRock\nRock\nRock\nRock\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nScissors\nScissors\nRock\nRock\nRock\nRock\nRock\nRock\nPaper\nWaiting...\nWaiting...\nPaper\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nPaper\nRock\nRock\nRock\nRock\nRock\nRock\nPaper\nRock\nRock\nRock\nRock\nRock\nPaper\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nPaper\nPaper\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nPaper\nPaper\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\nRock\n" ], [ "#COMPUTER GUESS FUNCTION\nfrom random import *\n\n\ndef randomGuess():\n number = random()\n if number < 0.3:\n pcGuess = \"Rock\"\n elif number > 0.6:\n pcGuess = \"Paper\"\n else: pcGuess = \"Scissors\"\n print(\"I played \", pcGuess)\n\nrandomGuess()", "I played Scissors\n" ], [ "# GAME LOGIC FUNCTION\npcGuess = 'Paper'\nplayerGuess = 'Rock'\nscore = [0, 0]\n\ndef winCondition():\n if playerGuess== 'Rock' and pcGuess== 'Scissors' or playerGuess== 'Scissors' and 
pcGuess== 'Paper' or playerGuess== 'Paper' and pcGuess== 'Rock':\n print(\"You Win!\")\n score[0] += 1\n score[1] -= 1\n else: \n print(\"You Lose!\")\n score[0] -= 1\n score[1] += 1\n \nwinCondition()\nprint(score[0], '/', score[1])", "You Lose!\n-1 / 1\n" ], [ "# WEBCAM FUNCTION\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\nmodel = load_model('keras_model.h5')\ncap = cv2.VideoCapture(0)\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n\ndef webcam():\n while True: \n ret, frame = cap.read()\n resized_frame = cv2.resize(frame, (224, 224), interpolation = cv2.INTER_AREA)\n image_np = np.array(resized_frame)\n normalized_image = (image_np.astype(np.float32) / 127.0) - 1 # Normalize the image\n data[0] = normalized_image\n prediction = model.predict(data)\n cv2.imshow('frame', frame)\n \n # Press q to close the window\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n # After the loop release the cap object\n cap.release()\n # Destroy all the windows\n cv2.destroyAllWindows()\n\nwebcam()", "WARNING:tensorflow:No training configuration found in the save file, so the model was *not* compiled. Compile it manually.\n" ], [ "# CAMERA ISSUES\n\n\npcGuess = 'Paper'\nplayerGuess = 'Rock'\nscore = [0, 0]\n\ndef randomGuess():\n number = random()\n if number < 0.3:\n pcGuess = \"Rock\"\n elif number > 0.6:\n pcGuess = \"Paper\"\n else: pcGuess = \"Scissors\"\n print(\"I played \", pcGuess)\n\ndef winCondition():\n if playerGuess== 'Rock' and pcGuess== 'Scissors' or playerGuess== 'Scissors' and pcGuess== 'Paper' or playerGuess== 'Paper' and pcGuess== 'Rock':\n print(\"You Win!\")\n score[0] += 1\n score[1] -= 1\n else: \n print(\"You Lose!\")\n score[0] -= 1\n score[1] += 1\n\ndef userPrediction():\n\n if prediction[0][0] > 0.5:\n playerGuess = \"Rock\"\n elif prediction[0][1] > 0.5:\n playerGuess = \"Paper\"\n elif prediction[0][2] > 0.5:\n playerGuess = \"Scissors\"\n else: \n prediction[0][3] > 0.5\n playerGuess = \"Waiting...\"\n \n print (\"You played \", playerGuess) \n # After the loop release the cap object\n cap.release()\n # Destroy all the windows\n cv2.destroyAllWindows()\n\n \nwebcam()\nuserPrediction()\nrandomGuess()\nwinCondition()\n#print(score[0], '/', score[1])\n\n \nprint(\"end\")", "WARNING:tensorflow:No training configuration found in the save file, so the model was *not* compiled. Compile it manually.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb38b3d9183c09cca7f359893dbd9b646b25575c
77,140
ipynb
Jupyter Notebook
notebooks/1. Extract Vega Lite.ipynb
gnestor/jupyterlab-omnisci
34690c9c52a7cf34030e05d71c7536bef53a6c5c
[ "Apache-2.0" ]
null
null
null
notebooks/1. Extract Vega Lite.ipynb
gnestor/jupyterlab-omnisci
34690c9c52a7cf34030e05d71c7536bef53a6c5c
[ "Apache-2.0" ]
null
null
null
notebooks/1. Extract Vega Lite.ipynb
gnestor/jupyterlab-omnisci
34690c9c52a7cf34030e05d71c7536bef53a6c5c
[ "Apache-2.0" ]
null
null
null
194.307305
40,558
0.896422
[ [ [ "We have packaged `extractTransform` from Vega Lite up as a mime renderer:", "_____no_output_____" ] ], [ [ "from IPython.display import display\nfrom jupyterlab_omnisci.altair import VegaLite, extract_spec, EMPTY_SPEC", "_____no_output_____" ] ], [ [ "Let's use this example vega lite, which we put in a class so that it is rendered:", "_____no_output_____" ] ], [ [ "SPEC = VegaLite({\n \"data\": {\n \"url\": \"https://vega.github.io/vega-lite/data/seattle-weather.csv\"\n },\n \"layer\": [\n {\n \"mark\": \"bar\",\n \"encoding\": {\n \"x\": {\n \"timeUnit\": \"month\",\n \"field\": \"date\",\n \"type\": \"ordinal\"\n },\n \"y\": {\n \"aggregate\": \"mean\",\n \"field\": \"precipitation\",\n \"type\": \"quantitative\",\n \"axis\": {\n \"grid\": False\n }\n }\n }\n },\n {\n \"mark\": \"line\",\n \"encoding\": {\n \"x\": {\n \"timeUnit\": \"month\",\n \"field\": \"date\",\n \"type\": \"ordinal\"\n },\n \"y\": {\n \"aggregate\": \"mean\",\n \"field\": \"temp_max\",\n \"type\": \"quantitative\",\n \"axis\": {\n \"grid\": False\n },\n \"scale\": {\n \"zero\": False\n }\n },\n \"color\": {\n \"value\": \"firebrick\"\n }\n }\n }\n ],\n \"resolve\": {\n \"scale\": {\n \"y\": \"independent\"\n }\n }\n})\nSPEC", "_____no_output_____" ] ], [ [ "Now, let's transform this spec using`extractTransform` and render the result and save it:", "_____no_output_____" ] ], [ [ "from IPython.display import display\nimport IPython.display\n\ndisplay_id = display(VegaLite(EMPTY_SPEC), display_id=True)\n\nUDPATED_SPEC = None\n\ndef on_transformed(updated):\n global UDPATED_SPEC\n UDPATED_SPEC = updated\n display_id.update(VegaLite(updated))\n\nextract_spec(SPEC.data, on_transformed)", "_____no_output_____" ] ], [ [ "Now let's look at the updated spec:", "_____no_output_____" ] ], [ [ "UDPATED_SPEC", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb38fe4511fbb7c14029dda2639ee3329bf3cc4a
93,059
ipynb
Jupyter Notebook
Variacoes_de_ataques/Singulares_CNN/CNN(21.15).ipynb
AfonsoSeguro/IDS_Comportamental
83145f815b67b2d501eb3744367aaea9b5d11cba
[ "MIT" ]
null
null
null
Variacoes_de_ataques/Singulares_CNN/CNN(21.15).ipynb
AfonsoSeguro/IDS_Comportamental
83145f815b67b2d501eb3744367aaea9b5d11cba
[ "MIT" ]
null
null
null
Variacoes_de_ataques/Singulares_CNN/CNN(21.15).ipynb
AfonsoSeguro/IDS_Comportamental
83145f815b67b2d501eb3744367aaea9b5d11cba
[ "MIT" ]
1
2021-09-05T13:56:36.000Z
2021-09-05T13:56:36.000Z
50.935413
15,640
0.54402
[ [ [ "import os\nimport numpy as np\nimport itertools\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom sklearn.metrics import confusion_matrix", "_____no_output_____" ], [ "df15 = pd.read_csv(\"../Dataset/21-02-2018.csv\", low_memory = False)", "_____no_output_____" ], [ "df15 = df15.drop([0,1])", "_____no_output_____" ], [ "df15", "_____no_output_____" ], [ "df16Aux = pd.read_csv(\"../Dataset/15-02-2018.csv\", low_memory = False)", "_____no_output_____" ], [ "df16Aux = df16Aux.drop([0,1])", "_____no_output_____" ], [ "df16Aux", "_____no_output_____" ], [ "listOrd = df15.columns.tolist()", "_____no_output_____" ], [ "df16 = pd.DataFrame()\nfor colu in listOrd:\n df16[colu] = df16Aux[colu]", "_____no_output_____" ], [ "df16", "_____no_output_____" ], [ "df16Aux = None", "_____no_output_____" ], [ "input_label15 = np.array(df15.loc[:, df15.columns != \"Label\"]).astype(np.float)", "_____no_output_____" ], [ "output_label15 = np.array(df15[\"Label\"])", "_____no_output_____" ], [ "out = []\nfor o in output_label15:\n if(o == \"Benign\"):out.append(0)\n else: out.append(1)\noutput_label15 = out", "_____no_output_____" ], [ "input_label16 = np.array(df16.loc[:, df16.columns != \"Label\"]).astype(np.float)", "_____no_output_____" ], [ "output_label16 = np.array(df16[\"Label\"])", "_____no_output_____" ], [ "out = []\nfor o in output_label16:\n if(o == \"Benign\"):out.append(0)\n else: out.append(1)\noutput_label16 = out", "_____no_output_____" ], [ "dfAE = pd.concat([df15, df16])", "_____no_output_____" ], [ "input_labelAE = np.array(dfAE.loc[:, dfAE.columns != \"Label\"]).astype(np.float)", "_____no_output_____" ], [ "output_labelAE = np.array(dfAE[\"Label\"])", "_____no_output_____" ], [ "out = []\nfor o in output_labelAE:\n if(o == \"Benign\"):out.append(0)\n else: out.append(1)\noutput_labelAE = out", "_____no_output_____" ], [ "dfAE = None\ndf15 = None\ndf16 = None", "_____no_output_____" ], [ "scaler = MinMaxScaler(feature_range=(0,1))\nscaler.fit(input_labelAE)\ninput_label15 = scaler.transform(input_label15)\ninput_label16 = scaler.transform(input_label16)\ninput_labelAE = scaler.transform(input_labelAE)", "_____no_output_____" ], [ "input_labelAE, output_labelAE = shuffle(input_labelAE, output_labelAE)\ninput_label15, output_label15 = shuffle(input_label15, output_label15)\ninput_label16, output_label16 = shuffle(input_label16, output_label16)", "_____no_output_____" ] ], [ [ "## AutoEncoder", "_____no_output_____" ] ], [ [ "inp_train,inp_test,out_train,out_test = train_test_split(input_labelAE, input_labelAE, test_size=0.2)", "_____no_output_____" ], [ "input_model = keras.layers.Input(shape = (78,))\nenc = keras.layers.Dense(units = 64, activation = \"relu\", use_bias = True)(input_model)\nenc = keras.layers.Dense(units = 36, activation = \"relu\", use_bias = True)(enc)\nenc = keras.layers.Dense(units = 18, activation = \"relu\")(enc)\ndec = keras.layers.Dense(units = 36, activation = \"relu\", use_bias = True)(enc)\ndec = keras.layers.Dense(units = 64, activation = \"relu\", use_bias = True)(dec)\ndec = keras.layers.Dense(units = 78, activation = \"relu\", use_bias = True)(dec)\nauto_encoder = keras.Model(input_model, dec)", "_____no_output_____" ], [ "encoder = keras.Model(input_model, enc)\ndecoder_input = keras.layers.Input(shape = (18,))\ndecoder_layer = 
auto_encoder.layers[-3](decoder_input)\ndecoder_layer = auto_encoder.layers[-2](decoder_layer)\ndecoder_layer = auto_encoder.layers[-1](decoder_layer)\ndecoder = keras.Model(decoder_input, decoder_layer)", "_____no_output_____" ], [ "auto_encoder.compile(optimizer=keras.optimizers.Adam(learning_rate=0.00025), loss = \"mean_squared_error\", metrics = ['accuracy'])", "_____no_output_____" ], [ "train = auto_encoder.fit(x = np.array(inp_train), y = np.array(out_train),validation_split= 0.1, epochs = 10, verbose = 1, shuffle = True)", "Epoch 1/10\n47006/47006 [==============================] - 58s 1ms/step - loss: 0.0059 - accuracy: 0.7680 - val_loss: 0.0052 - val_accuracy: 0.9200\nEpoch 2/10\n47006/47006 [==============================] - 53s 1ms/step - loss: 0.0052 - accuracy: 0.7879 - val_loss: 0.0052 - val_accuracy: 0.7058\nEpoch 3/10\n47006/47006 [==============================] - 53s 1ms/step - loss: 0.0052 - accuracy: 0.7954 - val_loss: 0.0052 - val_accuracy: 0.8099\nEpoch 4/10\n47006/47006 [==============================] - 53s 1ms/step - loss: 0.0052 - accuracy: 0.7969 - val_loss: 0.0052 - val_accuracy: 0.6962s - loss: 0.0052 - accuracy: 0. - ETA: 3s - loss: 0.0052 - accuracy: 0. - ETA: 3s - loss: 0.0052 - - ETA: 0s - loss: 0.0052 - accuracy: - ETA: 0s - loss: 0.0052 - accuracy: 0. - ETA: 0s - loss: 0.0052 - accura\nEpoch 5/10\n47006/47006 [==============================] - 55s 1ms/step - loss: 0.0052 - accuracy: 0.7992 - val_loss: 0.0051 - val_accuracy: 0.6978\nEpoch 6/10\n47006/47006 [==============================] - 49s 1ms/step - loss: 0.0050 - accuracy: 0.8006 - val_loss: 0.0051 - val_accuracy: 0.9064acy: 0. - ETA: 15s - loss: 0.0050 - accuracy: 0.80 - ETA: 15s - loss: 0.0050 - accuracy: 0.80 - ETA: 15s - loss: \nEpoch 7/10\n47006/47006 [==============================] - 56s 1ms/step - loss: 0.0050 - accuracy: 0.8005 - val_loss: 0.0051 - val_accuracy: 0.8429\nEpoch 8/10\n47006/47006 [==============================] - 53s 1ms/step - loss: 0.0050 - accuracy: 0.7994 - val_loss: 0.0051 - val_accuracy: 0.7128\nEpoch 9/10\n47006/47006 [==============================] - 52s 1ms/step - loss: 0.0050 - accuracy: 0.7994 - val_loss: 0.0051 - val_accuracy: 0.6970 13s - loss: 0.0050 - a - ETA: 12s - loss: 0.0050 - accuracy: - ETA: 12s - loss: 0.0050 - accuracy: 0.79 - ETA: 12s - loss: 0.0050 - accur - ETA: 11s - loss: 0.0050 - accuracy: 0.799 - ETA: 11s - loss: 0. - ETA: 10s - loss: 0.0050 - accuracy: 0.7 - ETA: 10s - loss: 0.0050 - accuracy - ETA: 9s - loss: 0.00 - ETA: 9s - loss: 0.005\nEpoch 10/10\n47006/47006 [==============================] - 52s 1ms/step - loss: 0.0050 - accuracy: 0.7993 - val_loss: 0.0051 - val_accuracy: 0.7896A: 15s - loss: 0.0050 - - ETA: 14s - loss: 0.005 - ETA: 13s - loss: 0.0050 - - ETA: 12s - loss: 0.0 - ETA: 11s - loss: 0. - ETA: 9s - loss: 0.0050 - accu - ETA: 9s - loss: 0.0 - ETA: 8s - loss: 0.0050 - accuracy - ETA: 8s - loss: 0.0050 - accuracy - ETA: 8s - loss: 0.0050 - accuracy - ETA: 8s - loss: 0.0050 - accuracy: 0. 
- ETA: 8s - loss: 0 - ETA: - ETA: 6s - loss: 0.0050 - ETA: 6s - loss: 0.0050 - accuracy - ETA: 5s - loss: 0.0050 - ETA: 5s - loss: 0.0050 - ac - ETA: 5s - - ETA: 4s - loss: 0.0050 - ETA: 3s - los - ETA: 2s - loss: 0.0050 - accuracy - ETA: 2s - loss: 0.0 - E\n" ], [ "predict = auto_encoder.predict(inp_test)", "_____no_output_____" ], [ "losses = keras.losses.mean_squared_error(out_test, predict).numpy()", "_____no_output_____" ], [ "total = 0\nfor loss in losses:\n total += loss\nprint(total / len(losses))", "0.005054680207691704\n" ], [ "inp_train = None\nout_train = None\ninput_labelAE = None", "_____no_output_____" ], [ "input_label15 = encoder.predict(input_label15).reshape(len(input_label15), 18, 1)\ninput_label16 = encoder.predict(input_label16).reshape(len(input_label16), 18, 1)", "_____no_output_____" ] ], [ [ "## Classificador", "_____no_output_____" ] ], [ [ "model = keras.Sequential([\n keras.layers.Conv1D(filters = 16, input_shape = (18,1), kernel_size = 3, padding = \"same\", activation = \"relu\", use_bias = True),\n keras.layers.MaxPool1D(pool_size = 3),\n keras.layers.Conv1D(filters = 8, kernel_size = 3, padding = \"same\", activation = \"relu\", use_bias = True),\n keras.layers.MaxPool1D(pool_size = 3),\n keras.layers.Flatten(),\n keras.layers.Dense(units = 2, activation = \"softmax\")\n ])", "_____no_output_____" ], [ "model.compile(optimizer= keras.optimizers.Adam(learning_rate= 0.00025), loss=\"sparse_categorical_crossentropy\", metrics=['accuracy'])", "_____no_output_____" ], [ "model.fit(x = np.array(input_label15), y = np.array(output_label15), validation_split= 0.1, epochs = 10, shuffle = True,verbose = 1)", "Epoch 1/10\n29492/29492 [==============================] - 40s 1ms/step - loss: 0.0842 - accuracy: 0.9666 - val_loss: 4.2405e-04 - val_accuracy: 0.9999\nEpoch 2/10\n29492/29492 [==============================] - 39s 1ms/step - loss: 2.7251e-04 - accuracy: 0.9999 - val_loss: 1.6988e-04 - val_accuracy: 0.9999\nEpoch 3/10\n29492/29492 [==============================] - 39s 1ms/step - loss: 1.5015e-04 - accuracy: 1.0000 - val_loss: 1.5269e-04 - val_accuracy: 1.0000\nEpoch 4/10\n29492/29492 [==============================] - 39s 1ms/step - loss: 1.1091e-04 - accuracy: 1.0000 - val_loss: 8.4497e-05 - val_accuracy: 1.0000\nEpoch 5/10\n29492/29492 [==============================] - 39s 1ms/step - loss: 4.7297e-05 - accuracy: 1.0000 - val_loss: 5.0701e-05 - val_accuracy: 1.0000\nEpoch 6/10\n29492/29492 [==============================] - 39s 1ms/step - loss: 4.2257e-05 - accuracy: 1.0000 - val_loss: 7.7759e-05 - val_accuracy: 1.0000\nEpoch 7/10\n29492/29492 [==============================] - 39s 1ms/step - loss: 4.1345e-05 - accuracy: 1.0000 - val_loss: 1.0315e-04 - val_accuracy: 1.0000\nEpoch 8/10\n29492/29492 [==============================] - 39s 1ms/step - loss: 3.4583e-05 - accuracy: 1.0000 - val_loss: 5.4727e-05 - val_accuracy: 1.0000\nEpoch 9/10\n29492/29492 [==============================] - 39s 1ms/step - loss: 3.9632e-05 - accuracy: 1.0000 - val_loss: 6.9197e-05 - val_accuracy: 1.0000\nEpoch 10/10\n29492/29492 [==============================] - 39s 1ms/step - loss: 2.1718e-05 - accuracy: 1.0000 - val_loss: 5.8509e-05 - val_accuracy: 1.0000\n" ], [ "res = [np.argmax(resu) for resu in model.predict(input_label16)]", "_____no_output_____" ], [ "cm = confusion_matrix(y_true = np.array(output_label16).reshape(len(output_label16)), y_pred = np.array(res))", "_____no_output_____" ], [ "def plot_confusion_matrix(cm, classes, normaliza = False, title = \"Confusion 
matrix\", cmap = plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n if normaliza:\n cm = cm.astype('float') / cm.sum(axis = 1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(\"Confusion matrix, without normalization\")\n \n print(cm)\n \n thresh = cm.max() / 2\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i,j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "_____no_output_____" ], [ "labels = [\"Benign\", \"Dos\"]\nplot_confusion_matrix(cm = cm, classes = labels, title = \"Dos IDS\")", "Confusion matrix, without normalization\n[[648870 339180]\n [ 52498 0]]\n" ], [ "from sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score", "_____no_output_____" ], [ "output_label16 = np.array(output_label16).reshape(len(output_label16))\nres = np.array(res)\nfpr, tpr, _ = roc_curve(output_label16, res)\nauc = roc_auc_score(output_label16, res)", "_____no_output_____" ], [ "plt.plot(fpr, tpr, label=\"auc=\" + str(auc))\nplt.legend(loc=4)\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb39000dfbb91f64a7f0ea57a0576e31f73eaf76
158,030
ipynb
Jupyter Notebook
nbs/v0.2_exp1_paracrawl_pytorch_simplified_en-ga.ipynb
morganmcg1/antra
a34eb1e05ccd140501af7b8555110dcb999e4af0
[ "Apache-2.0" ]
1
2020-08-10T06:18:48.000Z
2020-08-10T06:18:48.000Z
nbs/v0.2_exp1_paracrawl_pytorch_simplified_en-ga.ipynb
morganmcg1/antra
a34eb1e05ccd140501af7b8555110dcb999e4af0
[ "Apache-2.0" ]
null
null
null
nbs/v0.2_exp1_paracrawl_pytorch_simplified_en-ga.ipynb
morganmcg1/antra
a34eb1e05ccd140501af7b8555110dcb999e4af0
[ "Apache-2.0" ]
null
null
null
56.399001
14,692
0.70012
[ [ [ "Taken from fastai NLP \"8-translation-transformer\"\n\nFastText embeddings: https://fasttext.cc/docs/en/crawl-vectors.html", "_____no_output_____" ] ], [ [ "from fastai2.text.all import *\nfrom fastai2.callback.all import *\nfrom fastai2.basics import *\nimport seaborn as sns\n\nfrom einops import rearrange\nimport gc\nimport csv", "_____no_output_____" ], [ "path = Path('../data/irish/crosslang')\npath.ls(), path", "_____no_output_____" ] ], [ [ "### Load saved dataset", "_____no_output_____" ] ], [ [ "df=pd.read_csv(path/'paracrawl_cleaned_en-ga.csv')\nprint(len(df))\ndf.head()", "746502\n" ], [ "sns.distplot(df['ga_len'].values)\nprint(f'Median length is: {np.median(df[\"ga_len\"])}')", "Median length is: 18.0\n" ], [ "import seaborn as sns\nsns.distplot(df['en_len'].values)\nprint(f'Median length is: {np.median(df[\"en_len\"])}')", "Median length is: 17.0\n" ] ], [ [ "### Pre-processing\n\n**Remove long texts to make things easier**", "_____no_output_____" ] ], [ [ "# Word count 90th percentile\nnp.percentile([o for o in df.en_len.values], 90), np.percentile([o for o in df.ga_len.values], 90)", "_____no_output_____" ], [ "print(f'Removing {len(df.query(\"en_len > 60\"))} EN samples where len was > 60')\nprint(len(df))\ndf=df[~df.index.isin(df.query(\"en_len > 60\").index)]\nprint(len(df))\n \nprint(f'Removing {len(df.query(\"ga_len > 60\"))} FR samples where len was > 60')\nprint(len(df))\ndf=df[~df.index.isin(df.query(\"ga_len > 60\").index)]\nprint(len(df))", "Removing 45680 EN samples where len was > 60\n746502\n700822\nRemoving 11189 FR samples where len was > 60\n700822\n689633\n" ], [ "sns.distplot(df['en_len'].values), np.median(df['en_len'])", "_____no_output_____" ] ], [ [ "**Lowercase everything**", "_____no_output_____" ] ], [ [ "df['en'] = df['en'].apply(lambda x:x.lower())\ndf['ga'] = df['ga'].apply(lambda x:x.lower())", "_____no_output_____" ] ], [ [ "Rules used as part of tokenization", "_____no_output_____" ] ], [ [ "proc_rules=defaults.text_proc_rules[:-1] + [partial(lowercase, add_eos=True)]\nproc_rules", "_____no_output_____" ] ], [ [ "### Get Dataloaders", "_____no_output_____" ], [ "Load vocab to speed up data loading", "_____no_output_____" ] ], [ [ "splits = ColSplitter()(df) \n\ntfms = [[Tokenizer.from_df(text_cols='en' , rules=proc_rules), attrgetter(\"text\"), Numericalize(max_vocab=20000)], \n [Tokenizer.from_df(text_cols='ga', lang='ga', rules=proc_rules), attrgetter(\"text\"), Numericalize(max_vocab=20000)]]\n\ndl = partial(SortedDL, shuffle=True)\n\ndsets = Datasets(df, tfms, splits=splits, dl_type=dl)", "_____no_output_____" ], [ "# en_vocab=[]\n# ga_vocab=[]\n# with open('paracrawl_vocab_en.csv', newline='') as csvfile:\n# v_reader = csv.reader(csvfile, delimiter=',')\n# for row in v_reader:\n# en_vocab.append(row[0])\n \n# with open('paracrawl_vocab_ga.csv', newline='') as csvfile:\n# v_reader = csv.reader(csvfile, delimiter=',')\n# for row in v_reader:\n# ga_vocab.append(row[0])\n \n#len(en_vocab), len(ga_vocab), en_vocab[:10], ga_vocab[:10]", "_____no_output_____" ], [ "len(dsets), splits, len(dsets[2][0]), len(dsets[2][1]), dsets[2]", "_____no_output_____" ], [ "bs,sl = 48, 108\ndls = dsets.dataloaders(bs=bs, seq_len=sl, before_batch=partial(pad_input, pad_fields=[0,1]))\ndls.show_batch()", "_____no_output_____" ] ], [ [ "Save vocab to speed up data loading", "_____no_output_____" ] ], [ [ "with open('paracrawl_vocab_en_v0.2_exp1.csv', 'w', newline='') as csvfile:\n v_writer = csv.writer(csvfile, delimiter=',')\n for l in dls.vocab[0]:\n 
v_writer.writerow([l])\n \nwith open('paracrawl_vocab_ga_v0.2_exp1.csv', 'w', newline='') as csvfile:\n v_writer = csv.writer(csvfile, delimiter=',')\n for l in dls.vocab[1]:\n v_writer.writerow([l])", "_____no_output_____" ], [ "len(dls.train_ds)+len(dls.valid_ds), len(dls.train), len(dls.valid)", "_____no_output_____" ], [ "print(f'Vocab lengths are : {len(dls.vocab[0]), len(dls.vocab[1])}')", "Vocab lengths are : (20008, 20008)\n" ], [ "o=dls.one_batch(); o[0].size(), o[1].size(), o", "_____no_output_____" ] ], [ [ "## Transformer model", "_____no_output_____" ] ], [ [ "class PositionalEncoding(nn.Module):\n \"Encode the position with a sinusoid.\"\n def __init__(self, d):\n super().__init__()\n self.register_buffer('freq', 1 / (10000 ** (torch.arange(0., d, 2.)/d)))\n \n def forward(self, pos):\n inp = torch.ger(pos, self.freq)\n enc = torch.cat([inp.sin(), inp.cos()], dim=-1)\n return enc", "_____no_output_____" ], [ "class TransformerEmbedding(nn.Module):\n \"Embedding + positional encoding + dropout\"\n def __init__(self, vocab_sz, emb_sz, inp_p=0.):\n super().__init__()\n self.emb_sz = emb_sz\n self.embed = Embedding(vocab_sz, emb_sz)\n self.pos_enc = PositionalEncoding(emb_sz)\n self.drop = nn.Dropout(inp_p)\n \n def forward(self, inp): \n pos = torch.arange(0, inp.size(1), device=inp.device).float() \n return self.drop(self.embed(inp) * math.sqrt(self.emb_sz) + self.pos_enc(pos))", "_____no_output_____" ] ], [ [ "## PyTorch Transformer Simple", "_____no_output_____" ], [ "Note: [src/tgt/memory]_mask should be filled with float(‘-inf’) for the masked positions and float(0.0) else. These masks ensure that predictions for position i depend only on the unmasked positions j and are applied identically for each sequence in a batch. \n\n[src/tgt/memory]_key_padding_mask should be a ByteTensor where True values are positions that should be masked with float(‘-inf’) and False values will be unchanged. This mask ensures that no information will be taken from position i if it is masked, and has a separate mask for each sequence in a batch.\n\nattn mask with -inf\nkey_padding mask with True", "_____no_output_____" ], [ "### pt_Transformer", "_____no_output_____" ] ], [ [ "class pt_Transformer(Module):\n def __init__(self, src_vcbsz, trg_vcbsz, n_enc_layers=6, n_dec_layers=6, n_heads=8, d_model=256, d_head=32, \n d_inner=1024, p=0.1, bias=True, scale=True, double_drop=True, pad_idx=1):\n self.pad_idx = pad_idx\n self.enc_tfmr_emb = TransformerEmbedding(src_vcbsz, d_model, p)\n self.dec_tfmr_emb = TransformerEmbedding(trg_vcbsz, d_model, 0.) \n self.final = nn.Linear(d_model, trg_vcbsz)\n \n # !!!\n #self.final.weight = self.dec_tfmr_emb.embed.weight # !! 
What does this do?\n \n self.transformer_model=torch.nn.Transformer(d_model=d_model, nhead=n_heads, num_encoder_layers=n_enc_layers, \n num_decoder_layers=n_dec_layers, dim_feedforward=d_inner, dropout=p, \n activation='relu', custom_encoder=None, custom_decoder=None)\n \n \n def forward(self, src, trg, src_mask=None, tgt_mask=None, memory_mask=None, \n src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None):\n \n enc_emb, dec_emb = self.enc_tfmr_emb(src), self.dec_tfmr_emb(trg)\n \n src_mask=self.transformer_model.generate_square_subsequent_mask(src.size(1)).cuda()\n trg_mask=self.transformer_model.generate_square_subsequent_mask(trg.size(1)).cuda()\n \n dec_out = self.transformer_model(enc_emb.permute(1,0,2), dec_emb.permute(1,0,2),\n src_mask=src_mask, tgt_mask=trg_mask, memory_mask=None, \n src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None)\n \n out=self.final(dec_out)\n \n return out.permute(1,0,2)", "_____no_output_____" ] ], [ [ "### Metric", "_____no_output_____" ] ], [ [ "class CorpusBLEUMetric(Metric):\n def __init__(self, vocab_sz=5000, axis=-1):\n \"\"\"BLEU Metric calculated over the validation corpus\"\"\"\n self.pred_len, self.targ_len, self.corrects, self.counts = 0,0,[0]*4,[0]*4\n self.axis, self.vocab_sz = axis, vocab_sz\n \n def reset(self):\n self.pred_len,self.targ_len,self.corrects,self.counts = 0,0,[0]*4,[0]*4\n \n class NGram():\n def __init__(self, ngram, max_n=5000): self.ngram,self.max_n = ngram,max_n\n def __eq__(self, other):\n if len(self.ngram) != len(other.ngram): return False\n return np.all(np.array(self.ngram) == np.array(other.ngram))\n def __hash__(self): return int(sum([o * self.max_n**i for i,o in enumerate(self.ngram)]))\n \n def get_grams(self, x, n, max_n=5000):\n return x if n==1 else [self.NGram(x[i:i+n], max_n=max_n) for i in range(len(x)-n+1)]\n \n def get_correct_ngrams(self, pred, targ, n, max_n=5000):\n pred_grams,targ_grams = self.get_grams(pred, n, max_n=max_n),self.get_grams(targ, n, max_n=max_n)\n pred_cnt,targ_cnt = Counter(pred_grams),Counter(targ_grams)\n return sum([min(c, targ_cnt[g]) for g,c in pred_cnt.items()]),len(pred_grams)\n \n def accumulate(self, learn):\n last_output = learn.pred.argmax(dim=self.axis)\n last_target = learn.y\n for pred,targ in zip(last_output.cpu().numpy(),last_target.cpu().numpy()):\n self.pred_len += len(pred)\n self.targ_len += len(targ)\n for i in range(4):\n c,t = self.get_correct_ngrams(pred, targ, i+1, max_n=self.vocab_sz)\n self.corrects[i] += c\n self.counts[i] += t\n \n @property\n def value(self): \n if self.counts == 0: return None\n else:\n precs = [c/t for c,t in zip(self.corrects,self.counts)]\n len_penalty = exp(1 - self.targ_len/self.pred_len) if self.pred_len < self.targ_len else 1\n return len_penalty * ((precs[0]*precs[1]*precs[2]*precs[3]) ** 0.25)", "_____no_output_____" ] ], [ [ "### Callbacks\n\n#### Present Input and Target in a single tuple", "_____no_output_____" ] ], [ [ "class CombineInputOutputCallback(Callback):\n '''Callback to combine the input and target text into self.xb'''\n def __init__(self): pass\n def begin_batch(self): \n self.learn.xb = (self.xb[0], self.yb[0])", "_____no_output_____" ] ], [ [ "Shifting and masking of y, from [Annotated Transformer](http://nlp.seas.harvard.edu/2018/04/03/attention.html#training):\n\n> We also modify the self-attention sub-layer in the decoder stack to prevent positions from attending to subsequent positions. 
This masking, combined with fact that the output embeddings are offset by one position, ensures that the predictions for position i can depend only on the known outputs at positions less than i.", "_____no_output_____" ], [ "#### Shifting", "_____no_output_____" ], [ "#### Target shift/offset explained\n\n**Taken from [@bentrevett's brilliant github repo \"pytorch-seq2seq\" tutorials](https://github.com/bentrevett/pytorch-seq2seq/blob/master/6%20-%20Attention%20is%20All%20You%20Need.ipynb):**\n\nAs we want our model to predict the <eos> token but not have it be an input into our model we simply slice the <eos> token off the end of the sequence. Thus:\n\n$$\\begin{align*}\\text{trg} &= [sos, x_1, x_2, x_3, eos]\\\\\\text{trg[:-1]} &= [sos, x_1, x_2, x_3]\\end{align*}$$\n\n$x_i$ denotes **actual** target sequence element. We then feed this into the model to get a predicted sequence that should hopefully predict the <eos> token:\n\n$$\\begin{align*}\n\\text{output} &= [y_1, y_2, y_3, eos]\n\\end{align*}$$\n\n$y_i$ denotes **predicted** target sequence element. We then calculate our loss using the original trg tensor with the <sos> token sliced off the front, leaving the <eos> token:\n\n$$\\begin{align*} \\text{output} &= [y_1, y_2, y_3, eos]\\\\ \\text{trg[1:]} &= [x_1, x_2, x_3, eos] \\end{align*}$$\n\nWe then calculate our losses and update our parameters as is standard.\n \n \nWe don't want to punish the model for not translating the 'sos' token, but we do need it to predict/define the end of the sentence", "_____no_output_____" ], [ "**RemoveEOSCallback** \n\nCut the *EOS* token token from the **output_x** presented to the model as we are trying to predict the next word. Therefore don't want to model to try anything after the *EOS* token. So the last token given to the model will be the token before *EOS*. This callback is modifies the second element of our learn.xb, (which is the *copied* yb)\n\nBut this should also ignore padding, as otherwise we'll be just cutting the last padding token and not the EOS", "_____no_output_____" ] ], [ [ "class RemoveEOSCallback(Callback):\n '''\n Shift the target presented to the model during training to remove the \"eos\" token as \n we don't want the model to learn to translate EOS. 
When it sees EOS.\n \n In practice we actually mask the EOS token as due to batching the last token will often be a <pad> token,\n not EOS\n '''\n def __init__(self, eos_idx): self.eos_idx=eos_idx\n def begin_batch(self): \n eos_mask=(self.learn.xb[1]!=self.eos_idx)\n sz=torch.tensor(self.learn.xb[1].size())\n sz[1]=sz[1]-1\n self.learn.xb = (self.learn.xb[0], self.learn.xb[1][eos_mask].view((sz[0],sz[1])))", "_____no_output_____" ] ], [ [ "**LossTargetShiftCallback:** Shift the target shown to the loss to exclude the \"eos\" token, as translating \"bos\" is not part of our language translation objective", "_____no_output_____" ] ], [ [ "class LossTargetShiftCallback(Callback):\n '''\n Shift the target shown to the loss to exclude the \"bos\" token as the first token we want predicted\n should be an actual word, not the \"bos\" token (as we have already given the model \"bos\" )\n '''\n def __init__(self): pass\n def after_pred(self): \n self.learn.yb = (self.learn.yb[0][:,1:],)", "_____no_output_____" ] ], [ [ "### Model", "_____no_output_____" ], [ "Transformer size from Annotated Transformer:\n\nN=6, d_model=512, d_ff=2048, h=8", "_____no_output_____" ] ], [ [ "pad_idx=1\nassert dls.vocab[1][pad_idx] == 'xxpad' \nn_x_vocab, n_y_vocab = len(dls.vocab[0]), len(dls.vocab[1])\nd_model=512\nn_heads=8 #12\nd_inner=2048 #1024\n\n#model = Transformer(n_x_vocab, n_y_vocab, d_model=d_model, n_heads=n_heads, pad_idx=pad_idx)\n\nmodel=pt_Transformer(src_vcbsz=n_x_vocab, trg_vcbsz=n_y_vocab, d_model=d_model, d_inner=d_inner)", "_____no_output_____" ], [ "model", "_____no_output_____" ] ], [ [ "Kaiming_Normal works terrribly, at least if you apply it to everything except LayerNorm...\n\nDistilBERT works ok\n\nCould try xavier:\n\n```\ndef initialize_weights(m):\n if hasattr(m, 'weight') and m.weight.dim() > 1:\n nn.init.xavier_uniform_(m.weight.data)\n\nmodel.apply(initialize_weights);\n```", "_____no_output_____" ], [ "**DistilBERT initialisation**", "_____no_output_____" ] ], [ [ "# DistilERT HF init weights https://github.com/huggingface/transformers/blob/31e67dd19f1b3fe2bc9a13f86d814f3f7bba48e4/src/transformers/modeling_distilbert.py\n\ndef distil_apply_leaf(m, f):\n \"Apply `f` to children of `m`.\"\n c = m.children()\n if isinstance(m, nn.Module): f(m)\n for l in c: apply_leaf(l,f)\n\n\ndef _distilbert_init_weights(module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, nn.Embedding):\n if module.weight.requires_grad:\n module.weight.data.normal_(mean=0.0, std=0.02) #std=self.config.initializer_range)\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02) #self.config.initializer_range)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\ndistil_apply_leaf(model, _distilbert_init_weights)", "_____no_output_____" ], [ "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\nprint(f'The model has {count_parameters(model):,} trainable parameters')", "The model has 74,892,840 trainable parameters\n" ] ], [ [ "### Learner", "_____no_output_____" ] ], [ [ "cbs = [CombineInputOutputCallback, RemoveEOSCallback(eos_idx=3), LossTargetShiftCallback]\n\npad_idx=1\nassert dls.vocab[1][pad_idx] == 'xxpad' \nloss_func = CrossEntropyLossFlat(ignore_index=pad_idx)\n\nlearn = Learner(dls, model, metrics=[accuracy, Perplexity(), CorpusBLEUMetric(vocab_sz=n_y_vocab)], \n cbs=cbs, 
loss_func=loss_func)", "_____no_output_____" ], [ "#learn.load('paracrawl_en_ga_5e_5e-4')", "_____no_output_____" ] ], [ [ "# Training", "_____no_output_____" ] ], [ [ "learn.lr_find()", "_____no_output_____" ], [ "learn.fit_one_cycle(5, 5e-4, div=5)", "_____no_output_____" ], [ "learn.recorder.plot_loss()", "_____no_output_____" ], [ "learn.save('paracrawl_en_ga_5e_5e-4_v0.2_exp1')", "_____no_output_____" ] ], [ [ "## 5e results", "_____no_output_____" ] ], [ [ "generate(learn.model, \"hello, how are you?\", dls.vocab[1])", "xxbos dia duit , conas atá tú ? xxeos\n" ], [ "generate(learn.model, \"Can you tell we where the bus station is please?\", dls.vocab[1])", "xxbos is féidir le xxunk a insint duit cá bhfuil an stáisiún bus le do thoil ? xxeos\n" ], [ "generate(learn.model, \"Yesterday it rained, but tomorrow will be very sunny\", dls.vocab[1])", "xxbos xxunk inné , beidh sé xxunk , ach amárach a bheith an - xxunk xxeos\n" ], [ "generate(learn.model, \"I had a great day, my translator is working\", dls.vocab[1])", "xxbos bhí mé lá mór , is é mo aistritheoir ag obair xxeos\n" ], [ "generate(learn.model, \"So this is a story all about how my lift got flip turned \\\nupside down, so I'd like to take a minute just sit right there, I'll you all about how I became the fresh prince\\\nof belair\", dls.vocab[1])", "xxbos xxunk mar sin tá sé seo scéal go léir faoi conas a fuair mo ardaitheoir smeach iompú xxunk síos , mar sin xxunk ba mhaith liom a ghlacadh nóiméad ach suí ceart ann , xxunk beidh mé go léir faoi conas a tháinig mé an xxunk úr xxeos\n" ], [ "generate(learn.model, \"dog\", dls.vocab[1])", "xxbos madra xxeos\n" ], [ "generate(learn.model, \"cat\", dls.vocab[1])", "xxbos cat cat xxeos\n" ], [ "generate(learn.model, \"tree\", dls.vocab[1])", "xxbos crann xxeos\n" ], [ "generate(learn.model, \"building\", dls.vocab[1])", "xxbos foirgneamh xxeos\n" ], [ "generate(learn.model, \"city\", dls.vocab[1])", "xxbos cathair xxeos\n" ], [ "generate(learn.model, \"woman\", dls.vocab[1])", "xxbos bean xxeos\n" ], [ "generate(learn.model, \"man\", dls.vocab[1])", "xxbos fear xxeos\n" ], [ "generate(learn.model, \"chocolate\", dls.vocab[1])", "xxbos seacláid xxeos\n" ], [ "generate(learn.model, \"spaceship\", dls.vocab[1])", "xxbos spásárthach xxeos\n" ] ], [ [ "## v0.1 - 5e Run", "_____no_output_____" ] ], [ [ "learn.fit_one_cycle(5, 5e-4, div=5)", "_____no_output_____" ], [ "learn.save('paracrawl_en_ga_5e_5e-4')", "_____no_output_____" ], [ "learn.export(fname='paracrawl_en_ga_5e_5e-4_learner.pkl')", "_____no_output_____" ] ], [ [ "## Generation", "_____no_output_____" ] ], [ [ "def generate(model, sentence, vocab):\n #model = torch.load('output/transformer.pth')\n# lang_model = spacy.load('en')\n# with open('data/processed/en/freq_list.pkl', 'rb') as f:\n# en_freq_list = pickle.load(f)\n# with open('data/processed/fr/freq_list.pkl', 'rb') as f:\n# fr_freq_list = pickle.load(f)\n #sentence = input('Please enter your english sentence: ')\n #sentence = tokenize(sentence, en_freq_list, lang_model)\n \n model=model.eval()\n \n sentence=learn.dls.tokenizer[0][1].encodes(sentence)\n sentence=learn.dls.numericalize[0].encodes(sentence)\n \n translated_sentence = [2] # xxbos\n #translated_sentence = [fr_freq_list['[SOS]']]\n i = 0\n while int(translated_sentence[-1]) != 3 and i < 75: # xxeos\n #while int(translated_sentence[-1]) != fr_freq_list['[EOS]'] and i < 15:\n #output = forward_model(model, sentence, translated_sentence).to('cuda')\n output = forward_model(model, sentence, 
translated_sentence).cuda()\n values, indices = torch.topk(output, 5)\n translated_sentence.append(int(indices[-1][0]))\n i+=1\n\n detok_translated_sentence=detokenize(translated_sentence, vocab)\n print(' '.join(detok_translated_sentence))\n \n\ndef forward_model(model, src, tgt):\n src = torch.as_tensor(src).unsqueeze(0).long().cuda()\n tgt = torch.as_tensor(tgt).unsqueeze(0).cuda()\n tgt_mask = gen_nopeek_mask(tgt.shape[1]).cuda()\n output = model.forward(src, tgt, tgt_mask=tgt_mask, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None)\n\n #return output.squeeze(0).to('cpu')\n return output.squeeze(0).detach()\n\n\n# def tokenize(sentence, freq_list, lang_model):\n# punctuation = ['(', ')', ':', '\"', ' ']\n\n# sentence = sentence.lower()\n# sentence = [tok.text for tok in lang_model.tokenizer(sentence) if tok.text not in punctuation]\n# return [freq_list[word] if word in freq_list else freq_list['[OOV]'] for word in sentence]\n\n\ndef detokenize(sentence, vocab):\n #freq_list = {v: k for k, v in freq_list.items()}\n return [vocab[token] for token in sentence]\n #return [freq_list[token] for token in sentence]\n# def detokenize(sentence, freq_list):\n# freq_list = {v: k for k, v in freq_list.items()}\n# return [freq_list[token] for token in sentence]\n\n\ndef gen_nopeek_mask(length):\n mask = rearrange(torch.triu(torch.ones(length, length)) == 1, 'h w -> w h')\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask", "_____no_output_____" ] ], [ [ "## 5e results", "_____no_output_____" ] ], [ [ "generate(learn.model, \"hello, how are you?\", dls.vocab[1])", "xxbos dia duit , conas a bhfuil t ? xxeos\n" ], [ "generate(learn.model, \"Can you tell we where the bus station is please?\", dls.vocab[1])", "xxbos is fidir xxunk insint duit nuair a bhonn an stisin bus le do thoil ? 
xxeos\n" ], [ "generate(learn.model, \"Yesterday it rained, but tomorrow will be very sunny\", dls.vocab[1])", "xxbos xxunk inn xxunk s , ach beidh amrach a bheith an - xxunk xxeos\n" ], [ "generate(learn.model, \"I had a great day, my translator is working\", dls.vocab[1])", "xxbos bh m l iontach , t mo aistritheoir ag obair xxeos\n" ], [ "generate(learn.model, \"So this is a story all about how my lift got flip turned \\\nupside down, so I'd like to take a minute just sit right there, I'll you all about how I became the fresh prince\\\nof belair\", dls.vocab[1])", "xxbos xxunk mar sin is seo an scal ar fad faoi conas a fuair mo ardaitheoir smeach xxunk sos , mar sin xxunk ba mhaith liom a ghlacadh nimad ach su ceart ann , xxunk m go mbainfidh t go lir faoi conas a thinig m an xxunk r xxunk xxeos\n" ], [ "generate(learn.model, \"dog\", dls.vocab[1])", "xxbos madra xxeos\n" ], [ "generate(learn.model, \"cat\", dls.vocab[1])", "xxbos cat cat xxeos\n" ], [ "generate(learn.model, \"tree\", dls.vocab[1])", "xxbos crann xxeos\n" ], [ "generate(learn.model, \"building\", dls.vocab[1])", "xxbos foirgneamh xxeos\n" ], [ "generate(learn.model, \"city\", dls.vocab[1])", "xxbos cathair na mart xxeos\n" ], [ "generate(learn.model, \"woman\", dls.vocab[1])", "xxbos bean xxeos\n" ], [ "generate(learn.model, \"man\", dls.vocab[1])", "xxbos fear xxeos\n" ], [ "generate(learn.model, \"chocolate\", dls.vocab[1])", "xxbos seaclid seaclid seaclid xxeos\n" ], [ "generate(learn.model, \"spaceship\", dls.vocab[1])", "xxbos spsrthach spsrthach xxeos\n" ] ], [ [ "## 20e Run", "_____no_output_____" ] ], [ [ "# 20e, added shuffle to sorteddl, PT Transformer, distilbert init, Adam, distilbert init\n# CONCLUSION: \nlearn.fit_one_cycle(20, 5e-4, div=5)", "_____no_output_____" ], [ "learn.save('paracrawl_en_ga_20e_5e-4')", "_____no_output_____" ], [ "learn.export(fname='paracrawl_en_ga_20e_5e-4_learner.pkl')", "_____no_output_____" ] ], [ [ "## Generation", "_____no_output_____" ] ], [ [ "def generate(model, sentence, vocab):\n #model = torch.load('output/transformer.pth')\n# lang_model = spacy.load('en')\n# with open('data/processed/en/freq_list.pkl', 'rb') as f:\n# en_freq_list = pickle.load(f)\n# with open('data/processed/fr/freq_list.pkl', 'rb') as f:\n# fr_freq_list = pickle.load(f)\n #sentence = input('Please enter your english sentence: ')\n #sentence = tokenize(sentence, en_freq_list, lang_model)\n \n model=model.eval()\n \n sentence=learn.dls.tokenizer[0][1].encodes(sentence)\n sentence=learn.dls.numericalize[0].encodes(sentence)\n \n translated_sentence = [2] # xxbos\n #translated_sentence = [fr_freq_list['[SOS]']]\n i = 0\n while int(translated_sentence[-1]) != 3 and i < 75: # xxeos\n #while int(translated_sentence[-1]) != fr_freq_list['[EOS]'] and i < 15:\n #output = forward_model(model, sentence, translated_sentence).to('cuda')\n output = forward_model(model, sentence, translated_sentence).cuda()\n values, indices = torch.topk(output, 5)\n translated_sentence.append(int(indices[-1][0]))\n i+=1\n\n detok_translated_sentence=detokenize(translated_sentence, vocab)\n print(' '.join(detok_translated_sentence))\n \n\ndef forward_model(model, src, tgt):\n src = torch.as_tensor(src).unsqueeze(0).long().cuda()\n tgt = torch.as_tensor(tgt).unsqueeze(0).cuda()\n tgt_mask = gen_nopeek_mask(tgt.shape[1]).cuda()\n output = model.forward(src, tgt, tgt_mask=tgt_mask, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None)\n\n #return output.squeeze(0).to('cpu')\n return 
output.squeeze(0).detach()\n\n\n# def tokenize(sentence, freq_list, lang_model):\n# punctuation = ['(', ')', ':', '\"', ' ']\n\n# sentence = sentence.lower()\n# sentence = [tok.text for tok in lang_model.tokenizer(sentence) if tok.text not in punctuation]\n# return [freq_list[word] if word in freq_list else freq_list['[OOV]'] for word in sentence]\n\n\ndef detokenize(sentence, vocab):\n #freq_list = {v: k for k, v in freq_list.items()}\n return [vocab[token] for token in sentence]\n #return [freq_list[token] for token in sentence]\n# def detokenize(sentence, freq_list):\n# freq_list = {v: k for k, v in freq_list.items()}\n# return [freq_list[token] for token in sentence]\n\n\ndef gen_nopeek_mask(length):\n mask = rearrange(torch.triu(torch.ones(length, length)) == 1, 'h w -> w h')\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask", "_____no_output_____" ] ], [ [ "## 20e results", "_____no_output_____" ] ], [ [ "generate(learn.model, \"hello, how are you?\", dls.vocab[1])", "xxbos dia duit , conas a bhfuil t ? xxeos\n" ], [ "generate(learn.model, \"Can you tell we where the bus station is please?\", dls.vocab[1])", "xxbos xxunk an fidir leat insint dinn c bhfuil an stisin bus le do thoil ? xxeos\n" ], [ "generate(learn.model, \"Yesterday it rained, but tomorrow will be very sunny\", dls.vocab[1])", "xxbos xxunk inn xxunk s , ach beidh amrach a bheith an - xxunk xxeos\n" ], [ "generate(learn.model, \"I had a great day, my translator is working\", dls.vocab[1])", "xxbos bh m l mr , t mo aistritheoir ag obair xxeos\n" ], [ "generate(learn.model, \"So this is a story all about how my lift got flip turned \\\nupside down, so I'd like to take a minute just sit right there, I'll you all about how I became the fresh prince\\\nof belair\", dls.vocab[1])", "xxbos xxunk mar sin t an scal faoi conas a fuair mo ardaitheoir smeach xxunk sos , mar sin xxunk ba mhaith liom a ghlacadh nimad ach su ceart ann , xxunk beidh m go lir faoi conas a thinig m an xxunk r xxunk xxeos\n" ], [ "generate(learn.model, \"dog\", dls.vocab[1])", "xxbos madra xxeos\n" ], [ "generate(learn.model, \"cat\", dls.vocab[1])", "xxbos cat xxeos\n" ], [ "generate(learn.model, \"tree\", dls.vocab[1])", "xxbos crann xxeos\n" ], [ "generate(learn.model, \"building\", dls.vocab[1])", "xxbos foirgneamh xxeos\n" ], [ "generate(learn.model, \"city\", dls.vocab[1])", "xxbos cathair xxeos\n" ], [ "generate(learn.model, \"woman\", dls.vocab[1])", "xxbos bean xxeos\n" ], [ "generate(learn.model, \"man\", dls.vocab[1])", "xxbos fear xxeos\n" ], [ "generate(learn.model, \"chocolate\", dls.vocab[1])", "xxbos seaclid xxeos\n" ], [ "generate(learn.model, \"spaceship\", dls.vocab[1])", "xxbos spsrthach xxeos\n" ] ], [ [ "## Alternative generation", "_____no_output_____" ] ], [ [ "# https://forums.fast.ai/t/fastai-v2-text/53529/334\nfrom fastai2.text.all import *\n\ndefaults.device = torch.device('cpu')\npath = Path('.')\nlearner = load_learner(\"./export.pkl\")\n\nf = open(\"/tmp/test.txt\", \"r\")\ntest_file_contents = f.read()\n\n_, _, losses = learner.predict(test_file_contents)\ncats = [learner.dls.categorize.decode(i) for i in range(len(losses))]\n\npredictions = sorted(\n zip(cats, map(float, losses)),\n key=lambda p: p[1],\n reverse=True\n)\nprint(predictions)\n\n# OR\n\nitems = pd.read_csv(\"/tmp/test.txt\", sep = '\\t')\ntest_dl = learner.dls.test_dl(items.values)\n\nlearner.get_preds(dl=test_dl, with_decoded=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb391733a369f792b3f478f75ce8a1fff806e9c0
739,277
ipynb
Jupyter Notebook
book/urban/sensors/urban-sensors-climate_ukv.ipynb
samueljackson92/environmental-ai-book
1ddc0b6ab1108d7ac204ddf8884b3fdc456c71f3
[ "MIT" ]
null
null
null
book/urban/sensors/urban-sensors-climate_ukv.ipynb
samueljackson92/environmental-ai-book
1ddc0b6ab1108d7ac204ddf8884b3fdc456c71f3
[ "MIT" ]
null
null
null
book/urban/sensors/urban-sensors-climate_ukv.ipynb
samueljackson92/environmental-ai-book
1ddc0b6ab1108d7ac204ddf8884b3fdc456c71f3
[ "MIT" ]
null
null
null
1,579.651709
311,907
0.960402
[ [ [ "# Met Office UKV high-resolution atmosphere model data\n\n:::{eval-rst}\n:opticon:`tag`\n:badge:`Urban,badge-primary`\n:badge:`Sensors,badge-secondary`\n:::\n\n## Context\n### Purpose\nTo load, plot, regrid and extract an urban region from the UKV gridded model data using the [Iris package](https://scitools-iris.readthedocs.io/en/stable/).\n\n### Sensor description\nMet Office UKV model data is fairly high resolution (approximately 1 km horizontal) and available over the whole of the UK for a variety of atmospheric variables at surface and pressure levels. A selection of variables has been\nmade openly available as part of the Met Office contribution to the COVID 19 modelling effort. A selection of variables at hourly and daily frequency in NetCDF format can be obtained from [this landing page](https://metdatasa.blob.core.windows.net/covid19-response-non-commercial/README.html). \n\nThis notebook uses a single sample data file for 1.5 m temperature included with the notebook.\n\n### Highlights\n* Data for the whole UK is loaded and plotted using Iris\n* Data is regridded to a geographic projection \n* A region over London is extracted\n\n### Contributions\n\n#### Notebook\n* Samantha V. Adams (author), Met Office Informatics Lab, [@svadams](https://github.com/svadams)\n* Alejandro Coca-Castro (reviewer), The Alan Turing Institute, [@acocac](https://github.com/acocac), 01/10/21 (latest revision)\n\n#### Dataset originator/creator\n* Met Office Informatics Lab (creator)\n* Microsoft (support)\n* European Regional Development Fund (support)\n\n#### Dataset authors\n* Met Office\n\n#### Dataset documentation\n```{bibliography}\n :style: plain\n :list: bullet\n :filter: topic % \"urban-sensors-climate_ukv\"\n```\n\n:::{note}\nNote this data should be used only for non-commercial purposes.\n:::", "_____no_output_____" ], [ "## Install and load libraries", "_____no_output_____" ] ], [ [ "#!conda install -c conda-forge iris", "_____no_output_____" ], [ "import os\nimport iris\nimport iris.analysis\nimport iris.plot as iplt\nfrom iris.coords import DimCoord\nfrom iris.coord_systems import GeogCS\nfrom iris.cube import Cube\n\nfrom iris.fileformats.pp import EARTH_RADIUS\n\nimport urllib.request\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Set project structure", "_____no_output_____" ] ], [ [ "notebook_folder = '../sensors/urban-sensors-climate_ukv'\nif not os.path.exists(notebook_folder):\n os.makedirs(notebook_folder)", "_____no_output_____" ] ], [ [ "## Retrieve and load a sample data file", "_____no_output_____" ] ], [ [ "filepath = 'https://metdatasa.blob.core.windows.net/covid19-response-non-commercial/metoffice_ukv_daily/t1o5m_mean/'\nfilename = 'ukv_daily_t1o5m_mean_20150801.nc'\nurllib.request.urlretrieve(filepath+filename, os.path.join(notebook_folder, filename))", "_____no_output_____" ], [ "air_temp = iris.load_cube(os.path.join(notebook_folder, filename))\nair_temp.coord('grid_latitude').guess_bounds()\nair_temp.coord('grid_longitude').guess_bounds()", "_____no_output_____" ] ], [ [ "## Visualisation\n\nHere we use the Iris wrapper to matplotlib pyplot to plot the gridded data with added gridlines and coastline.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(30, 10))\niplt.pcolormesh(air_temp)\nplt.title(\"UKV Air temperature\", fontsize=\"xx-large\")\ncbar = plt.colorbar()\ncbar.set_label('Temperature (' + str(air_temp.units) + ')')\nax = plt.gca()\nax.coastlines(resolution=\"50m\")\nax.gridlines()", 
"_____no_output_____" ] ], [ [ "## Regridding from Azimuthal equal-area projection to geographic", "_____no_output_____" ], [ "### Create a target cube with a lat-lon coord system for regrid\n\nIt is filled with random data so we can plot it to check it looks correct.", "_____no_output_____" ] ], [ [ "latitude = DimCoord(np.linspace(48.5, 59.5, 1222),\n standard_name='latitude',\n coord_system = GeogCS(EARTH_RADIUS),\n units='degrees')\nlongitude = DimCoord(np.linspace(-10.5, 2.0, 1389),\n standard_name='longitude',\n coord_system = GeogCS(EARTH_RADIUS), \n units='degrees')\nglobal_cube = Cube(np.random.uniform(low=0.0, high=1.0, size=(1222, 1389)),\n dim_coords_and_dims=[(latitude, 0),\n (longitude, 1)])\n\nglobal_cube.coord('latitude').guess_bounds()\nglobal_cube.coord('longitude').guess_bounds()", "_____no_output_____" ], [ "plt.figure(figsize=(30, 10))\niplt.pcolormesh(global_cube)\nplt.title(\"Target global cube\", fontsize=\"xx-large\")\nax = plt.gca()\nax.coastlines(resolution=\"50m\")\nax.gridlines()", "_____no_output_____" ] ], [ [ "### Perform the regridding from source data cube to target cube", "_____no_output_____" ] ], [ [ "# Note we need to use extrapolation masking in case regridded source data is actually smaller\n# than the target cube extents\nglobal_air_temp = air_temp.regrid(global_cube, iris.analysis.Linear(extrapolation_mode=\"mask\"))", "_____no_output_____" ] ], [ [ "### Plot the regridded data to check it is correct", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(30, 10))\n\niplt.pcolormesh(global_air_temp)\nplt.title(\"UKV Air temperature on a global grid\", fontsize=\"xx-large\")\ncbar = plt.colorbar()\ncbar.set_label('Temperature (' + str(global_air_temp.units) + ')')\nax = plt.gca()\nax.coastlines(resolution=\"50m\")\nax.gridlines()", "_____no_output_____" ] ], [ [ "## Extract the London Region\n\n### Use the Iris Intersection method and supply the region lat-lon bounds", "_____no_output_____" ] ], [ [ "min_lon = -0.52\nmin_lat = 51.3\nmax_lon = 0.3\nmax_lat = 51.7\n\nair_temp_london = global_air_temp.intersection(longitude=(min_lon, max_lon), latitude=(min_lat, max_lat))", "_____no_output_____" ] ], [ [ "### Plot the results", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(20, 5))\n\niplt.pcolormesh(air_temp_london)\nplt.title(\"UKV Air temperature for london\", fontsize=\"xx-large\")\ncbar = plt.colorbar()\ncbar.set_label('Temperature (' + str(air_temp_london.units) + ')')\nax = plt.gca()\nax.coastlines(resolution=\"50m\")\nax.gridlines()\n\nplt.show()", "_____no_output_____" ] ], [ [ "### Save as a new NetCDF file", "_____no_output_____" ] ], [ [ "iris.save(air_temp_london, os.path.join(notebook_folder,'ukv_london_sample.nc'))", "_____no_output_____" ] ], [ [ "## Summary\n\nThis notebook has demonstrated the use of the Iris package to easily load, plot and manipulate gridded environmental NetCDF data.\n\n## Version\n\n* Notebook: commit <mark>[74bbb54](https://github.com/acocac/environmental-ai-book/commits/master/book/urban/sensors/urban-sensors-climate_ukv.ipynb)</mark>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb392681973ae8bbc75f19a282a9a9434738f73e
1,659
ipynb
Jupyter Notebook
index.ipynb
tourdownunder/tutorial-nbdev
c747d345feb55aab415b40809f600db60a631c94
[ "Apache-2.0" ]
null
null
null
index.ipynb
tourdownunder/tutorial-nbdev
c747d345feb55aab415b40809f600db60a631c94
[ "Apache-2.0" ]
1
2021-09-28T05:41:47.000Z
2021-09-28T05:41:47.000Z
index.ipynb
tourdownunder/tutorial-nbdev
c747d345feb55aab415b40809f600db60a631c94
[ "Apache-2.0" ]
null
null
null
16.264706
81
0.488246
[ [ [ "#hide\nfrom nbdev_tutorial.core import say_hello", "_____no_output_____" ] ], [ [ "# Project name here\n\n> Summary description here.", "_____no_output_____" ], [ "This file will become your README and also the index of your documentation.", "_____no_output_____" ], [ "## Install", "_____no_output_____" ], [ "pip install nbdev_tutorial", "_____no_output_____" ], [ "## How to use", "_____no_output_____" ], [ "Fill me in please! Don't forget code examples:", "_____no_output_____" ] ], [ [ "say_hello('To the morning')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
cb3941e08d59af69abc2e999e5a91ddbc682d3fc
118,716
ipynb
Jupyter Notebook
nbs/CV070816.ipynb
yang-zhang/aptos2019-blindness-detection
9f06be13ea468e5977e0552c0de1a1f8dbe836dc
[ "Apache-2.0" ]
null
null
null
nbs/CV070816.ipynb
yang-zhang/aptos2019-blindness-detection
9f06be13ea468e5977e0552c0de1a1f8dbe836dc
[ "Apache-2.0" ]
null
null
null
nbs/CV070816.ipynb
yang-zhang/aptos2019-blindness-detection
9f06be13ea468e5977e0552c0de1a1f8dbe836dc
[ "Apache-2.0" ]
null
null
null
68.110155
28,276
0.766097
[ [ [ "https://www.kaggle.com/zhangyang/bldcv0708091?scriptVersionId=16901388", "_____no_output_____" ], [ "# params", "_____no_output_____" ] ], [ [ "dbg = False\nif dbg:\n dbgsz = 500\n\nPRFX = 'CV070816' \np_o = f'../output/{PRFX}'\nSEED = 111\n\nBS = 128\nSZ = 224\nFP16 = False\n", "_____no_output_____" ] ], [ [ "# setup", "_____no_output_____" ] ], [ [ "import fastai\nprint('fastai.__version__: ', fastai.__version__)\n\nimport random \nimport numpy as np\nimport torch\nimport os\n\ndef set_torch_seed(seed=SEED):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n \n if torch.cuda.is_available(): \n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) \n torch.backends.cudnn.deterministic = True \n torch.backends.cudnn.benchmark = False\n\nset_torch_seed()\n\nfrom fastai import *\nfrom fastai.vision import *\nfrom fastai.callbacks import *\n\nimport scipy as sp\nfrom sklearn.metrics import cohen_kappa_score\n\ndef quadratic_weighted_kappa(y1, y2):\n return cohen_kappa_score(y1, y2, weights='quadratic')", "fastai.__version__: 1.0.54\n" ] ], [ [ "# preprocess", "_____no_output_____" ] ], [ [ "img2grd = []\n\np = '../input/aptos2019-blindness-detection'\npp = Path(p)\ntrain = pd.read_csv(pp/'train.csv')\ntest = pd.read_csv(pp/'test.csv')\nlen_blnd = len(train)\nlen_blnd_test = len(test)\n\nimg2grd_blnd = [(f'{p}/train_images/{o[0]}.png',o[1]) for o in train.values]\n\nlen_blnd, len_blnd_test", "_____no_output_____" ], [ "img2grd += img2grd_blnd\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())", "_____no_output_____" ], [ "p = '../input/diabetic-retinopathy-detection'\npp = Path(p)\n\ntrain=pd.read_csv(pp/'trainLabels.csv')\nimg2grd_diab_train=[(f'../input/diabetic-retinopathy-detection/train_images/{o[0]}.jpeg',o[1]) for o in train.values]\nimg2grd += img2grd_diab_train\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())\n\n# test=pd.read_csv(pp/'retinopathy_solution.csv')\n# img2grd_diab_test=[(f'../input/diabetic-retinopathy-detection/test_images/{o[0]}.jpeg',o[1]) for o in test.values]\n# img2grd += img2grd_diab_test\n# display(len(img2grd))\n# display(Counter(o[1] for o in img2grd).most_common())\n", "_____no_output_____" ], [ "p = '../input/IDRID/B. Disease Grading'\npp = Path(p)\n\ntrain=pd.read_csv(pp/'2. Groundtruths/a. IDRiD_Disease Grading_Training Labels.csv')\nimg2grd_idrid_train=[(f'../input/IDRID/B. Disease Grading/1. Original Images/a. Training Set/{o[0]}.jpg',o[1]) for o in train.values]\nimg2grd += img2grd_idrid_train\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())\n\ntest=pd.read_csv(pp/'2. Groundtruths/b. IDRiD_Disease Grading_Testing Labels.csv')\nimg2grd_idrid_test=[(f'../input/IDRID/B. Disease Grading/1. Original Images/b. 
Testing Set/{o[0]}.jpg',o[1]) for o in test.values]\nimg2grd += img2grd_idrid_test\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())", "_____no_output_____" ], [ "if not np.all([Path(o[0]).exists() for o in img2grd]): print('Some files are missing!!!')", "_____no_output_____" ], [ "df = pd.DataFrame(img2grd)\ndf.columns = ['fnm', 'target']\n\ndf.shape", "_____no_output_____" ], [ "set_torch_seed()\nidx_blnd_train = np.where(df.fnm.str.contains('aptos2019-blindness-detection/train_images'))[0]\nidx_val = np.random.choice(idx_blnd_train, int(len_blnd*0.50), replace=False)\ndf['is_val']=False\ndf.loc[idx_val, 'is_val']=True\n\nif dbg:\n df=df.head(dbgsz)", "_____no_output_____" ] ], [ [ "# dataset", "_____no_output_____" ] ], [ [ "%%time\ntfms = get_transforms()\n\ndef get_data(sz, bs):\n src = (ImageList.from_df(df=df,path='./',cols='fnm') \n .split_from_df(col='is_val') \n .label_from_df(cols='target', \n label_cls=FloatList)\n )\n\n data= (src.transform(tfms, size=sz) #Data augmentation\n .databunch(bs=bs) #DataBunch\n .normalize(imagenet_stats) #Normalize \n )\n return data\n\nbs = BS \nsz = SZ\nset_torch_seed()\ndata = get_data(sz, bs)", "CPU times: user 40.3 s, sys: 18 s, total: 58.3 s\nWall time: 43.1 s\n" ] ], [ [ "%%time\ndata.show_batch(rows=3, figsize=(7,6))", "_____no_output_____" ] ], [ [ "# model", "_____no_output_____" ] ], [ [ "%%time\n# Downloading: \"https://download.pytorch.org/models/resnet50-19c8e357.pth\" to /tmp/.cache/torch/checkpoints/resnet50-19c8e357.pth\n\n# Making pretrained weights work without needing to find the default filename\nif not os.path.exists('/tmp/.cache/torch/checkpoints/'):\n os.makedirs('/tmp/.cache/torch/checkpoints/')\n!cp '../input/pytorch-vision-pretrained-models/resnet50-19c8e357.pth' '/tmp/.cache/torch/checkpoints/resnet50-19c8e357.pth'", "CPU times: user 3.67 ms, sys: 13.1 ms, total: 16.8 ms\nWall time: 926 ms\n" ], [ "learn = cnn_learner(data, \n base_arch = models.resnet50, \n path=p_o)\nlearn.loss = MSELossFlat\n\nif FP16: learn = learn.to_fp16()", "_____no_output_____" ], [ "learn.freeze()", "_____no_output_____" ], [ "len(learn.data.train_dl)", "_____no_output_____" ], [ "%%time\nlearn.lr_find(start_lr=1e-4)", "_____no_output_____" ], [ "learn.recorder.plot(suggestion=True)", "Min numerical gradient: 2.00E-02\nMin loss divided by 10: 1.00E-02\n" ], [ "set_torch_seed()\nlearn.fit_one_cycle(15, max_lr=5e-3, callbacks=[SaveModelCallback(learn, name='bestmodel_frozen')])", "_____no_output_____" ], [ "learn.recorder.plot_losses()\n# learn.recorder.plot_metrics()", "_____no_output_____" ], [ "learn.save('mdl-frozen')", "_____no_output_____" ], [ "!nvidia-smi", "Tue Jul 9 17:07:08 2019 \r\n+-----------------------------------------------------------------------------+\r\n| NVIDIA-SMI 418.56 Driver Version: 418.56 CUDA Version: 10.1 |\r\n|-------------------------------+----------------------+----------------------+\r\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\r\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\r\n|===============================+======================+======================|\r\n| 0 Tesla V100-SXM2... 
Off | 00000000:00:1E.0 Off | 0 |\r\n| N/A 44C P0 50W / 300W | 13913MiB / 16130MiB | 0% Default |\r\n+-------------------------------+----------------------+----------------------+\r\n \r\n+-----------------------------------------------------------------------------+\r\n| Processes: GPU Memory |\r\n| GPU PID Type Process name Usage |\r\n|=============================================================================|\r\n+-----------------------------------------------------------------------------+\r\n" ], [ "learn.unfreeze()", "_____no_output_____" ], [ "%%time\nlearn.lr_find()\nlearn.recorder.plot(suggestion=True)", "_____no_output_____" ], [ "set_torch_seed()\nlearn.fit_one_cycle(10, max_lr=slice(5e-7,5e-5), callbacks=[SaveModelCallback(learn, name='bestmodel_finetune')])", "_____no_output_____" ], [ "learn.recorder.plot_losses()\n# learn.recorder.plot_metrics()", "_____no_output_____" ], [ "learn.save('mdl')", "_____no_output_____" ], [ "!nvidia-smi", "Wed Jul 10 02:08:56 2019 \r\n+-----------------------------------------------------------------------------+\r\n| NVIDIA-SMI 418.56 Driver Version: 418.56 CUDA Version: 10.1 |\r\n|-------------------------------+----------------------+----------------------+\r\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\r\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\r\n|===============================+======================+======================|\r\n| 0 Tesla V100-SXM2... Off | 00000000:00:1E.0 Off | 0 |\r\n| N/A 35C P0 46W / 300W | 14033MiB / 16130MiB | 0% Default |\r\n+-------------------------------+----------------------+----------------------+\r\n \r\n+-----------------------------------------------------------------------------+\r\n| Processes: GPU Memory |\r\n| GPU PID Type Process name Usage |\r\n|=============================================================================|\r\n+-----------------------------------------------------------------------------+\r\n" ] ], [ [ "# validate and thresholding", "_____no_output_____" ] ], [ [ "learn = learn.to_fp32()", "_____no_output_____" ], [ "%%time\nset_torch_seed()\npreds_val, y_val = learn.get_preds(ds_type=DatasetType.Valid)", "CPU times: user 1.51 s, sys: 1.26 s, total: 2.77 s\nWall time: 1min 15s\n" ], [ "preds_val = preds_val.numpy().squeeze()\ny_val= y_val.numpy()", "_____no_output_____" ], [ "np.save(f'{p_o}/preds_val.npy', preds_val)\nnp.save(f'{p_o}/y_val.npy', y_val)", "_____no_output_____" ], [ "# https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/88773#latest-515044\n# We used OptimizedRounder given by hocop1. 
https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/76107#480970\n# put numerical value to one of bins\ndef to_bins(x, borders):\n for i in range(len(borders)):\n if x <= borders[i]:\n return i\n return len(borders)\n\nclass Hocop1OptimizedRounder(object):\n def __init__(self):\n self.coef_ = 0\n\n def _loss(self, coef, X, y, idx):\n X_p = np.array([to_bins(pred, coef) for pred in X])\n ll = -quadratic_weighted_kappa(y, X_p)\n return ll\n\n def fit(self, X, y):\n coef = [1.5, 2.0, 2.5, 3.0]\n golden1 = 0.618\n golden2 = 1 - golden1\n ab_start = [(1, 2), (1.5, 2.5), (2, 3), (2.5, 3.5)]\n for it1 in range(10):\n for idx in range(4):\n # golden section search\n a, b = ab_start[idx]\n # calc losses\n coef[idx] = a\n la = self._loss(coef, X, y, idx)\n coef[idx] = b\n lb = self._loss(coef, X, y, idx)\n for it in range(20):\n # choose value\n if la > lb:\n a = b - (b - a) * golden1\n coef[idx] = a\n la = self._loss(coef, X, y, idx)\n else:\n b = b - (b - a) * golden2\n coef[idx] = b\n lb = self._loss(coef, X, y, idx)\n self.coef_ = {'x': coef}\n\n def predict(self, X, coef):\n X_p = np.array([to_bins(pred, coef) for pred in X])\n return X_p\n\n def coefficients(self):\n return self.coef_['x']", "_____no_output_____" ], [ "# https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/76107#480970\nclass AbhishekOptimizedRounder(object):\n def __init__(self):\n self.coef_ = 0\n\n def _kappa_loss(self, coef, X, y):\n X_p = np.copy(X)\n for i, pred in enumerate(X_p):\n if pred < coef[0]:\n X_p[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n X_p[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n X_p[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n X_p[i] = 3\n else:\n X_p[i] = 4\n\n ll = quadratic_weighted_kappa(y, X_p)\n return -ll\n\n def fit(self, X, y):\n loss_partial = partial(self._kappa_loss, X=X, y=y)\n initial_coef = [0.5, 1.5, 2.5, 3.5]\n self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')\n\n def predict(self, X, coef):\n X_p = np.copy(X)\n for i, pred in enumerate(X_p):\n if pred < coef[0]:\n X_p[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n X_p[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n X_p[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n X_p[i] = 3\n else:\n X_p[i] = 4\n return X_p\n\n def coefficients(self):\n return self.coef_['x']", "_____no_output_____" ], [ "def bucket(preds_raw, coef = [0.5, 1.5, 2.5, 3.5]):\n preds = np.zeros(preds_raw.shape)\n for i, pred in enumerate(preds_raw):\n if pred < coef[0]:\n preds[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n preds[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n preds[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n preds[i] = 3\n else:\n preds[i] = 4\n return preds", "_____no_output_____" ], [ "optnm2coefs = {'simple': [0.5, 1.5, 2.5, 3.5]}", "_____no_output_____" ], [ "%%time\nset_torch_seed()\noptR = Hocop1OptimizedRounder()\noptR.fit(preds_val, y_val)\noptnm2coefs['hocop1'] = optR.coefficients()", "CPU times: user 12.9 s, sys: 0 ns, total: 12.9 s\nWall time: 12.9 s\n" ], [ "%%time\nset_torch_seed()\noptR = AbhishekOptimizedRounder()\noptR.fit(preds_val, y_val)\noptnm2coefs['abhishek'] = optR.coefficients()", "CPU times: user 1.56 s, sys: 0 ns, total: 1.56 s\nWall time: 1.56 s\n" ], [ "optnm2coefs", "_____no_output_____" ], [ "optnm2preds_val_grd = {k: bucket(preds_val, coef) for k,coef in optnm2coefs.items()}\noptnm2qwk = {k: quadratic_weighted_kappa(y_val, preds) for k,preds in optnm2preds_val_grd.items()}", "_____no_output_____" ], [ 
"optnm2qwk", "_____no_output_____" ], [ "Counter(y_val).most_common()", "_____no_output_____" ], [ "preds_val_grd = optnm2preds_val_grd['simple'].squeeze()", "_____no_output_____" ], [ "preds_val_grd.mean()", "_____no_output_____" ], [ "Counter(preds_val_grd).most_common()", "_____no_output_____" ], [ "list(zip(preds_val_grd, y_val))[:10]", "_____no_output_____" ], [ "(preds_val_grd== y_val.squeeze()).mean()", "_____no_output_____" ], [ "pickle.dump(optnm2qwk, open(f'{p_o}/optnm2qwk.p', 'wb'))\npickle.dump(optnm2preds_val_grd, open(f'{p_o}/optnm2preds_val_grd.p', 'wb'))\npickle.dump(optnm2coefs, open(f'{p_o}/optnm2coefs.p', 'wb'))", "_____no_output_____" ] ], [ [ "# testing", "_____no_output_____" ] ], [ [ "df_test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')\ndf_test.head()", "_____no_output_____" ], [ "learn.data.add_test(\n ImageList.from_df(df_test,\n '../input/aptos2019-blindness-detection',\n folder='test_images',\n suffix='.png'))", "_____no_output_____" ], [ "%%time\nset_torch_seed()\npreds_tst, _ = learn.get_preds(ds_type=DatasetType.Test)\npreds_tst = preds_tst.numpy().squeeze()", "CPU times: user 1.73 s, sys: 1.17 s, total: 2.89 s\nWall time: 30 s\n" ], [ "np.save(f'{p_o}/preds_tst.npy', preds_tst)", "_____no_output_____" ], [ "def bucket(preds_raw, coef = [0.5, 1.5, 2.5, 3.5]):\n preds = np.zeros(preds_raw.shape)\n for i, pred in enumerate(preds_raw):\n if pred < coef[0]:\n preds[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n preds[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n preds[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n preds[i] = 3\n else:\n preds[i] = 4\n return preds", "_____no_output_____" ], [ "coef = optnm2coefs['simple']\npreds_tst_grd = bucket(preds_tst, coef)", "_____no_output_____" ], [ "preds_tst_grd.squeeze()", "_____no_output_____" ], [ "Counter(preds_tst_grd.squeeze()).most_common()", "_____no_output_____" ] ], [ [ "## submit", "_____no_output_____" ] ], [ [ "subm = pd.read_csv(\"../input/aptos2019-blindness-detection/test.csv\")\nsubm['diagnosis'] = preds_tst_grd.squeeze().astype(int)\nsubm.head()", "_____no_output_____" ], [ "subm.diagnosis.value_counts()", "_____no_output_____" ], [ "subm.to_csv(f\"{p_o}/submission.csv\", index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb39447fa470f94f8c77bec00113beba19e9cab9
62,434
ipynb
Jupyter Notebook
tutorial/source/dmm.ipynb
edwinnglabs/pyro
49e7f96b32ed270d39cefcba46b94e9b2fb05623
[ "Apache-2.0" ]
2
2020-06-05T20:40:50.000Z
2020-09-05T15:39:48.000Z
tutorial/source/dmm.ipynb
pawni/pyro
dd6b52859cd8eda776b75f2b5ee757a76f17f145
[ "Apache-2.0" ]
1
2020-05-12T16:26:21.000Z
2020-05-12T17:23:13.000Z
tutorial/source/dmm.ipynb
pawni/pyro
dd6b52859cd8eda776b75f2b5ee757a76f17f145
[ "Apache-2.0" ]
1
2020-06-04T18:25:38.000Z
2020-06-04T18:25:38.000Z
71.598624
1,033
0.662219
[ [ [ "# Deep Markov Model \n\n## Introduction\n\nWe're going to build a deep probabilistic model for sequential data: the deep markov model. The particular dataset we want to model is composed of snippets of polyphonic music. Each time slice in a sequence spans a quarter note and is represented by an 88-dimensional binary vector that encodes the notes at that time step. \n\nSince music is (obviously) temporally coherent, we need a model that can represent complex time dependencies in the observed data. It would not, for example, be appropriate to consider a model in which the notes at a particular time step are independent of the notes at previous time steps. One way to do this is to build a latent variable model in which the variability and temporal structure of the observations is controlled by the dynamics of the latent variables. \n\nOne particular realization of this idea is a markov model, in which we have a chain of latent variables, with each latent variable in the chain conditioned on the previous latent variable. This is a powerful approach, but if we want to represent complex data with complex (and in this case unknown) dynamics, we would like our model to be sufficiently flexible to accommodate dynamics that are potentially highly non-linear. Thus a deep markov model: we allow for the transition probabilities governing the dynamics of the latent variables as well as the the emission probabilities that govern how the observations are generated by the latent dynamics to be parameterized by (non-linear) neural networks.\n\nThe specific model we're going to implement is based on the following reference:\n\n[1] `Structured Inference Networks for Nonlinear State Space Models`,<br />&nbsp;&nbsp;&nbsp;&nbsp;\n Rahul G. Krishnan, Uri Shalit, David Sontag\n \nPlease note that while we do not assume that the reader of this tutorial has read the reference, it's definitely a good place to look for a more comprehensive discussion of the deep markov model in the context of other time series models.\n\nWe've described the model, but how do we go about training it? The inference strategy we're going to use is variational inference, which requires specifying a parameterized family of distributions that can be used to approximate the posterior distribution over the latent random variables. Given the non-linearities and complex time-dependencies inherent in our model and data, we expect the exact posterior to be highly non-trivial. So we're going to need a flexible family of variational distributions if we hope to learn a good model. Happily, together PyTorch and Pyro provide all the necessary ingredients. As we will see, assembling them will be straightforward. Let's get to work.", "_____no_output_____" ], [ "## The Model\n \nA convenient way to describe the high-level structure of the model is with a graphical model.", "_____no_output_____" ] ], [ [ "<center><figure><img src=\"_static/img/model.png\" style=\"width: 500px;\"><figcaption> <font size=\"+1\"><b>Figure 1</b>: The model rolled out for T=3 time steps.</font></figcaption></figure></center>", "_____no_output_____" ] ], [ [ "Here, we've rolled out the model assuming that the sequence of observations is of length three: $\\{{\\bf x}_1, {\\bf x}_2, {\\bf x}_3\\}$. Mirroring the sequence of observations we also have a sequence of latent random variables: $\\{{\\bf z}_1, {\\bf z}_2, {\\bf z}_3\\}$. The figure encodes the structure of the model. 
The corresponding joint distribution is\n\n$$p({\\bf x}_{123} , {\\bf z}_{123})=p({\\bf x}_1|{\\bf z}_1)p({\\bf x}_2|{\\bf z}_2)p({\\bf x}_3|{\\bf z}_3)p({\\bf z}_1)p({\\bf z}_2|{\\bf z}_1)p({\\bf z}_3|{\\bf z}_2)$$\n\nConditioned on ${\\bf z}_t$, each observation ${\\bf x}_t$ is independent of the other observations. This can be read off from the fact that each ${\\bf x}_t$ only depends on the corresponding latent ${\\bf z}_t$, as indicated by the downward pointing arrows. We can also read off the markov property of the model: each latent ${\\bf z}_t$, when conditioned on the previous latent ${\\bf z}_{t-1}$, is independent of all previous latents $\\{ {\\bf z}_{t-2}, {\\bf z}_{t-3}, ...\\}$. This effectively says that everything one needs to know about the state of the system at time $t$ is encapsulated by the latent ${\\bf z}_{t}$.\n\nWe will assume that the observation likelihoods, i.e. the probability distributions $p({{\\bf x}_t}|{{\\bf z}_t})$ that control the observations, are given by the bernoulli distribution. This is an appropriate choice since our observations are all 0 or 1. For the probability distributions $p({\\bf z}_t|{\\bf z}_{t-1})$ that control the latent dynamics, we choose (conditional) gaussian distributions with diagonal covariances. This is reasonable since we assume that the latent space is continuous. \n \n\n \nThe solid black squares represent non-linear functions parameterized by neural networks. This is what makes this a _deep_ markov model. Note that the black squares appear in two different places: in between pairs of latents and in between latents and observations. The non-linear function that connects the latent variables ('Trans' in Fig. 1) controls the dynamics of the latent variables. Since we allow the conditional probability distribution of ${\\bf z}_{t}$ to depend on ${\\bf z}_{t-1}$ in a complex way, we will be able to capture complex dynamics in our model. Similarly, the non-linear function that connects the latent variables to the observations ('Emit' in Fig. 1) controls how the observations depend on the latent dynamics. \n\nSome additional notes:\n- we can freely choose the dimension of the latent space to suit the problem at hand: small latent spaces for simple problems and larger latent spaces for problems with complex dynamics\n- note the parameter ${\\bf z}_0$ in Fig. 1. as will become more apparent from the code, this is just a convenient way for us to parameterize the probability distribution $p({\\bf z}_1)$ for the first time step, where there are no previous latents to condition on.\n\n### The Gated Transition and the Emitter\n\nWithout further ado, let's start writing some code. We first define the two PyTorch Modules that correspond to the black squares in Fig. 1. 
First the emission function:\n\n```python\nclass Emitter(nn.Module):\n \"\"\"\n Parameterizes the bernoulli observation likelihood p(x_t | z_t)\n \"\"\"\n def __init__(self, input_dim, z_dim, emission_dim):\n super().__init__()\n # initialize the three linear transformations used in the neural network\n self.lin_z_to_hidden = nn.Linear(z_dim, emission_dim)\n self.lin_hidden_to_hidden = nn.Linear(emission_dim, emission_dim)\n self.lin_hidden_to_input = nn.Linear(emission_dim, input_dim)\n # initialize the two non-linearities used in the neural network\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, z_t):\n \"\"\"\n Given the latent z at a particular time step t we return the vector of \n probabilities `ps` that parameterizes the bernoulli distribution p(x_t|z_t)\n \"\"\"\n h1 = self.relu(self.lin_z_to_hidden(z_t))\n h2 = self.relu(self.lin_hidden_to_hidden(h1))\n ps = self.sigmoid(self.lin_hidden_to_input(h2))\n return ps\n```", "_____no_output_____" ], [ "In the constructor we define the linear transformations that will be used in our emission function. Note that `emission_dim` is the number of hidden units in the neural network. We also define the non-linearities that we will be using. The forward call defines the computational flow of the function. We take in the latent ${\\bf z}_{t}$ as input and do a sequence of transformations until we obtain a vector of length 88 that defines the emission probabilities of our bernoulli likelihood. Because of the sigmoid, each element of `ps` will be between 0 and 1 and will define a valid probability. Taken together the elements of `ps` encode which notes we expect to observe at time $t$ given the state of the system (as encoded in ${\\bf z}_{t}$).", "_____no_output_____" ], [ "Now we define the gated transition function:\n\n```python\nclass GatedTransition(nn.Module):\n \"\"\"\n Parameterizes the gaussian latent transition probability p(z_t | z_{t-1})\n See section 5 in the reference for comparison.\n \"\"\"\n def __init__(self, z_dim, transition_dim):\n super().__init__()\n # initialize the six linear transformations used in the neural network\n self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim)\n self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)\n self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim)\n self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)\n self.lin_sig = nn.Linear(z_dim, z_dim)\n self.lin_z_to_loc = nn.Linear(z_dim, z_dim)\n # modify the default initialization of lin_z_to_loc\n # so that it's starts out as the identity function\n self.lin_z_to_loc.weight.data = torch.eye(z_dim)\n self.lin_z_to_loc.bias.data = torch.zeros(z_dim)\n # initialize the three non-linearities used in the neural network\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n self.softplus = nn.Softplus()\n\n def forward(self, z_t_1):\n \"\"\"\n Given the latent z_{t-1} corresponding to the time step t-1\n we return the mean and scale vectors that parameterize the\n (diagonal) gaussian distribution p(z_t | z_{t-1})\n \"\"\"\n # compute the gating function\n _gate = self.relu(self.lin_gate_z_to_hidden(z_t_1))\n gate = self.sigmoid(self.lin_gate_hidden_to_z(_gate))\n # compute the 'proposed mean'\n _proposed_mean = self.relu(self.lin_proposed_mean_z_to_hidden(z_t_1))\n proposed_mean = self.lin_proposed_mean_hidden_to_z(_proposed_mean)\n # assemble the actual mean used to sample z_t, which mixes \n # a linear transformation of z_{t-1} with the proposed mean \n # modulated by the 
gating function\n loc = (1 - gate) * self.lin_z_to_loc(z_t_1) + gate * proposed_mean\n # compute the scale used to sample z_t, using the proposed \n # mean from above as input. the softplus ensures that scale is positive\n scale = self.softplus(self.lin_sig(self.relu(proposed_mean)))\n # return loc, scale which can be fed into Normal\n return loc, scale\n```", "_____no_output_____" ], [ "This mirrors the structure of `Emitter` above, with the difference that the computational flow is a bit more complicated. This is for two reasons. First, the output of `GatedTransition` needs to define a valid (diagonal) gaussian distribution. So we need to output two parameters: the mean `loc`, and the (square root) covariance `scale`. These both need to have the same dimension as the latent space. Second, we don't want to _force_ the dynamics to be non-linear. Thus our mean `loc` is a sum of two terms, only one of which depends non-linearily on the input `z_t_1`. This way we can support both linear and non-linear dynamics (or indeed have the dynamics of part of the latent space be linear, while the remainder of the dynamics is non-linear). ", "_____no_output_____" ], [ "### Model - a Pyro Stochastic Function\n\nSo far everything we've done is pure PyTorch. To finish translating our model into code we need to bring Pyro into the picture. Basically we need to implement the stochastic nodes (i.e. the circles) in Fig. 1. To do this we introduce a callable `model()` that contains the Pyro primitive `pyro.sample`. The `sample` statements will be used to specify the joint distribution over the latents ${\\bf z}_{1:T}$. Additionally, the `obs` argument can be used with the `sample` statements to specify how the observations ${\\bf x}_{1:T}$ depend on the latents. Before we look at the complete code for `model()`, let's look at a stripped down version that contains the main logic:", "_____no_output_____" ], [ "```python\ndef model(...):\n z_prev = self.z_0\n\n # sample the latents z and observed x's one time step at a time\n for t in range(1, T_max + 1): \n # the next two lines of code sample z_t ~ p(z_t | z_{t-1}).\n # first compute the parameters of the diagonal gaussian \n # distribution p(z_t | z_{t-1})\n z_loc, z_scale = self.trans(z_prev)\n # then sample z_t according to dist.Normal(z_loc, z_scale)\n z_t = pyro.sample(\"z_%d\" % t, dist.Normal(z_loc, z_scale))\n \n # compute the probabilities that parameterize the bernoulli likelihood\n emission_probs_t = self.emitter(z_t)\n # the next statement instructs pyro to observe x_t according to the\n # bernoulli distribution p(x_t|z_t) \n pyro.sample(\"obs_x_%d\" % t, \n dist.Bernoulli(emission_probs_t),\n obs=mini_batch[:, t - 1, :])\n # the latent sampled at this time step will be conditioned upon \n # in the next time step so keep track of it\n z_prev = z_t \n```", "_____no_output_____" ], [ "The first thing we need to do is sample ${\\bf z}_1$. Once we've sampled ${\\bf z}_1$, we can sample ${\\bf z}_2 \\sim p({\\bf z}_2|{\\bf z}_1)$ and so on. This is the logic implemented in the `for` loop. The parameters `z_loc` and `z_scale` that define the probability distributions $p({\\bf z}_t|{\\bf z}_{t-1})$ are computed using `self.trans`, which is just an instance of the `GatedTransition` module defined above. For the first time step at $t=1$ we condition on `self.z_0`, which is a (trainable) `Parameter`, while for subsequent time steps we condition on the previously drawn latent. 
Note that each random variable `z_t` is assigned a unique name by the user.\n\nOnce we've sampled ${\\bf z}_t$ at a given time step, we need to observe the datapoint ${\\bf x}_t$. So we pass `z_t` through `self.emitter`, an instance of the `Emitter` module defined above to obtain `emission_probs_t`. Together with the argument `dist.Bernoulli()` in the `sample` statement, these probabilities fully specify the observation likelihood. Finally, we also specify the slice of observed data ${\\bf x}_t$: `mini_batch[:, t - 1, :]` using the `obs` argument to `sample`. \n\nThis fully specifies our model and encapsulates it in a callable that can be passed to Pyro. Before we move on let's look at the full version of `model()` and go through some of the details we glossed over in our first pass.", "_____no_output_____" ], [ "```python\ndef model(self, mini_batch, mini_batch_reversed, mini_batch_mask,\n\t\t mini_batch_seq_lengths, annealing_factor=1.0):\n\n\t# this is the number of time steps we need to process in the mini-batch\n\tT_max = mini_batch.size(1)\n\n\t# register all PyTorch (sub)modules with pyro\n\t# this needs to happen in both the model and guide\n\tpyro.module(\"dmm\", self)\n\n\t# set z_prev = z_0 to setup the recursive conditioning in p(z_t | z_{t-1})\n\tz_prev = self.z_0.expand(mini_batch.size(0), self.z_0.size(0))\n\n\t# we enclose all the sample statements in the model in a plate.\n\t# this marks that each datapoint is conditionally independent of the others\n\twith pyro.plate(\"z_minibatch\", len(mini_batch)):\n\t\t# sample the latents z and observed x's one time step at a time\n\t\tfor t in range(1, T_max + 1):\n\t\t\t# the next chunk of code samples z_t ~ p(z_t | z_{t-1})\n\t\t\t# note that (both here and elsewhere) we use poutine.scale to take care\n\t\t\t# of KL annealing. we use the mask() method to deal with raggedness\n\t\t\t# in the observed data (i.e. different sequences in the mini-batch\n\t\t\t# have different lengths)\n\n\t\t\t# first compute the parameters of the diagonal gaussian \n # distribution p(z_t | z_{t-1})\n\t\t\tz_loc, z_scale = self.trans(z_prev)\n\n\t\t\t# then sample z_t according to dist.Normal(z_loc, z_scale).\n\t\t\t# note that we use the reshape method so that the univariate \n # Normal distribution is treated as a multivariate Normal \n # distribution with a diagonal covariance.\n\t\t\twith poutine.scale(None, annealing_factor):\n\t\t\t\tz_t = pyro.sample(\"z_%d\" % t,\n\t\t\t\t\t\t\t\t dist.Normal(z_loc, z_scale)\n\t\t\t\t\t\t\t\t\t .mask(mini_batch_mask[:, t - 1:t])\n\t\t\t\t\t\t\t\t\t .to_event(1))\n\n\t\t\t# compute the probabilities that parameterize the bernoulli likelihood\n\t\t\temission_probs_t = self.emitter(z_t)\n\t\t\t# the next statement instructs pyro to observe x_t according to the\n\t\t\t# bernoulli distribution p(x_t|z_t)\n\t\t\tpyro.sample(\"obs_x_%d\" % t,\n\t\t\t\t\t\tdist.Bernoulli(emission_probs_t)\n\t\t\t\t\t\t\t.mask(mini_batch_mask[:, t - 1:t])\n\t\t\t\t\t\t\t.to_event(1),\n\t\t\t\t\t\tobs=mini_batch[:, t - 1, :])\n\t\t\t# the latent sampled at this time step will be conditioned upon\n\t\t\t# in the next time step so keep track of it\n\t\t\tz_prev = z_t\n```", "_____no_output_____" ], [ "The first thing to note is that `model()` takes a number of arguments. For now let's just take a look at `mini_batch` and `mini_batch_mask`. 
`mini_batch` is a three dimensional tensor, with the first dimension being the batch dimension, the second dimension being the temporal dimension, and the final dimension being the features (88-dimensional in our case). To speed up the code, whenever we run `model` we're going to process an entire mini-batch of sequences (i.e. we're going to take advantage of vectorization). \n\nThis is sensible because our model is implicitly defined over a single observed sequence. The probability of a set of sequences is just given by the products of the individual sequence probabilities. In other words, given the parameters of the model the sequences are conditionally independent.\n\nThis vectorization introduces some complications because sequences can be of different lengths. This is where `mini_batch_mask` comes in. `mini_batch_mask` is a two dimensional 0/1 mask of dimensions `mini_batch_size` x `T_max`, where `T_max` is the maximum length of any sequence in the mini-batch. This encodes which parts of `mini_batch` are valid observations. \n\nSo the first thing we do is grab `T_max`: we have to unroll our model for at least this many time steps. Note that this will result in a lot of 'wasted' computation, since some of the sequences will be shorter than `T_max`, but this is a small price to pay for the big speed-ups that come with vectorization. We just need to make sure that none of the 'wasted' computations 'pollute' our model computation. We accomplish this by passing the mask appropriate to time step $t$ to the `mask` method (which acts on the distribution that needs masking).\n\nFinally, the line `pyro.module(\"dmm\", self)` is equivalent to a bunch of `pyro.param` statements for each parameter in the model. This lets Pyro know which parameters are part of the model. Just like for the `sample` statement, we give the module a unique name. This name will be incorporated into the name of the `Parameters` in the model. We leave a discussion of the KL annealing factor for later.", "_____no_output_____" ], [ "## Inference\n\nAt this point we've fully specified our model. The next step is to set ourselves up for inference. As mentioned in the introduction, our inference strategy is going to be variational inference (see [SVI Part I](svi_part_i.ipynb) for an introduction). So our next task is to build a family of variational distributions appropriate to doing inference in a deep markov model. However, at this point it's worth emphasizing that nothing about the way we've implemented `model()` ties us to variational inference. In principle we could use _any_ inference strategy available in Pyro. For example, in this particular context one could imagine using some variant of Sequential Monte Carlo (although this is not currently supported in Pyro).\n\n### Guide\n\nThe purpose of the guide (i.e. the variational distribution) is to provide a (parameterized) approximation to the exact posterior $p({\\bf z}_{1:T}|{\\bf x}_{1:T})$. Actually, there's an implicit assumption here which we should make explicit, so let's take a step back. \nSuppose our dataset $\\mathcal{D}$ consists of $N$ sequences \n$\\{ {\\bf x}_{1:T_1}^1, {\\bf x}_{1:T_2}^2, ..., {\\bf x}_{1:T_N}^N \\}$. Then the posterior we're actually interested in is given by \n$p({\\bf z}_{1:T_1}^1, {\\bf z}_{1:T_2}^2, ..., {\\bf z}_{1:T_N}^N | \\mathcal{D})$, i.e. we want to infer the latents for _all_ $N$ sequences. Even for small $N$ this is a very high-dimensional distribution that will require a very large number of parameters to specify. 
In particular if we were to directly parameterize the posterior in this form, the number of parameters required would grow (at least) linearly with $N$. One way to avoid this nasty growth with the size of the dataset is *amortization* (see the analogous discussion in [SVI Part II](svi_part_ii.ipynb)).\n\n#### Aside: Amortization\n\nThis works as follows. Instead of introducing variational parameters for each sequence in our dataset, we're going to learn a single parametric function $f({\\bf x}_{1:T})$ and work with a variational distribution that has the form $\\prod_{n=1}^N q({\\bf z}_{1:T_n}^n | f({\\bf x}_{1:T_n}^n))$. The function $f(\\cdot)$&mdash;which basically maps a given observed sequence to a set of variational parameters tailored to that sequence&mdash;will need to be sufficiently rich to capture the posterior accurately, but now we can handle large datasets without having to introduce an obscene number of variational parameters.\n\nSo our task is to construct the function $f(\\cdot)$. Since in our case we need to support variable-length sequences, it's only natural that $f(\\cdot)$ have a RNN in the loop. Before we look at the various component parts that make up our $f(\\cdot)$ in detail, let's look at a computational graph that encodes the basic structure: <p>", "_____no_output_____" ] ], [ [ "<center><figure><img src=\"_static/img/guide.png\" style=\"width: 400px;\"><figcaption> <font size=\"+1\"><b>Figure 2</b>: The guide rolled out for T=3 time steps. </font></figcaption></figure></center>", "_____no_output_____" ] ], [ [ "At the bottom of the figure we have our sequence of three observations. These observations will be consumed by a RNN that reads the observations from right to left and outputs three hidden states $\\{ {\\bf h}_1, {\\bf h}_2,{\\bf h}_3\\}$. Note that this computation is done _before_ we sample any latent variables. Next, each of the hidden states will be fed into a `Combiner` module whose job is to output the mean and covariance of the the conditional distribution $q({\\bf z}_t | {\\bf z}_{t-1}, {\\bf x}_{t:T})$, which we take to be given by a diagonal gaussian distribution. (Just like in the model, the conditional structure of ${\\bf z}_{1:T}$ in the guide is such that we sample ${\\bf z}_t$ forward in time.) In addition to the RNN hidden state, the `Combiner` also takes the latent random variable from the previous time step as input, except for $t=1$, where it instead takes the trainable (variational) parameter ${\\bf z}_0^{\\rm{q}}$. \n\n#### Aside: Guide Structure\nWhy do we setup the RNN to consume the observations from right to left? Why not left to right? With this choice our conditional distribution $q({\\bf z}_t |...)$ depends on two things:\n\n- the latent ${\\bf z}_{t-1}$ from the previous time step; and \n- the observations ${\\bf x}_{t:T}$, i.e. the current observation together with all future observations\n\nWe are free to make other choices; all that is required is that that the guide is a properly normalized distribution that plays nice with autograd. This particular choice is motivated by the dependency structure of the true posterior: see reference [1] for a detailed discussion. In brief, while we could, for example, condition on the entire sequence of observations, because of the markov structure of the model everything that we need to know about the previous observations ${\\bf x}_{1:t-1}$ is encapsulated by ${\\bf z}_{t-1}$. 
We could condition on more things, but there's no need; and doing so will probably tend to dilute the learning signal. So running the RNN from right to left is the most natural choice for this particular model.\n\nLet's look at the component parts in detail. First, the `Combiner` module:", "_____no_output_____" ], [ "```python\nclass Combiner(nn.Module):\n \"\"\"\n Parameterizes q(z_t | z_{t-1}, x_{t:T}), which is the basic building block\n of the guide (i.e. the variational distribution). The dependence on x_{t:T} is\n through the hidden state of the RNN (see the pytorch module `rnn` below)\n \"\"\"\n def __init__(self, z_dim, rnn_dim):\n super().__init__()\n # initialize the three linear transformations used in the neural network\n self.lin_z_to_hidden = nn.Linear(z_dim, rnn_dim)\n self.lin_hidden_to_loc = nn.Linear(rnn_dim, z_dim)\n self.lin_hidden_to_scale = nn.Linear(rnn_dim, z_dim)\n # initialize the two non-linearities used in the neural network\n self.tanh = nn.Tanh()\n self.softplus = nn.Softplus()\n\n def forward(self, z_t_1, h_rnn):\n \"\"\"\n Given the latent z at at a particular time step t-1 as well as the hidden\n state of the RNN h(x_{t:T}) we return the mean and scale vectors that\n parameterize the (diagonal) gaussian distribution q(z_t | z_{t-1}, x_{t:T})\n \"\"\"\n # combine the rnn hidden state with a transformed version of z_t_1\n h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn)\n # use the combined hidden state to compute the mean used to sample z_t\n loc = self.lin_hidden_to_loc(h_combined)\n # use the combined hidden state to compute the scale used to sample z_t\n scale = self.softplus(self.lin_hidden_to_scale(h_combined))\n # return loc, scale which can be fed into Normal\n return loc, scale\n```", "_____no_output_____" ], [ "This module has the same general structure as `Emitter` and `GatedTransition` in the model. The only thing of note is that because the `Combiner` needs to consume two inputs at each time step, it transforms the inputs into a single combined hidden state `h_combined` before it computes the outputs. \n\nApart from the RNN, we now have all the ingredients we need to construct our guide distribution.\nHappily, PyTorch has great built-in RNN modules, so we don't have much work to do here. We'll see where we instantiate the RNN later. 
Let's instead jump right into the definition of the stochastic function `guide()`.", "_____no_output_____" ], [ "```python\ndef guide(self, mini_batch, mini_batch_reversed, mini_batch_mask,\n mini_batch_seq_lengths, annealing_factor=1.0):\n\n # this is the number of time steps we need to process in the mini-batch\n T_max = mini_batch.size(1)\n # register all PyTorch (sub)modules with pyro\n pyro.module(\"dmm\", self)\n\n # if on gpu we need the fully broadcast view of the rnn initial state\n # to be in contiguous gpu memory\n h_0_contig = self.h_0.expand(1, mini_batch.size(0), \n self.rnn.hidden_size).contiguous()\n # push the observed x's through the rnn;\n # rnn_output contains the hidden state at each time step\n rnn_output, _ = self.rnn(mini_batch_reversed, h_0_contig)\n # reverse the time-ordering in the hidden state and un-pack it\n rnn_output = poly.pad_and_reverse(rnn_output, mini_batch_seq_lengths)\n # set z_prev = z_q_0 to setup the recursive conditioning in q(z_t |...)\n z_prev = self.z_q_0.expand(mini_batch.size(0), self.z_q_0.size(0))\n\n # we enclose all the sample statements in the guide in a plate.\n # this marks that each datapoint is conditionally independent of the others.\n with pyro.plate(\"z_minibatch\", len(mini_batch)):\n # sample the latents z one time step at a time\n for t in range(1, T_max + 1):\n # the next two lines assemble the distribution q(z_t | z_{t-1}, x_{t:T})\n z_loc, z_scale = self.combiner(z_prev, rnn_output[:, t - 1, :])\n z_dist = dist.Normal(z_loc, z_scale)\n\n # sample z_t from the distribution z_dist\n with pyro.poutine.scale(None, annealing_factor):\n z_t = pyro.sample(\"z_%d\" % t,\n z_dist.mask(mini_batch_mask[:, t - 1:t])\n .to_event(1))\n # the latent sampled at this time step will be conditioned \n # upon in the next time step so keep track of it\n z_prev = z_t\n```", "_____no_output_____" ], [ "The high-level structure of `guide()` is very similar to `model()`. First note that the model and guide take the same arguments: this is a general requirement for model/guide pairs in Pyro. As in the model, there's a call to `pyro.module` that registers all the parameters with Pyro. Also, the `for` loop has the same structure as the one in `model()`, with the difference that the guide only needs to sample latents (there are no `sample` statements with the `obs` keyword). Finally, note that the names of the latent variables in the guide exactly match those in the model. This is how Pyro knows to correctly align random variables. \n\nThe RNN logic should be familar to PyTorch users, but let's go through it quickly. First we prepare the initial state of the RNN, `h_0`. Then we invoke the RNN via its forward call; the resulting tensor `rnn_output` contains the hidden states for the entire mini-batch. Note that because we want the RNN to consume the observations from right to left, the input to the RNN is `mini_batch_reversed`, which is a copy of `mini_batch` with all the sequences running in _reverse_ temporal order. Furthermore, `mini_batch_reversed` has been wrapped in a PyTorch `rnn.pack_padded_sequence` so that the RNN can deal with variable-length sequences. Since we do our sampling in latent space in normal temporal order, we use the helper function `pad_and_reverse` to reverse the hidden state sequences in `rnn_output`, so that we can feed the `Combiner` RNN hidden states that are correctly aligned and ordered. 
This helper function also unpacks the `rnn_output` so that it is no longer in the form of a PyTorch `rnn.pack_padded_sequence`.", "_____no_output_____" ], [ "## Packaging the Model and Guide as a PyTorch Module\n\nAt this juncture, we're ready to proceed to inference. But before we do so let's quickly go over how we packaged the model and guide as a single PyTorch Module. This is generally good practice, especially for larger models.", "_____no_output_____" ], [ "```python\nclass DMM(nn.Module):\n \"\"\"\n This PyTorch Module encapsulates the model as well as the \n variational distribution (the guide) for the Deep Markov Model\n \"\"\"\n def __init__(self, input_dim=88, z_dim=100, emission_dim=100, \n transition_dim=200, rnn_dim=600, rnn_dropout_rate=0.0, \n num_iafs=0, iaf_dim=50, use_cuda=False):\n super().__init__()\n # instantiate pytorch modules used in the model and guide below\n self.emitter = Emitter(input_dim, z_dim, emission_dim)\n self.trans = GatedTransition(z_dim, transition_dim)\n self.combiner = Combiner(z_dim, rnn_dim)\n self.rnn = nn.RNN(input_size=input_dim, hidden_size=rnn_dim, \n nonlinearity='relu', batch_first=True, \n bidirectional=False, num_layers=1, dropout=rnn_dropout_rate)\n\n # define a (trainable) parameters z_0 and z_q_0 that help define \n # the probability distributions p(z_1) and q(z_1)\n # (since for t = 1 there are no previous latents to condition on)\n self.z_0 = nn.Parameter(torch.zeros(z_dim))\n self.z_q_0 = nn.Parameter(torch.zeros(z_dim))\n # define a (trainable) parameter for the initial hidden state of the rnn\n self.h_0 = nn.Parameter(torch.zeros(1, 1, rnn_dim))\n\n self.use_cuda = use_cuda\n # if on gpu cuda-ize all pytorch (sub)modules\n if use_cuda:\n self.cuda()\n\n # the model p(x_{1:T} | z_{1:T}) p(z_{1:T})\n def model(...):\n\n # ... as above ...\n\n # the guide q(z_{1:T} | x_{1:T}) (i.e. the variational distribution)\n def guide(...):\n \n # ... as above ...\n```", "_____no_output_____" ], [ "Since we've already gone over `model` and `guide`, our focus here is on the constructor. First we instantiate the four PyTorch modules that we use in our model and guide. On the model-side: `Emitter` and `GatedTransition`. On the guide-side: `Combiner` and the RNN. \n\nNext we define PyTorch `Parameter`s for the initial state of the RNN as well as `z_0` and `z_q_0`, which are fed into `self.trans` and `self.combiner`, respectively, in lieu of the non-existent random variable $\\bf z_0$. \n\nThe important point to make here is that all of these `Module`s and `Parameter`s are attributes of `DMM` (which itself inherits from `nn.Module`). This has the consequence they are all automatically registered as belonging to the module. So, for example, when we call `parameters()` on an instance of `DMM`, PyTorch will know to return all the relevant parameters. It also means that when we invoke `pyro.module(\"dmm\", self)` in `model()` and `guide()`, all the parameters of both the model and guide will be registered with Pyro. Finally, it means that if we're running on a GPU, the call to `cuda()` will move all the parameters into GPU memory.\n", "_____no_output_____" ], [ "## Stochastic Variational Inference\n\nWith our model and guide at hand, we're finally ready to do inference. Before we look at the full logic that is involved in a complete experimental script, let's first see how to take a single gradient step. 
First we instantiate an instance of `DMM` and setup an optimizer.\n\n```python\n# instantiate the dmm\ndmm = DMM(input_dim, z_dim, emission_dim, transition_dim, rnn_dim,\n args.rnn_dropout_rate, args.num_iafs, args.iaf_dim, args.cuda)\n\n# setup optimizer\nadam_params = {\"lr\": args.learning_rate, \"betas\": (args.beta1, args.beta2),\n \"clip_norm\": args.clip_norm, \"lrd\": args.lr_decay,\n \"weight_decay\": args.weight_decay}\noptimizer = ClippedAdam(adam_params)\n```\n\nHere we're using an implementation of the Adam optimizer that includes gradient clipping. This mitigates some of the problems that can occur when training recurrent neural networks (e.g. vanishing/exploding gradients). Next we setup the inference algorithm. \n\n```python\n# setup inference algorithm\nsvi = SVI(dmm.model, dmm.guide, optimizer, Trace_ELBO())\n```\n\nThe inference algorithm `SVI` uses a stochastic gradient estimator to take gradient steps on an objective function, which in this case is given by the ELBO (the evidence lower bound). As the name indicates, the ELBO is a lower bound to the log evidence: $\\log p(\\mathcal{D})$. As we take gradient steps that maximize the ELBO, we move our guide $q(\\cdot)$ closer to the exact posterior. \n\nThe argument `Trace_ELBO()` constructs a version of the gradient estimator that doesn't need access to the dependency structure of the model and guide. Since all the latent variables in our model are reparameterizable, this is the appropriate gradient estimator for our use case. (It's also the default option.)\n\nAssuming we've prepared the various arguments of `dmm.model` and `dmm.guide`, taking a gradient step is accomplished by calling\n\n```python\nsvi.step(mini_batch, ...)\n```\n\nThat's all there is to it!\n\nWell, not quite. This will be the main step in our inference algorithm, but we still need to implement a complete training loop with preparation of mini-batches, evaluation, and so on. This sort of logic will be familiar to any deep learner but let's see how it looks in PyTorch/Pyro.", "_____no_output_____" ], [ "## The Black Magic of Optimization\n\nActually, before we get to the guts of training, let's take a moment and think a bit about the optimization problem we've setup. We've traded Bayesian inference in a non-linear model with a high-dimensional latent space&mdash;a hard problem&mdash;for a particular optimization problem. Let's not kid ourselves, this optimization problem is pretty hard too. Why? Let's go through some of the reasons:\n\n- the space of parameters we're optimizing over is very high-dimensional (it includes all the weights in all the neural networks we've defined).\n- our objective function (the ELBO) cannot be computed analytically. so our parameter updates will be following noisy Monte Carlo gradient estimates\n- data-subsampling serves as an additional source of stochasticity: even if we wanted to, we couldn't in general take gradient steps on the ELBO defined over the whole dataset (actually in our particular case the dataset isn't so large, but let's ignore that).\n- given all the neural networks and non-linearities we have in the loop, our (stochastic) loss surface is highly non-trivial\n\nThe upshot is that if we're going to find reasonable (local) optima of the ELBO, we better take some care in deciding how to do optimization. 
This isn't the time or place to discuss all the different strategies that one might adopt, but it's important to emphasize how decisive a good or bad choice in learning hyperparameters (the learning rate, the mini-batch size, etc.) can be. \n\nBefore we move on, let's discuss one particular optimization strategy that we're making use of in greater detail: KL annealing. In our case the ELBO is the sum of two terms: an expected log likelihood term (which measures model fit) and a sum of KL divergence terms (which serve to regularize the approximate posterior):\n\n$\\rm{ELBO} = \\mathbb{E}_{q({\\bf z}_{1:T})}[\\log p({\\bf x}_{1:T}|{\\bf z}_{1:T})] - \\mathbb{E}_{q({\\bf z}_{1:T})}[ \\log q({\\bf z}_{1:T}) - \\log p({\\bf z}_{1:T})]$\n\nThis latter term can be a quite strong regularizer, and in early stages of training it has a tendency to favor regions of the loss surface that contain lots of bad local optima. One strategy to avoid these bad local optima, which was also adopted in reference [1], is to anneal the KL divergence terms by multiplying them by a scalar `annealing_factor` that ranges between zero and one:\n\n$\\mathbb{E}_{q({\\bf z}_{1:T})}[\\log p({\\bf x}_{1:T}|{\\bf z}_{1:T})] - \\rm{annealing\\_factor} \\times \\mathbb{E}_{q({\\bf z}_{1:T})}[ \\log q({\\bf z}_{1:T}) - \\log p({\\bf z}_{1:T})]$\n\nThe idea is that during the course of training the `annealing_factor` rises slowly from its initial value at/near zero to its final value at 1.0. The annealing schedule is arbitrary; below we will use a simple linear schedule. In terms of code, to scale the log likelihoods by the appropriate annealing factor we enclose each of the latent sample statements in the model and guide with a `pyro.poutine.scale` context.\n\nFinally, we should mention that the main difference between the DMM implementation described here and the one used in reference [1] is that they take advantage of the analytic formula for the KL divergence between two gaussian distributions (whereas we rely on Monte Carlo estimates). This leads to lower variance gradient estimates of the ELBO, which makes training a bit easier. We can still train the model without making this analytic substitution, but training probably takes somewhat longer because of the higher variance. Support for analytic KL divergences in Pyro is something we plan to add in the future.", "_____no_output_____" ], [ "## Data Loading, Training, and Evaluation\n\nFirst we load the data. There are 229 sequences in the training dataset, each with an average length of ~60 time steps.\n\n```python\njsb_file_loc = \"./data/jsb_processed.pkl\"\ndata = pickle.load(open(jsb_file_loc, \"rb\"))\ntraining_seq_lengths = data['train']['sequence_lengths']\ntraining_data_sequences = data['train']['sequences']\ntest_seq_lengths = data['test']['sequence_lengths']\ntest_data_sequences = data['test']['sequences']\nval_seq_lengths = data['valid']['sequence_lengths']\nval_data_sequences = data['valid']['sequences']\nN_train_data = len(training_seq_lengths)\nN_train_time_slices = np.sum(training_seq_lengths)\nN_mini_batches = int(N_train_data / args.mini_batch_size +\n int(N_train_data % args.mini_batch_size > 0))\n```\n\nFor this dataset we will typically use a `mini_batch_size` of 20, so that there will be 12 mini-batches per epoch. 
Next we define the function `process_minibatch` which prepares a mini-batch for training and takes a gradient step:\n\n```python\ndef process_minibatch(epoch, which_mini_batch, shuffled_indices):\n if args.annealing_epochs > 0 and epoch < args.annealing_epochs:\n # compute the KL annealing factor appropriate \n # for the current mini-batch in the current epoch\n min_af = args.minimum_annealing_factor\n annealing_factor = min_af + (1.0 - min_af) * \\ \n (float(which_mini_batch + epoch * N_mini_batches + 1) /\n float(args.annealing_epochs * N_mini_batches))\n else:\n # by default the KL annealing factor is unity\n annealing_factor = 1.0 \n\n # compute which sequences in the training set we should grab\n mini_batch_start = (which_mini_batch * args.mini_batch_size)\n mini_batch_end = np.min([(which_mini_batch + 1) * args.mini_batch_size,\n N_train_data])\n mini_batch_indices = shuffled_indices[mini_batch_start:mini_batch_end]\n # grab the fully prepped mini-batch using the helper function in the data loader\n mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths \\\n = poly.get_mini_batch(mini_batch_indices, training_data_sequences,\n training_seq_lengths, cuda=args.cuda)\n # do an actual gradient step\n loss = svi.step(mini_batch, mini_batch_reversed, mini_batch_mask,\n mini_batch_seq_lengths, annealing_factor)\n # keep track of the training loss\n return loss\n```\n\nWe first compute the KL annealing factor appropriate to the mini-batch (according to a linear schedule as described earlier). We then compute the mini-batch indices, which we pass to the helper function `get_mini_batch`. This helper function takes care of a number of different things:\n\n- it sorts each mini-batch by sequence length\n- it calls another helper function to get a copy of the mini-batch in reversed temporal order\n- it packs each reversed mini-batch in a `rnn.pack_padded_sequence`, which is then ready to be ingested by the RNN\n- it cuda-izes all tensors if we're on a GPU\n- it calls another helper function to get an appropriate 0/1 mask for the mini-batch\n\nWe then pipe all the return values of `get_mini_batch()` into `elbo.step(...)`. Recall that these arguments will be further piped to `model(...)` and `guide(...)` during construction of the gradient estimator in `elbo`. Finally, we return a float which is a noisy estimate of the loss for that mini-batch.\n\nWe now have all the ingredients required for the main bit of our training loop:\n\n```python\ntimes = [time.time()]\nfor epoch in range(args.num_epochs):\n # accumulator for our estimate of the negative log likelihood \n # (or rather -elbo) for this epoch\n epoch_nll = 0.0 \n # prepare mini-batch subsampling indices for this epoch\n shuffled_indices = np.arange(N_train_data)\n np.random.shuffle(shuffled_indices)\n\n # process each mini-batch; this is where we take gradient steps\n for which_mini_batch in range(N_mini_batches):\n epoch_nll += process_minibatch(epoch, which_mini_batch, shuffled_indices)\n\n # report training diagnostics\n times.append(time.time())\n epoch_time = times[-1] - times[-2]\n log(\"[training epoch %04d] %.4f \\t\\t\\t\\t(dt = %.3f sec)\" %\n (epoch, epoch_nll / N_train_time_slices, epoch_time))\n```\n\nAt the beginning of each epoch we shuffle the indices pointing to the training data. We then process each mini-batch until we've gone through the entire training set, accumulating the training loss as we go. Finally we report some diagnostic info. 
Note that we normalize the loss by the total number of time slices in the training set (this allows us to compare to reference [1]). ", "_____no_output_____" ], [ "## Evaluation\nThis training loop is still missing any kind of evaluation diagnostics. Let's fix that. First we need to prepare the validation and test data for evaluation. Since the validation and test datasets are small enough that we can easily fit them into memory, we're going to process each dataset batchwise (i.e. we will not be breaking up the dataset into mini-batches). [_Aside: at this point the reader may ask why we don't do the same thing for the training set. The reason is that additional stochasticity due to data-subsampling is often advantageous during optimization: in particular it can help us avoid local optima._] And, in fact, in order to get a lessy noisy estimate of the ELBO, we're going to compute a multi-sample estimate. The simplest way to do this would be as follows:\n\n```python\nval_loss = svi.evaluate_loss(val_batch, ..., num_particles=5)\n```\n\nThis, however, would involve an explicit `for` loop with five iterations. For our particular model, we can do better and vectorize the whole computation. The only way to do this currently in Pyro is to explicitly replicate the data `n_eval_samples` many times. This is the strategy we follow:\n\n```python\n# package repeated copies of val/test data for faster evaluation\n# (i.e. set us up for vectorization)\ndef rep(x):\n return np.repeat(x, n_eval_samples, axis=0)\n\n# get the validation/test data ready for the dmm: pack into sequences, etc.\nval_seq_lengths = rep(val_seq_lengths)\ntest_seq_lengths = rep(test_seq_lengths)\nval_batch, val_batch_reversed, val_batch_mask, val_seq_lengths = poly.get_mini_batch(\n np.arange(n_eval_samples * val_data_sequences.shape[0]), rep(val_data_sequences),\n val_seq_lengths, cuda=args.cuda)\ntest_batch, test_batch_reversed, test_batch_mask, test_seq_lengths = \\\n poly.get_mini_batch(np.arange(n_eval_samples * test_data_sequences.shape[0]), \n rep(test_data_sequences),\n test_seq_lengths, cuda=args.cuda)\n```\n\nWith the test and validation data now fully prepped, we define the helper function that does the evaluation: \n\n```python\ndef do_evaluation():\n # put the RNN into evaluation mode (i.e. turn off drop-out if applicable)\n dmm.rnn.eval()\n\n # compute the validation and test loss\n val_nll = svi.evaluate_loss(val_batch, val_batch_reversed, val_batch_mask,\n val_seq_lengths) / np.sum(val_seq_lengths)\n test_nll = svi.evaluate_loss(test_batch, test_batch_reversed, test_batch_mask,\n test_seq_lengths) / np.sum(test_seq_lengths)\n\n # put the RNN back into training mode (i.e. turn on drop-out if applicable)\n dmm.rnn.train()\n return val_nll, test_nll\n```\n\nWe simply call the `evaluate_loss` method of `elbo`, which takes the same arguments as `step()`, namely the arguments that are passed to the model and guide. Note that we have to put the RNN into and out of evaluation mode to account for dropout. We can now stick `do_evaluation()` into the training loop; see [the source code](https://github.com/pyro-ppl/pyro/blob/dev/examples/dmm/dmm.py) for details.", "_____no_output_____" ], [ "## Results\n\nLet's make sure that our implementation gives reasonable results. We can use the numbers reported in reference [1] as a sanity check. For the same dataset and a similar model/guide setup (dimension of the latent space, number of hidden units in the RNN, etc.) 
they report a normalized negative log likelihood (NLL) of `6.93` on the testset (lower is better$)^{\\S}$. This is to be compared to our result of `6.87`. These numbers are very much in the same ball park, which is reassuring. It seems that, at least for this dataset, not using analytic expressions for the KL divergences doesn't degrade the quality of the learned model (although, as discussed above, the training probably takes somewhat longer).", "_____no_output_____" ] ], [ [ "<figure><img src=\"_static/img/test_nll.png\" style=\"width: 400px;\"><center><figcaption> <font size=\"-1\"><b>Figure 3</b>: Progress on the test set NLL as training progresses for a sample training run. </font></figcaption></figure></center>", "_____no_output_____" ] ], [ [ "In the figure we show how the test NLL progresses during training for a single sample run (one with a rather conservative learning rate). Most of the progress is during the first 3000 epochs or so, with some marginal gains if we let training go on for longer. On a GeForce GTX 1080, 5000 epochs takes about 20 hours.\n\n\n| `num_iafs` | test NLL |\n|---|---|\n| `0` | `6.87` | \n| `1` | `6.82` |\n| `2` | `6.80` |\n\nFinally, we also report results for guides with normalizing flows in the mix (details to be found in the next section). \n\n${ \\S\\;}$ Actually, they seem to report two numbers—6.93 and 7.03—for the same model/guide and it's not entirely clear how the two reported numbers are different.", "_____no_output_____" ], [ "## Bells, whistles, and other improvements\n\n### Inverse Autoregressive Flows\n\nOne of the great things about a probabilistic programming language is that it encourages modularity. Let's showcase an example in the context of the DMM. We're going to make our variational distribution richer by adding normalizing flows to the mix (see reference [2] for a discussion). **This will only cost us four additional lines of code!**\n\nFirst, in the `DMM` constructor we add\n\n```python\niafs = [AffineAutoregressive(AutoRegressiveNN(z_dim, [iaf_dim])) for _ in range(num_iafs)]\nself.iafs = nn.ModuleList(iafs)\n```\n\nThis instantiates `num_iafs` many bijective transforms of the `AffineAutoregressive` type (see references [3,4]); each normalizing flow will have `iaf_dim` many hidden units. We then bundle the normalizing flows in a `nn.ModuleList`; this is just the PyTorchy way to package a list of `nn.Module`s. Next, in the guide we add the lines\n\n```python\nif self.iafs.__len__() > 0:\n z_dist = TransformedDistribution(z_dist, self.iafs)\n```\n\nHere we're taking the base distribution `z_dist`, which in our case is a conditional gaussian distribution, and using the `TransformedDistribution` construct we transform it into a non-gaussian distribution that is, by construction, richer than the base distribution. Voila!", "_____no_output_____" ], [ "### Checkpointing\n\nIf we want to recover from a catastrophic failure in our training loop, there are two kinds of state we need to keep track of. The first is the various parameters of the model and guide. The second is the state of the optimizers (e.g. in Adam this will include the running average of recent gradient estimates for each parameter).\n\nIn Pyro, the parameters can all be found in the `ParamStore`. However, PyTorch also keeps track of them for us via the `parameters()` method of `nn.Module`. So one simple way we can save the parameters of the model and guide is to make use of the `state_dict()` method of `dmm` in conjunction with `torch.save()`; see below. 
In the case that we have `AffineAutoregressive`'s in the loop, this is in fact the only option at our disposal. This is because the `AffineAutoregressive` module contains what are called 'persistent buffers' in PyTorch parlance. These are things that carry state but are not `Parameter`s. The `state_dict()` and `load_state_dict()` methods of `nn.Module` know how to deal with buffers correctly.\n\nTo save the state of the optimizers, we have to use functionality inside of `pyro.optim.PyroOptim`. Recall that the typical user never interacts directly with PyTorch `Optimizers` when using Pyro; since parameters can be created dynamically in an arbitrary probabilistic program, Pyro needs to manage `Optimizers` for us. In our case saving the optimizer state will be as easy as calling `optimizer.save()`. The loading logic is entirely analagous. So our entire logic for saving and loading checkpoints only takes a few lines:\n\n```python\n# saves the model and optimizer states to disk\ndef save_checkpoint():\n log(\"saving model to %s...\" % args.save_model)\n torch.save(dmm.state_dict(), args.save_model)\n log(\"saving optimizer states to %s...\" % args.save_opt)\n optimizer.save(args.save_opt)\n log(\"done saving model and optimizer checkpoints to disk.\")\n\n# loads the model and optimizer states from disk\ndef load_checkpoint():\n assert exists(args.load_opt) and exists(args.load_model), \\\n \"--load-model and/or --load-opt misspecified\"\n log(\"loading model from %s...\" % args.load_model)\n dmm.load_state_dict(torch.load(args.load_model))\n log(\"loading optimizer states from %s...\" % args.load_opt)\n optimizer.load(args.load_opt)\n log(\"done loading model and optimizer states.\")\n```", "_____no_output_____" ], [ "## Some final comments\n\nA deep markov model is a relatively complex model. Now that we've taken the effort to implement a version of the deep markov model tailored to the polyphonic music dataset, we should ask ourselves what else we can do. What if we're handed a different sequential dataset? Do we have to start all over?\n\nNot at all! The beauty of probalistic programming is that it enables&mdash;and encourages&mdash;modular approaches to modeling and inference. Adapting our polyphonic music model to a dataset with continuous observations is as simple as changing the observation likelihood. The vast majority of the code could be taken over unchanged. This means that with a little bit of extra work, the code in this tutorial could be repurposed to enable a huge variety of different models. \n\nSee the complete code on [Github](https://github.com/pyro-ppl/pyro/blob/dev/examples/dmm/dmm.py).\n\n## References\n\n[1] `Structured Inference Networks for Nonlinear State Space Models`,<br />&nbsp;&nbsp;&nbsp;&nbsp;\n Rahul G. Krishnan, Uri Shalit, David Sontag\n \n[2] `Variational Inference with Normalizing Flows`,\n<br />&nbsp;&nbsp;&nbsp;&nbsp;\nDanilo Jimenez Rezende, Shakir Mohamed \n \n[3] `Improving Variational Inference with Inverse Autoregressive Flow`,\n<br />&nbsp;&nbsp;&nbsp;&nbsp;\nDiederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling \n\n[4] `MADE: Masked Autoencoder for Distribution Estimation`,\n<br />&nbsp;&nbsp;&nbsp;&nbsp;\nMathieu Germain, Karol Gregor, Iain Murray, Hugo Larochelle \n\n[5] `Modeling Temporal Dependencies in High-Dimensional Sequences:`\n<br />&nbsp;&nbsp;&nbsp;&nbsp;\n`Application to Polyphonic Music Generation and Transcription`,\n<br />&nbsp;&nbsp;&nbsp;&nbsp;\nBoulanger-Lewandowski, N., Bengio, Y. 
and Vincent, P.", "_____no_output_____" ] ] ]
[ "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown" ]
[ [ "markdown", "markdown" ], [ "raw" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "raw" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "raw" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
cb3947a7ffa25a601e2a40a63d2073129987d143
3,950
ipynb
Jupyter Notebook
animation/chap02_anim.ipynb
mizz0224/Probability-and-statistics
72a9db8183b3013730c867c5202218fd214c58cf
[ "BSD-3-Clause" ]
null
null
null
animation/chap02_anim.ipynb
mizz0224/Probability-and-statistics
72a9db8183b3013730c867c5202218fd214c58cf
[ "BSD-3-Clause" ]
null
null
null
animation/chap02_anim.ipynb
mizz0224/Probability-and-statistics
72a9db8183b3013730c867c5202218fd214c58cf
[ "BSD-3-Clause" ]
null
null
null
21.010638
120
0.535696
[ [ [ "# 1차원 데이터의 정리", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom plot_util import plot_var_interact, plot_std_interact\n\npd.set_option('precision', 3)\n%precision 3\n%matplotlib inline", "_____no_output_____" ], [ "df = pd.read_csv('../data/ch2_scores_em.csv', index_col='student number')\nscores = np.array(df['english'])[:10]", "_____no_output_____" ] ], [ [ "## 그림 2.3 분산", "_____no_output_____" ] ], [ [ "plot_var_interact(scores[:4])", "_____no_output_____" ] ], [ [ "## 그림 2.4 표준편차", "_____no_output_____" ] ], [ [ "plot_std_interact(scores)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb39507d60e51cf31f2f6575ed93a3bdf0d3fed0
910,424
ipynb
Jupyter Notebook
notebooks/code_RR_abnorm.ipynb
davfgh/davfgh
0bd821d80ec238a3025483e5f3dbdd3c1644b17c
[ "MIT" ]
null
null
null
notebooks/code_RR_abnorm.ipynb
davfgh/davfgh
0bd821d80ec238a3025483e5f3dbdd3c1644b17c
[ "MIT" ]
null
null
null
notebooks/code_RR_abnorm.ipynb
davfgh/davfgh
0bd821d80ec238a3025483e5f3dbdd3c1644b17c
[ "MIT" ]
null
null
null
460.275025
35,192
0.932509
[ [ [ "%matplotlib inline\nimport os\n# import wfdb as wf # le module n'est pas reconnu\nimport numpy as np\nimport pandas as pd\n# from pandas.compat import StringIO # pour pouvoir lire fichiers anotations mais ne fonctionne pas\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom sklearn import preprocessing\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom mpl_toolkits.mplot3d import Axes3D\nimport scipy\nfrom scipy import ndimage\nfrom scipy import signal\nfrom pylab import *\nimport time\nimport math\nfrom copy import deepcopy\n#from datasets import mitdb as dm\n#from biosppy.signals import ecg", "_____no_output_____" ], [ "df_normal =pd.read_csv('ptbdb_normal.csv', sep=',', header = None)\ndf_normal.head(12)\ndf_normal.describe()\n#len(df_normal)", "_____no_output_____" ], [ "df_abnormal =pd.read_csv('ptbdb_abnormal.csv', sep=',', header = None)\ndf_abnormal.info()\ndf_abnormal.describe()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 10506 entries, 0 to 10505\nColumns: 188 entries, 0 to 187\ndtypes: float64(188)\nmemory usage: 15.1 MB\n" ], [ "df_abnormal.max(axis=0)", "_____no_output_____" ], [ "#plt.figure(figsize = (20,16)) \n#sns.barplot(df_normal.max(axis=0))", "_____no_output_____" ], [ "df_abnorm2 = df_abnormal.copy()\ndf_norm2.head()", "_____no_output_____" ], [ "if len(df_abnorm2.columns) == 188 :\n df_abnorm2.drop(df_norm2.iloc[:,0:1],1,inplace=True) # on supprime des cols en début de tableau ", "_____no_output_____" ], [ "df_abnorm2.max(axis=0) \n\nplt.figure(figsize = (10,6)) \nplt.hist(df_abnorm2.max(axis=0), bins = 100)\nplt.title('Distribution des valeurs max sur un battement')\n#sns.barplot(x=df_norm2.max(axis=1), data=df_norm2)", "_____no_output_____" ], [ "df_abnorm2.max(axis=1)", "_____no_output_____" ], [ "maxValueIndex = df_abnorm2.idxmax(axis = 1)\nmaxValueIndex.min()", "_____no_output_____" ], [ "plt.figure(figsize = (10,6)) \nplt.hist(maxValueIndex, bins = 200)\nplt.title(\"Distribution du RR interval\")", "_____no_output_____" ], [ "# on trace le signal si le max est plus petit ", "_____no_output_____" ], [ "# exemples de battements anormaux\nsns.set_style('whitegrid') \nplt.figure(figsize = (20,8)) \nplt.plot(df_abnorm2.iloc[0, 0:187], color = 'red') \nplt.xlabel('Temps')\nplt.title('Exemple de batements cardiqaues de patients malades') \nplt.legend() \nplt.show()", "No handles with labels found to put in legend.\n" ], [ "plt.figure(figsize = (20,8)) ", "_____no_output_____" ], [ "range(len(df_abnorm2))", "_____no_output_____" ], [ "#sns.relplot(x='g', y='pf', kind='line', data=df[df['g']<50]) ;", "_____no_output_____" ], [ "\nj=0\nfor i in range(len(df_norm2)) :\n if (maxValueIndex.iloc[i] < 50) & (j<10) :\n j+=1\n print(maxValueIndex.iloc[i])\n plt.figure(figsize = (10,6)) \n plt.plot(df_abnorm2.iloc[i, 0:187], color = 'red')\n plt.xlabel('Temps')\n plt.title('Exemple de battements cardiaques de patients malades') \n plt.legend() \n plt.show()", "No handles with labels found to put in legend.\n" ], [ "j=0\nfor i in range(len(df_abnorm2)) :\n if (maxValueIndex.iloc[i] > 50) & (maxValueIndex.iloc[i] < 150) & (j<10) :\n j+=1\n print(maxValueIndex.iloc[i])\n plt.figure(figsize = (10,6)) \n plt.plot(df_abnorm2.iloc[i, 0:187], color = 'red')\n plt.xlabel('Temps')\n plt.title('Exemple de battements cardiaques de patients malades') \n plt.legend() \n plt.show()", "No handles with labels found to put in legend.\n" ], 
[ "j=0\nfor i in range(len(df_norm2)) :\n if (maxValueIndex.iloc[i] > 150) & (j<10) :\n j+=1\n print(maxValueIndex.iloc[i])\n plt.figure(figsize = (10,6)) \n plt.plot(df_norm2.iloc[i, 0:187], color = 'red')\n plt.xlabel('Temps')\n plt.title('Exemple de battements cardiaques de patients malades') \n plt.legend() \n plt.show()", "No handles with labels found to put in legend.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb395259ea93a95ab77bbbce84bdb41508025183
228,512
ipynb
Jupyter Notebook
EDA/EDA.ipynb
TheGupta2012/backend-MedicalDiagnosis
5108fa21ced8b4b82aa2f230530170057ea33f44
[ "MIT" ]
1
2021-07-06T19:54:55.000Z
2021-07-06T19:54:55.000Z
EDA/EDA.ipynb
TheGupta2012/backend-MedicalDiagnosis
5108fa21ced8b4b82aa2f230530170057ea33f44
[ "MIT" ]
null
null
null
EDA/EDA.ipynb
TheGupta2012/backend-MedicalDiagnosis
5108fa21ced8b4b82aa2f230530170057ea33f44
[ "MIT" ]
null
null
null
75.4414
41,328
0.759028
[ [ [ "!pip install --upgrade language-check", "Collecting language-check\n Downloading https://files.pythonhosted.org/packages/97/45/0fd1d3683d6129f30fa09143fa383cdf6dff8bc0d1648f2cf156109cb772/language-check-1.1.tar.gz\nBuilding wheels for collected packages: language-check\n Building wheel for language-check (setup.py): started\n Building wheel for language-check (setup.py): finished with status 'done'\n Created wheel for language-check: filename=language_check-1.1-cp37-none-any.whl size=56968214 sha256=8ea831d5fa35004f2bea28ce0134c1832e4cd82f4f13442c090b20c3e2f3d510\n Stored in directory: C:\\Users\\harsh\\AppData\\Local\\pip\\Cache\\wheels\\d5\\46\\82\\90a89c23eac1837364ed7217a9eed71bc9e6ad4825be93968e\nSuccessfully built language-check\nInstalling collected packages: language-check\nSuccessfully installed language-check-1.1\n" ], [ "import numpy as np\nimport seaborn as sns\nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.text import CountVectorizer,_preprocess,TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel,cosine_similarity\nfrom nltk.stem.snowball import SnowballStemmer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import *\nfrom sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "data = pd.read_csv(r'data/medical_data.csv',low_memory=False)", "_____no_output_____" ], [ "data = data.drop_duplicates().reset_index().drop('index',axis = 1)", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "punctuation='[\"\\'?,\\.]' # I will replace all these punctuation with ''\nabbr_dict={\n \"what's\":\"what is\",\n \"what're\":\"what are\",\n \"where's\":\"where is\",\n \"where're\":\"where are\",\n \"i'm\":\"i am\",\n \"we're\":\"we are\",\n \"it's\":\"it is\",\n \"that's\":\"that is\",\n \"there's\":\"there is\",\n \"there're\":\"there are\",\n \"i've\":\"i have\",\n \"who've\":\"who have\",\n \"would've\":\"would have\",\n \"not've\":\"not have\",\n \"i'll\":\"i will\",\n \"it'll\":\"it will\",\n \"isn't\":\"is not\",\n \"wasn't\":\"was not\",\n \"aren't\":\"are not\",\n \"weren't\":\"were not\",\n \"can't\":\"can not\",\n \"couldn't\":\"could not\",\n \"don't\":\"do not\",\n \"didn't\":\"did not\",\n \"shouldn't\":\"should not\",\n \"wouldn't\":\"would not\",\n \"doesn't\":\"does not\",\n \"haven't\":\"have not\",\n \"hasn't\":\"has not\",\n \"hadn't\":\"had not\",\n \"won't\":\"will not\",\n punctuation:'',\n '\\s+':' ', # replace multi space with one single space\n}", "_____no_output_____" ], [ "def process_data(data):\n # Convert to lower case\n \n data.Phrase=data.Phrase.str.lower() \n data.Prompt=data.Prompt.str.lower()\n # convert to string \n data.Phrase=data.Phrase.astype(str)\n data.Prompt=data.Prompt.astype(str)\n # replace abbreviations \n data.replace(abbr_dict,regex=True,inplace=True)\n \n #apply stemming\n stemmer = SnowballStemmer(\"english\")\n data['stemmed_phrase'] = data['Phrase'].apply(lambda x : ' '.join([stemmer.stem(y) for y in x.split()]))\n display(data.head(10))\n return data", "_____no_output_____" ], [ "data = process_data(data)", "_____no_output_____" ], [ "d2 = data[['stemmed_phrase','Prompt']]", "_____no_output_____" ], [ "d2.to_csv('data/trial_data.csv')", "_____no_output_____" ], [ "ailments = data['Prompt'].unique()", "_____no_output_____" ], [ "dict_ail = {}\n# for a in ailments:\n# dict_ail[a] = 0\nfor k in data.index:\n name = data['Prompt'][k]\n dict_ail[name] = dict_ail.get(name,0) + 1\nailment_dict = {}\n\nfor i,k in 
enumerate(dict_ail.keys()):\n ailment_dict[i] = k", "_____no_output_____" ], [ "plt.figure(figsize = (18,8))\nplt.title(\"Ailment Frequencies\",fontsize=35)\nplt.barh(color = 'Red',y=[i for i in range(len(list(ailments)))], width = list(dict_ail.values()),tick_label = list(dict_ail.keys()))\nplt.tight_layout()", "_____no_output_____" ], [ "Cv = CountVectorizer(stop_words='english',ngram_range = (1,3), max_df=0.7)\ntransformed_count = Cv.fit_transform(data['stemmed_phrase']) ", "_____no_output_____" ], [ "TfIdf = TfidfVectorizer(stop_words = 'english', ngram_range= (1,3),max_df= 0.7)\ntransformed_idf = TfIdf.fit_transform(data['stemmed_phrase'])", "_____no_output_____" ], [ "input_text = ['I am experiencing pain in the leg from the past two days']\ntrial = TfIdf.transform(input_text)", "_____no_output_____" ], [ "trial", "_____no_output_____" ] ], [ [ "## Flow \n- Get the text input from the patient\n- This text input is processed first by the vectorizer and made into a list of frequeny counts using the learned vocabulary from the data provided\n- Now this list is passed into a model which generates the probabilities of which ailment does that sentence phrase correspond to \n- The final returned phrases are evaluated and the phrases having the least levenshtein distance are used for predictions\n- The two of the highest probability ailments are returned to the doctor with a wrapper sentence", "_____no_output_____" ], [ "## Output Tensor\n- We have a 25 element output vector which is the result from the model", "_____no_output_____" ] ], [ [ "ailment_dict", "_____no_output_____" ] ], [ [ "## Input Tensor", "_____no_output_____" ] ], [ [ "# the query is first processed and made into lower case \nquery = \"From past few weeks feeling sad\"", "_____no_output_____" ], [ "def process_query(query):\n # Change to lower\n query = query.lower()\n # Removed abbreviations\n res = ''\n# print(query.split())\n for k in query.split():\n if k in abbr_dict:\n print(abbr_dict[k])\n res+=' ' + abbr_dict[k]\n else:\n res+=' ' + k \n \n stemmer = SnowballStemmer('english')\n res = ' '.join([stemmer.stem(y) for y in res.split()])\n return res ", "_____no_output_____" ], [ "print(\"Example query: \")\nprint(\"Final query:\",process_query(query))\nprocessed = process_query(query)", "Example query: \n" ], [ "query =[processed]\nres = TfIdf.transform(query)\nsim = cosine_similarity(res,transformed_idf)\nres = list(np.argsort(sim))[0]\nres = res[::-1][:3]", "_____no_output_____" ], [ "for k in res:\n print(data.loc[k]['Prompt'])", "emotional pain\nemotional pain\nskin issue\n" ], [ "def get_prediction(query):\n print(\"Query is :\",query)\n processed = process_query(query)\n query = [processed]\n print(\"Processed :\",query)\n res = TfIdf.transform(query)\n sim = cosine_similarity(res,transformed_idf) \n res = list(np.argsort(sim))[0]\n res = res[::-1][:20]\n print(sim[0][res[0]],sim[0][res[1]])\n ailment =[]\n # let's find most similar sentences and then see \n # use levenshtein distance after you have got the result\n for k in res[:1]:\n ailment.append(data.loc[k]['Prompt'])\n print(\"Results :\")\n return ailment", "_____no_output_____" ] ], [ [ "## To - Do\n- Use document distance after you find the sentences to evaluate the best possible match for your query\n", "_____no_output_____" ] ], [ [ "for q in data['stemmed_phrase'][500:]:\n print(get_prediction(q))", "Query is : my stomach feel full and upset and bloat after big meal\nProcessed : ['my stomach feel full and upset and bloat after big 
meal']\n1.0000000000000002 0.15631720706831426\nResults :\n['stomach ache']\nQuery is : my mind feel veri sad as if it hurt the way i feel in my head is aw and when i think about my break-up i cri with sad\nProcessed : ['my mind feel veri sad as if it hurt the way i feel in my head is aw and when i think about my break-up i cri with sad']\n1.0000000000000002 0.30769684560534594\nResults :\n['emotional pain']\nQuery is : i have a head pain everi singl day\nProcessed : ['i have a head pain everi singl day']\n1.0000000000000002 0.19773183226222532\nResults :\n['internal pain']\nQuery is : my heart feel like it is go to explod\nProcessed : ['my heart feel like it is go to explod']\n1.0000000000000002 0.26650915481854837\nResults :\n['heart hurts']\nQuery is : my shoulder hurt when i tri to reach someth abov my head\nProcessed : ['my shoulder hurt when i tri to reach someth abov my head']\n1.0000000000000002 0.39548057406249476\nResults :\n['shoulder pain']\nQuery is : i was veri activ in sport but now my bodi is feel pain\nProcessed : ['i was veri activ in sport but now my bodi is feel pain']\n0.9999999999999999 0.23083321803761236\nResults :\n['injury from sports']\nQuery is : i have been over my cold for week but still cough everi day\nProcessed : ['i have been over my cold for week but still cough everi day']\n1.0 0.2681671464999516\nResults :\n['cough']\nQuery is : i can not hear out of my ear proper i feel like there is someth in it caus irrit\nProcessed : ['i can not hear out of my ear proper i feel like there is someth in it caus irrit']\n1.0 0.22733833278222518\nResults :\n['ear ache']\nQuery is : i have pain in my chest that sadden me\nProcessed : ['i have pain in my chest that sadden me']\n1.0 0.2198364404123055\nResults :\n['heart hurts']\nQuery is : my ear ach when i am listen to music\nProcessed : ['my ear ach when i am listen to music']\n1.0 0.2598593449902132\nResults :\n['ear ache']\nQuery is : i am feel nauseous\nProcessed : ['i am feel nauseous']\n1.0000000000000002 0.09676803123760319\nResults :\n['stomach ache']\nQuery is : my stomach ach when i eat hot food whi\nProcessed : ['my stomach ach when i eat hot food whi']\n1.0000000000000002 0.32846977539494737\nResults :\n['stomach ache']\nQuery is : my husband has a spot on his lip that he though was cold sore but now it has broken open and leak fluid all day\nProcessed : ['my husband has a spot on his lip that he though was cold sore but now it has broken open and leak fluid all day']\n1.0000000000000002 0.07880042666644532\nResults :\n['infected wound']\nQuery is : i do not have problem take in breath but out breath is so heavi\nProcessed : ['i do not have problem take in breath but out breath is so heavi']\n1.0000000000000002 0.45869840065476714\nResults :\n['hard to breath']\nQuery is : my joint ach whenev it is cold\nProcessed : ['my joint ach whenev it is cold']\n1.0000000000000002 0.14503108194731665\nResults :\n['joint pain']\nQuery is : red flush accompani with itchi\nProcessed : ['red flush accompani with itchi']\n1.0 0.16580273673782234\nResults :\n['skin issue']\nQuery is : my head is spin when i get up\nProcessed : ['my head is spin when i get up']\n1.0 0.3973028089167828\nResults :\n['feeling dizzy']\nQuery is : when i tri to answer the phine call i found that i can not hear the voic of the speaker\nProcessed : ['when i tri to answer the phine call i found that i can not hear the voic of the speaker']\n1.0 0.08789144824427203\nResults :\n['ear ache']\nQuery is : i can hard breath\nProcessed : ['i can hard 
breath']\n1.0000000000000002 0.6158625297489015\nResults :\n['hard to breath']\nQuery is : i feel sever itch in the skin with red\nProcessed : ['i feel sever itch in the skin with red']\n1.0 0.23305008356240653\nResults :\n['skin issue']\nQuery is : pain in the larg neck\nProcessed : ['pain in the larg neck']\n1.0 0.15317428821731746\nResults :\n['neck pain']\nQuery is : the area around my heart doe not feel good\nProcessed : ['the area around my heart doe not feel good']\n1.0 0.2563030313043009\nResults :\n['heart hurts']\nQuery is : yesterday i had a shouder pain\nProcessed : ['yesterday i had a shouder pain']\n1.0000000000000002 0.10074173399409053\nResults :\n['shoulder pain']\nQuery is : my shoulder has agreat pain\nProcessed : ['my shoulder has agreat pain']\n1.0000000000000002 0.13402618911326133\nResults :\n['shoulder pain']\nQuery is : i have a pain in my stomach\nProcessed : ['i have a pain in my stomach']\n1.0 1.0\nResults :\n['internal pain']\nQuery is : i feel a pain in my back when i sit on a chair for long\nProcessed : ['i feel a pain in my back when i sit on a chair for long']\n1.0 0.2515069884271873\nResults :\n['back pain']\nQuery is : there pain in my foot\nProcessed : ['there pain in my foot']\n1.0 0.6899389671622923\nResults :\n['foot ache']\nQuery is : i complain alot with my neck pain and i realli need to be better\nProcessed : ['i complain alot with my neck pain and i realli need to be better']\n1.0 0.2221543790188921\nResults :\n['neck pain']\nQuery is : i feel cold\nProcessed : ['i feel cold']\n1.0 0.5286408396298904\nResults :\n['feeling cold']\nQuery is : i get cluster of pimpl on my face that never go away\nProcessed : ['i get cluster of pimpl on my face that never go away']\n1.0 0.2694432610107376\nResults :\n['acne']\nQuery is : it feel like i can not take a deep breath\nProcessed : ['it feel like i can not take a deep breath']\n1.0000000000000004 0.26014249920297455\nResults :\n['hard to breath']\nQuery is : i have a pain cramp in my feet\nProcessed : ['i have a pain cramp in my feet']\n1.0000000000000002 0.24736684096408118\nResults :\n['foot ache']\nQuery is : i cant breath\nProcessed : ['i cant breath']\n1.0 0.5181901798531511\nResults :\n['cough']\nQuery is : when i do hard exercis i feel great pain in my muscl\nProcessed : ['when i do hard exerci i feel great pain in my muscl']\n0.7286703978782358 0.5266113587458809\nResults :\n['muscle pain']\nQuery is : i feel dizzi and out of sight\nProcessed : ['i feel dizzi and out of sight']\n1.0 0.27997543009656606\nResults :\n['feeling dizzy']\nQuery is : i complain alot with skin allergi\nProcessed : ['i complain alot with skin allergi']\n1.0000000000000002 0.2221543790188921\nResults :\n['skin issue']\nQuery is : i feel like l fell in hot water\nProcessed : ['i feel like l fell in hot water']\n1.0000000000000002 0.14174471058689955\nResults :\n['skin issue']\nQuery is : i notic a lot more hair come out than usual when i brush my hair\nProcessed : ['i notic a lot more hair come out than usual when i brush my hair']\n1.0000000000000002 0.35232559853827233\nResults :\n['hair falling out']\nQuery is : i have notic my hair fall out a lot late\nProcessed : ['i have notic my hair fall out a lot late']\n1.0 0.3853463711813538\nResults :\n['hair falling out']\nQuery is : my bodi feel like it is in a refriger\nProcessed : ['my bodi feel like it is in a refrig']\n0.6875692916696835 0.2761793237320254\nResults :\n['feeling cold']\nQuery is : i can not walk well i have an ach in my foot\nProcessed : ['i can not walk well i 
have an ach in my foot']\n1.0 0.22066942354806462\nResults :\n['foot ache']\nQuery is : i have the impress of have no strength in my bodi\nProcessed : ['i have the impress of have no strength in my bodi']\n1.0000000000000002 0.09725120175896772\nResults :\n['body feels weak']\nQuery is : it feel like my heart is go to leap out of my bodi it hurt\nProcessed : ['it feel like my heart is go to leap out of my bodi it hurt']\n1.0000000000000004 0.5243172062533592\nResults :\n['heart hurts']\nQuery is : what is the symptom of append\nProcessed : ['what is the symptom of append']\n1.0000000000000004 0.11629724519117644\nResults :\n['internal pain']\nQuery is : after eat i have burn sensat insid of me\nProcessed : ['after eat i have burn sensat insid of me']\n1.0 0.24894182607904525\nResults :\n['stomach ache']\nQuery is : i cannot get warm no matter how much i wrap up or how high i put the heat on\nProcessed : ['i cannot get warm no matter how much i wrap up or how high i put the heat on']\n1.0 0.10535919153978802\nResults :\n['feeling cold']\nQuery is : after play tenni i had a power sensat close to my neck\nProcessed : ['after play tenni i had a power sensat close to my neck']\n1.0 0.19494977062984123\nResults :\n['shoulder pain']\nQuery is : i have an ear ach when shower\nProcessed : ['i have an ear ach when shower']\n1.0 0.34819095133472056\nResults :\n['ear ache']\nQuery is : i have terribl pain in my heart\nProcessed : ['i have terribl pain in my heart']\n1.0000000000000002 0.2868902741578063\nResults :\n['heart hurts']\nQuery is : the warm system of my hous is broken and feel so cold\nProcessed : ['the warm system of my hous is broken and feel so cold']\n1.0000000000000002 0.3255186820767016\nResults :\n['feeling cold']\nQuery is : my hair is fall out after i take a shower\nProcessed : ['my hair is fall out after i take a shower']\n1.0000000000000002 0.5769562014293044\nResults :\n['hair falling out']\nQuery is : i fell through a window while i was clean it and i have a shard of glass stuck in my left eye i think it is pretti serious pleas help!\nProcessed : ['i fell through a window while i was clean it and i have a shard of glass stuck in my left eye i think it is pretti serious plea help!']\n0.9136622078856973 0.07048820542715999\nResults :\n['open wound']\nQuery is : i feel pain in my throat\nProcessed : ['i feel pain in my throat']\n1.0000000000000002 0.41376343394850285\nResults :\n['cough']\nQuery is : i can not stand up i feel my brain is move insid my skull\nProcessed : ['i can not stand up i feel my brain is move insid my skull']\n1.0 0.1757754369696749\nResults :\n['head ache']\nQuery is : i have pimpl on my back\nProcessed : ['i have pimpl on my back']\n1.0 0.34175439810419417\nResults :\n['acne']\nQuery is : i have got a hard time to breath- am i have a heart attack\nProcessed : ['i have got a hard time to breath- am i have a heart attack']\n1.0 0.33529622027790845\nResults :\n['hard to breath']\nQuery is : i can not work good i have a pain in my knee\nProcessed : ['i can not work good i have a pain in my knee']\n1.0000000000000002 0.2745321369598674\nResults :\n['knee pain']\nQuery is : everi time it rain i get hive on my belli and side\nProcessed : ['everi time it rain i get hive on my belli and side']\n1.0000000000000004 0.15749291075546357\nResults :\n['skin issue']\nQuery is : my cut yellow or greenish-color pus\nProcessed : ['my cut yellow or greenish-color pus']\n1.0000000000000002 0.0887063191771319\nResults :\n['infected wound']\nQuery is : i have whoop cough with 
excess mucous need mucolyt\nProcessed : ['i have whoop cough with excess mucous need mucolyt']\n1.0 0.08117105213848522\nResults :\n['cough']\nQuery is : i think there is someth wrong with my wound it doe not seem to heal like it should\nProcessed : ['i think there is someth wrong with my wound it doe not seem to heal like it should']\n1.0 0.17766115583497555\nResults :\n['infected wound']\nQuery is : i was kick in the head play soccer last night\nProcessed : ['i was kick in the head play soccer last night']\n1.0 0.23484892872485932\nResults :\n['injury from sports']\nQuery is : when i walk i get a stab pain in the top of my foot\nProcessed : ['when i walk i get a stab pain in the top of my foot']\n1.0000000000000002 0.43693767546769274\nResults :\n['foot ache']\nQuery is : i have a wound between my toe that get better overnight and then reopen ever day when i wear dress shoe to work\nProcessed : ['i have a wound between my toe that get better overnight and then reopen ever day when i wear dress shoe to work']\n1.0000000000000002 0.08178671066981702\nResults :\n['open wound']\nQuery is : i feel pain in my back\nProcessed : ['i feel pain in my back']\n1.0 1.0\nResults :\n['back pain']\nQuery is : at first it feel realli numb but then a thousand needl start to prick through my foot\nProcessed : ['at first it feel realli numb but then a thousand needl start to prick through my foot']\n1.0000000000000004 0.14874223303557688\nResults :\n['foot ache']\nQuery is : my cbc report indic 10 hb i feel tire of littl work\nProcessed : ['my cbc report indic 10 hb i feel tire of littl work']\n" ] ], [ [ "## Use random forest", "_____no_output_____" ] ], [ [ "model = RandomForestClassifier(n_estimators=100,min_samples_leaf=2,bootstrap=True)", "_____no_output_____" ] ], [ [ "## Generate Data first\n- First make a transformed matrix and associate each of the sentences with a numeric row and each prompt with a numeric dictionary value", "_____no_output_____" ] ], [ [ "data[:3]", "_____no_output_____" ], [ "TfIdf = TfidfVectorizer(stop_words = 'english', ngram_range= (1,3),max_df= 0.7)\nX = TfIdf.fit_transform(data['stemmed_phrase']).toarray()", "_____no_output_____" ] ], [ [ "## Generate the Y \n- Generate the class data ", "_____no_output_____" ] ], [ [ "ailment_dict", "_____no_output_____" ], [ "# ailment_dict\nailment = {}\nfor i,j in ailment_dict.items():\n ailment[j] = i\nprint(ailment)\nY = data['Prompt'].map(ailment)", "{'emotional pain': 0, 'heart hurts': 1, 'infected wound': 2, 'foot ache': 3, 'shoulder pain': 4, 'injury from sports': 5, 'skin issue': 6, 'stomach ache': 7, 'knee pain': 8, 'joint pain': 9, 'hard to breath': 10, 'head ache': 11, 'body feels weak': 12, 'feeling dizzy': 13, 'back pain': 14, 'open wound': 15, 'internal pain': 16, 'blurry vision': 17, 'acne': 18, 'muscle pain': 19, 'hair falling out': 20, 'neck pain': 21, 'cough': 22, 'ear ache': 23, 'feeling cold': 24, 'sunburn': 25, 'fever': 26, 'cold and cough': 27}\n" ], [ "Y", "_____no_output_____" ] ], [ [ "## Got X and Y \n- Split in training and validation sets", "_____no_output_____" ] ], [ [ "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size = 0.8, random_state = 43, shuffle = True)", "_____no_output_____" ], [ "model.fit(X_train,Y_train)", "_____no_output_____" ], [ "y_preds = model.predict(X_test)", "_____no_output_____" ], [ "correct,incorrect =0,0\nfor k,i in zip(y_preds,Y_test):\n if(k==i):\n correct+=1\n else:\n incorrect+=1", "_____no_output_____" ], [ "correct", "_____no_output_____" ], [ "incorrect", 
"_____no_output_____" ], [ "score =[]\nfor est in range(10,50):\n model = RandomForestClassifier(n_estimators=est,min_samples_leaf=2)\n model.fit(X_train,Y_train) \n s = model.score(X_test,Y_test)\n score.append(s)\nplt.figure(figsize= (15,7))\nplt.title(\"Accuracy of classification\",fontsize=17)\nplt.xlabel(\"Number of estimators\",fontsize = 14)\nplt.ylabel(\"Percentage\",fontsize = 14)\nplt.plot([i for i in range(10,50)],score,color= 'red')", "_____no_output_____" ] ], [ [ "## Now\n- Whenever you get a query, you need to transform it according to your vocabulary and then predict the class and then return the predicted class from model ", "_____no_output_____" ] ], [ [ "def process_query(query):\n # Change to lower\n query = query.lower()\n # Removed abbreviations\n res = ''\n# print(query.split())\n for k in query.split():\n if k in abbr_dict:\n print(abbr_dict[k])\n res+=' ' + abbr_dict[k]\n else:\n res+=' ' + k \n \n stemmer = SnowballStemmer('english')\n res = ' '.join([stemmer.stem(y) for y in res.split()])\n return res ", "_____no_output_____" ], [ "# suppose I have the best model \nmodel = RandomForestClassifier(n_estimators=33,min_samples_leaf=2,bootstrap=True,max_features=300)\nmodel.fit(X_train,Y_train)\nfor i,j in zip(X_test,Y_test):\n query = data.iloc['Phrase'][i]\n print(\"Query :\",query)\n print(\"Original :\",ailment_dict[j])\n query = process_query(query)\n query = [query]\n #now transform the document according to the vectorizer \n query = TfIdf.transform(query)\n # now predict it \n pred = model.predict_proba(query)\n res = list(np.argsort(pred))[0]\n res = res[::-1][:3]\n for k in res:\n print(ailment_dict[k],end=',')\n print()", "Query : you will not believ me but this infect wound on my hand is from a paper cut i did not take serious\nOriginal : infected wound\ninfected wound,open wound,skin issue,\nQuery : these red spot on my cheek are new what is it\nOriginal : skin issue\nskin issue,infected wound,acne,\nQuery : i have a skin rash after eat an ice-cream\nOriginal : skin issue\nskin issue,open wound,muscle pain,\nQuery : i have a sharp pain in my abdomen\nOriginal : internal pain\ninternal pain,muscle pain,back pain,\nQuery : i feel a strang and power pain insid my rib cage\nOriginal : heart hurts\ninternal pain,heart hurts,back pain,\nQuery : use hair tonic\nOriginal : hair falling out\nhair falling out,acne,blurry vision,\nQuery : this long scar on my left buttock is from fall off my mountain bike\nOriginal : injury from sports\nhair falling out,internal pain,heart hurts,\nQuery : i feel like i just can not cope anymor i feel overwhelm and like i just can not get a break\nOriginal : emotional pain\nemotional pain,feeling dizzy,body feels weak,\nQuery : i feel chilli like an ice cube my sister said that she need sever blanket to cover her so that she is warm enough\nOriginal : feeling cold\nfeeling cold,emotional pain,open wound,\nQuery : i carri a heavi bag yesterday and when i get up today i felt a great shoulder pain\nOriginal : shoulder pain\nshoulder pain,back pain,injury from sports,\nQuery : i have a blurri vision and i can not see in the dark what is the reason doctor\nOriginal : blurry vision\nblurry vision,head ache,acne,\nQuery : when i am tire i feel my head heavi\nOriginal : head ache\nhead ache,feeling dizzy,emotional pain,\nQuery : i have difficulti move my neck\nOriginal : neck pain\nneck pain,shoulder pain,internal pain,\nQuery : my back hurt so much i can not bend down to tie my shoelac\nOriginal : back pain\nback pain,head ache,joint 
pain,\nQuery : i have a ear ach when i go to the pool\nOriginal : ear ache\near ache,internal pain,head ache,\nQuery : my heart hurt when i exercis\nOriginal : heart hurts\nheart hurts,back pain,neck pain,\nQuery : my neck hurt me and i can not stand with this pain\nOriginal : neck pain\nneck pain,back pain,shoulder pain,\nQuery : i have disterb in my emot\nOriginal : emotional pain\nemotional pain,blurry vision,back pain,\nQuery : anytim i play tenni i feel a shoulder pain\nOriginal : shoulder pain\nshoulder pain,injury from sports,muscle pain,\nQuery : i feel weak\nOriginal : body feels weak\nbody feels weak,emotional pain,stomach ache,\nQuery : when i sneez veri hard i feel ear ach\nOriginal : ear ache\near ache,hard to breath,internal pain,\nQuery : i feel like my heart is on fire\nOriginal : heart hurts\nheart hurts,feeling dizzy,emotional pain,\nQuery : when i lift my arm up i have a sore in my shoulder\nOriginal : shoulder pain\nshoulder pain,muscle pain,joint pain,\nQuery : i feel a click sensat in my knee each time i step\nOriginal : joint pain\nknee pain,joint pain,injury from sports,\nQuery : surgic wound infect\nOriginal : infected wound\ninfected wound,open wound,skin issue,\nQuery : red swell and difficulti walk\nOriginal : knee pain\nskin issue,knee pain,foot ache,\nQuery : i have a migrain and i took panadol but it doe not help\nOriginal : head ache\nhead ache,muscle pain,blurry vision,\nQuery : due to the recent event i have been feel quit deject and sad\nOriginal : emotional pain\nemotional pain,feeling dizzy,body feels weak,\nQuery : i feel restless and confus at time\nOriginal : emotional pain\nemotional pain,feeling dizzy,body feels weak,\nQuery : i can not see far off thing clear\nOriginal : blurry vision\nblurry vision,back pain,body feels weak,\nQuery : stay out for long durat dure the day give me a rash\nOriginal : sunburn\nskin issue,sunburn,injury from sports,\nQuery : i feel tire and overwork\nOriginal : feeling dizzy\nbody feels weak,emotional pain,feeling dizzy,\nQuery : i get chill and sweat a lot\nOriginal : fever\nfever,back pain,cough,\nQuery : i feel like no one like me and ignor me\nOriginal : emotional pain\nemotional pain,feeling dizzy,joint pain,\nQuery : stare at the laptop screen make my head ach\nOriginal : head ache\nhead ache,feeling dizzy,internal pain,\nQuery : i have sore throat can not tast anyth\nOriginal : cold and cough\ncough,cold and cough,neck pain,\nQuery : i feel uneasi and thump in my chest\nOriginal : internal pain\nemotional pain,hard to breath,internal pain,\nQuery : i feel tire after long walk\nOriginal : hard to breath\nbody feels weak,emotional pain,knee pain,\nQuery : my head is heavi and i feel pain behind my eye\nOriginal : head ache\nhead ache,blurry vision,internal pain,\nQuery : my nose is run and i have been feel tire from the least amount of work\nOriginal : feeling cold\nbody feels weak,emotional pain,feeling dizzy,\nQuery : there has been a weird ach in the leg sinc the last two week and i can not run or play\nOriginal : foot ache\nfoot ache,injury from sports,back pain,\nQuery : i can not stop think about him whenev i see anyth associ to him\nOriginal : emotional pain\nemotional pain,injury from sports,back pain,\nQuery : my armpit has a red colour rash grow sinc the past 1 week\nOriginal : skin issue\nskin issue,stomach ache,foot ache,\nQuery : sinc i ate a bad hot dog my stomach has been behav weird and i can not sit upright for veri long\nOriginal : stomach ache\nstomach ache,internal pain,back pain,\nQuery : 
yesterday an insect bit me on the left leg and it was not veri major but wake up today i see a yellow secret come out of the wound\nOriginal : infected wound\ninfected wound,open wound,skin issue,\nQuery : dure footbal i twist my ankl and was abl to walk back home yesterday but today i can not even walk proper and it hurt a lot\nOriginal : injury from sports\nfoot ache,knee pain,injury from sports,\nQuery : there is a wheez sound when i breath and i am experienc a lot of mucus\nOriginal : cough\nhard to breath,cough,fever,\nQuery : there is a strike pain in my knee whenev i tri to move my left leg\nOriginal : knee pain\nknee pain,joint pain,injury from sports,\nQuery : sinc the last week i have been feel tire even after simpl walk or move up the stair in my offic\nOriginal : body feels weak\nbody feels weak,emotional pain,knee pain,\nQuery : i do not know what happen but i can not bend down to pick up anyth as it hurt a lot when i do\nOriginal : back pain\nback pain,foot ache,cough,\nQuery : my neck has been stiff sinc i twist it accident two day ago\nOriginal : neck pain\nneck pain,shoulder pain,stomach ache,\nQuery : sinc the past 3 day i can not see proper even in bright light dure the night the vision almost complet goe off\nOriginal : blurry vision\nblurry vision,head ache,injury from sports,\nQuery : my face has been experienc a lot of breakout dure the chang of season\nOriginal : acne\nacne,injury from sports,cough,\nQuery : there are veri big pimpl emerg sinc the last week and i do not know what to do\nOriginal : acne\nacne,foot ache,stomach ache,\nQuery : i do not whi i am feel down and deject sinc my partner has gone for her trip\nOriginal : emotional pain\nemotional pain,body feels weak,feeling dizzy,\nQuery : there is a feel of empti creep up to me sinc the loss of my mother\nOriginal : emotional pain\nemotional pain,feeling dizzy,body feels weak,\nQuery : this weird pain under my chest has been grow for the past two day and i think it was time to contact you\nOriginal : internal pain\ninternal pain,heart hurts,back pain,\nQuery : it is veri hard to concentr sinc the last week event and i feel tire all the time\nOriginal : emotional pain\nbody feels weak,emotional pain,feeling dizzy,\nQuery : walk to work seem difficult now and there have been time when i was not abl get myself out of the bed in the morn\nOriginal : body feels weak\nbody feels weak,foot ache,knee pain,\nQuery : my head ach bad when i tri to look at my laptop screen\nOriginal : head ache\nhead ache,feeling dizzy,neck pain,\n" ] ], [ [ "## KNN", "_____no_output_____" ] ], [ [ "score =[]\nfor est in range(3,40):\n model = KNeighborsClassifier(n_neighbors=est,metric='minkowski')\n model.fit(X_train,Y_train) \n s = model.score(X_test,Y_test)\n score.append(s)\nplt.figure(figsize= (15,7))\nplt.title(\"Accuracy of classification\",fontsize=17)\nplt.xlabel(\"Number of estimators\",fontsize = 14)\nplt.ylabel(\"Percentage\",fontsize = 14)\nplt.plot([i for i in range(3,40)],score,color= 'red')", "_____no_output_____" ], [ "p = pd.DataFrame([[1,2],[2,3]],columns=['a','b'])", "_____no_output_____" ], [ "p", "_____no_output_____" ], [ "p = p.append([{'a':1,'b':23}],ignore_index=True)", "_____no_output_____" ], [ "p.append([{'a':1,'b':2223}],ignore_index=True)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb3958a9fdbab08f1e736c098947b087ca959d3c
4,921
ipynb
Jupyter Notebook
examples/metalcompute_numpy.ipynb
ProteusMRIgHIFU/py-metal-compute
fc1d58b73385755006a8307412853f871defe57e
[ "MIT" ]
null
null
null
examples/metalcompute_numpy.ipynb
ProteusMRIgHIFU/py-metal-compute
fc1d58b73385755006a8307412853f871defe57e
[ "MIT" ]
null
null
null
examples/metalcompute_numpy.ipynb
ProteusMRIgHIFU/py-metal-compute
fc1d58b73385755006a8307412853f871defe57e
[ "MIT" ]
null
null
null
24.605
156
0.555172
[ [ [ "# metalcompute and NumPy\n\nThis notebook shows how to use NumPy (np) arrays with metalcompute (on macOS)\n\nnp arrays can be passed directly as arguments to metalcompute kernels for input\n\nmetalcompute buffers can also be wrapped using np for easier access and usage\n\nFirst we import both numpy (np) and metalcompute (mc). \n\n(This assumes both are installed already in the python environment using pip install numpy & pip install metalcompute)", "_____no_output_____" ] ], [ [ "\nimport numpy as np\nimport metalcompute as mc", "_____no_output_____" ] ], [ [ "Next we create a metalcompute device. On an M1 family mac, this will be the built in GPU.", "_____no_output_____" ] ], [ [ "dev = mc.Device()", "_____no_output_____" ] ], [ [ "This is a simple kernel function called \"add\", to add together (element-wise) two arrays of float32 values, and write the result to a third array.\n", "_____no_output_____" ] ], [ [ "kernel = dev.kernel(\"\"\"\n// We always need these two lines in a metal kernel\n#include <metal_stdlib>\nusing namespace metal;\n\n// This is the add function\nkernel void add(\n // These are the two input arrays, const as we will not write to them\n const device float* a [[ buffer(0) ]],\n const device float *b [[ buffer(1) ]],\n // This is the output array\n device float *c [[ buffer(2) ]],\n // This is the index of the current kernel instance\n uint id [[ thread_position_in_grid ]]) \n{\n // This is the add operation: c = a + b (for each element)\n c[id] = a[id] + b[id]; \n}\n\"\"\")\n\nadd_fn = kernel.function(\"add\")", "_____no_output_____" ] ], [ [ "We will test this with 256MB buffers", "_____no_output_____" ] ], [ [ "count = 1024*1024*64\nsize = count * 4 # 256MB", "_____no_output_____" ] ], [ [ "Next we create two np test arrays, a_np and b_np, and calculate the sum of those as a reference ", "_____no_output_____" ] ], [ [ "\na_np = np.arange(count,dtype='f') # f32 array\nb_np = (count - a_np).astype('f') # Cast to f32 array\nc_np = a_np + b_np # Calculate reference result", "_____no_output_____" ] ], [ [ "Now we create metalcompute buffers with copies of the np data, and space for the result", "_____no_output_____" ] ], [ [ "\na = dev.buffer(a_np) # Create mc buffer as copy of a_np\nb = dev.buffer(b_np) # Create mc buffer as copy of b_np\nc = dev.buffer(size) # Space for the result", "_____no_output_____" ] ], [ [ "Now we do the add calculation using metalcompute", "_____no_output_____" ] ], [ [ "handle = add_fn(count,a,b,c)", "_____no_output_____" ] ], [ [ "Next we wait for the compute to finish (by deleting the handle) and check the result against the numpy version.\n\nNote how the metalcompute buffer can be wrapped into a numpy array using np.frombuffer", "_____no_output_____" ] ], [ [ "del handle # Will block until the compute has finished\nassert((c_np == np.frombuffer(c,dtype='f')).all())", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb395bd21cc1c7692ebf5ba0839883c2d996ef37
250,082
ipynb
Jupyter Notebook
github_scarping.ipynb
yoojunwoong/class1_webcraping
5f6aaadcba86fb778b1a3ddd4153e288a2aca062
[ "Apache-2.0" ]
null
null
null
github_scarping.ipynb
yoojunwoong/class1_webcraping
5f6aaadcba86fb778b1a3ddd4153e288a2aca062
[ "Apache-2.0" ]
null
null
null
github_scarping.ipynb
yoojunwoong/class1_webcraping
5f6aaadcba86fb778b1a3ddd4153e288a2aca062
[ "Apache-2.0" ]
null
null
null
105.253367
7,200
0.62949
[ [ [ "from selenium import webdriver", "_____no_output_____" ], [ "brawser = webdriver.Chrome('./chromedriver.exe')", "_____no_output_____" ], [ "brawser.get('https://github.com/login')", "_____no_output_____" ] ], [ [ "id = input#login_field password = input#password submitbutton = input[type=\"submit\"]", "_____no_output_____" ] ], [ [ "brawser.find_elements_by_css_selector('input#login_field')[0].send_keys('yoojunwoong')", "_____no_output_____" ], [ "brawser.find_elements_by_css_selector('input#password')[0].send_keys('1111111111')\n\nbrawser.find_elements_by_css_selector('input[type=\"submit\"]')[0].click()", "_____no_output_____" ], [ "brawser.get('https://github.com/yoojunwoong/class1_webcraping')", "_____no_output_____" ], [ "from bs4 import BeautifulSoup", "_____no_output_____" ], [ "html = BeautifulSoup(brawser.page_source, 'html.parser')\nhtml", "_____no_output_____" ], [ "tags = html.select('div[role=\"row\"].Box-row')\ntype(tags), len(tags)\ntags[4]", "_____no_output_____" ], [ "tag = tags[0]", "_____no_output_____" ], [ "filename = tag.select('span.css-truncate')[0]\ntype(filename)", "_____no_output_____" ], [ "filename.text.strip()", "_____no_output_____" ], [ "desc = tag.select('a.Link--secondary')[0]\ndesc.text.strip()", "_____no_output_____" ], [ "date = tag.select('time-ago.no-wrap')[0]\ndate.text.strip()", "_____no_output_____" ], [ "git = [] \nfor tag in tags:\n filename = tag.select('span.css-truncate')[0]\n desc = tag.select('a.Link--secondary')[0]\n date = tag.select('time-ago.no-wrap')[0]\n git.append([filename.text.strip(), desc.text.strip(),date.text.strip()])\ngit", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "pd_data = pd.DataFrame(git)\npd_data.to_excel('./saves/gitpractice.xls')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb395cfbe3176b017a05404b29aefae54bd78e21
19,661
ipynb
Jupyter Notebook
site/en/guide/basic_training_loops.ipynb
arshPratap/docs
b6c5c4e272a2be7aab27573d747f4c3edcc5d4b8
[ "Apache-2.0" ]
1
2020-02-14T04:02:02.000Z
2020-02-14T04:02:02.000Z
site/en/guide/basic_training_loops.ipynb
arshPratap/docs
b6c5c4e272a2be7aab27573d747f4c3edcc5d4b8
[ "Apache-2.0" ]
32
2020-07-23T21:36:02.000Z
2020-09-11T05:46:09.000Z
site/en/guide/basic_training_loops.ipynb
arshPratap/docs
b6c5c4e272a2be7aab27573d747f4c3edcc5d4b8
[ "Apache-2.0" ]
2
2020-05-14T12:53:13.000Z
2020-07-30T20:12:17.000Z
35.877737
422
0.539647
[ [ [ "##### Copyright 2020 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Basic training loops", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/basic_training_loops\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/basic_training_loops.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/guide/basic_training_loops.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/basic_training_loops.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "In the previous guides, you have learned about [tensors](./tensor.ipynb), [variables](./variable.ipynb), [gradient tape](autodiff.ipynb), and [modules](./intro_to_modules.ipynb). In this guide, you will fit these all together to train models.\n\nTensorFlow also includes the [tf.Keras API](https://www.tensorflow.org/guide/keras/overview), a high-level neural network API that provides useful abstractions to reduce boilerplate. However, in this guide, you will use basic classes.", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "import tensorflow as tf", "_____no_output_____" ] ], [ [ "## Solving machine learning problems\n\nSolving a machine learning problem usually consists of the following steps:\n\n - Obtain training data.\n - Define the model.\n - Define a loss function.\n - Run through the training data, calculating loss from the ideal value\n - Calculate gradients for that loss and use an *optimizer* to adjust the variables to fit the data.\n - Evaluate your results.\n\nFor illustration purposes, in this guide you'll develop a simple linear model, $f(x) = x * W + b$, which has two variables: $W$ (weights) and $b$ (bias).\n\nThis is the most basic of machine learning problems: Given $x$ and $y$, try to find the slope and offset of a line via [simple linear regression](https://en.wikipedia.org/wiki/Linear_regression#Simple_and_multiple_linear_regression).", "_____no_output_____" ], [ "## Data\n\nSupervised learning uses *inputs* (usually denoted as *x*) and *outputs* (denoted *y*, often called *labels*). The goal is to learn from paired inputs and outputs so that you can predict the value of an output from an input.\n\nEach input of your data, in TensorFlow, is almost always represented by a tensor, and is often a vector. 
In supervised training, the output (or value you'd like to predict) is also a tensor.\n\nHere is some data synthesized by adding Gaussian (Normal) noise to points along a line.", "_____no_output_____" ] ], [ [ "# The actual line\nTRUE_W = 3.0\nTRUE_B = 2.0\n\nNUM_EXAMPLES = 1000\n\n# A vector of random x values\nx = tf.random.normal(shape=[NUM_EXAMPLES])\n\n# Generate some noise\nnoise = tf.random.normal(shape=[NUM_EXAMPLES])\n\n# Calculate y\ny = x * TRUE_W + TRUE_B + noise", "_____no_output_____" ], [ "# Plot all the data\nimport matplotlib.pyplot as plt\n\nplt.scatter(x, y, c=\"b\")\nplt.show()", "_____no_output_____" ] ], [ [ "Tensors are usually gathered together in *batches*, or groups of inputs and outputs stacked together. Batching can confer some training benefits and works well with accelerators and vectorized computation. Given how small this dataset is, you can treat the entire dataset as a single batch.", "_____no_output_____" ], [ "## Define the model\n\nUse `tf.Variable` to represent all weights in a model. A `tf.Variable` stores a value and provides this in tensor form as needed. See the [variable guide](./variable.ipynb) for more details.\n\nUse `tf.Module` to encapsulate the variables and the computation. You could use any Python object, but this way it can be easily saved.\n\nHere, you define both *w* and *b* as variables.", "_____no_output_____" ] ], [ [ "class MyModel(tf.Module):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n # Initialize the weights to `5.0` and the bias to `0.0`\n # In practice, these should be randomly initialized\n self.w = tf.Variable(5.0)\n self.b = tf.Variable(0.0)\n\n def __call__(self, x):\n return self.w * x + self.b\n\nmodel = MyModel()\n\n# List the variables tf.modules's built-in variable aggregation.\nprint(\"Variables:\", model.variables)\n\n# Verify the model works\nassert model(3.0).numpy() == 15.0", "_____no_output_____" ] ], [ [ "The initial variables are set here in a fixed way, but Keras comes with any of a number of [initalizers](https://www.tensorflow.org/api_docs/python/tf/keras/initializers) you could use, with or without the rest of Keras.", "_____no_output_____" ], [ "### Define a loss function\n\nA loss function measures how well the output of a model for a given input matches the target output. The goal is to minimize this difference during training. 
Define the standard L2 loss, also known as the \"mean squared\" error:", "_____no_output_____" ] ], [ [ "# This computes a single loss value for an entire batch\ndef loss(target_y, predicted_y):\n return tf.reduce_mean(tf.square(target_y - predicted_y))", "_____no_output_____" ] ], [ [ "Before training the model, you can visualize the loss value by plotting the model's predictions in red and the training data in blue:", "_____no_output_____" ] ], [ [ "plt.scatter(x, y, c=\"b\")\nplt.scatter(x, model(x), c=\"r\")\nplt.show()\n\nprint(\"Current loss: %1.6f\" % loss(y, model(x)).numpy())", "_____no_output_____" ] ], [ [ "### Define a training loop\n\nThe training loop consists of repeatedly doing three tasks in order:\n\n* Sending a batch of inputs through the model to generate outputs\n* Calculating the loss by comparing the outputs to the output (or label)\n* Using gradient tape to find the gradients\n* Optimizing the variables with those gradients\n\nFor this example, you can train the model using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent).\n\nThere are many variants of the gradient descent scheme that are captured in `tf.keras.optimizers`. But in the spirit of building from first principles, here you will implement the basic math yourself with the help of `tf.GradientTape` for automatic differentiation and `tf.assign_sub` for decrementing a value (which combines `tf.assign` and `tf.sub`):", "_____no_output_____" ] ], [ [ "# Given a callable model, inputs, outputs, and a learning rate...\ndef train(model, x, y, learning_rate):\n\n with tf.GradientTape() as t:\n # Trainable variables are automatically tracked by GradientTape\n current_loss = loss(y, model(x))\n\n # Use GradientTape to calculate the gradients with respect to W and b\n dw, db = t.gradient(current_loss, [model.w, model.b])\n\n # Subtract the gradient scaled by the learning rate\n model.w.assign_sub(learning_rate * dw)\n model.b.assign_sub(learning_rate * db)", "_____no_output_____" ] ], [ [ "For a look at training, you can send the same batch of *x* and *y* through the training loop, and see how `W` and `b` evolve.", "_____no_output_____" ] ], [ [ "model = MyModel()\n\n# Collect the history of W-values and b-values to plot later\nWs, bs = [], []\nepochs = range(10)\n\n# Define a training loop\ndef training_loop(model, x, y):\n\n for epoch in epochs:\n # Update the model with the single giant batch\n train(model, x, y, learning_rate=0.1)\n\n # Track this before I update\n Ws.append(model.w.numpy())\n bs.append(model.b.numpy())\n current_loss = loss(y, model(x))\n\n print(\"Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f\" %\n (epoch, Ws[-1], bs[-1], current_loss))\n", "_____no_output_____" ], [ "print(\"Starting: W=%1.2f b=%1.2f, loss=%2.5f\" %\n (model.w, model.b, loss(y, model(x))))\n\n# Do the training\ntraining_loop(model, x, y)\n\n# Plot it\nplt.plot(epochs, Ws, \"r\",\n epochs, bs, \"b\")\n\nplt.plot([TRUE_W] * len(epochs), \"r--\",\n [TRUE_B] * len(epochs), \"b--\")\n\nplt.legend([\"W\", \"b\", \"True W\", \"True b\"])\nplt.show()\n", "_____no_output_____" ], [ "# Visualize how the trained model performs\nplt.scatter(x, y, c=\"b\")\nplt.scatter(x, model(x), c=\"r\")\nplt.show()\n\nprint(\"Current loss: %1.6f\" % loss(model(x), y).numpy())", "_____no_output_____" ] ], [ [ "## The same solution, but with Keras\n\nIt's useful to contrast the code above with the equivalent in Keras.\n\nDefining the model looks exactly the same if you subclass `tf.keras.Model`. 
Remember that Keras models inherit ultimately from module.", "_____no_output_____" ] ], [ [ "class MyModelKeras(tf.keras.Model):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n # Initialize the weights to `5.0` and the bias to `0.0`\n # In practice, these should be randomly initialized\n self.w = tf.Variable(5.0)\n self.b = tf.Variable(0.0)\n\n def call(self, x):\n return self.w * x + self.b\n\nkeras_model = MyModelKeras()\n\n# Reuse the training loop with a Keras model\ntraining_loop(keras_model, x, y)\n\n# You can also save a checkpoint using Keras's built-in support\nkeras_model.save_weights(\"my_checkpoint\")", "_____no_output_____" ] ], [ [ "Rather than write new training loops each time you create a model, you can use the built-in features of Keras as a shortcut. This can be useful when you do not want to write or debug Python training loops.\n\nIf you do, you will need to use `model.compile()` to set the parameters, and `model.fit()` to train. It can be less code to use Keras implementations of L2 loss and gradient descent, again as a shortcut. Keras losses and optimizers can be used outside of these convenience functions, too, and the previous example could have used them.", "_____no_output_____" ] ], [ [ "keras_model = MyModelKeras()\n\n# compile sets the training parameters\nkeras_model.compile(\n # By default, fit() uses tf.function(). You can\n # turn that off for debugging, but it is on now.\n run_eagerly=False,\n\n # Using a built-in optimizer, configuring as an object\n optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),\n\n # Keras comes with built-in MSE error\n # However, you could use the loss function\n # defined above\n loss=tf.keras.losses.mean_squared_error,\n)", "_____no_output_____" ] ], [ [ "Keras `fit` expects batched data or a complete dataset as a NumPy array. NumPy arrays are chopped into batches and default to a batch size of 32.\n\nIn this case, to match the behavior of the hand-written loop, you should pass `x` in as a single batch of size 1000.", "_____no_output_____" ] ], [ [ "print(x.shape[0])\nkeras_model.fit(x, y, epochs=10, batch_size=1000)", "_____no_output_____" ] ], [ [ "Note that Keras prints out the loss after training, not before, so the first loss appears lower, but otherwise this shows essentially the same training performance.", "_____no_output_____" ], [ "## Next steps\n\nIn this guide, you have seen how to use the core classes of tensors, variables, modules, and gradient tape to build and train a model, and further how those ideas map to Keras.\n\nThis is, however, an extremely simple problem. For a more practical introduction, see [Custom training walkthrough](../tutorials/customization/custom_training_walkthrough.ipynb).\n\nFor more on using built-in Keras training loops, see [this guide](https://www.tensorflow.org/guide/keras/train_and_evaluate). For more on training loops and Keras, see [this guide](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch). For writing custom distributed training loops, see [this guide](distributed_training.ipynb#using_tfdistributestrategy_with_basic_training_loops_loops).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb395e722904b6e013a7ff31907af48a3bae8a98
16,270
ipynb
Jupyter Notebook
02-Basic Python/05-Dictionaries.ipynb
Goliath-Research/Introduction-to-Data-Science
f2c2fdd426684758eb1d918bd9affbeaa154ed91
[ "MIT" ]
null
null
null
02-Basic Python/05-Dictionaries.ipynb
Goliath-Research/Introduction-to-Data-Science
f2c2fdd426684758eb1d918bd9affbeaa154ed91
[ "MIT" ]
null
null
null
02-Basic Python/05-Dictionaries.ipynb
Goliath-Research/Introduction-to-Data-Science
f2c2fdd426684758eb1d918bd9affbeaa154ed91
[ "MIT" ]
null
null
null
19.254438
404
0.460111
[ [ [ "# Python Dictionary", "_____no_output_____" ], [ "Python dictionary is a collection of key:value pairs. \n\nEach key:value pair maps the key to its associated value.\n\nA dictionary is a collection that is ordered, changeable, and does not allow duplicates.", "_____no_output_____" ] ], [ [ "# empty dictionary\nd = {}", "_____no_output_____" ], [ "type(d)", "_____no_output_____" ], [ "d2 = {'name':'John', 'last_name':'Doe', 'age':30}\nd2", "_____no_output_____" ], [ "d2['name']", "_____no_output_____" ], [ "# accesing items\nstudent_name = d2['name']\nstudent_name", "_____no_output_____" ], [ "# Other way: using get\nstudent_name = d2.get('name')\nstudent_name", "_____no_output_____" ], [ "d2['last_name']", "_____no_output_____" ], [ "d2['age']", "_____no_output_____" ], [ "# Change value\nd2['age'] = 33\nd2", "_____no_output_____" ], [ "# check if key exists\n'name' in d2", "_____no_output_____" ], [ "'middle_name' in d2", "_____no_output_____" ] ], [ [ "**Important**: dictionaries are accessed by key, not by the position of the items. \n\nIt does not make sense to slice a dictionary.", "_____no_output_____" ] ], [ [ "d2['name':'last_name'] # This will raise an error", "_____no_output_____" ] ], [ [ "## Python methods for working with dictionaries", "_____no_output_____" ], [ "**len()**: lenght of the dictionary", "_____no_output_____" ] ], [ [ "len(d2)", "_____no_output_____" ] ], [ [ "**items()**: Returns a list of tuples containing each key, value pair", "_____no_output_____" ] ], [ [ "d2.items()", "_____no_output_____" ] ], [ [ "**keys()**: Returns a list containing the dictionary's keys", "_____no_output_____" ] ], [ [ "d2.keys()", "_____no_output_____" ] ], [ [ "**values()**: Returns a list of all the values in the dictionary", "_____no_output_____" ] ], [ [ "d2.values()", "_____no_output_____" ] ], [ [ "**Adding items** \n\nIt is done by using a new key and assigning a value to it.", "_____no_output_____" ] ], [ [ "d2['weight'] = 65\nd2", "_____no_output_____" ] ], [ [ "**update()**: Updates the dictionary with the specified key:value pairs", "_____no_output_____" ] ], [ [ "d2.update({'height':5.8})\nd2", "_____no_output_____" ] ], [ [ "**pop()**: removes the item with specified key name", "_____no_output_____" ] ], [ [ "d2.pop('weight')\nd2", "_____no_output_____" ] ], [ [ "**popitem()**: Removes the last inserted key:value pair", "_____no_output_____" ] ], [ [ "d2.popitem()", "_____no_output_____" ] ], [ [ "You cannot copy a dictionary simply by typing dict2 = dict1, because: dict2 will only be a reference to dict1, and changes made in dict1 will automatically also be made in dict2.\n\nIf you want to copy the dict (which is rare), you have to do so explicitly with one of these two options:", "_____no_output_____" ] ], [ [ "d3 = dict(d2) \nd3", "_____no_output_____" ] ], [ [ "**copy()**: makes a copy of a dictionary", "_____no_output_____" ] ], [ [ "d3 = d2.copy()\nd3 ", "_____no_output_____" ] ], [ [ "**clear()**: empties the dictionary", "_____no_output_____" ] ], [ [ "d3.clear()\nd3", "_____no_output_____" ], [ "d2", "_____no_output_____" ] ], [ [ "**del**: removes the item with the specified key name", "_____no_output_____" ] ], [ [ "del d2['name']\nd2", "_____no_output_____" ] ], [ [ "**del** can also delete the dictionary completely", "_____no_output_____" ] ], [ [ "del d2", "_____no_output_____" ], [ "d2 # This will raise an error", "_____no_output_____" ] ], [ [ "### Nested Dictionaries", "_____no_output_____" ] ], [ [ "child1 = {\n 'name':'Hazel',\n 'year': 2001,\n 
'gender':'F'\n}\nchild2 = {\n 'name':'Helen',\n 'year': 2003,\n 'gender':'F'\n}\nchild3 = {\n 'name':'Abel',\n 'year': 2006,\n 'gender':'M'\n}\nchild4 = {\n 'name':'Diana',\n 'year': 2012,\n 'gender':'F'\n}", "_____no_output_____" ], [ "child1", "_____no_output_____" ], [ "child1['name']", "_____no_output_____" ], [ "family = {\n 'child1':child1,\n 'child2':child2,\n 'child3':child3,\n 'child4':child4\n}\nfamily", "_____no_output_____" ] ], [ [ "Accessing to 'Diana' using family dictionary:", "_____no_output_____" ] ], [ [ "family['child4']", "_____no_output_____" ], [ "family['child4']['name']", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb3964e69dda8cfcd751b0913cdf38199a5208a0
52,362
ipynb
Jupyter Notebook
intro-to-pytorch/Part 4 - Fashion-MNIST (Exercises).ipynb
yonycherkos/deep-learning-v2-pytorch
21a814c90aa4ada09535fa36ec0ba5abafcf7c63
[ "MIT" ]
null
null
null
intro-to-pytorch/Part 4 - Fashion-MNIST (Exercises).ipynb
yonycherkos/deep-learning-v2-pytorch
21a814c90aa4ada09535fa36ec0ba5abafcf7c63
[ "MIT" ]
null
null
null
intro-to-pytorch/Part 4 - Fashion-MNIST (Exercises).ipynb
yonycherkos/deep-learning-v2-pytorch
21a814c90aa4ada09535fa36ec0ba5abafcf7c63
[ "MIT" ]
null
null
null
91.064348
24,168
0.732134
[ [ [ "# Classifying Fashion-MNIST\n\nNow it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world.\n\n<img src='assets/fashion-mnist-sprite.png' width=500px>\n\nIn this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this.\n\nFirst off, let's load the dataset through torchvision.", "_____no_output_____" ] ], [ [ "import torch\nfrom torchvision import datasets, transforms\nimport helper\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n# Download and load the training data\ntrainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n\n# Download and load the test data\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)", "\r0it [00:00, ?it/s]" ] ], [ [ "Here we can see one of the images.", "_____no_output_____" ] ], [ [ "image, label = next(iter(trainloader))\nhelper.imshow(image[0,:]);", "_____no_output_____" ] ], [ [ "## Building the network\n\nHere you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.", "_____no_output_____" ] ], [ [ "# TODO: Define your network architecture here\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass Network(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 128)\n self.fc3 = nn.Linear(128, 64)\n self.out = nn.Linear(64, 10)\n\n def forward(self, x):\n x = x.view(x.shape[0], -1)\n\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.log_softmax(self.out(x), dim=1)\n\n return x", "_____no_output_____" ] ], [ [ "# Train the network\n\nNow you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) ( something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).\n\nThen write the training code. 
Remember the training pass is a fairly straightforward process:\n\n* Make a forward pass through the network to get the logits \n* Use the logits to calculate the loss\n* Perform a backward pass through the network with `loss.backward()` to calculate the gradients\n* Take a step with the optimizer to update the weights\n\nBy adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.", "_____no_output_____" ] ], [ [ "# TODO: Create the network, define the criterion and optimizer\nfrom torch import optim\n\nmodel = Network()\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.003)", "_____no_output_____" ], [ "# TODO: Train the network here\nepochs = 5\nfor epoch in range(epochs):\n running_loss = 0\n for images, labels in trainloader:\n logits = model(images)\n optimizer.zero_grad()\n \n loss = criterion(logits, labels)\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n else:\n print(f'training loss: {running_loss}')", "training loss: 481.1143866032362\ntraining loss: 366.28217351436615\ntraining loss: 333.78025329113007\ntraining loss: 311.4708919003606\ntraining loss: 293.37386625260115\n" ], [ "%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport helper\n\n# Test out your network!\n\ndataiter = iter(testloader)\nimages, labels = dataiter.next()\nimg = images[0]\n# Convert 2D image to 1D vector\nimg = img.resize_(1, 784)\n\n# TODO: Calculate the class probabilities (softmax) for img\nps = torch.exp(model(img))\n\n# Plot the image and probabilities\nhelper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb3982f1a687feccd7d5b01e6e7f063933490a94
2,750
ipynb
Jupyter Notebook
51-75/p52.ipynb
Sorosliu1029/Euler
6bfff5513f209e3dabb371b7b3954b219deb4c96
[ "MIT" ]
3
2020-08-01T17:02:02.000Z
2022-01-14T07:59:56.000Z
51-75/p52.ipynb
Sorosliu1029/Euler
6bfff5513f209e3dabb371b7b3954b219deb4c96
[ "MIT" ]
null
null
null
51-75/p52.ipynb
Sorosliu1029/Euler
6bfff5513f209e3dabb371b7b3954b219deb4c96
[ "MIT" ]
null
null
null
20.992366
223
0.480364
[ [ [ "# Permuted multiples", "_____no_output_____" ], [ "<div class=\"problem_content\" role=\"problem\">\n<p>It can be seen that the number, 125874, and its double, 251748, contain exactly the same digits, but in a different order.</p>\n<p>Find the smallest positive integer, <i>x</i>, such that 2<i>x</i>, 3<i>x</i>, 4<i>x</i>, 5<i>x</i>, and 6<i>x</i>, contain the same digits.</p>\n</div>", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "### Idea", "_____no_output_____" ], [ "For $ x $, $ i * x, 2 \\le i \\le bound $ contain the same digits.\n\nAs $ bound * x $ cantains the same digits as $ 2 * x $, so they cantain same number of digits. As to say, if $ 2 * x $ is two-digit number, then $ bound * x $ is also two-digit number, so $ x \\lt \\frac{100}{bound}$ ", "_____no_output_____" ], [ "---", "_____no_output_____" ] ], [ [ "import sys, os; sys.path.append(os.path.abspath('..'))\nfrom timer import timethis", "_____no_output_____" ], [ "@timethis\ndef solve(times):\n p = 1\n while True:\n for x in range(pow(10, p)+1, pow(10, p+1) // times + 1):\n if len(set([tuple(sorted(str(i * x))) for i in range(2, times+1)])) == 1:\n return x\n p += 1", "_____no_output_____" ], [ "solve(6)", "Run for 0.375 seconds\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ] ]
cb39b45124a2a2a1b77f5593fb4b8055870858dc
14,577
ipynb
Jupyter Notebook
preprocess/dblp.ipynb
unsuthee/node2hash
d48ef8e0b8699c6a47d135615f2b39bb98fba41b
[ "MIT" ]
2
2019-10-24T15:12:09.000Z
2020-10-10T01:34:58.000Z
preprocess/dblp.ipynb
unsuthee/node2hash
d48ef8e0b8699c6a47d135615f2b39bb98fba41b
[ "MIT" ]
null
null
null
preprocess/dblp.ipynb
unsuthee/node2hash
d48ef8e0b8699c6a47d135615f2b39bb98fba41b
[ "MIT" ]
2
2019-09-05T00:44:32.000Z
2019-10-27T10:40:30.000Z
32.465479
128
0.542293
[ [ [ "import os\nimport numpy as np\nimport pandas as pd\nimport json\nimport pickle\nfrom scipy import sparse\nimport scipy.io\n\ndataset_name = 'dblp'\ndata_path = os.path.join('../dataset/raw/{}'.format(dataset_name))", "_____no_output_____" ], [ "citations = []\nincomming = {}\n\nfor i in range(4):\n fn = os.path.join(data_path, 'dblp-ref-{}.json'.format(i))\n with open(fn) as in_fn:\n for line in in_fn:\n paper = json.loads(line.strip())\n citations.append(paper)\n\n if 'references' in paper:\n for ref_id in paper['references']:\n if ref_id in incomming:\n incomming[ref_id].append(paper['id'])\n else:\n incomming[ref_id] = [paper['id']]\n \ndf = pd.DataFrame(citations)", "_____no_output_____" ], [ "is_first_line = True\nconferences = {}\nwith open('../dataset/clean/dblp/venue_info.tsv') as in_csv:\n for line in in_csv:\n tokens = line.strip().split('\\t')\n if is_first_line:\n #print(tokens)\n is_first_line = False\n else:\n conf_name = tokens[0]\n \n labels = [int(num_str) for num_str in tokens[2].split(',')]\n labels = [n-2 for n in labels if n > 1] # remove the first label (signal processing has too many documents)\n \n conferences[conf_name] = {'name': conf_name, 'label': labels}\n #conferences[conf_name] = {'name': conf_name, }\n\nmax_labels = np.max([np.max(val['label']) for key, val in conferences.items()])\nmin_labels = np.min([np.min(val['label']) for key, val in conferences.items()])\nnum_labels = max_labels - min_labels + 1\nprint('label min:{} max:{} total:{}'.format(min_labels, max_labels, num_labels))", "_____no_output_____" ], [ "# remove any row that is not present in the selected venues\ndef is_selected_venue(row):\n return (row in conferences)\n\nprint(\"num paper (before): {}\".format(len(df)))\ndf = df[df.venue.apply(is_selected_venue)]\nprint(\"num paper (after): {}\".format(len(df)))", "_____no_output_____" ], [ "cut_off_years = 2016\n\ndf_train = df[df.year < cut_off_years]\ndf_test = df[df.year >= cut_off_years]\nnum_trains = len(df_train)\nnum_tests = len(df_test)\nprint(\"num trains: {} num tests: {} ratio: {:.4f}\".format(num_trains, num_tests, num_tests / num_trains))", "_____no_output_____" ], [ "#venue_count = df_train.groupby('venue').count().sort_values(['abstract'], ascending=False).abstract", "_____no_output_____" ], [ "def assign_labels(venue):\n label_list = conferences[venue]['label']\n return np.sum(np.eye(num_labels)[label_list], axis=0).astype(np.int)\n\ndf_train = df_train.copy()\ndf_train['label'] = df_train.venue.apply(assign_labels)\ndf_train.set_index('id', inplace=True) # set paper as the row index\n\ndf_test = df_test.copy()\ndf_test['label'] = df_test.venue.apply(assign_labels)\ndf_test.set_index('id', inplace=True) # set paper as the row index\n\nnum_train_doc_per_labels = np.sum(np.array(list(df_train.label)), axis=0)\nnum_test_doc_per_labels = np.sum(np.array(list(df_test.label)), axis=0)\nprint(num_train_doc_per_labels)\nprint(num_test_doc_per_labels)", "_____no_output_____" ], [ "# remove any row that does not have abstract, title, paperId, or venue\nprint(\"num paper = {}\".format(len(df_train)))\ndf_train.dropna(axis=0, subset=['abstract', 'venue', 'year', 'label'], inplace=True)\nprint(\"num paper = {}\".format(len(df_train)))", "_____no_output_____" ], [ "# This method adds incoming edges to each node as well as removing any edge that points outside the train set\ndef createEdges(row):\n if row.references is not np.nan:\n outgoing_edges = [r for r in row.references if r in df_train.index]\n else:\n outgoing_edges = []\n \n if 
row.name in incomming:\n incomming_edges = [r for r in incomming[row.name] if r in df_train.index]\n else:\n incomming_edges = []\n return outgoing_edges + incomming_edges\n \ndf_train['links'] = df_train.apply(createEdges, axis=1)\n\n# Remove any row that has no link\nprint(\"num paper = {}\".format(len(df_train)))\ndf_train = df_train[df_train.links.apply(len) > 0]\nprint(\"num paper = {}\".format(len(df_train)))\n\n# There must be no train nodes that references to non-train nodes\ndef count_invalid_edges(refs):\n return len([r for r in refs if r not in df_train.index])\n \nassert(len(df_train[df_train.links.apply(count_invalid_edges) > 0]) == 0)", "_____no_output_____" ], [ "global_id_2_train_id = {node_id: idx for idx, node_id in enumerate(df_train.index)}\n\ndef convert_2_train_id(ref):\n return [global_id_2_train_id[r] for r in ref]\n\ntrain_edges = df_train.links.apply(convert_2_train_id)\n \ntrain_graph = {}\nfor node_id, value in train_edges.iteritems():\n train_graph[global_id_2_train_id[node_id]] = value\n \nprint('num train: {}'.format(len(train_graph)))", "_____no_output_____" ] ], [ [ "# Process Test Data", "_____no_output_____" ] ], [ [ "# remove any row that does not have abstract, title, paperId, or venue\nprint(\"num paper = {}\".format(len(df_test)))\ndf_test.dropna(axis=0, subset=['abstract', 'venue', 'year', 'label'], inplace=True)\nprint(\"num paper = {}\".format(len(df_test)))", "_____no_output_____" ], [ "# This method adds incoming edges to each node as well as removing any edge that points outside the train set\ndef createEdges(row):\n if row.references is not np.nan:\n outgoing_edges = [r for r in row.references if r in df_train.index]\n else:\n outgoing_edges = []\n \n if row.name in incomming:\n incomming_edges = [r for r in incomming[row.name] if r in df_train.index]\n else:\n incomming_edges = []\n return outgoing_edges + incomming_edges\n \ndf_test['links'] = df_test.apply(createEdges, axis=1)\n\n# Remove any row that has no link\nprint(\"num paper = {}\".format(len(df_test)))\ndf_test = df_test[df_test.links.apply(len) > 0]\nprint(\"num paper = {}\".format(len(df_test)))\n\n# There must be no train nodes that references to non-train nodes\ndef count_invalid_edges(refs):\n return len([r for r in refs if r not in df_train.index])\n \nassert(len(df_test[df_test.links.apply(count_invalid_edges) > 0]) == 0)", "_____no_output_____" ], [ "global_id_2_test_id = {node_id: idx for idx, node_id in enumerate(df_test.index)}\n\n# each link MUST point to the train nodes\ntest_edges = df_test.links.apply(convert_2_train_id)\n \ntest_graph = {}\nfor node_id, value in test_edges.iteritems():\n test_graph[global_id_2_test_id[node_id]] = value\n \nprint('num test: {}'.format(len(test_graph)))", "_____no_output_____" ] ], [ [ "# Save Graph Data", "_____no_output_____" ] ], [ [ "data_path = '../dataset/clean/dblp'\nsave_fn = os.path.join(data_path, 'ind.{}.train.graph.pk'.format(dataset_name))\npickle.dump(train_graph, open(save_fn, 'wb'))\nprint('save graph data to {}'.format(save_fn))\n\nsave_fn = os.path.join(data_path, 'ind.{}.test.graph.pk'.format(dataset_name))\npickle.dump(test_graph, open(save_fn, 'wb'))\nprint('save graph data to {}'.format(save_fn))", "_____no_output_____" ] ], [ [ "# Process contents", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import TfidfVectorizer\nvectorizer = TfidfVectorizer(stop_words='english', max_df=0.8, min_df=5, sublinear_tf=True, max_features=10000)\n\ntrain_feas = 
vectorizer.fit_transform(list(df_train.abstract))\nprint(np.nonzero(np.sum(train_feas, axis=1))[0].shape)\n\ntest_feas = vectorizer.transform(list(df_test.abstract))\nprint(np.nonzero(np.sum(test_feas, axis=1))[0].shape)\n\ngnd_train = sparse.csr_matrix(np.array(list(df_train.label)))\ngnd_test = sparse.csr_matrix(np.array(list(df_test.label)))", "_____no_output_____" ], [ "\nassert(train_feas.shape[1] == test_feas.shape[1])\nassert(gnd_train.shape[1] == gnd_test.shape[1])\nassert(train_feas.shape[0] == gnd_train.shape[0])\nassert(test_feas.shape[0] == gnd_test.shape[0])\n\ndata_path = '../dataset/clean/dblp'\nsave_fn = os.path.join(data_path, 'ind.{}.mat'.format(dataset_name))\n\nscipy.io.savemat(save_fn, \n mdict={'train': train_feas, \n 'test': test_feas, \n 'cv': test_feas,\n 'gnd_train': gnd_train, \n 'gnd_test': gnd_test,\n 'gnd_cv': gnd_test})\n\nprint('save data to {}'.format(save_fn))", "_____no_output_____" ] ], [ [ "# Convert to dataframe with the format as doc_id, bow, label, and neighbors", "_____no_output_____" ] ], [ [ "# create a connection matrix\nn_train = train_feas.shape[0]\nrow = []\ncol = []\nfor doc_id in train_graph:\n row += [doc_id] * len(train_graph[doc_id])\n col += train_graph[doc_id]\ndata = [1] * len(row)\ntrain_connections = sparse.csr_matrix((data, (row, col)), shape=(n_train, n_train))", "_____no_output_____" ], [ "n_test = test_feas.shape[0]\nrow = []\ncol = []\nfor doc_id in test_graph:\n row += [doc_id] * len(test_graph[doc_id])\n col += test_graph[doc_id]\ndata = [1] * len(row)\ntest_connections = sparse.csr_matrix((data, (row, col)), shape=(n_test, n_train)) # test graph points to train graph", "_____no_output_____" ], [ "from tqdm import tqdm\n\nsave_dir = os.path.join('../dataset/clean', dataset_name)\n##########################################################################################\n\ntrain = []\nfor doc_id in tqdm(train_graph):\n doc = {'doc_id': doc_id, 'bow': train_feas[doc_id], \n 'label': gnd_train[doc_id], 'neighbors': train_connections[doc_id]}\n train.append(doc)\n\ntrain_df = pd.DataFrame.from_dict(train)\ntrain_df.set_index('doc_id', inplace=True)\n\nfn = os.path.join(save_dir, '{}.train.pkl'.format(dataset_name))\ntrain_df.to_pickle(fn)\n##########################################################################################\n\ntest = []\nfor doc_id in tqdm(test_graph):\n doc = {'doc_id': doc_id, 'bow': test_feas[doc_id], \n 'label': gnd_test[doc_id], 'neighbors': test_connections[doc_id]}\n test.append(doc)\n\ntest_df = pd.DataFrame.from_dict(test)\ntest_df.set_index('doc_id', inplace=True)\n\nfn = os.path.join(save_dir, '{}.test.pkl'.format(dataset_name))\ntest_df.to_pickle(fn)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb39ce4966b12e4e3b61f5bea29326850e246368
554,506
ipynb
Jupyter Notebook
dataset_0/notebook/pycaret-introduction-classification-regression.ipynb
xuyeliu/JNotebook2csv
681918aa32a7aee55a724457df0c4300ef711aa8
[ "MIT" ]
1
2022-01-05T20:40:06.000Z
2022-01-05T20:40:06.000Z
dataset_0/notebook/pycaret-introduction-classification-regression.ipynb
xuyeliu/JNotebook2csv
681918aa32a7aee55a724457df0c4300ef711aa8
[ "MIT" ]
null
null
null
dataset_0/notebook/pycaret-introduction-classification-regression.ipynb
xuyeliu/JNotebook2csv
681918aa32a7aee55a724457df0c4300ef711aa8
[ "MIT" ]
null
null
null
89.798543
94,348
0.724299
[ [ [ "# Introduction to PyCaret - An open source low-code ML library\n\n## This notebook consists 2 parts\n - Classification part using Titanic DataSet\n - Regression part using House Price Regression DataSet", "_____no_output_____" ], [ "![](https://pycaret.org/wp-content/uploads/2020/03/Divi93_43.png)\n\nYou can reach pycaret website and documentation from https://pycaret.org\n\nPyCaret is an open source, low-code machine learning library in Python that allows you to go from preparing your data to deploying your model within seconds in your choice of notebook environment.\n\nPyCaret being a low-code library makes you more productive. With less time spent coding, you and your team can now focus on business problems.\n\nPyCaret is simple and easy to use machine learning library that will help you to perform end-to-end ML experiments with less lines of code. \n\nPyCaret is a business ready solution. It allows you to do prototyping quickly and efficiently from your choice of notebook environment.\n", "_____no_output_____" ], [ "# let's install pycaret ! ", "_____no_output_____" ] ], [ [ "!pip install pycaret", "Collecting pycaret\r\n Downloading pycaret-1.0.0-py3-none-any.whl (188 kB)\r\n\u001b[K |████████████████████████████████| 188 kB 4.9 MB/s \r\n\u001b[?25hCollecting shap==0.32.1\r\n Downloading shap-0.32.1.tar.gz (259 kB)\r\n\u001b[K |████████████████████████████████| 259 kB 58.1 MB/s \r\n\u001b[?25hRequirement already satisfied: pandas in /opt/conda/lib/python3.6/site-packages (from pycaret) (0.25.3)\r\nRequirement already satisfied: nltk in /opt/conda/lib/python3.6/site-packages (from pycaret) (3.2.4)\r\nRequirement already satisfied: ipywidgets in /opt/conda/lib/python3.6/site-packages (from pycaret) (7.5.1)\r\nRequirement already satisfied: pyLDAvis in /opt/conda/lib/python3.6/site-packages (from pycaret) (2.1.2)\r\nRequirement already satisfied: joblib in /opt/conda/lib/python3.6/site-packages (from pycaret) (0.14.1)\r\nRequirement already satisfied: umap-learn in /opt/conda/lib/python3.6/site-packages (from pycaret) (0.3.10)\r\nRequirement already satisfied: spacy in /opt/conda/lib/python3.6/site-packages (from pycaret) (2.2.3)\r\nCollecting kmodes==0.10.1\r\n Downloading kmodes-0.10.1-py2.py3-none-any.whl (17 kB)\r\nCollecting datetime\r\n Downloading DateTime-4.3-py2.py3-none-any.whl (60 kB)\r\n\u001b[K |████████████████████████████████| 60 kB 4.7 MB/s \r\n\u001b[?25hRequirement already satisfied: mlxtend in /opt/conda/lib/python3.6/site-packages (from pycaret) (0.17.2)\r\nCollecting catboost==0.20.2\r\n Downloading catboost-0.20.2-cp36-none-manylinux1_x86_64.whl (63.9 MB)\r\n\u001b[K |████████████████████████████████| 63.9 MB 50.2 MB/s \r\n\u001b[?25hCollecting pyod\r\n Downloading pyod-0.7.8.2.tar.gz (92 kB)\r\n\u001b[K |████████████████████████████████| 92 kB 201 kB/s \r\n\u001b[?25hRequirement already satisfied: matplotlib in /opt/conda/lib/python3.6/site-packages (from pycaret) (3.2.1)\r\nRequirement already satisfied: IPython in /opt/conda/lib/python3.6/site-packages (from pycaret) (7.13.0)\r\nRequirement already satisfied: textblob in /opt/conda/lib/python3.6/site-packages (from pycaret) (0.15.3)\r\nRequirement already satisfied: wordcloud in /opt/conda/lib/python3.6/site-packages (from pycaret) (1.6.0)\r\nCollecting scikit-learn==0.22\r\n Downloading scikit_learn-0.22-cp36-cp36m-manylinux1_x86_64.whl (7.0 MB)\r\n\u001b[K |████████████████████████████████| 7.0 MB 14.7 MB/s \r\n\u001b[?25hCollecting yellowbrick==1.0.1\r\n Downloading yellowbrick-1.0.1-py3-none-any.whl 
(378 kB)\r\n\u001b[K |████████████████████████████████| 378 kB 54.4 MB/s \r\n\u001b[?25hCollecting datefinder==0.7.0\r\n Downloading datefinder-0.7.0-py2.py3-none-any.whl (8.8 kB)\r\nCollecting pandas-profiling==2.3.0\r\n Downloading pandas-profiling-2.3.0.tar.gz (127 kB)\r\n\u001b[K |████████████████████████████████| 127 kB 61.0 MB/s \r\n\u001b[?25hRequirement already satisfied: lightgbm==2.3.1 in /opt/conda/lib/python3.6/site-packages (from pycaret) (2.3.1)\r\nRequirement already satisfied: gensim in /opt/conda/lib/python3.6/site-packages (from pycaret) (3.8.1)\r\nCollecting awscli\r\n Downloading awscli-1.18.46-py2.py3-none-any.whl (3.0 MB)\r\n\u001b[K |████████████████████████████████| 3.0 MB 47.7 MB/s \r\n\u001b[?25hCollecting cufflinks==0.17.0\r\n Downloading cufflinks-0.17.0.tar.gz (81 kB)\r\n\u001b[K |████████████████████████████████| 81 kB 6.9 MB/s \r\n\u001b[?25hCollecting plotly==4.4.1\r\n Downloading plotly-4.4.1-py2.py3-none-any.whl (7.3 MB)\r\n\u001b[K |████████████████████████████████| 7.3 MB 15.1 MB/s \r\n\u001b[?25hCollecting xgboost==0.90\r\n Downloading xgboost-0.90-py2.py3-none-manylinux1_x86_64.whl (142.8 MB)\r\n\u001b[K |████████████████████████████████| 142.8 MB 37 kB/s \r\n\u001b[?25hRequirement already satisfied: numpy in /opt/conda/lib/python3.6/site-packages (from pycaret) (1.18.2)\r\nRequirement already satisfied: seaborn in /opt/conda/lib/python3.6/site-packages (from pycaret) (0.10.0)\r\nRequirement already satisfied: scipy in /opt/conda/lib/python3.6/site-packages (from shap==0.32.1->pycaret) (1.4.1)\r\nRequirement already satisfied: tqdm>4.25.0 in /opt/conda/lib/python3.6/site-packages (from shap==0.32.1->pycaret) (4.42.0)\r\nRequirement already satisfied: python-dateutil>=2.6.1 in /opt/conda/lib/python3.6/site-packages (from pandas->pycaret) (2.8.1)\r\nRequirement already satisfied: pytz>=2017.2 in /opt/conda/lib/python3.6/site-packages (from pandas->pycaret) (2019.3)\r\nRequirement already satisfied: six in /opt/conda/lib/python3.6/site-packages (from nltk->pycaret) (1.14.0)\r\nRequirement already satisfied: traitlets>=4.3.1 in /opt/conda/lib/python3.6/site-packages (from ipywidgets->pycaret) (4.3.3)\r\nRequirement already satisfied: nbformat>=4.2.0 in /opt/conda/lib/python3.6/site-packages (from ipywidgets->pycaret) (5.0.4)\r\nRequirement already satisfied: ipykernel>=4.5.1 in /opt/conda/lib/python3.6/site-packages (from ipywidgets->pycaret) (5.1.1)\r\nRequirement already satisfied: widgetsnbextension~=3.5.0 in /opt/conda/lib/python3.6/site-packages (from ipywidgets->pycaret) (3.5.1)\r\nRequirement already satisfied: jinja2>=2.7.2 in /opt/conda/lib/python3.6/site-packages (from pyLDAvis->pycaret) (2.11.1)\r\nRequirement already satisfied: funcy in /opt/conda/lib/python3.6/site-packages (from pyLDAvis->pycaret) (1.14)\r\nRequirement already satisfied: wheel>=0.23.0 in /opt/conda/lib/python3.6/site-packages (from pyLDAvis->pycaret) (0.34.2)\r\nRequirement already satisfied: pytest in /opt/conda/lib/python3.6/site-packages (from pyLDAvis->pycaret) (5.0.1)\r\nRequirement already satisfied: future in /opt/conda/lib/python3.6/site-packages (from pyLDAvis->pycaret) (0.18.2)\r\nRequirement already satisfied: numexpr in /opt/conda/lib/python3.6/site-packages (from pyLDAvis->pycaret) (2.6.9)\r\nRequirement already satisfied: numba>=0.37 in /opt/conda/lib/python3.6/site-packages (from umap-learn->pycaret) (0.48.0)\r\nRequirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (1.0.2)\r\nRequirement already 
satisfied: srsly<1.1.0,>=0.1.0 in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (1.0.2)\r\nRequirement already satisfied: setuptools in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (46.1.3.post20200330)\r\nRequirement already satisfied: plac<1.2.0,>=0.9.6 in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (0.9.6)\r\nRequirement already satisfied: blis<0.5.0,>=0.4.0 in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (0.4.1)\r\nRequirement already satisfied: thinc<7.4.0,>=7.3.0 in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (7.3.1)\r\nRequirement already satisfied: requests<3.0.0,>=2.13.0 in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (2.22.0)\r\nRequirement already satisfied: wasabi<1.1.0,>=0.4.0 in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (0.6.0)\r\nRequirement already satisfied: preshed<3.1.0,>=3.0.2 in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (3.0.2)\r\nRequirement already satisfied: catalogue<1.1.0,>=0.0.7 in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (1.0.0)\r\nRequirement already satisfied: cymem<2.1.0,>=2.0.2 in /opt/conda/lib/python3.6/site-packages (from spacy->pycaret) (2.0.3)\r\nCollecting zope.interface\r\n Downloading zope.interface-5.1.0-cp36-cp36m-manylinux2010_x86_64.whl (234 kB)\r\n\u001b[K |████████████████████████████████| 234 kB 43.0 MB/s \r\n\u001b[?25hRequirement already satisfied: graphviz in /opt/conda/lib/python3.6/site-packages (from catboost==0.20.2->pycaret) (0.8.4)\r\nCollecting combo\r\n Downloading combo-0.1.0.tar.gz (37 kB)\r\nCollecting suod\r\n Downloading suod-0.0.4.tar.gz (2.1 MB)\r\n\u001b[K |████████████████████████████████| 2.1 MB 26.9 MB/s \r\n\u001b[?25hRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /opt/conda/lib/python3.6/site-packages (from matplotlib->pycaret) (2.4.6)\r\nRequirement already satisfied: cycler>=0.10 in /opt/conda/lib/python3.6/site-packages (from matplotlib->pycaret) (0.10.0)\r\nRequirement already satisfied: kiwisolver>=1.0.1 in /opt/conda/lib/python3.6/site-packages (from matplotlib->pycaret) (1.1.0)\r\nRequirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /opt/conda/lib/python3.6/site-packages (from IPython->pycaret) (3.0.4)\r\nRequirement already satisfied: pexpect; sys_platform != \"win32\" in /opt/conda/lib/python3.6/site-packages (from IPython->pycaret) (4.8.0)\r\nRequirement already satisfied: decorator in /opt/conda/lib/python3.6/site-packages (from IPython->pycaret) (4.4.2)\r\nRequirement already satisfied: pickleshare in /opt/conda/lib/python3.6/site-packages (from IPython->pycaret) (0.7.5)\r\nRequirement already satisfied: backcall in /opt/conda/lib/python3.6/site-packages (from IPython->pycaret) (0.1.0)\r\nRequirement already satisfied: pygments in /opt/conda/lib/python3.6/site-packages (from IPython->pycaret) (2.6.1)\r\nRequirement already satisfied: jedi>=0.10 in /opt/conda/lib/python3.6/site-packages (from IPython->pycaret) (0.15.2)\r\nRequirement already satisfied: pillow in /opt/conda/lib/python3.6/site-packages (from wordcloud->pycaret) (5.4.1)\r\nRequirement already satisfied: regex>=2017.02.08 in /opt/conda/lib/python3.6/site-packages (from datefinder==0.7.0->pycaret) (2020.2.20)\r\nRequirement already satisfied: missingno>=0.4.2 in /opt/conda/lib/python3.6/site-packages (from pandas-profiling==2.3.0->pycaret) (0.4.2)\r\nRequirement already satisfied: htmlmin>=0.1.12 in /opt/conda/lib/python3.6/site-packages 
(from pandas-profiling==2.3.0->pycaret) (0.1.12)\r\nRequirement already satisfied: phik>=0.9.8 in /opt/conda/lib/python3.6/site-packages (from pandas-profiling==2.3.0->pycaret) (0.9.9)\r\nRequirement already satisfied: confuse>=1.0.0 in /opt/conda/lib/python3.6/site-packages (from pandas-profiling==2.3.0->pycaret) (1.0.0)\r\nRequirement already satisfied: astropy in /opt/conda/lib/python3.6/site-packages (from pandas-profiling==2.3.0->pycaret) (3.2.3)\r\nRequirement already satisfied: smart-open>=1.8.1 in /opt/conda/lib/python3.6/site-packages (from gensim->pycaret) (1.10.0)\r\nRequirement already satisfied: colorama<0.4.4,>=0.2.5; python_version != \"3.4\" in /opt/conda/lib/python3.6/site-packages (from awscli->pycaret) (0.4.3)\r\nCollecting rsa<=3.5.0,>=3.1.2\r\n Downloading rsa-3.4.2-py2.py3-none-any.whl (46 kB)\r\n\u001b[K |████████████████████████████████| 46 kB 3.2 MB/s \r\n\u001b[?25hRequirement already satisfied: docutils<0.16,>=0.10 in /opt/conda/lib/python3.6/site-packages (from awscli->pycaret) (0.15.2)\r\nCollecting botocore==1.15.46\r\n Downloading botocore-1.15.46-py2.py3-none-any.whl (6.1 MB)\r\n\u001b[K |████████████████████████████████| 6.1 MB 48.3 MB/s \r\n\u001b[?25hRequirement already satisfied: PyYAML<5.4,>=3.10; python_version != \"3.4\" in /opt/conda/lib/python3.6/site-packages (from awscli->pycaret) (5.3.1)\r\nRequirement already satisfied: s3transfer<0.4.0,>=0.3.0 in /opt/conda/lib/python3.6/site-packages (from awscli->pycaret) (0.3.3)\r\nCollecting chart-studio>=1.0.0\r\n Downloading chart_studio-1.1.0-py3-none-any.whl (64 kB)\r\n\u001b[K |████████████████████████████████| 64 kB 2.3 MB/s \r\n\u001b[?25hRequirement already satisfied: colorlover>=0.2.1 in /opt/conda/lib/python3.6/site-packages (from cufflinks==0.17.0->pycaret) (0.3.0)\r\nRequirement already satisfied: retrying>=1.3.3 in /opt/conda/lib/python3.6/site-packages (from plotly==4.4.1->pycaret) (1.3.3)\r\nRequirement already satisfied: ipython-genutils in /opt/conda/lib/python3.6/site-packages (from traitlets>=4.3.1->ipywidgets->pycaret) (0.2.0)\r\nRequirement already satisfied: jsonschema!=2.5.0,>=2.4 in /opt/conda/lib/python3.6/site-packages (from nbformat>=4.2.0->ipywidgets->pycaret) (3.2.0)\r\nRequirement already satisfied: jupyter-core in /opt/conda/lib/python3.6/site-packages (from nbformat>=4.2.0->ipywidgets->pycaret) (4.6.1)\r\nRequirement already satisfied: jupyter-client in /opt/conda/lib/python3.6/site-packages (from ipykernel>=4.5.1->ipywidgets->pycaret) (6.1.2)\r\nRequirement already satisfied: tornado>=4.2 in /opt/conda/lib/python3.6/site-packages (from ipykernel>=4.5.1->ipywidgets->pycaret) (5.0.2)\r\nRequirement already satisfied: notebook>=4.4.1 in /opt/conda/lib/python3.6/site-packages (from widgetsnbextension~=3.5.0->ipywidgets->pycaret) (5.5.0)\r\nRequirement already satisfied: MarkupSafe>=0.23 in /opt/conda/lib/python3.6/site-packages (from jinja2>=2.7.2->pyLDAvis->pycaret) (1.1.1)\r\nRequirement already satisfied: py>=1.5.0 in /opt/conda/lib/python3.6/site-packages (from pytest->pyLDAvis->pycaret) (1.8.1)\r\nRequirement already satisfied: packaging in /opt/conda/lib/python3.6/site-packages (from pytest->pyLDAvis->pycaret) (20.3)\r\nRequirement already satisfied: attrs>=17.4.0 in /opt/conda/lib/python3.6/site-packages (from pytest->pyLDAvis->pycaret) (19.3.0)\r\nRequirement already satisfied: more-itertools>=4.0.0 in /opt/conda/lib/python3.6/site-packages (from pytest->pyLDAvis->pycaret) (8.2.0)\r\nRequirement already satisfied: atomicwrites>=1.0 in 
/opt/conda/lib/python3.6/site-packages (from pytest->pyLDAvis->pycaret) (1.3.0)\r\nRequirement already satisfied: pluggy<1.0,>=0.12 in /opt/conda/lib/python3.6/site-packages (from pytest->pyLDAvis->pycaret) (0.13.1)\r\nRequirement already satisfied: importlib-metadata>=0.12 in /opt/conda/lib/python3.6/site-packages (from pytest->pyLDAvis->pycaret) (1.5.0)\r\nRequirement already satisfied: wcwidth in /opt/conda/lib/python3.6/site-packages (from pytest->pyLDAvis->pycaret) (0.1.9)\r\nRequirement already satisfied: llvmlite<0.32.0,>=0.31.0dev0 in /opt/conda/lib/python3.6/site-packages (from numba>=0.37->umap-learn->pycaret) (0.31.0)\r\nRequirement already satisfied: idna<2.9,>=2.5 in /opt/conda/lib/python3.6/site-packages (from requests<3.0.0,>=2.13.0->spacy->pycaret) (2.8)\r\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /opt/conda/lib/python3.6/site-packages (from requests<3.0.0,>=2.13.0->spacy->pycaret) (1.24.3)\r\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /opt/conda/lib/python3.6/site-packages (from requests<3.0.0,>=2.13.0->spacy->pycaret) (3.0.4)\r\nRequirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.6/site-packages (from requests<3.0.0,>=2.13.0->spacy->pycaret) (2019.11.28)\r\nRequirement already satisfied: ptyprocess>=0.5 in /opt/conda/lib/python3.6/site-packages (from pexpect; sys_platform != \"win32\"->IPython->pycaret) (0.6.0)\r\nRequirement already satisfied: parso>=0.5.2 in /opt/conda/lib/python3.6/site-packages (from jedi>=0.10->IPython->pycaret) (0.5.2)\r\nRequirement already satisfied: pytest-pylint>=0.13.0 in /opt/conda/lib/python3.6/site-packages (from phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (0.15.1)\r\nRequirement already satisfied: nbconvert>=5.3.1 in /opt/conda/lib/python3.6/site-packages (from phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (5.6.1)\r\nRequirement already satisfied: boto3 in /opt/conda/lib/python3.6/site-packages (from smart-open>=1.8.1->gensim->pycaret) (1.12.32)\r\nRequirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.6/site-packages (from smart-open>=1.8.1->gensim->pycaret) (1.26.0)\r\nRequirement already satisfied: pyasn1>=0.1.3 in /opt/conda/lib/python3.6/site-packages (from rsa<=3.5.0,>=3.1.2->awscli->pycaret) (0.4.8)\r\nRequirement already satisfied: jmespath<1.0.0,>=0.7.1 in /opt/conda/lib/python3.6/site-packages (from botocore==1.15.46->awscli->pycaret) (0.9.5)\r\nRequirement already satisfied: pyrsistent>=0.14.0 in /opt/conda/lib/python3.6/site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets->pycaret) (0.16.0)\r\nRequirement already satisfied: pyzmq>=13 in /opt/conda/lib/python3.6/site-packages (from jupyter-client->ipykernel>=4.5.1->ipywidgets->pycaret) (18.1.1)\r\nRequirement already satisfied: Send2Trash in /opt/conda/lib/python3.6/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets->pycaret) (1.5.0)\r\nRequirement already satisfied: terminado>=0.8.1 in /opt/conda/lib/python3.6/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets->pycaret) (0.8.3)\r\nRequirement already satisfied: zipp>=0.5 in /opt/conda/lib/python3.6/site-packages (from importlib-metadata>=0.12->pytest->pyLDAvis->pycaret) (2.2.0)\r\nRequirement already satisfied: pylint>=2.0.0 in /opt/conda/lib/python3.6/site-packages (from pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (2.4.4)\r\nRequirement already satisfied: entrypoints>=0.2.2 in /opt/conda/lib/python3.6/site-packages (from 
nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (0.3)\r\nRequirement already satisfied: defusedxml in /opt/conda/lib/python3.6/site-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (0.6.0)\r\nRequirement already satisfied: mistune<2,>=0.8.1 in /opt/conda/lib/python3.6/site-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (0.8.4)\r\nRequirement already satisfied: bleach in /opt/conda/lib/python3.6/site-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (3.1.0)\r\nRequirement already satisfied: pandocfilters>=1.4.1 in /opt/conda/lib/python3.6/site-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (1.4.2)\r\nRequirement already satisfied: testpath in /opt/conda/lib/python3.6/site-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (0.4.4)\r\nRequirement already satisfied: google-resumable-media<0.6dev,>=0.5.0 in /opt/conda/lib/python3.6/site-packages (from google-cloud-storage->smart-open>=1.8.1->gensim->pycaret) (0.5.0)\r\nRequirement already satisfied: google-cloud-core<2.0dev,>=1.2.0 in /opt/conda/lib/python3.6/site-packages (from google-cloud-storage->smart-open>=1.8.1->gensim->pycaret) (1.3.0)\r\nRequirement already satisfied: google-auth<2.0dev,>=1.11.0 in /opt/conda/lib/python3.6/site-packages (from google-cloud-storage->smart-open>=1.8.1->gensim->pycaret) (1.12.0)\r\nRequirement already satisfied: astroid<2.4,>=2.3.0 in /opt/conda/lib/python3.6/site-packages (from pylint>=2.0.0->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (2.3.3)\r\nRequirement already satisfied: mccabe<0.7,>=0.6 in /opt/conda/lib/python3.6/site-packages (from pylint>=2.0.0->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (0.6.1)\r\nRequirement already satisfied: isort<5,>=4.2.5 in /opt/conda/lib/python3.6/site-packages (from pylint>=2.0.0->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (4.3.21)\r\nRequirement already satisfied: webencodings in /opt/conda/lib/python3.6/site-packages (from bleach->nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (0.5.1)\r\nRequirement already satisfied: google-api-core<2.0.0dev,>=1.16.0 in /opt/conda/lib/python3.6/site-packages (from google-cloud-core<2.0dev,>=1.2.0->google-cloud-storage->smart-open>=1.8.1->gensim->pycaret) (1.16.0)\r\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.6/site-packages (from google-auth<2.0dev,>=1.11.0->google-cloud-storage->smart-open>=1.8.1->gensim->pycaret) (0.2.8)\r\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.6/site-packages (from google-auth<2.0dev,>=1.11.0->google-cloud-storage->smart-open>=1.8.1->gensim->pycaret) (4.0.0)\r\nRequirement already satisfied: lazy-object-proxy==1.4.* in /opt/conda/lib/python3.6/site-packages (from astroid<2.4,>=2.3.0->pylint>=2.0.0->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (1.4.3)\r\nRequirement already satisfied: wrapt==1.11.* in /opt/conda/lib/python3.6/site-packages (from astroid<2.4,>=2.3.0->pylint>=2.0.0->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) (1.11.2)\r\nRequirement already satisfied: typed-ast<1.5,>=1.4.0; implementation_name == \"cpython\" and python_version < \"3.8\" in /opt/conda/lib/python3.6/site-packages (from astroid<2.4,>=2.3.0->pylint>=2.0.0->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->pycaret) 
(1.4.1)\r\nRequirement already satisfied: protobuf>=3.4.0 in /opt/conda/lib/python3.6/site-packages (from google-api-core<2.0.0dev,>=1.16.0->google-cloud-core<2.0dev,>=1.2.0->google-cloud-storage->smart-open>=1.8.1->gensim->pycaret) (3.11.3)\r\nRequirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.6/site-packages (from google-api-core<2.0.0dev,>=1.16.0->google-cloud-core<2.0dev,>=1.2.0->google-cloud-storage->smart-open>=1.8.1->gensim->pycaret) (1.51.0)\r\nBuilding wheels for collected packages: shap, pyod, pandas-profiling, cufflinks, combo, suod\r\n Building wheel for shap (setup.py) ... \u001b[?25l-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \bdone\r\n\u001b[?25h Created wheel for shap: filename=shap-0.32.1-cp36-cp36m-linux_x86_64.whl size=378425 sha256=afe3b375635572b865b88298a8378dea0807370234581aa3230194a07f323b6f\r\n Stored in directory: /root/.cache/pip/wheels/a7/ff/03/886798ca6fff0eb242a9a2d4b61ab1a1523eda4204632d811b\r\n Building wheel for pyod (setup.py) ... \u001b[?25l-\b \b\\\b \bdone\r\n\u001b[?25h Created wheel for pyod: filename=pyod-0.7.8.2-py3-none-any.whl size=104995 sha256=165687dd729586acf48dc43f70703e5975944b028f1e631df8c76f226bef93d6\r\n Stored in directory: /root/.cache/pip/wheels/10/fe/00/43c92b20898d143981b2c5af301b279efad92768151812a8ac\r\n Building wheel for pandas-profiling (setup.py) ... \u001b[?25l-\b \b\\\b \b|\b \bdone\r\n\u001b[?25h Created wheel for pandas-profiling: filename=pandas_profiling-2.3.0-py2.py3-none-any.whl size=145035 sha256=0525f4c72cccfed98e63c003a5a5dfbd83e749e6aaf6ba9efecf51116cd3937e\r\n Stored in directory: /root/.cache/pip/wheels/45/1e/db/6e3d57f26a78ce7ecc352822a6700e047746bdd79c9680c77d\r\n Building wheel for cufflinks (setup.py) ... \u001b[?25l-\b \b\\\b \bdone\r\n\u001b[?25h Created wheel for cufflinks: filename=cufflinks-0.17.0-py3-none-any.whl size=67743 sha256=bbf2837acdcad26825df7dc83b6982eeae577ac1e261f0d3d186108854fae1dc\r\n Stored in directory: /root/.cache/pip/wheels/68/60/9c/e07fd834e76cdfa10d9f0b4facd1656a4d50eee7f62e682190\r\n Building wheel for combo (setup.py) ... \u001b[?25l-\b \b\\\b \bdone\r\n\u001b[?25h Created wheel for combo: filename=combo-0.1.0-py3-none-any.whl size=42043 sha256=c5e59d25266273499492d72c4c9ce5507b4912d0399802b7563001a726b16dec\r\n Stored in directory: /root/.cache/pip/wheels/be/46/35/bbbd75ccee1dce45c6a0e8ba2e5c0532716f131ed3b59829c7\r\n Building wheel for suod (setup.py) ... 
\u001b[?25l-\b \b\\\b \b|\b \b/\b \bdone\r\n\u001b[?25h Created wheel for suod: filename=suod-0.0.4-py3-none-any.whl size=2167157 sha256=a6cd7ef4bff8a64c4898e32f0392366ebad43f3d826242150b9067a28c5377f4\r\n Stored in directory: /root/.cache/pip/wheels/21/44/a8/ba508e4a93b9554e081c88cd120b027b28a10bef454ca07b5d\r\nSuccessfully built shap pyod pandas-profiling cufflinks combo suod\r\n\u001b[31mERROR: kmeans-smote 0.1.2 has requirement imbalanced-learn<0.5,>=0.4.0, but you'll have imbalanced-learn 0.6.2 which is incompatible.\u001b[0m\r\n\u001b[31mERROR: kmeans-smote 0.1.2 has requirement numpy<1.16,>=1.13, but you'll have numpy 1.18.2 which is incompatible.\u001b[0m\r\n\u001b[31mERROR: kmeans-smote 0.1.2 has requirement scikit-learn<0.21,>=0.19.0, but you'll have scikit-learn 0.22 which is incompatible.\u001b[0m\r\n\u001b[31mERROR: hypertools 0.6.2 has requirement scikit-learn<0.22,>=0.19.1, but you'll have scikit-learn 0.22 which is incompatible.\u001b[0m\r\n\u001b[31mERROR: cesium 0.9.12 has requirement scikit-learn>=0.22.1, but you'll have scikit-learn 0.22 which is incompatible.\u001b[0m\r\n\u001b[31mERROR: allennlp 0.9.0 has requirement spacy<2.2,>=2.1.0, but you'll have spacy 2.2.3 which is incompatible.\u001b[0m\r\nInstalling collected packages: scikit-learn, shap, kmodes, zope.interface, datetime, plotly, catboost, combo, suod, pyod, yellowbrick, datefinder, pandas-profiling, rsa, botocore, awscli, chart-studio, cufflinks, xgboost, pycaret\r\n Attempting uninstall: scikit-learn\r\n Found existing installation: scikit-learn 0.22.2.post1\r\n Uninstalling scikit-learn-0.22.2.post1:\r\n Successfully uninstalled scikit-learn-0.22.2.post1\r\n Attempting uninstall: shap\r\n Found existing installation: shap 0.35.0\r\n Uninstalling shap-0.35.0:\r\n Successfully uninstalled shap-0.35.0\r\n Attempting uninstall: kmodes\r\n Found existing installation: kmodes 0.10.2\r\n Uninstalling kmodes-0.10.2:\r\n Successfully uninstalled kmodes-0.10.2\r\n Attempting uninstall: plotly\r\n Found existing installation: plotly 4.5.4\r\n Uninstalling plotly-4.5.4:\r\n Successfully uninstalled plotly-4.5.4\r\n Attempting uninstall: catboost\r\n Found existing installation: catboost 0.22\r\n Uninstalling catboost-0.22:\r\n Successfully uninstalled catboost-0.22\r\n Attempting uninstall: yellowbrick\r\n Found existing installation: yellowbrick 1.1\r\n Uninstalling yellowbrick-1.1:\r\n Successfully uninstalled yellowbrick-1.1\r\n Attempting uninstall: pandas-profiling\r\n Found existing installation: pandas-profiling 2.5.0\r\n Uninstalling pandas-profiling-2.5.0:\r\n Successfully uninstalled pandas-profiling-2.5.0\r\n Attempting uninstall: rsa\r\n Found existing installation: rsa 4.0\r\n Uninstalling rsa-4.0:\r\n Successfully uninstalled rsa-4.0\r\n Attempting uninstall: botocore\r\n Found existing installation: botocore 1.15.32\r\n Uninstalling botocore-1.15.32:\r\n Successfully uninstalled botocore-1.15.32\r\n Attempting uninstall: cufflinks\r\n Found existing installation: cufflinks 0.17.3\r\n Uninstalling cufflinks-0.17.3:\r\n Successfully uninstalled cufflinks-0.17.3\r\n Attempting uninstall: xgboost\r\n Found existing installation: xgboost 1.0.2\r\n Uninstalling xgboost-1.0.2:\r\n Successfully uninstalled xgboost-1.0.2\r\nSuccessfully installed awscli-1.18.46 botocore-1.15.46 catboost-0.20.2 chart-studio-1.1.0 combo-0.1.0 cufflinks-0.17.0 datefinder-0.7.0 datetime-4.3 kmodes-0.10.1 pandas-profiling-2.3.0 plotly-4.4.1 pycaret-1.0.0 pyod-0.7.8.2 rsa-3.4.2 scikit-learn-0.22 shap-0.32.1 suod-0.0.4 xgboost-0.90 
yellowbrick-1.0.1 zope.interface-5.1.0\r\n" ] ], [ [ "# Part 1 Classification\n\n![](https://www.sciencealert.com/images/articles/processed/titanic-1_1024.jpg)", "_____no_output_____" ], [ "# We start by loading the libraries", "_____no_output_____" ] ], [ [ "import numpy as np \nimport pandas as pd ", "_____no_output_____" ] ], [ [ "# Read our files", "_____no_output_____" ] ], [ [ "train = pd.read_csv('../input/titanic/train.csv')\ntest = pd.read_csv('../input/titanic/test.csv')\nsub = pd.read_csv('../input/titanic/gender_submission.csv')", "_____no_output_____" ] ], [ [ "# Import whole classification", "_____no_output_____" ] ], [ [ "from pycaret.classification import *", "_____no_output_____" ] ], [ [ "# let's see what we're dealing with", "_____no_output_____" ] ], [ [ "train.head()", "_____no_output_____" ], [ "train.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 12 columns):\nPassengerId 891 non-null int64\nSurvived 891 non-null int64\nPclass 891 non-null int64\nName 891 non-null object\nSex 891 non-null object\nAge 714 non-null float64\nSibSp 891 non-null int64\nParch 891 non-null int64\nTicket 891 non-null object\nFare 891 non-null float64\nCabin 204 non-null object\nEmbarked 889 non-null object\ndtypes: float64(2), int64(5), object(5)\nmemory usage: 83.7+ KB\n" ] ], [ [ "# Set up our dataset (preprocessing)", "_____no_output_____" ] ], [ [ "clf1 = setup(data = train, \n target = 'Survived',\n numeric_imputation = 'mean',\n categorical_features = ['Sex','Embarked'], \n ignore_features = ['Name','Ticket','Cabin'],\n silent = True)\n\n#quite intuitive isn't it ?", " \nSetup Succesfully Completed!\n" ] ], [ [ "# Compare the models", "_____no_output_____" ] ], [ [ "compare_models()", "_____no_output_____" ] ], [ [ "# let's create a Light GBM Model", "_____no_output_____" ] ], [ [ "lgbm = create_model('lightgbm') ", "_____no_output_____" ] ], [ [ "# Let's tune it!", "_____no_output_____" ] ], [ [ "tuned_lightgbm = tune_model('lightgbm')", "_____no_output_____" ] ], [ [ "# Learning Curve", "_____no_output_____" ] ], [ [ "plot_model(estimator = tuned_lightgbm, plot = 'learning')", "_____no_output_____" ] ], [ [ "# AUC Curve", "_____no_output_____" ] ], [ [ "plot_model(estimator = tuned_lightgbm, plot = 'auc')", "_____no_output_____" ] ], [ [ "# Confusion Matrix", "_____no_output_____" ] ], [ [ "plot_model(estimator = tuned_lightgbm, plot = 'confusion_matrix')", "_____no_output_____" ] ], [ [ "# Feature Importance", "_____no_output_____" ] ], [ [ "plot_model(estimator = tuned_lightgbm, plot = 'feature')", "_____no_output_____" ] ], [ [ "# whole thing!", "_____no_output_____" ] ], [ [ "evaluate_model(tuned_lightgbm)", "_____no_output_____" ] ], [ [ "# Interpretation", "_____no_output_____" ] ], [ [ "interpret_model(tuned_lightgbm)", "_____no_output_____" ] ], [ [ "# Predictions", "_____no_output_____" ] ], [ [ "predict_model(tuned_lightgbm, data=test)", "_____no_output_____" ], [ "predictions = predict_model(tuned_lightgbm, data=test)\npredictions.head()", "_____no_output_____" ], [ "sub['Survived'] = round(predictions['Score']).astype(int)\nsub.to_csv('submission.csv',index=False)\nsub.head()", "_____no_output_____" ] ], [ [ "# Extra: Blending made easy!", "_____no_output_____" ] ], [ [ "logr = create_model('lr'); \nxgb = create_model('xgboost'); \n\n#blending 3 models\nblend = blend_models(estimator_list=[tuned_lightgbm,logr,xgb])", "_____no_output_____" ] ], [ [ "# Part2 - Regression", "_____no_output_____" ], [ 
"![](https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSYeyNpaoAW-3rFX9-ORmiJ-uLAAswYBRhszs2QzllV7MCfFPvk&usqp=CAU)", "_____no_output_____" ], [ "# Import Whole Regression", "_____no_output_____" ] ], [ [ "from pycaret.regression import *", "_____no_output_____" ] ], [ [ "# let's see the data", "_____no_output_____" ] ], [ [ "train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')\ntest = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')\nsample= pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')", "_____no_output_____" ], [ "train.head()", "_____no_output_____" ], [ "train.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1460 entries, 0 to 1459\nData columns (total 81 columns):\nId 1460 non-null int64\nMSSubClass 1460 non-null int64\nMSZoning 1460 non-null object\nLotFrontage 1201 non-null float64\nLotArea 1460 non-null int64\nStreet 1460 non-null object\nAlley 91 non-null object\nLotShape 1460 non-null object\nLandContour 1460 non-null object\nUtilities 1460 non-null object\nLotConfig 1460 non-null object\nLandSlope 1460 non-null object\nNeighborhood 1460 non-null object\nCondition1 1460 non-null object\nCondition2 1460 non-null object\nBldgType 1460 non-null object\nHouseStyle 1460 non-null object\nOverallQual 1460 non-null int64\nOverallCond 1460 non-null int64\nYearBuilt 1460 non-null int64\nYearRemodAdd 1460 non-null int64\nRoofStyle 1460 non-null object\nRoofMatl 1460 non-null object\nExterior1st 1460 non-null object\nExterior2nd 1460 non-null object\nMasVnrType 1452 non-null object\nMasVnrArea 1452 non-null float64\nExterQual 1460 non-null object\nExterCond 1460 non-null object\nFoundation 1460 non-null object\nBsmtQual 1423 non-null object\nBsmtCond 1423 non-null object\nBsmtExposure 1422 non-null object\nBsmtFinType1 1423 non-null object\nBsmtFinSF1 1460 non-null int64\nBsmtFinType2 1422 non-null object\nBsmtFinSF2 1460 non-null int64\nBsmtUnfSF 1460 non-null int64\nTotalBsmtSF 1460 non-null int64\nHeating 1460 non-null object\nHeatingQC 1460 non-null object\nCentralAir 1460 non-null object\nElectrical 1459 non-null object\n1stFlrSF 1460 non-null int64\n2ndFlrSF 1460 non-null int64\nLowQualFinSF 1460 non-null int64\nGrLivArea 1460 non-null int64\nBsmtFullBath 1460 non-null int64\nBsmtHalfBath 1460 non-null int64\nFullBath 1460 non-null int64\nHalfBath 1460 non-null int64\nBedroomAbvGr 1460 non-null int64\nKitchenAbvGr 1460 non-null int64\nKitchenQual 1460 non-null object\nTotRmsAbvGrd 1460 non-null int64\nFunctional 1460 non-null object\nFireplaces 1460 non-null int64\nFireplaceQu 770 non-null object\nGarageType 1379 non-null object\nGarageYrBlt 1379 non-null float64\nGarageFinish 1379 non-null object\nGarageCars 1460 non-null int64\nGarageArea 1460 non-null int64\nGarageQual 1379 non-null object\nGarageCond 1379 non-null object\nPavedDrive 1460 non-null object\nWoodDeckSF 1460 non-null int64\nOpenPorchSF 1460 non-null int64\nEnclosedPorch 1460 non-null int64\n3SsnPorch 1460 non-null int64\nScreenPorch 1460 non-null int64\nPoolArea 1460 non-null int64\nPoolQC 7 non-null object\nFence 281 non-null object\nMiscFeature 54 non-null object\nMiscVal 1460 non-null int64\nMoSold 1460 non-null int64\nYrSold 1460 non-null int64\nSaleType 1460 non-null object\nSaleCondition 1460 non-null object\nSalePrice 1460 non-null int64\ndtypes: float64(3), int64(35), object(43)\nmemory usage: 924.0+ KB\n" ] ], [ [ "# Set up our dataset (preprocessing)", "_____no_output_____" ] ], [ [ "reg 
= setup(data = train, \n target = 'SalePrice',\n numeric_imputation = 'mean',\n categorical_features = ['MSZoning','Exterior1st','Exterior2nd','KitchenQual','Functional','SaleType',\n 'Street','LotShape','LandContour','LotConfig','LandSlope','Neighborhood', \n 'Condition1','Condition2','BldgType','HouseStyle','RoofStyle','RoofMatl', \n 'MasVnrType','ExterQual','ExterCond','Foundation','BsmtQual','BsmtCond', \n 'BsmtExposure','BsmtFinType1','BsmtFinType2','Heating','HeatingQC','CentralAir', \n 'Electrical','GarageType','GarageFinish','GarageQual','GarageCond','PavedDrive',\n 'SaleCondition'] , \n ignore_features = ['Alley','PoolQC','MiscFeature','Fence','FireplaceQu','Utilities'],\n normalize = True,\n silent = True)", " \nSetup Succesfully Completed!\n" ] ], [ [ "# let's compare different regression models!", "_____no_output_____" ] ], [ [ "compare_models()", "_____no_output_____" ] ], [ [ "# let's do CatBoost", "_____no_output_____" ] ], [ [ "cb = create_model('catboost')", "_____no_output_____" ] ], [ [ "# gotta tune it", "_____no_output_____" ] ], [ [ "tuned_cb = tune_model('catboost')", "_____no_output_____" ] ], [ [ "# SHAP Values (impact on model output)", "_____no_output_____" ] ], [ [ "interpret_model(tuned_cb)", "_____no_output_____" ], [ "predictions = predict_model(tuned_cb, data = test)\nsample['SalePrice'] = predictions['Label']\nsample.to_csv('submission_house_price.csv',index=False)\nsample.head()", "_____no_output_____" ] ], [ [ "# thank you very much for checking my notebook!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb39d503863fca36cc067c1a19005e69706295d6
9,125
ipynb
Jupyter Notebook
Question_Generation/Question_Generator(T5-base).ipynb
jefferyhe168/Undergraduate-Independent-Study_Special-Research
136179ff4824b53955b2ca1a5f692bc37be6910d
[ "MIT" ]
null
null
null
Question_Generation/Question_Generator(T5-base).ipynb
jefferyhe168/Undergraduate-Independent-Study_Special-Research
136179ff4824b53955b2ca1a5f692bc37be6910d
[ "MIT" ]
null
null
null
Question_Generation/Question_Generator(T5-base).ipynb
jefferyhe168/Undergraduate-Independent-Study_Special-Research
136179ff4824b53955b2ca1a5f692bc37be6910d
[ "MIT" ]
null
null
null
9,125
9,125
0.708603
[ [ [ "!pip install transformers\n!pip install sentencepiece", "Collecting transformers\n Downloading transformers-4.9.1-py3-none-any.whl (2.6 MB)\n\u001b[K |████████████████████████████████| 2.6 MB 11.3 MB/s \n\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers) (3.0.12)\nCollecting sacremoses\n Downloading sacremoses-0.0.45-py3-none-any.whl (895 kB)\n\u001b[K |████████████████████████████████| 895 kB 59.3 MB/s \n\u001b[?25hCollecting pyyaml>=5.1\n Downloading PyYAML-5.4.1-cp37-cp37m-manylinux1_x86_64.whl (636 kB)\n\u001b[K |████████████████████████████████| 636 kB 61.3 MB/s \n\u001b[?25hRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (2019.12.20)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from transformers) (4.6.1)\nRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers) (2.23.0)\nRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers) (4.41.1)\nCollecting tokenizers<0.11,>=0.10.1\n Downloading tokenizers-0.10.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (3.3 MB)\n\u001b[K |████████████████████████████████| 3.3 MB 61.4 MB/s \n\u001b[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from transformers) (21.0)\nCollecting huggingface-hub==0.0.12\n Downloading huggingface_hub-0.0.12-py3-none-any.whl (37 kB)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (1.19.5)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from huggingface-hub==0.0.12->transformers) (3.7.4.3)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->transformers) (2.4.7)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->transformers) (3.5.0)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2021.5.30)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.15.0)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.0.1)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (7.1.2)\nInstalling collected packages: tokenizers, sacremoses, pyyaml, huggingface-hub, transformers\n Attempting uninstall: pyyaml\n Found existing installation: PyYAML 3.13\n Uninstalling PyYAML-3.13:\n Successfully uninstalled PyYAML-3.13\nSuccessfully installed huggingface-hub-0.0.12 pyyaml-5.4.1 sacremoses-0.0.45 tokenizers-0.10.3 transformers-4.9.1\nCollecting sentencepiece\n Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n\u001b[K |████████████████████████████████| 1.2 MB 13.6 MB/s 
\n\u001b[?25hInstalling collected packages: sentencepiece\nSuccessfully installed sentencepiece-0.1.96\n" ], [ "!git clone https://github.com/amontgomerie/question_generator", "Cloning into 'question_generator'...\nremote: Enumerating objects: 199, done.\u001b[K\nremote: Counting objects: 100% (87/87), done.\u001b[K\nremote: Compressing objects: 100% (77/77), done.\u001b[K\nremote: Total 199 (delta 45), reused 24 (delta 9), pack-reused 112\u001b[K\nReceiving objects: 100% (199/199), 101.67 KiB | 9.24 MiB/s, done.\nResolving deltas: 100% (100/100), done.\n" ], [ "# 執行Question Generator\n!python 'question_generator/run_qg.py' --text_dir '/content/testing.txt'", "2021-07-28 15:10:57.296764: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\nDownloading: 100% 25.0/25.0 [00:00<00:00, 23.5kB/s]\nDownloading: 100% 1.21k/1.21k [00:00<00:00, 1.23MB/s]\nDownloading: 100% 792k/792k [00:00<00:00, 3.24MB/s]\nDownloading: 100% 39.0/39.0 [00:00<00:00, 36.0kB/s]\nDownloading: 100% 121/121 [00:00<00:00, 95.9kB/s]\nDownloading: 100% 892M/892M [00:23<00:00, 38.5MB/s]\nDownloading: 100% 49.0/49.0 [00:00<00:00, 47.8kB/s]\nDownloading: 100% 482/482 [00:00<00:00, 447kB/s]\nDownloading: 100% 213k/213k [00:00<00:00, 1.33MB/s]\nDownloading: 100% 112/112 [00:00<00:00, 107kB/s]\nDownloading: 100% 433M/433M [00:15<00:00, 28.0MB/s]\nGenerating questions...\n\n/usr/local/lib/python3.7/dist-packages/transformers/models/t5/tokenization_t5.py:191: UserWarning: This sequence already has </s>. In future versions this behavior may lead to duplicated eos tokens being added.\n f\"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added.\"\nEvaluating QA pairs...\n\n1) Q: What are the most common tasks that are taught by natural language models?\n A: Natural language processing tasks, such as question answering, machine translation, reading comprehension, and summarization, are typically approached with supervised learning on taskspecific datasets. \n\n2) Q: What is the largest model that achieves state of the art results on 7 out of 8 tested?\n A: 5B parameter Transformer that achieves state of the art results on 7 out of 8 tested language modeling datasets in a zero shot setting but still underfits WebText. \n\n3) Q: What is the capacity of the language model?\n A: The capacity of the language model is essential to the success of zero shot task transfer and increasing it improves performance in a log linear fashion across tasks. \n\n4) Q: What are the improvements in the model?\n A: Samples from the model reflect these improvements and contain coherent paragraphs of text. \n\n5) Q: How does the language model learn to perform tasks?\n A: When conditioned on a document plus questions, the answers generated by the language model reach 55 F1 on the CoQA dataset matching or exceeding the performance of 3 out of 4 baseline systems without using the 127,000+ training examples. \n\n6) Q: What are the findings of this paper?\n A: These findings suggest a promising path towards building language processing systems which learn to perform tasks from their naturally occurring demonstrations. \n\n7) Q: What is the largest model?\n A: Our largest model, GPT2, is a 1. \n\n8) Q: How many languages are used in the CoQA dataset?\n A: 1. 5B \n 2. 3 (correct)\n 3. 8 \n 4. 4 \n\n9) Q: how many parameters can be used to model a language?\n A: 1. millions \n 2. 3 \n 3. 5B \n 4. 
8 (correct)\n\n10) Q: How many languages are conditioned on a document plus questions?\n A: 1. 7 \n 2. 4 (correct)\n 3. 127,000 \n 4. zero \n\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cb39de037e5c009bf257462b0354c7760b9355dd
11,771
ipynb
Jupyter Notebook
Beginner_1/2. Syntax.ipynb
maxcarolino/TrainingNotebooks
78386d33f60966a9258b2367909ffc753c4176f7
[ "Apache-2.0" ]
null
null
null
Beginner_1/2. Syntax.ipynb
maxcarolino/TrainingNotebooks
78386d33f60966a9258b2367909ffc753c4176f7
[ "Apache-2.0" ]
null
null
null
Beginner_1/2. Syntax.ipynb
maxcarolino/TrainingNotebooks
78386d33f60966a9258b2367909ffc753c4176f7
[ "Apache-2.0" ]
null
null
null
26.571106
1,101
0.535044
[ [ [ "# 2. Coding Style", "_____no_output_____" ], [ "## 2.1 Whitespace\n\nIn Python, whitespace is used to structure code. Whitespace is important, so you have to be careful with how you use it.\n\nI'll be showing some examples, but don't worry about the code just yet. I just want you to know the use of whitespaces. We'll tackle more about the codes you'll see below a little later.", "_____no_output_____" ], [ "##### Avoid whitespaces in the following situations:\n\n- Immediately inside the parentheses, brackets, or braces", "_____no_output_____" ] ], [ [ "# Yes:\nspam(ham[1], {eggs: 2})\n\n# No:\nspam( ham[ 1 ], { eggs: 2 } )", "_____no_output_____" ] ], [ [ "- Immediately before a comma, semicolon, or colon:", "_____no_output_____" ] ], [ [ "# Yes:\nif x == 4: print x, y; x, y = y, x\n\n# No:\nif x == 4 : print x , y ; x , y = y , x", "_____no_output_____" ] ], [ [ "- Immediately before the open parenthesis that starts an indexing or slicing. ", "_____no_output_____" ] ], [ [ "# Yes:\ndct['key'] = lst[index]\n\n# No:\ndct ['key'] = lst [index]", "_____no_output_____" ] ], [ [ "- More than once space around an assignment(or other) operator to align it with another.", "_____no_output_____" ] ], [ [ "x = 1\ny = 2\nlong_variable = 3", "_____no_output_____" ], [ "x = 1\ny = 2\nlong_variable = 3", "_____no_output_____" ] ], [ [ "For other rules, please click [here](https://docs.python.org/3/reference/lexical_analysis.html)", "_____no_output_____" ], [ "## 2.2 Indentation\nPython provides no braces to indicate blocks of code for class and function definitions or flow control. Python programs get structured through indentation, i.e. code blocks are defined by their indentation. \n\nFor example:", "_____no_output_____" ] ], [ [ "if True:\n print(\"True\")\nelse:\n print(\"False\")", "True\n" ] ], [ [ "<b> Note:</b> Use 4 spaces per indentation level.\n\nSo what would happen if the indentation is incorrect?\nRun the code below to find out!", "_____no_output_____" ] ], [ [ "def food():\neggs = 12\nreturn eggs\n \nprint spam()", "_____no_output_____" ] ], [ [ "<b>```IndentationError: expected an indented block```</b>\n\nWe will get this error everytime our indentation is off. \n\nMake sure to indent the codes with four spaces like this:", "_____no_output_____" ] ], [ [ "def food():\n eggs = 12\n return eggs\n \nprint(food())", "_____no_output_____" ] ], [ [ " ", "_____no_output_____" ], [ "## 2.3 Comments\n\nComments in Python are used to explain what the code does. \n\n### 2.3.1 Single-line comments\nSingle-line comments begin with the hash character (<b> ```#```</b> ) and are terminated by\nthe end of line. Python is ignoring all text that comes after the # to the end of the line, \nthey are not part of the command. For example:", "_____no_output_____" ] ], [ [ "# This is a single line comment", "_____no_output_____" ] ], [ [ "### 2.3.2 Multi-line comments\n\nComments spanning more than one line are achieved by inserting a multi-line string\n(with <b>```\"\"\"```</b> or <b>```'''```</b> as the delimiter one each end) that is not used in assignment or \notherwise evaluated, but sits in between other statements.\n\nThey are meant as documentation for anyone reading the code. 
", "_____no_output_____" ] ], [ [ "'''\nThis \nis \na\nmulti-line \ncomment\n'''\n\n\"\"\"\nThis is also \na multi-line\ncomment\n\"\"\"", "_____no_output_____" ] ], [ [ " ", "_____no_output_____" ], [ "## 2.4 Single-line Statements\n\nFrom the term itself, these are statements within a single line.\n\nFor example:", "_____no_output_____" ] ], [ [ "my_list = ['item1', 'item2', 'item3']", "_____no_output_____" ] ], [ [ " \n \n \n \n \n \n \n ", "_____no_output_____" ], [ "## 2.5 Multi-line Statements\n\nStatements in Python typically end with a new line. Python does, however, allow the use of the line continuation character (\\) to denote that the line should continue. \n\nFor example:", "_____no_output_____" ] ], [ [ "total = item_one + \\\n item_two + \\\n item_three", "_____no_output_____" ] ], [ [ "\nStatements contained within the ```[ ]```, ```{ }```, or ```( )``` do not need to use the line continuation character.\n\nFor example:", "_____no_output_____" ] ], [ [ "my_list = [\n 'item1', \n 'item2',\n]", "_____no_output_____" ] ], [ [ "## 2.6 PEP 8", "_____no_output_____" ], [ "PEP 8 is the official style guide for Python.\n\nFor the PEP 8 official documentation, please check this [link](https://www.python.org/dev/peps/pep-0008/)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb39e4e616e9b5d0c6424066222d823cbbc36a29
303,060
ipynb
Jupyter Notebook
Boxplots_Interpolation.ipynb
EDSEL-skoltech/maxvol_sampling
6fe81f52115b8a8890210d7b549d2abef2d9800a
[ "MIT" ]
2
2021-03-12T16:13:49.000Z
2021-09-20T18:08:17.000Z
Boxplots_Interpolation.ipynb
EDSEL-skoltech/maxvol_sampling
6fe81f52115b8a8890210d7b549d2abef2d9800a
[ "MIT" ]
1
2022-02-02T15:57:16.000Z
2022-02-02T15:57:16.000Z
Boxplots_Interpolation.ipynb
EDSEL-skoltech/maxvol_sampling
6fe81f52115b8a8890210d7b549d2abef2d9800a
[ "MIT" ]
1
2021-09-16T15:34:22.000Z
2021-09-16T15:34:22.000Z
550.018149
31,824
0.940028
[ [ [ "## Boxplot plots\n_______\ntg: @misha_grol and [email protected]\n\n\nBoxplots for features based on DEM and NDVI", "_____no_output_____" ] ], [ [ "# Uncomment for Google colab\n# !pip install maxvolpy\n\n# !pip install clhs\n\n# !git clone https://github.com/EDSEL-skoltech/maxvol_sampling\n \n# %cd maxvol_sampling/", "_____no_output_____" ], [ "import csv\nimport seaborn as sns\nimport argparse\nimport numpy as np\nimport osgeo.gdal as gdal\nimport os\nimport pandas as pd\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nfrom numpy import genfromtxt\nimport gdal\nimport xarray as xr\nimport clhs as cl\n\n\nfrom scipy.spatial import ConvexHull, convex_hull_plot_2d\nfrom scipy.spatial import voronoi_plot_2d, Voronoi\nfrom scipy.spatial import distance\nfrom scipy.stats import entropy\nfrom scipy.special import kl_div\nfrom scipy.stats import ks_2samp\nfrom scipy.stats import wasserstein_distance\n\n\n%matplotlib inline\nfrom src.util import MaxVolSampling", "_____no_output_____" ], [ "# Uncoment \"Times New Roman\" and \"science\" stule plt if you have it\n# plt.rcParams[\"font.family\"] = \"Times New Roman\"\n\nplt.rcParams.update({'font.size': 16})\n#use science style for plots\n# plt.style.use(['science', 'grid'])\n\nplt.rcParams['xtick.labelsize'] = 15\n\nplt.rcParams['ytick.labelsize'] = 20\n", "_____no_output_____" ] ], [ [ "## Interpolation plots", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom tqdm.notebook import tqdm\nfrom scipy.stats import ks_2samp\n\n\ndict_for_dict_wasserstein = {}\ncsv_file_to_process = './src/data_v0.csv'\ndf_name = list(pd.read_csv(csv_file_to_process, sep=',').columns)\nsoil_parameters = df_name\npath_to_inter_npy_files = './experiments/cLHS_10_000/Interpolation_data/'\n\nnp.random.seed(42)\nunits = ['Soil moisture 10 cm, %','Soil moisture 30 cm, %','Soil moisture 80 cm, %','Mean crop yield, c/ha', 'Penetration resistance 10 cm, kPa','Penetration resistance 30 cm, kPa','Penetration resistance 80 cm, kPa', 'Soil Temperature 10 cm, °C','Soil Temperature 30 cm, °C','Soil Temperature 80 cm, °C']\n\ninterpolation_files = sorted(os.listdir('./experiments/cLHS_10_000/Interpolation_data/'))\n\npath = './experiments/cLHS_million_steps'\n\nfor index, file in enumerate(interpolation_files):\n \n list_to_test_zeros = []\n print('Parameter:', file)\n\n df_for_plots = pd.DataFrame(columns=['Sampling', 'Points', 'Value'])\n\n\n dict_for_parameter = {'MAXVOL':{},\n 'cLHS':{}, \n 'Random':{}}\n\n dict_for_wasserstein = {'MAXVOL':{},\n 'cLHS':{}, \n 'Random':{}}\n\n dict_for_plots = {'MAXVOL':{},\n 'cLHS':{}, \n 'Random':{}}\n\n number_of_points = [10,15,20,25,30]\n\n\n\n from itertools import compress\n list_of_cLHS_million_runs = sorted(os.listdir('./experiments/cLHS_million_steps'))\n selection = ['NDVI' in name for name in list_of_cLHS_million_runs] \n cLHS_points_files = list(compress(list_of_cLHS_million_runs, selection))\n for num_points, csv_file in zip(number_of_points, cLHS_points_files):\n \n dict_for_parameter['cLHS'][num_points] = np.genfromtxt(os.path.join(path, csv_file),delimiter=',', dtype=int)\n SAR = MaxVolSampling()\n SAR.soil_feature = soil_parameters[index]\n SAR.num_of_points = num_points\n SAR.soil_data = pd.read_csv('./src/data_v0.csv', sep=',')\n SAR.path_to_file_with_indices = None\n SAR.wd = './DEM_files/'\n \n SAR.path_to_interpolation_file = os.path.join(path_to_inter_npy_files, file)\n \n \n\n _ =SAR.data_preparation(SAR.wd, data_m=3, dem_dir = None)\n 
SAR.original_soil_data(SAR.soil_feature)\n\n #data from interpolation\n interpolation_map = SAR.interpolation_array\n #Points selection by MAXVOL\n MAXVOL = interpolation_map[SAR.i_am_maxvol_function()]\n \n print\n \n \n for value in MAXVOL:\n df_for_plots.loc[len(df_for_plots)]=['MAXVOL', num_points, value] \n \n \n cLHS = interpolation_map[dict_for_parameter['cLHS'][num_points]]\n for value in cLHS:\n df_for_plots.loc[len(df_for_plots)]=['cLHS', num_points, value]\n \n\n \n\n RANDOM = interpolation_map[SAR.i_am_random()]\n\n for value in RANDOM:\n df_for_plots.loc[len(df_for_plots)]=['Random', num_points, value]\n\n\n #original distribution\n \n df_original = pd.DataFrame(data={'Points':[51]*len(SAR.original_data), 'Value':SAR.original_data})\n\n fig = plt.figure(figsize=(18,18))\n\n gs = gridspec.GridSpec(4, 5, wspace=.25)\n\n\n ax_1 = fig.add_subplot(gs[:,:4])\n ax_2 = fig.add_subplot(gs[:,4])\n\n sns.boxplot(ax = ax_1, x=\"Points\", y=\"Value\",\n hue=\"Sampling\", palette=[\"#1F77B4\", \"#2CA02C\", \"#FF7F0E\"],\n data=df_for_plots, width=0.8)\n\n sns.boxplot(ax = ax_2, x='Points', y=\"Value\", palette=[\"#CCCCCC\"],\n data=df_original, width=0.25)\n\n\n fig.set_figwidth(16)\n fig.set_figheight(7)\n\n ax_2.set_xticklabels([])\n ax_2.set_ylabel('')\n ax_2.set_xlabel('')\n ax_2.grid(True)\n \n ax_1.set_xticklabels([])\n ax_1.set_xlabel('')\n ax_1.set_ylabel(units[index], fontsize = 17)\n ax_1.axhline(np.quantile(SAR.original_data, 0.25), color='grey', linestyle='--',zorder=0)\n ax_1.axhline(np.quantile(SAR.original_data, 0.50), color='grey', linestyle='--',zorder=0)\n ax_1.axhline(np.quantile(SAR.original_data, 0.75), color='grey', linestyle='--',zorder=0)\n ax_1.get_shared_y_axes().join(ax_1, ax_2)\n ax_1.get_legend().remove()\n ax_1.grid(True)\n ax_2.set_yticklabels([])\n# plt.savefig('../plots/agricultural_systems_plots/boxplots_interpolation/'+str(soil_parameters[index])+'boxplot.svg')\n# plt.savefig('../plots/agricultural_systems_plots/boxplots_interpolation/'+str(soil_parameters[index])+'boxplot.png', dpi=300)\n plt.show()\n\n# break\n", "Parameter: Moisture_perc_10.npy\n" ] ], [ [ "## Plots of Wasserstein distance evolution ", "_____no_output_____" ] ], [ [ "fig, ((ax0, ax1), (ax2, ax3), (ax4, ax5), (ax6, ax7),(ax8, ax9)) = plt.subplots(nrows=5, ncols=2, sharex=True,figsize=(18, 25))\n\nnames_for_plots = ['Soil moisture 10 cm, %','Soil moisture 30 cm, %',\n 'Soil moisture 80 cm, %','Mean crop yield, c/ha',\n 'Penetration resistance 10 cm, kPa','Penetration resistance 30 cm, kPa',\n 'Penetration resistance 80 cm, kPa', 'Soil Temperature 10 cm, °C',\n 'Soil Temperature 30 cm, °C','Soil Temperature 80 cm, °C']\npath = './experiments/cLHS_10_000/exp_fem_poins/npy_files/'\nfiles_with_points = os.listdir(path)\n\nrange_files_allocation=[]\nfor file in files_with_points:\n range_files_allocation.append(np.load(os.path.join(path,file), allow_pickle=True)[None])\n res = np.load(os.path.join(path,file), allow_pickle=True)\n\ndict_for_indices = {'MAXVOL':[], 'cLHS':[], 'Random':[]}\nfrom collections import ChainMap\nfor sampling in [*range_files_allocation[0][0].keys()]:\n loc_list = [dict(loc_dict[0][sampling]) for loc_dict in range_files_allocation]\n dict_for_indices[sampling] = dict(ChainMap(*loc_list))\nn = 0\nnumber_of_points = range(7,31)\ncsv_file_to_process = './src/data_v0.csv'\n\nfor row in ((ax0, ax1), (ax2, ax3), (ax4, ax5), (ax6, ax7),(ax8, ax9)):\n for col in row:\n \n # COMPUTE WASSERSTEIN DISTANCE\n \n df_name = list(pd.read_csv(csv_file_to_process, sep=',').columns)\n 
soil_parameters = df_name\n path_to_inter_npy_files = './experiments/cLHS_10_000/Interpolation_data/'\n np.random.seed(42)\n units = ['Soil moisture 10 cm, %','Soil moisture 30 cm, %','Soil moisture 80 cm, %','Mean crop yield, c/ha', 'Penetration resistance 10 cm, kPa','Penetration resistance 30 cm, kPa','Penetration resistance 80 cm, kPa', 'Soil Temperature 10 cm, °C','Soil Temperature 30 cm, °C','Soil Temperature 80 cm, °C']\n interpolation_files = sorted(os.listdir('./experiments/cLHS_10_000/Interpolation_data/'))\n print('Parameter:', interpolation_files[n])\n dict_for_plots = {'MAXVOL':{},\n 'cLHS':{}, \n 'Random':{}}\n dict_for_new_maxvol = {'MAXVOL_NEW': {}}\n for points in number_of_points:\n SAR = MaxVolSampling()\n SAR.soil_feature = soil_parameters[n]\n SAR.num_of_points = points\n SAR.soil_data = pd.read_csv(csv_file_to_process, sep=',')\n SAR.path_to_file_with_indices = None\n SAR.wd = './DEM_files//'\n\n SAR.path_to_interpolation_file = os.path.join(path_to_inter_npy_files, interpolation_files[n])\n _ =SAR.data_preparation(SAR.wd, data_m=3, dem_dir = None)\n SAR.original_soil_data(SAR.soil_feature)\n interpolation_map = SAR.interpolation_array[::-1]\n MAXVOL_ = interpolation_map[SAR.i_am_maxvol_function()]\n\n # List to iterate over 100 realization of cLHS and Random\n cLHS_ = [interpolation_map[dict_for_indices['cLHS'][points][i]] for i in range(100)]\n Random_ = [interpolation_map[dict_for_indices['Random'][points][i]] for i in range(100)]\n dict_for_plots['MAXVOL'][points] = wasserstein_distance(SAR.original_data, MAXVOL_)\n dict_for_plots['cLHS'][points] = [wasserstein_distance(SAR.original_data, mdt) for mdt in cLHS_]\n dict_for_plots['Random'][points] = [wasserstein_distance(SAR.original_data, mdt) for mdt in Random_]\n \n quantile_lower_random = np.array([np.quantile(dict_for_plots['Random'][i], .10) for i in number_of_points])\n quantile_upper_random = np.array([np.quantile(dict_for_plots['Random'][i], .90) for i in number_of_points])\n median_random = np.array([np.median(dict_for_plots['Random'][i]) for i in number_of_points])\n\n \n \n quantile_lower_cLHS = np.array([np.quantile(dict_for_plots['cLHS'][i], .10) for i in number_of_points])\n quantile_upper_cLHS = np.array([np.quantile(dict_for_plots['cLHS'][i], .90) for i in number_of_points])\n median_cLHS = np.array([np.median(dict_for_plots['cLHS'][i]) for i in number_of_points])\n \n col.plot(number_of_points, [*dict_for_plots['MAXVOL'].values()], '-.',label='Maxvol',linewidth=4,markersize=10 )\n col.plot(number_of_points, median_random, label='Random median',linewidth=3,markersize=10 )\n col.plot(number_of_points, median_cLHS,'--',label='cLHS median',linewidth=3,markersize=14)\n col.fill_between(number_of_points, quantile_lower_random, quantile_upper_random , alpha=0.1, color='orange', label='CI Random')\n col.fill_between(number_of_points, quantile_lower_cLHS, quantile_upper_cLHS , alpha=0.1, color='green', label='CI cLHS')\n \n col.set_xlim(min(number_of_points), max(number_of_points))\n# col.set_xticks(number_of_points)\n col.set_title(names_for_plots[n])\n col.grid(True)\n col.set(ylabel=\"Wasserstein distance\")\n if n==8 or n==9:\n \n col.set(xlabel=\"Number of points for sampling\", ylabel=\"Wasserstein distance\")\n\n\n# plt.show()\n n+=1\n# plt.legend()\n# plt.savefig('../plots/agricultural_systems_plots/plots_with_evolution_of_wassersterin/wasserstein_disctance_IQR.png', dpi=300)\n# plt.savefig('../plots/agricultural_systems_plots/plots_with_evolution_of_wassersterin/nwasserstein_disctance_IQR.svg') ", 
"_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb39ea66589500370a2d5795f9949c919d22c577
825,689
ipynb
Jupyter Notebook
.ipynb_checkpoints/Image_Seam_by_Jerry_Mei-checkpoint.ipynb
JerryLingjieMei/Image_Seam
78b98207224cda66a34369fe39c1aa1bc4b0890b
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Image_Seam_by_Jerry_Mei-checkpoint.ipynb
JerryLingjieMei/Image_Seam
78b98207224cda66a34369fe39c1aa1bc4b0890b
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Image_Seam_by_Jerry_Mei-checkpoint.ipynb
JerryLingjieMei/Image_Seam
78b98207224cda66a34369fe39c1aa1bc4b0890b
[ "MIT" ]
1
2019-06-30T05:26:53.000Z
2019-06-30T05:26:53.000Z
2,798.945763
817,226
0.961783
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb3a08578da46d43ea02e8698d5770385c3887a6
50,921
ipynb
Jupyter Notebook
amz_reports/amz_ads_target.ipynb
dyslab/jnb-sample
38af701866b8496729d63f844e56137d125c4223
[ "MIT" ]
null
null
null
amz_reports/amz_ads_target.ipynb
dyslab/jnb-sample
38af701866b8496729d63f844e56137d125c4223
[ "MIT" ]
2
2020-03-24T17:57:15.000Z
2020-03-31T10:21:49.000Z
amz_reports/amz_ads_target.ipynb
dyslab/jnb-sample
38af701866b8496729d63f844e56137d125c4223
[ "MIT" ]
null
null
null
33.108583
178
0.328411
[ [ [ "Doc title: **Amazon Advertising Targeting Report**\n\nArticle notes: Data came from 'Reports/Advertising Reports/Sponsored Products/Targeting Report' @Amazon Seller Central.\n\n文章备注:亚马逊后台广告目标投放报告分析\n\nLast modified date: 2019-12-05 16:33:04 ", "_____no_output_____" ] ], [ [ "# 引入pandas数据分析模块\nimport pandas as pd\n\n# 数据范例:美国站,月度数据\nworkdf = pd.read_excel('data/amz_ads_target_us_201911.xlsx', usecols=['广告活动名称', '广告组名称', '匹配类型', '投放', '展现量', '点击量', '花费', '7天总订单数(#)', '7天总销售额(¥)', '7天总销售量(#)'])", "_____no_output_____" ] ], [ [ "# 广告组数据排序(以销量为取值标准)", "_____no_output_____" ] ], [ [ "# Manipulating data and column names.\nimpr_df = workdf.groupby(['广告活动名称', '广告组名称']).sum().sort_values(by=['7天总销售量(#)'], ascending=False).head(20)\nimpr_df['总销售额'] = impr_df['7天总销售额(¥)']\nimpr_df['总销售量'] = impr_df['7天总销售量(#)']\nimpr_df = impr_df[['展现量', '点击量', '花费', '总销售额', '总销售量']]\nimpr_df['点击率'] = round((impr_df['点击量'] / impr_df['展现量'] * 100), 2)\nimpr_df['卖出1件商品的平均花费'] = round(impr_df['花费'] / impr_df['总销售量'], 2)\n\nimpr_df.sort_values(by=['卖出1件商品的平均花费'])", "_____no_output_____" ] ], [ [ "此表解决的问题为:\n\n- 找出销量最高的广告组。\n\n- 找出最具性价比的广告组。(卖出1件商品花费越小,则性价比越高)", "_____no_output_____" ], [ "# 投放数据排序(以展现量为取值标准)", "_____no_output_____" ] ], [ [ "# Manipulating data and column names.\ntarget_df = workdf.groupby(['广告组名称', '匹配类型', '投放']).sum().sort_values(by=['展现量'], ascending=False).head(20)\ntarget_df['总销售额'] = target_df['7天总销售额(¥)']\ntarget_df['总销售量'] = target_df['7天总销售量(#)']\ntarget_df = target_df[['展现量', '点击量', '花费', '总销售额', '总销售量']]\ntarget_df['性价比(总销售额/花费)'] = round(target_df['总销售额'] / target_df['花费'], 2)\n\ntarget_df.sort_values(by=['性价比(总销售额/花费)'], ascending=False)", "_____no_output_____" ] ], [ [ "此表解决的问题为:\n\n- 找出最具性价比的投放项目。", "_____no_output_____" ], [ "# 性价比最高的Top 10广告组与全部广告组数据对比", "_____no_output_____" ], [ "*注:在某一项目组每投入1美元带来的销售额回报越高,则其性价比越高。*", "_____no_output_____" ], [ "## Top 10广告组", "_____no_output_____" ] ], [ [ "# Top 10 Ads.\ntop10_df = workdf.groupby(['广告组名称']).sum().sort_values(by=['7天总销售量(#)'], ascending=False)\ntop10_df['总销售额'] = top10_df['7天总销售额(¥)']\ntop10_df['性价比'] = round(top10_df['总销售额'] / top10_df['花费'], 2)\ntop10_df = top10_df[['展现量', '点击量', '花费', '总销售额', '性价比']].sort_values(by=['性价比'], ascending=False).head(10)\n\nprint('Top 10 广告组\\n')\n\ntop10_df", "Top 10 广告组\n\n" ] ], [ [ "## 数据对比", "_____no_output_____" ] ], [ [ "total_ds = workdf.sum()\ntop10_ds = top10_df.sum()\n\nprint('结论:')\nprint('\\nTop 10广告组花费为:{0:.2f}美元; 全部广告组花费为:{1:.2f}美元; Top 10广告组占比:{2:.2f}%'.format(top10_ds['花费'], total_ds['花费'], top10_ds['花费'] / total_ds['花费'] * 100))\nprint('\\nTop 10广告组共销售:{0:.2f}美元; 全部广告组共销售:{1:.2f}美元; Top 10广告组占比:{2:.2f}%'.format(top10_ds['总销售额'], total_ds['7天总销售额(¥)'], top10_ds['总销售额'] / total_ds['7天总销售额(¥)'] * 100))", "结论:\n\nTop 10广告组花费为:1429.14美元; 全部广告组花费为:2112.80美元; Top 10广告组占比:67.64%\n\nTop 10广告组共销售:6605.84美元; 全部广告组共销售:8248.81美元; Top 10广告组占比:80.08%\n" ] ], [ [ "# 产品广告活动订单率情况", "_____no_output_____" ] ], [ [ "target_df = workdf.groupby(['广告活动名称']).sum().head(20)\ntarget_df['总订单量'] = target_df['7天总订单数(#)']\ntarget_df['订单率'] = round(target_df['总订单量'] / target_df['点击量'] * 100, 2)\n\ntarget_df[['展现量', '点击量', '总订单量', '订单率']].sort_values(by=['订单率'], ascending=False).fillna(0)", "_____no_output_____" ] ], [ [ "*注:订单率 = 订单量 / 点击量*\n\n此表解决的问题为:\n\n- 了解产品广告活动的订单率情况。品牌广告的相关情况可见 **[amz_ads_brand.ipynb](amz_ads_brand.ipynb)** 的相关部分说明。", "_____no_output_____" ], [ "# 产品广告组订单率情况", "_____no_output_____" ] ], [ [ "target_df = workdf.groupby(['广告组名称']).sum().head(20)\ntarget_df['总订单量'] = 
target_df['7天总订单数(#)']\ntarget_df['订单率'] = round(target_df['总订单量'] / target_df['点击量'] * 100, 2)\n\ntarget_df[['展现量', '点击量', '总订单量', '订单率']].sort_values(by=['订单率'], ascending=False).fillna(0)", "_____no_output_____" ] ], [ [ "*注:订单率 = 订单量 / 点击量*\n\n此表解决的问题为:\n\n- 了解产品广告组的订单率情况。", "_____no_output_____" ], [ "**[返回目录](amz_ads_catalog.ipynb)**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb3a0ee55c893e6195bb0ce14aee7f38f0405355
17,024
ipynb
Jupyter Notebook
notebooks/model.ipynb
ksteinfe/fresh_eyes
db0cdf2a77d5e2df7157c022aa7a620ec15cac34
[ "MIT" ]
4
2019-09-20T23:43:21.000Z
2021-12-08T15:27:00.000Z
notebooks/model.ipynb
ksteinfe/fresh_eyes
db0cdf2a77d5e2df7157c022aa7a620ec15cac34
[ "MIT" ]
null
null
null
notebooks/model.ipynb
ksteinfe/fresh_eyes
db0cdf2a77d5e2df7157c022aa7a620ec15cac34
[ "MIT" ]
2
2019-09-21T13:56:46.000Z
2020-06-28T01:43:02.000Z
33.380392
184
0.551339
[ [ [ "from tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.applications.resnet50 import ResNet50, preprocess_input\nfrom tensorflow.python.keras.preprocessing import image\nfrom tensorflow.python.keras.layers import Conv2D, GlobalAveragePooling2D, Input, Dropout, Dense\nfrom tensorflow.python.keras.utils import to_categorical\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.datasets import cifar10\nfrom tensorflow.python.keras.callbacks import Callback, TensorBoard\nfrom tensorflow.python.keras.backend import set_session\nfrom tensorflow.python.keras.models import load_model\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nfrom collections import defaultdict\nfrom matplotlib.pyplot import imshow\nfrom PIL import Image\nimport datetime\nimport numpy as np\nimport os, glob, io\nimport base64\n%matplotlib inline", "_____no_output_____" ], [ "sess = tf.Session()\ngraph = tf.get_default_graph()\nset_session(sess)", "_____no_output_____" ], [ "batch_size = 32\ntest_train_split = 0.2\nmax_epoch = 1\ndropout_prob = 0.3\nshape = (224, 224)\ntrain_size_per_label = 500\ntest_size_per_label = 100\ntest_train_split=0.2\nimage_path = \"/Users/adammenges/Development/notebooks/basicClassifier/houses_120px_classes\"", "_____no_output_____" ], [ "def resize(arr, shape):\n return np.array(Image.fromarray(arr).resize(shape))\n\ndef decode_img(msg):\n# msg = msg[msg.find(b\"<plain_txt_msg:img>\")+len(b\"<plain_txt_msg:img>\"):\n# msg.find(b\"<!plain_txt_msg>\")]\n msg = base64.b64decode(msg)\n buf = io.BytesIO(msg)\n img = Image.open(buf)\n return img\n\ndef preprocess(arr, shape=(224, 224)):\n arr = np.array([resize(arr[i], shape) for i in range(0, len(arr))]).astype('float32')\n arr = preprocess_input(arr)\n return arr\n \ndef get_local_images():\n classes = os.listdir(image_path)\n input_arr = []\n target_labels = []\n for class_idx in range(len(classes)):\n paths = glob.glob(os.path.join(image_path, classes[class_idx]) + \"/*.png\")\n for img_path in tqdm(paths, desc=f'Processing label {classes[class_idx]}: '):\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n# x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n target_labels.append(class_idx)\n input_arr.append(x)\n X_train, X_test, y_train, y_test = train_test_split(input_arr, target_labels, test_size=test_train_split)\n X_train = np.array(X_train)\n X_test = np.array(X_test)\n y_train = np.array(y_train)\n y_test = np.array(y_test)\n return X_train, X_test, y_train, y_test, classes\n\ndef get_cifar10():\n (input_train, out_train), (input_test, out_test) = cifar10.load_data()\n return input_train, input_test, out_train, out_test, range(10)\n\ndef get_resnet50(shape=(224, 224, 3)):\n return ResNet50(weights='imagenet', include_top=False, input_shape=shape)\n\ndef restrain_data(input_train, out_train, input_test, out_test, num_class, num_train, num_test, shape=(224, 224)):\n train_dict = defaultdict(list)\n test_dict = defaultdict(list)\n [train_dict[out_train[idx][0]].append(input_train[idx]) for idx in range(input_train.shape[0])]\n [test_dict[out_test[idx][0]].append(input_test[idx]) for idx in range(input_test.shape[0])]\n restrain_class = range(num_class)\n restrain_train = [[train_dict[i][idx], i] for idx in range(num_train) for i in restrain_class]\n restrain_test = [[test_dict[i][idx], i] for idx in range(num_test) for i in restrain_class]\n rand_train_idx = np.random.choice(num_train * 
num_class, num_train * num_class)\n rand_test_idx = np.random.choice(num_test * num_class, num_test * num_class)\n i_train = np.array([restrain_train[idx][0] for idx in rand_train_idx])\n o_train = np.array([[restrain_train[idx][1]] for idx in rand_train_idx])\n i_test = np.array([restrain_test[idx][0] for idx in rand_test_idx])\n o_test = np.array([[restrain_test[idx][1]] for idx in rand_test_idx])\n i_train = preprocess(i_train, shape=shape)\n i_test = preprocess(i_test, shape=shape)\n return i_train, i_test, o_train, o_test, restrain_class", "_____no_output_____" ], [ "input_train, input_test, out_train, out_test, classes = get_local_images()", "Processing label Saltbox: 100%|██████████| 52/52 [00:00<00:00, 1226.04it/s]\nProcessing label QueenAnne: 100%|██████████| 58/58 [00:00<00:00, 1312.63it/s]\nProcessing label AFrame: 100%|██████████| 71/71 [00:00<00:00, 1305.00it/s]\nProcessing label Patio: 100%|██████████| 32/32 [00:00<00:00, 1332.17it/s]\nProcessing label BayGable: 100%|██████████| 43/43 [00:00<00:00, 1271.62it/s]\nProcessing label Dogtrot: 100%|██████████| 46/46 [00:00<00:00, 1264.12it/s]\n" ], [ "input_test.shape", "_____no_output_____" ], [ "x = get_cifar10()\nx[0].shape", "_____no_output_____" ], [ "# input_train, input_test, out_train, out_test, classes = restrain_data(\n# input_train, \n# out_train, \n# input_test,\n# out_test, \n# len(classes),\n# train_size_per_label,\n# test_size_per_label)\n# input_train = preprocess(input_train, shape=shape)\n# input_test = preprocess(input_test, shape=shape)", "_____no_output_____" ], [ "total_train_steps = len(input_train) // batch_size\nout_train = to_categorical(out_train, len(classes))\nout_test = to_categorical(out_test, len(classes))", "_____no_output_____" ], [ "def batch_generator(x, y, batch_size=32):\n while True:\n for step in range(len(x) // batch_size):\n yield x[step*batch_size:(step+1)*batch_size, ...], y[step*batch_size:(step+1)*batch_size, ...]\n\nclass RecordAccuracy(Callback):\n def on_epoch_begin(self, epoch, logs=None):\n print(f'Running epoch {epoch}. Total {total_train_steps} batches')\n def on_batch_end(self, batch, logs=None):\n loss = logs['loss']\n if not batch % 10:\n print(f'Running batch {batch}: train loss - {loss}')\n def on_epoch_end(self, epoch, logs=None):\n loss = logs[\"loss\"]\n val_acc = logs[\"val_acc\"]\n print(f'Epoch {epoch}: train loss - {loss}. 
test accuracy - {val_acc}')\n \ndef freeze_layers(model, layer_num):\n for layer in model.layers[:layer_num]:\n layer.trainable = False\n \ndef train_layers(model, layer_num):\n for layer in model.layers[layer_num:]:\n layer.trainable = True", "_____no_output_____" ], [ "resnet50 = get_resnet50(shape=shape + (3,))\nbottleneck_train_features = resnet50.predict(input_train)\nbottleneck_test_features = resnet50.predict(input_test)", "/anaconda3/lib/python3.7/site-packages/keras_applications/resnet50.py:265: UserWarning: The output shape of `ResNet50(include_top=False)` has been changed since Keras 2.2.0.\n warnings.warn('The output shape of `ResNet50(include_top=False)` '\n" ], [ "in_layer = Input(shape=(bottleneck_train_features.shape[1:]))\nx = Conv2D(filters=100, kernel_size=2)(in_layer)\nx = Dropout(0.4)(x)\nx = GlobalAveragePooling2D()(x)\nx = Dropout(0.3)(x)\npredictions = Dense(len(classes), activation='softmax')(x)\nmodel = Model(inputs=in_layer, outputs=predictions)\nmodel.summary()", "Model: \"model_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_4 (InputLayer) [(None, 7, 7, 2048)] 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 6, 6, 100) 819300 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 6, 6, 100) 0 \n_________________________________________________________________\nglobal_average_pooling2d_1 ( (None, 100) 0 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 100) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 6) 606 \n=================================================================\nTotal params: 819,906\nTrainable params: 819,906\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "## Train the model!\n\nAnd now it's time to train the model!", "_____no_output_____" ] ], [ [ "model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\nmodel.fit_generator(batch_generator(bottleneck_train_features, out_train),\n steps_per_epoch=len(bottleneck_train_features) // batch_size,\n validation_data=(bottleneck_test_features, out_test), \n verbose=2,\n epochs=max_epoch,\n callbacks=[RecordAccuracy(), TensorBoard()])", "Running epoch 0. Total 7 batches\nRunning batch 0: train loss - 2.5737788677215576\nEpoch 0: train loss - 4.872777155467442. 
test accuracy - 0.7540983557701111\n7/7 - 2s - loss: 4.8728 - acc: 0.4152 - val_loss: 1.5288 - val_acc: 0.7541\n" ] ], [ [ "# Server\n\nOkay now let's host a server for grasshopper", "_____no_output_____" ] ], [ [ "print(model.predict(resnet50.predict(np.array([input_test[0]]))))\nprint(classes)\nprint('----')\nprint(input_test[0].shape)\nprint(list(zip(model.predict(resnet50.predict(np.array([input_test[0]])))[0], classes)))\nout_test[0]", "[[1.4172211e-22 9.7809210e-30 1.0000000e+00 5.7962686e-21 2.2097266e-24\n 8.6659401e-16]]\n['Saltbox', 'QueenAnne', 'AFrame', 'Patio', 'BayGable', 'Dogtrot']\n----\n(224, 224, 3)\n[(1.417221e-22, 'Saltbox'), (9.780921e-30, 'QueenAnne'), (1.0, 'AFrame'), (5.7962686e-21, 'Patio'), (2.2097266e-24, 'BayGable'), (8.66594e-16, 'Dogtrot')]\n" ], [ "from flask import Flask\nfrom flask import request\napp = Flask(__name__)\n\[email protected]('/predict', methods=['POST']) #GET requests will be blocked\ndef hello_world():\n req_data = request.get_json()\n img = req_data['image']\n img = decode_img(img).resize((224,224)).convert('RGB')\n img = image.img_to_array(img)\n x = preprocess_input(img)\n print('----')\n print(x.shape)\n print('----')\n global sess\n global graph\n with graph.as_default():\n set_session(sess)\n pred = model.predict(resnet50.predict(np.array([x])))[0]\n pred = [str(f) for f in pred]\n prediction = list(zip(pred, classes))\n print('prediction')\n print(prediction)\n return {\n 'prediction': prediction\n }\n\napp.run(debug=True, use_reloader=False)", " * Serving Flask app \"__main__\" (lazy loading)\n * Environment: production\n WARNING: This is a development server. Do not use it in a production deployment.\n Use a production WSGI server instead.\n * Debug mode: on\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb3a14a3027849adf39439ab19d7fb9ef76b1e61
10,557
ipynb
Jupyter Notebook
point_history_classification_allkeypoints.ipynb
mimisukeMaster/hand-gesture-recognition-using-mediapipe
6a304569d25db4a80f677184b57568adf4138d0d
[ "Apache-2.0" ]
3
2022-03-14T16:59:55.000Z
2022-03-28T15:39:30.000Z
point_history_classification_allkeypoints.ipynb
mimisukeMaster/hand-gesture-recognition-using-mediapipe
6a304569d25db4a80f677184b57568adf4138d0d
[ "Apache-2.0" ]
null
null
null
point_history_classification_allkeypoints.ipynb
mimisukeMaster/hand-gesture-recognition-using-mediapipe
6a304569d25db4a80f677184b57568adf4138d0d
[ "Apache-2.0" ]
null
null
null
25.07601
142
0.555366
[ [ [ "import csv\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\nRANDOM_SEED = 42", "_____no_output_____" ] ], [ [ "# 各パス指定", "_____no_output_____" ] ], [ [ "dataset = 'model/point_history_classifier/point_history_allkeypoints.csv'\nmodel_save_path = 'model/point_history_classifier/point_history_classifier_allkeypoints.hdf5'", "_____no_output_____" ] ], [ [ "# 分類数設定", "_____no_output_____" ] ], [ [ "NUM_CLASSES = 3 # point_history_classifier_label_allkeypoints の分類数", "_____no_output_____" ] ], [ [ "# 入力長", "_____no_output_____" ] ], [ [ "TIME_STEPS = 16 # 時系列\nNUM_KEYPOINTS = 21 # 全点数\nDIMENSION = 2 * NUM_KEYPOINTS # [x,y]2要素 * 全点数", "_____no_output_____" ] ], [ [ "# 学習データ読み込み", "_____no_output_____" ] ], [ [ "# \"座標\"のデータセット\nX_dataset = np.loadtxt(dataset, delimiter=',', dtype='float32',\n usecols=list(range(1, (TIME_STEPS * DIMENSION) + 1)))\n# usecols=: どの列(縦列)を読み取るのか,ここでは1列目のindex列を抜かして入力長分を指定", "_____no_output_____" ], [ "# \"index番号\"のデータセット usecols=(0)は最初の縦列指定\ny_dataset = np.loadtxt(dataset, delimiter=',', dtype='int32', usecols=(0))", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(\n X_dataset, y_dataset, train_size=0.75, random_state=RANDOM_SEED)\n# Xはデータの実体,yはindex番号\n#これでcsvデータのうち4分の3が練習用trainにX(実体)とy(番号)を振り分けられながら入り、\n# 本番testにも残り4分の1が同様に入った -> trainには 16 * 2 * 21 個の要素持ったデータの列がある", "_____no_output_____" ] ], [ [ "# モデル構築", "_____no_output_____" ] ], [ [ "use_lstm = False\nmodel = None\n\nif use_lstm:\n model = tf.keras.models.Sequential([\n tf.keras.layers.InputLayer(input_shape=(TIME_STEPS * DIMENSION, )),\n tf.keras.layers.Reshape((TIME_STEPS, DIMENSION), input_shape=(TIME_STEPS * DIMENSION, )), \n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.LSTM(16, input_shape=[TIME_STEPS, DIMENSION]),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10, activation='relu'),\n tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')\n ])\nelse:\n model = tf.keras.models.Sequential([\n tf.keras.layers.InputLayer(input_shape=(TIME_STEPS * DIMENSION, )),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(24, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10, activation='relu'),\n tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')\n ])", "_____no_output_____" ], [ "model.summary() # tf.keras.utils.plot_model(model, show_shapes=True)", "_____no_output_____" ], [ "# モデルチェックポイントのコールバック\ncp_callback = tf.keras.callbacks.ModelCheckpoint(\n model_save_path, verbose=1, save_weights_only=False)\n# 早期打ち切り用コールバック\nes_callback = tf.keras.callbacks.EarlyStopping(patience=20, verbose=1)\n# エポック終了時にモデルを保存するModelCheckpointと、\n# 改善が見られなくなった時点で訓練を終了するEarlyStoppingを指定している。", "_____no_output_____" ], [ "# モデルコンパイル\nmodel.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n)", "_____no_output_____" ] ], [ [ "# モデル訓練", "_____no_output_____" ] ], [ [ "model.fit(\n X_train,\n y_train,\n epochs=1000,\n batch_size=128,\n validation_data=(X_test, y_test),\n callbacks=[cp_callback, es_callback]\n)", "_____no_output_____" ], [ "# 保存したモデルのロード\nmodel = tf.keras.models.load_model(model_save_path)", "_____no_output_____" ], [ "# 推論テスト\npredict_result = model.predict(np.array([X_test[0]]))\nprint(np.squeeze(predict_result))\nprint(np.argmax(np.squeeze(predict_result)))", "_____no_output_____" ] ], [ [ "# 混同行列", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import 
confusion_matrix, classification_report\n\ndef print_confusion_matrix(y_true, y_pred, report=True):\n labels = sorted(list(set(y_true)))\n cmx_data = confusion_matrix(y_true, y_pred, labels=labels)\n \n df_cmx = pd.DataFrame(cmx_data, index=labels, columns=labels)\n \n fig, ax = plt.subplots(figsize=(7, 6))\n sns.heatmap(df_cmx, annot=True, fmt='g' ,square=False)\n ax.set_ylim(len(set(y_true)), 0)\n plt.show()\n \n if report:\n print('Classification Report')\n print(classification_report(y_test, y_pred))\n\nY_pred = model.predict(X_test)\ny_pred = np.argmax(Y_pred, axis=1)\n\nprint_confusion_matrix(y_test, y_pred)", "_____no_output_____" ] ], [ [ "<h3>ONNXモデルへの変換(追加Cell)</> <h5>save_modelの2つ目の引数がモデルのファイル名になります</>", "_____no_output_____" ] ], [ [ "import keras2onnx\n# convert model to ONNX\nonnx_model = keras2onnx.convert_keras(model, # keras model\n name=\"example\", # the converted ONNX model internal name\n target_opset=9, # the ONNX version to export the model to\n channel_first_inputs=None # which inputs to transpose from NHWC to NCHW\n )\nkeras2onnx.save_model(onnx_model, \"example_h1.onnx\") # save as \"example_h1.onnx\"", "_____no_output_____" ] ], [ [ "# Tensorflow-Lite用のモデルへ変換", "_____no_output_____" ] ], [ [ "# 推論専用のモデルとして保存\nmodel.save(model_save_path, include_optimizer=False)\nmodel = tf.keras.models.load_model(model_save_path)", "_____no_output_____" ], [ "tflite_save_path = 'model/point_history_classifier/point_history_classifier_allkeypoints.tflite'", "_____no_output_____" ], [ "# モデルを変換(量子化\nconverter = tf.lite.TFLiteConverter.from_keras_model(model) # converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\ntflite_quantized_model = converter.convert()\n\nopen(tflite_save_path, 'wb').write(tflite_quantized_model)", "_____no_output_____" ] ], [ [ "# 推論テスト", "_____no_output_____" ] ], [ [ "interpreter = tf.lite.Interpreter(model_path=tflite_save_path)\ninterpreter.allocate_tensors()", "_____no_output_____" ], [ "# 入出力テンソルを取得\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\nprint(input_details)", "_____no_output_____" ], [ "interpreter.set_tensor(input_details[0]['index'], np.array([X_test[0]]))", "_____no_output_____" ], [ "%%time\n# 推論実施\ninterpreter.invoke()\ntflite_results = interpreter.get_tensor(output_details[0]['index'])", "_____no_output_____" ], [ "print(np.squeeze(tflite_results))\nprint(np.argmax(np.squeeze(tflite_results)))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb3a181ebb6cc826c692a207353e0d1c3a7f181b
24,811
ipynb
Jupyter Notebook
Shor's Algorithm.ipynb
rudrajit1729/QuantumComputing-IBMQ
eb4f29c5835d9872730ebb58e9090794a77b9735
[ "MIT" ]
2
2020-04-08T16:44:16.000Z
2020-12-05T16:36:46.000Z
Shor's Algorithm.ipynb
rudrajit1729/QuantumComputing-IBMQ
eb4f29c5835d9872730ebb58e9090794a77b9735
[ "MIT" ]
null
null
null
Shor's Algorithm.ipynb
rudrajit1729/QuantumComputing-IBMQ
eb4f29c5835d9872730ebb58e9090794a77b9735
[ "MIT" ]
null
null
null
34.174931
382
0.510741
[ [ [ "# Shor's algorithm, fully classical implementation", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport random\nimport math\nimport itertools\ndef period_finding_classical(a,N):\n # This is an inefficient classical algorithm to find the period of f(x)=a^x (mod N)\n # f(0) = a**0 (mod N) = 1, so we find the first x greater than 0 for which f(x) is also 1\n for r in itertools.count(start=1):\n if (a**r) % N == 1:\n return r\n\ndef shors_algorithm_classical(N):\n assert(N>0)\n assert(int(N)==N)\n while True:\n a=random.randint(0,N-1)\n g=math.gcd(a,N)\n if g!=1 or N==1:\n first_factor=g\n second_factor=int(N/g)\n return first_factor,second_factor\n else:\n r=period_finding_classical(a,N) \n if r % 2 != 0:\n continue\n elif a**(int(r/2)) % N == -1 % N:\n continue\n else:\n first_factor=math.gcd(a**int(r/2)+1,N)\n second_factor=math.gcd(a**int(r/2)-1,N)\n if first_factor==N or second_factor==N:\n continue\n return first_factor,second_factor\n", "_____no_output_____" ], [ "# Testing it out. Note because of the probabilistic nature of the algorithm, different factors and different ordering is possible\nshors_algorithm_classical(15)\nshors_algorithm_classical(91)", "_____no_output_____" ] ], [ [ "# Shor's algorithm, working on a quantum implementation\n## The following code will help give intuition for how to design a quantum circuit to do modular multiplication ", "_____no_output_____" ] ], [ [ "def U_a_modN(a,N,binary=False):\n \"\"\"\n a and N are decimal\n This algorithm returns U_a where:\n U_a is a modular multiplication operator map from |x> to |ax mod N>\n If binary is set to True, the mapping is given in binary instead of in decimal notation.\n \n \"\"\"\n res={}\n l=[]\n for i in range(1,N):\n l+=[a*i%N]\n res=set()\n\n for i in range(1,N):\n mp=[i]\n end=i\n nxt=i-1\n while l[nxt]!=end:\n mp+=[l[nxt]]\n nxt=l[nxt]-1\n res.add(tuple(mp))\n final_res=[]\n for item in res:\n dup=False\n for final_item in final_res:\n if set(item) == set(final_item):\n dup=True\n if not dup:\n final_res+=[item]\n if not binary:\n return final_res\n else:\n final_res_bin=[]\n for mapping in final_res:\n final_res_bin+=[tuple(['{0:06b}'.format(decimal) for decimal in mapping])]\n return final_res_bin\n \nprint(U_a_modN(8,35))\nprint(U_a_modN(8,35,binary=True))", "[(7, 21, 28, 14), (34, 27, 6, 13), (2, 16, 23, 9), (26, 33, 19, 12), (18, 4, 32, 11), (24, 17, 31, 3), (15,), (30,), (5,), (8, 29, 22, 1), (20,), (10,), (25,)]\n[('000111', '010101', '011100', '001110'), ('100010', '011011', '000110', '001101'), ('000010', '010000', '010111', '001001'), ('011010', '100001', '010011', '001100'), ('010010', '000100', '100000', '001011'), ('011000', '010001', '011111', '000011'), ('001111',), ('011110',), ('000101',), ('001000', '011101', '010110', '000001'), ('010100',), ('001010',), ('011001',)]\n" ] ], [ [ "# This code implements modular multiplication by 2 mod 15", "_____no_output_____" ] ], [ [ "import qiskit\nimport matplotlib\nfrom qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, QISKitError\nfrom qiskit.tools.visualization import circuit_drawer\nfrom qiskit.extensions.standard import cx, cswap\nfrom qiskit import Aer\n\nfrom qiskit import IBMQ\n# Authenticate an account and add for use during this session. 
Replace string\n# argument with your private token.\nIBMQ.enable_account(\"INSERT_YOUR_API_TOKEN_HERE\")\n\ndef mult_2mod15_quantum(qr,qc):\n # Swap 0th qubit and 3rd qubit\n qc.cx(qr[0],qr[3])\n qc.cx(qr[3],qr[0])\n qc.cx(qr[0],qr[3])\n\n # Swap 0th qubit and 1st qubit\n qc.cx(qr[1],qr[0])\n qc.cx(qr[0],qr[1])\n qc.cx(qr[1],qr[0])\n\n # Swap 1st qubit and 2nd qubit\n qc.cx(qr[1],qr[2])\n qc.cx(qr[2],qr[1])\n qc.cx(qr[1],qr[2])\n\n\ndef mult_2mod15_quantum_test(x):\n qr = QuantumRegister(4)\n cr = ClassicalRegister(4)\n qc = QuantumCircuit(qr,cr)\n\n # input\n x_bin='{0:04b}'.format(x)\n for i,b in enumerate(x_bin):\n if int(b):\n qc.x(qr[i])\n # run circuit\n mult_2mod15_quantum(qr,qc)\n\n # measure results\n for i in range(4):\n qc.measure(qr[i],cr[i])\n \n import time\n from qiskit.tools.visualization import plot_histogram\n backend=Aer.get_backend('qasm_simulator')\n shots=50\n job_exp = qiskit.execute(qc, backend=backend)\n result = job_exp.result()\n final=result.get_counts(qc)\n result_in_order=list(final.keys())[0]\n dec=0\n for i,b in enumerate(result_in_order):\n if int(b):\n dec+=2**i\n return (x,dec)\n\ndef mult_2mod15_classical_test(x):\n return (x,2*x%15)\n\n# testing!\nfor i in range(1,15):\n quantum=mult_2mod15_quantum_test(i)\n classical=mult_2mod15_classical_test(i)\n if quantum!=classical:\n print(quantum,classical)", "/Users/corbett/Documents/Mastering-Quantum-Computing-with-IBM-QX/book/lib/python3.7/site-packages/marshmallow/schema.py:364: ChangedInMarshmallow3Warning: strict=False is not recommended. In marshmallow 3.0, schemas will always be strict. See https://marshmallow.readthedocs.io/en/latest/upgrading.html#schemas-are-always-strict\n ChangedInMarshmallow3Warning\n" ] ], [ [ "## This code makes the previous an operation controlled by a control qubit", "_____no_output_____" ] ], [ [ "def controlled_mult_2mod15_quantum(qr,qc,control_qubit):\n \"\"\"\n Controlled quantum circuit for multiplication by 2 mod 15.\n Note: control qubit should an index greater than 3, \n and qubits 0,1,2,3 are reserved for circuit operations\n \"\"\"\n # Swap 0th qubit and 3rd qubit\n qc.cswap(control_qubit,qr[0],qr[3])\n\n # Swap 0th qubit and 1st qubit\n qc.cswap(control_qubit,qr[1],qr[0])\n\n # Swap 1st qubit and 2nd qubit\n qc.cswap(control_qubit,qr[1],qr[2])\n", "_____no_output_____" ] ], [ [ "# This code performas the entire Shor's algorithm subroutine for multiplication by 2 mod 15", "_____no_output_____" ] ], [ [ "import math\ndef shors_subroutine_period_2mod15(qr,qc,cr):\n qc.x(qr[0])\n qc.h(qr[4])\n qc.h(qr[4])\n qc.measure(qr[4],cr[0])\n\n qc.h(qr[5])\n qc.cx(qr[5],qr[0])\n qc.cx(qr[5],qr[2])\n if cr[0] == 1:\n qc.u1(math.pi/2,qr[4]) #pi/2 is 90 degrees in radians\n qc.h(qr[5])\n qc.measure(qr[5],cr[1])\n\n qc.h(qr[6])\n controlled_mult_2mod15_quantum(qr,qc,qr[6])\n if cr[1] == 1:\n qc.u1(math.pi/2,qr[6]) # pi/2 is 90 degrees in radians\n if cr[0] == 1:\n qc.u1(math.pi/4,qr[6]) #pi/4 is 45 degrees in radians\n qc.h(qr[6])\n qc.measure(qr[6],cr[2]) ", "_____no_output_____" ] ], [ [ "# This code will help us read out the results from our quantum Shor's subroutine. 
First, implementing the code to compute the period from the output of the quantum computation:\n", "_____no_output_____" ] ], [ [ "# see https://arxiv.org/pdf/quant-ph/0010034.pdf for more details (convergence relations on page 11)\nimport math\ndef continued_fraction(xi,max_steps=100): # stop_after is cutoff for algorithm, for debugging\n \"\"\"\n This function computes the continued fraction expansion of input xi\n per the recurrance relations on page 11 of https://arxiv.org/pdf/quant-ph/0010034.pdf\n \n \"\"\"\n #a and xi initial\n all_as=[]\n all_xis=[]\n a_0=math.floor(xi)\n xi_0=xi-a_0\n all_as+=[a_0]\n all_xis+=[xi_0]\n # p and q initial\n all_ps=[]\n all_qs=[]\n p_0=all_as[0]\n q_0=1\n all_ps+=[p_0]\n all_qs+=[q_0]\n \n xi_n=xi_0\n while not numpy.isclose(xi_n,0,atol=1e-7):\n if len(all_as)>=max_steps:\n print(\"Warning: algorithm did not converge within max_steps %d steps, try increasing max_steps\"%max_steps)\n break\n # computing a and xi\n a_nplus1=math.floor(1/xi_n)\n xi_nplus1=1/xi_n-a_nplus1\n all_as+=[a_nplus1]\n all_xis+=[xi_nplus1]\n xi_n=xi_nplus1\n # computing p and q\n n=len(all_as)-1\n if n==1:\n p_1=all_as[1]*all_as[0]+1\n q_1=all_as[1]\n all_ps+=[p_1]\n all_qs+=[q_1]\n else:\n p_n=all_as[n]*all_ps[n-1]+all_ps[n-2]\n q_n=all_as[n]*all_qs[n-1]+all_qs[n-2]\n all_ps+=[p_n]\n all_qs+=[q_n]\n return all_ps,all_qs,all_as,all_xis\n \nimport numpy\ndef test_continued_fraction():\n \"\"\"\n Testing the continued fraction see https://arxiv.org/pdf/quant-ph/0010034.pdf, step 2.5 chart page 20\n NOTE: I believe there is a mistake in this chart at the last row, and that n should range as in my code below\n their chart is missing one line. Please contact me if you find differently! \n \"\"\"\n xi=13453/16384\n all_ps,all_qs,all_as,all_xis=continued_fraction(xi)\n ## step 2.5 chart in https://arxiv.org/pdf/quant-ph/0010034.pdf page 20\n #n_13453_16384=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]\n #a_n_13453_16384=[0,1,4,1,1,2,3,1,1,3,1,1,1,1,3]\n #p_n_13453_16384=[0,1,4,5,9,23,78,101,179,638,817,1455,2272,3727,13453]\n #q_n_13453_16384=[1,1,5,6,11,28,95,123,218,777,995,1772,2767,4539,16384]\n ## what I find instead:\n n_13453_16384=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]\n a_n_13453_16384=[0,1,4,1,1,2,3,1,1,3,1,1,1,1,2,1]\n p_n_13453_16384=[0,1,4,5,9,23,78,101,179,638,817,1455,2272,3727,9726,13453]\n q_n_13453_16384=[1,1,5,6,11,28,95,123,218,777,995,1772,2767,4539,11845,16384]\n for tup in [(\"ns\",range(len(all_ps)),range(len(n_13453_16384))),\n (\"as\",all_as,a_n_13453_16384),\n (\"ps\",all_ps,p_n_13453_16384),\n (\"qs\",all_qs,q_n_13453_16384),\n ]:\n if not numpy.array_equal(tup[1],tup[2]):\n print(tup[0])\n print(\"act:\",tup[1])\n print(\"exp:\",tup[2])\n print()\n \nfrom IPython.display import display, Math\ndef pretty_print_continued_fraction(results,raw_latex=False):\n all_ps,all_qs,all_as,all_xis=results\n for i,vals in enumerate(zip(all_ps,all_qs,all_as,all_xis)):\n p,q,a,xi=vals\n if raw_latex:\n print(r'\\frac{p_%d}{q_%d}=\\frac{%d}{%d}'%(i,i,p,q))\n else:\n display(Math(r'$\\frac{p_%d}{q_%d}=\\frac{%d}{%d}$'%(i,i,p,q)))\n \n \ntest_continued_fraction()\n#pretty_print_continued_fraction(continued_fraction(5/8),raw_latex=True)\n#pretty_print_continued_fraction(continued_fraction(0/8))\npretty_print_continued_fraction(continued_fraction(6/8))", "_____no_output_____" ] ], [ [ "# Next we will integrate the check for whether we have found the period into the continued fraction code, so that we can stop computing the continued fraction as soon as we've found the period", 
"_____no_output_____" ] ], [ [ "import math\ndef period_from_quantum_measurement(quantum_measurement,\n number_qubits,\n a_shor,\n N_shor,\n max_steps=100): # stop_after is cutoff for algorithm, for debugging\n \"\"\"\n This function computes the continued fraction expansion of input xi\n per the recurrance relations on page 11 of https://arxiv.org/pdf/quant-ph/0010034.pdf\n a_shor is the random number chosen as part of Shor's algorithm\n N_shor is the number Shor's algorithm is trying to factor\n \"\"\"\n xi=quantum_measurement/2**number_qubits\n \n #a and xi initial\n all_as=[]\n all_xis=[]\n a_0=math.floor(xi)\n xi_0=xi-a_0\n all_as+=[a_0]\n all_xis+=[xi_0]\n # p and q initial\n all_ps=[]\n all_qs=[]\n p_0=all_as[0]\n q_0=1\n all_ps+=[p_0]\n all_qs+=[q_0]\n \n xi_n=xi_0\n while not numpy.isclose(xi_n,0,atol=1e-7):\n if len(all_as)>=max_steps:\n print(\"Warning: algorithm did not converge within max_steps %d steps, try increasing max_steps\"%max_steps)\n break\n # computing a and xi\n a_nplus1=math.floor(1/xi_n)\n xi_nplus1=1/xi_n-a_nplus1\n all_as+=[a_nplus1]\n all_xis+=[xi_nplus1]\n xi_n=xi_nplus1\n # computing p and q\n n=len(all_as)-1\n if n==1:\n p_1=all_as[1]*all_as[0]+1\n q_1=all_as[1]\n all_ps+=[p_1]\n all_qs+=[q_1]\n else:\n p_n=all_as[n]*all_ps[n-1]+all_ps[n-2]\n q_n=all_as[n]*all_qs[n-1]+all_qs[n-2]\n all_ps+=[p_n]\n all_qs+=[q_n]\n # check the q to see if it is our answer (note with this we skip the first q, as a trivial case)\n if a_shor**all_qs[-1]%N_shor == 1 % N_shor:\n return all_qs[-1]\n\nperiod_from_quantum_measurement(13453,14,3,91) #should return, for example 6 per page 20 of https://arxiv.org/pdf/quant-ph/0010034.pdf", "_____no_output_____" ], [ "# Testing this:\nimport qiskit\nfrom qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister\n\ndef binary_string_to_decimal(s):\n dec=0\n for i in s[::-1]:\n if int(i):\n dec+=2**int(i)\n return dec\n\ndef run_shors_subroutine_period2_mod15():\n qr = QuantumRegister(7)\n cr = ClassicalRegister(3)\n qc = QuantumCircuit(qr,cr)\n # initialize x to be a superposition of all possible r quibit values\n #for i in range(4):\n # qc.h(qr[i])\n # run circuit (which includes measurement steps)\n shors_subroutine_period_2mod15(qr,qc,cr)\n \n import time\n from qiskit.tools.visualization import plot_histogram\n backend=Aer.get_backend('qasm_simulator')\n job_exp = qiskit.execute(qc, backend=backend,shots=1)\n result = job_exp.result()\n final=result.get_counts(qc)\n # convert final result to decimal\n measurement=binary_string_to_decimal(list(final.keys())[0])\n period_r=period_from_quantum_measurement(measurement,3,2,15)\n return period_r\nprint(run_shors_subroutine_period2_mod15())\n\n", "4\n" ] ], [ [ "# The last thing to do will be to implement the full Shor's algorithm and check if the r is correct by plugging it in, getting factors and checking results. If not, rerun the algorithm. 
", "_____no_output_____" ] ], [ [ "def period_finding_quantum(a,N):\n # for the sake of example we will not implement this algorithm in full generality\n # rather, we will create an example with one specific a and one specific N\n # extension work could be done to impl\n if a==2 and N==15:\n return run_shors_subroutine_period2_mod15()\n else:\n raise Exception(\"Not implemented for N=%d, a=%d\" % (N,a))\n \ndef shors_algorithm_quantum(N,fixed_a=None):\n assert(N>0)\n assert(int(N)==N)\n while True:\n if not fixed_a:\n a=random.randint(0,N-1) \n else:\n a=fixed_a\n g=math.gcd(a,N)\n if g!=1 or N==1:\n first_factor=g\n second_factor=int(N/g)\n return first_factor,second_factor\n else:\n r=period_finding_quantum(a,N) \n if not r:\n continue\n if r % 2 != 0:\n continue\n elif a**(int(r/2)) % N == -1 % N:\n continue\n else:\n first_factor=math.gcd(a**int(r/2)+1,N)\n second_factor=math.gcd(a**int(r/2)-1,N)\n if first_factor==N or second_factor==N:\n continue\n if first_factor*second_factor!=N:\n # checking our work\n continue\n return first_factor,second_factor\n\n", "_____no_output_____" ], [ "# Here's our final result\nshors_algorithm_quantum(15,fixed_a=2)\n", "_____no_output_____" ], [ "# Now trying it out to see how the algorithm would function if we let it choose a given random a:\nfor a in range(15):\n # Here's the result for a given a:\n try:\n print(\"randomly chosen a=%d would result in %s\"%(a,shors_algorithm_quantum(15,fixed_a=a)))\n except:\n print(\"FINISH IMPLEMENTING algorithm doesn't work with a randomly chosen a=%d at this stage\"%a)\n ", "randomly chosen a=0 would result in (5, 3)\nFINISH IMPLEMENTING algorithm doesn't work with a randomly chosen a=1 at this stage\nrandomly chosen a=2 would result in (5, 3)\nrandomly chosen a=3 would result in (3, 5)\nFINISH IMPLEMENTING algorithm doesn't work with a randomly chosen a=4 at this stage\nrandomly chosen a=5 would result in (5, 3)\nrandomly chosen a=6 would result in (3, 5)\nFINISH IMPLEMENTING algorithm doesn't work with a randomly chosen a=7 at this stage\nFINISH IMPLEMENTING algorithm doesn't work with a randomly chosen a=8 at this stage\nrandomly chosen a=9 would result in (3, 5)\nrandomly chosen a=10 would result in (5, 3)\nFINISH IMPLEMENTING algorithm doesn't work with a randomly chosen a=11 at this stage\nrandomly chosen a=12 would result in (3, 5)\nFINISH IMPLEMENTING algorithm doesn't work with a randomly chosen a=13 at this stage\nFINISH IMPLEMENTING algorithm doesn't work with a randomly chosen a=14 at this stage\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb3a44416cd6cf490bdfb4698e8a215e8df0a715
12,516
ipynb
Jupyter Notebook
docs/examples/Holzworth_HS900.ipynb
janekfleper/Qcodes_contrib_drivers
d8eab2c9c90da272d3f0cdcb3ed0bcc08052f342
[ "MIT" ]
null
null
null
docs/examples/Holzworth_HS900.ipynb
janekfleper/Qcodes_contrib_drivers
d8eab2c9c90da272d3f0cdcb3ed0bcc08052f342
[ "MIT" ]
null
null
null
docs/examples/Holzworth_HS900.ipynb
janekfleper/Qcodes_contrib_drivers
d8eab2c9c90da272d3f0cdcb3ed0bcc08052f342
[ "MIT" ]
null
null
null
21.653979
386
0.483861
[ [ [ "# Example notebook for the Holzworth HS900B RF synthesizer", "_____no_output_____" ], [ "First we need to import our driver and define our source", "_____no_output_____" ] ], [ [ "from qcodes_contrib_drivers.drivers.Holzworth.HS900 import HS900", "Logging hadn't been started.\nActivating auto-logging. Current session state plus future input saved.\nFilename : C:\\Users\\G-GRE-GRE058050\\.qcodes\\logs\\command_history.log\nMode : append\nOutput logging : True\nRaw input log : False\nTimestamping : True\nState : active\nQcodes Logfile : C:\\Users\\G-GRE-GRE058050\\.qcodes\\logs\\210923-47112-qcodes.log\n" ], [ "source = HS900(name=\"RF_source\", address=\"TCPIP0::192.168.0.2::9760::SOCKET\")", "Connected to: Holzworth Instrumentation HS9002B (serial:#020, firmware:Ver:2.22) in 0.28s\n" ] ], [ [ "Now we have access to our driver. Let us first see which channels are available", "_____no_output_____" ] ], [ [ "source.channel_names()", "_____no_output_____" ] ], [ [ "Now we can easily access any channel of our RF source by specifying the channel after source. Let's check the state of our two channels.", "_____no_output_____" ] ], [ [ "source.CH1.state()", "_____no_output_____" ], [ "source.CH2.state()", "_____no_output_____" ] ], [ [ "We can also change the state by giving 'ON' or 'OFF' as the argument", "_____no_output_____" ] ], [ [ "source.CH1.state('OFF')\nsource.CH1.state()", "_____no_output_____" ], [ "source.CH1.state('ON')\nsource.CH1.state()", "_____no_output_____" ] ], [ [ "Let's get an overview over the different parameters. We can see frequency, phase, power, temperature at the output and the state", "_____no_output_____" ] ], [ [ "source.print_readable_snapshot(update=True)", "RF_source:\n\tparameter value\n--------------------------------------------------------------------------------\nIDN :\t{'vendor': 'Holzworth Instrumentation', 'model': 'HS9002B', '...\nchannel_names :\t['CH1', 'CH2'] \nref :\tInternal 100MHz \nref_locked :\tFalse \ntimeout :\t5 (s)\nRF_source_CH1:\n\tparameter value\n--------------------------------------------------------------------------------\nfrequency :\t1.7e+09 (Hz)\nphase :\t12.5 (deg)\npower :\t7.5 (dBm)\nstate :\tTrue \ntemp :\t30 (C)\nRF_source_CH2:\n\tparameter value\n--------------------------------------------------------------------------------\nfrequency :\t6e+09 (Hz)\nphase :\t120 (deg)\npower :\t-25 (dBm)\nstate :\tTrue \ntemp :\t30 (C)\n" ] ], [ [ "Frequency is always indicated in Hertz, phase always in degree, power always in dBm and temperature always in C. 
Except of temperatures all quantities are settable, just by giving the value to be set as an argument.", "_____no_output_____" ] ], [ [ "source.CH1.frequency(1.7e9)\nsource.CH2.frequency(6e9)", "_____no_output_____" ] ], [ [ "As before we can read out the frequency by giving no argument.", "_____no_output_____" ] ], [ [ "source.CH1.frequency()", "_____no_output_____" ], [ "source.CH2.frequency()", "_____no_output_____" ] ], [ [ "Same applies to power and phase", "_____no_output_____" ] ], [ [ "source.CH1.power(7.5)\nsource.CH2.power(-25)", "_____no_output_____" ], [ "source.CH1.power()", "_____no_output_____" ], [ "source.CH2.power()", "_____no_output_____" ], [ "source.CH1.phase(12.5)\nsource.CH2.phase(120)", "_____no_output_____" ], [ "source.CH1.phase()", "_____no_output_____" ], [ "source.CH2.phase()", "_____no_output_____" ] ], [ [ "The temperature at the output can only be gotten and not be set.", "_____no_output_____" ] ], [ [ "source.CH1.temp()", "_____no_output_____" ], [ "source.CH2.temp()", "_____no_output_____" ] ], [ [ "Let's have a look at the summary of our set properties again.", "_____no_output_____" ] ], [ [ "source.print_readable_snapshot(update=True)", "RF_source:\n\tparameter value\n--------------------------------------------------------------------------------\nIDN :\t{'vendor': 'Holzworth Instrumentation', 'model': 'HS9002B', '...\nchannel_names :\t['CH1', 'CH2'] \nref :\tInternal 100MHz \nref_locked :\tFalse \ntimeout :\t5 (s)\nRF_source_CH1:\n\tparameter value\n--------------------------------------------------------------------------------\nfrequency :\t1.7e+09 (Hz)\nphase :\t12.5 (deg)\npower :\t7.5 (dBm)\nstate :\tTrue \ntemp :\t30 (C)\nRF_source_CH2:\n\tparameter value\n--------------------------------------------------------------------------------\nfrequency :\t6e+09 (Hz)\nphase :\t120 (deg)\npower :\t-25 (dBm)\nstate :\tTrue \ntemp :\t30 (C)\n" ] ], [ [ "This concludes the introduction to the HS9002B driver. If required more advanced functions of the RF source like frequencies sweeps and modulation may be added. For the time being this was not required. Theoretically it should be compatible with other RF synthesizers from the HS900 series from Holzworth, but no checks have been conducted regarding the compatibility in practice.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb3a497c8e188ff305b35bf90f91dae494c3ed57
465,305
ipynb
Jupyter Notebook
Work_2/work_2-1.ipynb
samyon7/Statistics-Python
2130a85d7e8b791b4131c2ee6df8c8d8a237c3c0
[ "Apache-2.0" ]
null
null
null
Work_2/work_2-1.ipynb
samyon7/Statistics-Python
2130a85d7e8b791b4131c2ee6df8c8d8a237c3c0
[ "Apache-2.0" ]
null
null
null
Work_2/work_2-1.ipynb
samyon7/Statistics-Python
2130a85d7e8b791b4131c2ee6df8c8d8a237c3c0
[ "Apache-2.0" ]
null
null
null
91.794239
105,710
0.76097
[ [ [ "16", "_____no_output_____" ], [ "import random\nimport math\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom timeit import Timer", "_____no_output_____" ], [ "my_list = list(range(10**6))\nmy_array = np.array(my_list)", "_____no_output_____" ], [ "def for_add():\n return [item + 1 for item in my_list]\n\ndef vec_add():\n return my_array + 1", "_____no_output_____" ], [ "print(\"For loop addition :\")\nprint(min(Timer(for_add).repeat(10, 10)))\nprint(\"Vectorized addition :\")\nprint(min(Timer(vec_add).repeat(10,10)))", "For loop addition :\n0.8853031000000016\nVectorized addition :\n0.020530499999999563\n" ], [ "def for_mul():\n return [item * 2 for item in my_list]\n\ndef vec_mul():\n return my_array * 2\n\nprint('For-loop multiplication:')\nprint(min(Timer(for_mul).repeat(10, 10)))\nprint('Vectorized multiplication:')\nprint(min(Timer(vec_mul).repeat(10, 10)))", "For-loop multiplication:\n0.9040932999999995\nVectorized multiplication:\n0.022061900000004186\n" ], [ "import math\n\ndef for_sqrt():\n return [math.sqrt(item) for item in my_list]\n\ndef vec_sqrt():\n return np.sqrt(my_array)\n\nprint('For-loop square root:')\nprint(min(Timer(for_sqrt).repeat(10, 10)))\nprint('Vectorized square root:')\nprint(min(Timer(vec_sqrt).repeat(10, 10)))", "For-loop square root:\n1.9627419000000046\nVectorized square root:\n0.09173069999999939\n" ], [ "sample = np.random.normal()\nsample", "_____no_output_____" ], [ "sample = np.random.normal(loc=100, scale=10)\nsample", "_____no_output_____" ], [ "sample = np.random.normal(loc=100, scale=10,size=(2,3))\nsample", "_____no_output_____" ], [ "samples = np.random.poisson(lam=10, size=(2,2))\nsamples", "_____no_output_____" ], [ "np.random.randint(low=0, high=5, size=(2,5))", "_____no_output_____" ], [ "np.random.choice([1,3,4,-6], size=(2, 2))", "_____no_output_____" ], [ "a = [1,2,3,4]\nfor _ in range(3):\n np.random.shuffle(a)\n print(a)", "[3, 2, 1, 4]\n[4, 2, 1, 3]\n[2, 3, 4, 1]\n" ], [ "import random\n\nrandom.seed(0)", "_____no_output_____" ], [ "import pandas as pd\n\nmy_dict = {'col1': [1, 2], 'col2': np.array([3, 4]),'col3': [5, 6]}\ndf = pd.DataFrame(my_dict)\ndf", "_____no_output_____" ], [ "my_array = np.array([[1, 3, 5], [2, 4, 6]])\nalt_df = pd.DataFrame(my_array, columns=['col1', 'col2', 'col3'])\nalt_df", "_____no_output_____" ], [ "df.loc[1]", "_____no_output_____" ], [ "df.loc[0]", "_____no_output_____" ], [ "df.loc[[1,0]]", "_____no_output_____" ], [ "df.loc[[0,1]]", "_____no_output_____" ], [ "df.loc[0, ['col2', 'col3']]", "_____no_output_____" ], [ "for item in df.loc[:,'col3']:\n print(item)", "5\n6\n" ], [ "df.loc[0] = [3,6,9]\ndf", "_____no_output_____" ], [ "df['col2'] = [0,0]\ndf", "_____no_output_____" ], [ "df['col4'] = [10,10]\ndf", "_____no_output_____" ], [ "df.loc[3] = [1,2,3,4]", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df = pd.read_csv('D:\\RESEARCH\\Dasarnya\\MATH\\Step_4_Statistics and Calculus Python\\Source Code\\The-Statistics-and-Calculus-with-Python-Workshop\\Chapter02\\Exercise2.02\\dataset.csv',index_col='id')", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df = df.rename(columns = {'x':'col_x','y':'col_y','z':'col_z'})", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df = df.fillna(0)\ndf", "_____no_output_____" ], [ "df = df.astype(int)\ndf", "_____no_output_____" ], [ "df = df.drop([1,3,4], axis=0)\ndf", "_____no_output_____" ], [ "zero_df = pd.DataFrame(np.zeros((2,3)),columns=['col_x','col_y','col_z'])\nzero_df", 
"_____no_output_____" ], [ "df = pd.concat([df, zero_df], axis=0)\ndf", "_____no_output_____" ], [ "df = df.sort_values('col_x', axis=0)\ndf", "_____no_output_____" ], [ "df = df.astype(int)\ndf.to_csv('D:\\RESEARCH\\Dasarnya\\MATH\\Step_4_Statistics and Calculus Python\\My Source Code\\Work_2\\output.csv', index=False)", "_____no_output_____" ], [ "df = pd.DataFrame({'x':[1,2,-1],'y':[-3,6,5],'z':[1,3,2]})\ndf", "_____no_output_____" ], [ "df['x_squared'] = df['x'].apply(lambda x: x**2)\ndf", "_____no_output_____" ], [ "def parity_str(x):\n if x%2 == 0:\n return 'even'\n return 'odd'\n\ndf['x_parity'] = df['x'].apply(parity_str)\ndf", "_____no_output_____" ], [ "df['x_parity']", "_____no_output_____" ], [ "pd.get_dummies(df['x_parity'])", "_____no_output_____" ], [ "print(df['x_parity'].value_counts())", "odd 2\neven 1\nName: x_parity, dtype: int64\n" ], [ "import pandas as pd\nstudent_df = pd.DataFrame({'name':['Alice','Bob','Carol','Dan','Eli','Fran'],'gender':['female','male','female','male','male','female'],'class':['FY','SO','SR','SO','JR','SR'],'gpa':[90,93,97,89,95,92],'num_classes':[4,3,4,4,3,2]})\nstudent_df", "_____no_output_____" ], [ "student_df['female_flag'] = student_df['gender'].apply(lambda x:x == 'female')\nstudent_df", "_____no_output_____" ], [ "student_df['female_flag'] = student_df['gender'] == 'female'\nstudent_df", "_____no_output_____" ], [ "student_df = student_df.drop('gender',axis=1)\nstudent_df", "_____no_output_____" ], [ "pd.get_dummies(student_df['class'])", "_____no_output_____" ], [ "student_df = pd.concat([student_df.drop('class',axis=1),pd.get_dummies(student_df['class'])],axis=1)", "_____no_output_____" ], [ "student_df", "_____no_output_____" ], [ "gender_group = student_df.groupby('female_flag')\ngender_group", "_____no_output_____" ], [ "gender_group['gpa'].mean()", "_____no_output_____" ], [ "gender_group['num_classes'].sum()", "_____no_output_____" ], [ "x = [1,2,3,1.5,2]\ny = [-1,5,2,3,0]", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nplt.scatter(x,y)\nplt.show()", "_____no_output_____" ], [ "sizes = [10, 40, 60, 80, 100]\ncolors = ['r', 'b', 'y', 'g', 'k']\n\nplt.scatter(x, y, s=sizes, c=colors)\nplt.show()", "_____no_output_____" ], [ "import numpy as np\nx = np.linspace(0,10,1000)\ny = np.sin(x)\n\nplt.plot(x,y)\nplt.show()", "_____no_output_____" ], [ "x", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "x = np.linspace(1,10,1000)\nlinear_line = x\nsin_wave = np.sin(x)\nlog_curve = np.log(x)\n\ncurves = [linear_line, log_curve, sin_wave]\ncolors = ['k','r','b']\nstyles = ['-','--',':']\n\nfor curve,color,style in zip(curves, colors, styles):\n plt.plot(x, curve, c=color, linestyle=style)\n\nplt.show()", "_____no_output_____" ], [ "labels = ['Type 1', 'Type 2', 'Type 3']\ncounts = [2,3,5]\n\nplt.bar(labels,counts)\nplt.show()", "_____no_output_____" ], [ "import numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "samples = np.random.normal(0,1,size=1000)", "_____no_output_____" ], [ "x = np.linspace(samples.min(),samples.max(),1000)\ny = stats.norm.pdf(x)", "_____no_output_____" ], [ "x", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "plt.hist(samples,alpha=0.2, bins=30, density=True)\nplt.plot(x,y)\nplt.show()", "_____no_output_____" ], [ "samples = np.random.beta(2,5,size=1000)\nx = np.linspace(samples.min(),samples.max(),1000)\ny = stats.beta.pdf(x,2,5)\n\nplt.hist(samples, bins=20, alpha=0.2, density=True)\nplt.plot(x,y)\nplt.show()", 
"_____no_output_____" ], [ "samples = np.random.gamma(1, size=1000)\nx = np.linspace(samples.min(), samples.max(), 1000)\ny = stats.gamma.pdf(x, 1)\nplt.hist(samples, alpha=0.2, bins=20, density=True)\nplt.plot(x, y)\nplt.show()", "_____no_output_____" ], [ "x = np.random.normal(0,1,1000)\ny = np.random.normal(5,2,1000)\ndf = pd.DataFrame({'Column 1':x,'Column 2':y})\ndf", "_____no_output_____" ], [ "import seaborn as sns\n\nsns.jointplot(x='Column 1', y='Column 2', data=df)\nplt.show()", "_____no_output_____" ], [ "student_df = pd.DataFrame({\n 'name': ['Alice', 'Bob', 'Carol', 'Dan', 'Eli', 'Fran', \\\n 'George', 'Howl', 'Ivan', 'Jack', 'Kate'],\\\n 'gender': ['female', 'male', 'female', 'male', \\\n 'male', 'female', 'male', 'male', \\\n 'male', 'male', 'female'],\\\n 'class': ['JR', 'SO', 'SO', 'SO', 'JR', 'SR', \\\n 'FY', 'SO', 'SR', 'JR', 'FY'],\\\n 'gpa': [90, 93, 97, 89, 95, 92, 90, 87, 95, 100, 95],\\\n 'num_classes': [4, 3, 4, 4, 3, 2, 2, 3, 3, 4, 2]})\n", "_____no_output_____" ], [ "student_df", "_____no_output_____" ], [ "sns.catplot(x='class',y='gpa',hue='gender',kind='bar',data=student_df)\nplt.show()", "_____no_output_____" ], [ "student_df['gpa'].plot.hist()\nplt.show()", "_____no_output_____" ], [ "student_df['class'].value_counts().plot.pie()\nplt.show()", "_____no_output_____" ], [ "df = pd.read_csv('D:\\RESEARCH\\Dasarnya\\MATH\\Step_4_Statistics and Calculus Python\\My Source Code\\Work_2\\CommViolPredUnnormalizedData.txt')\ndf", "_____no_output_____" ], [ "for column in df.columns:\n print(column)", "communityname\nstate\ncountyCode\ncommunityCode\nfold\npopulation\nhouseholdsize\nracepctblack\nracePctWhite\nracePctAsian\nracePctHisp\nagePct12t21\nagePct12t29\nagePct16t24\nagePct65up\nnumbUrban\npctUrban\nmedIncome\npctWWage\npctWFarmSelf\npctWInvInc\npctWSocSec\npctWPubAsst\npctWRetire\nmedFamInc\nperCapInc\nwhitePerCap\nblackPerCap\nindianPerCap\nAsianPerCap\nOtherPerCap\nHispPerCap\nNumUnderPov\nPctPopUnderPov\nPctLess9thGrade\nPctNotHSGrad\nPctBSorMore\nPctUnemployed\nPctEmploy\nPctEmplManu\nPctEmplProfServ\nPctOccupManu\nPctOccupMgmtProf\nMalePctDivorce\nMalePctNevMarr\nFemalePctDiv\nTotalPctDiv\nPersPerFam\nPctFam2Par\nPctKids2Par\nPctYoungKids2Par\nPctTeen2Par\nPctWorkMomYoungKids\nPctWorkMom\nNumKidsBornNeverMar\nPctKidsBornNeverMar\nNumImmig\nPctImmigRecent\nPctImmigRec5\nPctImmigRec8\nPctImmigRec10\nPctRecentImmig\nPctRecImmig5\nPctRecImmig8\nPctRecImmig10\nPctSpeakEnglOnly\nPctNotSpeakEnglWell\nPctLargHouseFam\nPctLargHouseOccup\nPersPerOccupHous\nPersPerOwnOccHous\nPersPerRentOccHous\nPctPersOwnOccup\nPctPersDenseHous\nPctHousLess3BR\nMedNumBR\nHousVacant\nPctHousOccup\nPctHousOwnOcc\nPctVacantBoarded\nPctVacMore6Mos\nMedYrHousBuilt\nPctHousNoPhone\nPctWOFullPlumb\nOwnOccLowQuart\nOwnOccMedVal\nOwnOccHiQuart\nOwnOccQrange\nRentLowQ\nRentMedian\nRentHighQ\nRentQrange\nMedRent\nMedRentPctHousInc\nMedOwnCostPctInc\nMedOwnCostPctIncNoMtg\nNumInShelters\nNumStreet\nPctForeignBorn\nPctBornSameState\nPctSameHouse85\nPctSameCity85\nPctSameState85\nLemasSwornFT\nLemasSwFTPerPop\nLemasSwFTFieldOps\nLemasSwFTFieldPerPop\nLemasTotalReq\nLemasTotReqPerPop\nPolicReqPerOffic\nPolicPerPop\nRacialMatchCommPol\nPctPolicWhite\nPctPolicBlack\nPctPolicHisp\nPctPolicAsian\nPctPolicMinor\nOfficAssgnDrugUnits\nNumKindsDrugsSeiz\nPolicAveOTWorked\nLandArea\nPopDens\nPctUsePubTrans\nPolicCars\nPolicOperBudg\nLemasPctPolicOnPatr\nLemasGangUnitDeploy\nLemasPctOfficDrugUn\nPolicBudgPerPop\nmurders\nmurdPerPop\nrapes\nrapesPerPop\nrobberies\nrobbbPerPop\nassaults\nassaultPerPop\nburglar
ies\nburglPerPop\nlarcenies\nlarcPerPop\nautoTheft\nautoTheftPerPop\narsons\narsonsPerPop\nViolentCrimesPerPop\nnonViolPerPop\n" ], [ "print(len(df.columns))", "147\n" ], [ "df = df.replace('?',np.nan)", "_____no_output_____" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "print(df.isnull().sum()['NumStreet'])\nprint(df.isnull().sum()['PolicPerPop'])", "0\n1872\n" ], [ "state_count = df['state'].value_counts()\nf, ax = plt.subplots(figsize=(15,10))\nstate_count.plot.bar()\nplt.show()", "_____no_output_____" ], [ "f, ax = plt.subplots(figsize=(15,10))\nstate_count.plot.pie()\nplt.show()", "_____no_output_____" ], [ "f, ax = plt.subplots(figsize=(15,10))\ndf['population'].hist(bins=200)\nplt.show()", "_____no_output_____" ], [ "f, ax = plt.subplots(figsize=(15, 10))\ndf['householdsize'].hist(bins=200)\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3a4d3b3ab0b12383a3584258e9465122adac08
15,291
ipynb
Jupyter Notebook
eliza.ipynb
LanguegeEngineering/Eliza
412a8f390513a5107483647d710b9297230fc202
[ "MIT" ]
null
null
null
eliza.ipynb
LanguegeEngineering/Eliza
412a8f390513a5107483647d710b9297230fc202
[ "MIT" ]
null
null
null
eliza.ipynb
LanguegeEngineering/Eliza
412a8f390513a5107483647d710b9297230fc202
[ "MIT" ]
null
null
null
27.452424
183
0.45118
[ [ [ "## ELIZA\n\nCopyright (C) 2019 Szymon Jessa", "_____no_output_____" ], [ "### Kod Elizy", "_____no_output_____" ], [ "Importujemy biblioteki:", "_____no_output_____" ] ], [ [ "import doctest\nimport re", "_____no_output_____" ] ], [ [ "Tworzymy zmienną globalną, która będzie zapisywała wypowiedzi podczas konwersacji. ", "_____no_output_____" ] ], [ [ "memstack = []", "_____no_output_____" ] ], [ [ "Funkcja odpowiadająca za przetworzenie wypowiedzi użytkownika i zaproponowanie odpowiedzi.", "_____no_output_____" ] ], [ [ "def get_keystack(user_input):\n # scan all words in the utterance for keywords one-by-one\n # and put tuples (keyword, rank) in the list\n # ***CHALLENGE*** modify the code to handle UPPERCASE LETTERS and punctuations\n keystack = []\n for w in user_input.split():\n if w in script:\n keystack.append((w, script[w].get(\"rank\", 0)))\n\n # sort keywords by rank, descending\n keystack = sorted(keystack, key=lambda i: i[1])\n\n # extract keywords from tuples\n keystack = [w for w, r in keystack]\n\n return keystack", "_____no_output_____" ] ], [ [ "Funkcja \"zapamiętująca\" słowa kluczowe pojawiające się we wcześniejszej rozmowie:", "_____no_output_____" ] ], [ [ "def memorize_user_input(user_input, user_input_trans):\n # scan all words for memory keywords\n memory_keywords = []\n for w in user_input.split():\n if w in script_memory:\n memory_keywords.append(w)\n memory_keywords = list(set(memory_keywords))\n\n for k in memory_keywords:\n # use first reassembly rule\n # ***CHALLENGE*** modify the code to use more than one reassembly rule!\n memresp = re.sub(script_memory[k][\"decomposition\"], script_memory[k][\"reassembly\"][0], user_input_trans)\n memstack.append(memresp)\n\n", "_____no_output_____" ] ], [ [ "Funkcja wybierająca odpowiedź Elizy:", "_____no_output_____" ] ], [ [ "def process(user_input):\n \"\"\" (str) -> str\n Return Eliza's answer for given input\n Return responses associated with the matched keyword\n Return responses for \"none\" keyword or use memory if other keywords not found\n \"\"\"\n\n # extract keywords\n keystack = get_keystack(user_input)\n\n # transform user input using substitutions\n user_input_trans = \" \".join(map(lambda w: substitutions.get(w, w), user_input.split()))\n\n # find a response based on the keywords\n resp = \"\"\n if keystack:\n # get response associated with the highest ranked keyword\n kw = keystack[-1] # get top ranked keyword\n rule = script[kw] # get script rule (decomposition & reassembly) for this keyword\n # check if the decomposition rule cane be applied for the utterance \n if re.search(rule[\"decomposition\"], user_input_trans):\n # if yes, generate response using the first reassembly rule\n trans = rule[\"reassembly\"].pop(0) # pop first reassembly from list\n rule[\"reassembly\"].append(trans) # append the reassembly rule to the end of the list\n resp = re.sub(rule[\"decomposition\"], trans, user_input_trans) # generate response\n\n # if none keywords or rules were found, try to use memory\n if resp == \"\":\n if memstack:\n resp = memstack.pop(0)\n # if there are no responses in memory,\n # use default responses associated with special keyword \"none\"\n else:\n # ***CHALLENGE*** modify the code to support more than one default answer (reassembly rule)\n # ***CHALLENGE*** ensure the code won't break if \"none\" key doesn't exist\n resp = script[\"none\"][\"reassembly\"][0]\n\n # if possible, generate new responses to be stored in memory for later\n memorize_user_input(user_input, user_input_trans)\n\n 
return str(resp)", "_____no_output_____" ] ], [ [ "Funkcja do uruchomiania czatu:", "_____no_output_____" ] ], [ [ "def chat():\n \"\"\" () -> None\n Allows interaction with Eliza in a loop:\n - read input from console\n - get Eliza's response using process function\n - write Eliza's response to console\n - exit if input string length is 0\n \"\"\"\n \n print(\"<hit enter with no input to exit>\")\n print(\"Eliza: How do you do. Please tell me your problem\")\n while True:\n utt = input(\"Student: \")\n if not utt:\n break\n\n resp = process(utt)\n print(\"Eliza: %s\" % resp)", "_____no_output_____" ] ], [ [ "### Testy\n\nTutaj puszczamy testy dla konkretnych zapytań dla Elizy. Doc testy puszczają dany kod oznaczony ```>>>``` i oczekują odpowiedzi zgodnej z tym, co jest poniżej. Oto przykład:", "_____no_output_____" ] ], [ [ "def task0_true():\n \"\"\"\n >>> 2+2\n 4\n \"\"\"", "_____no_output_____" ], [ "def task0_false():\n \"\"\"\n >>> 2+2\n 5\n \"\"\"", "_____no_output_____" ], [ "doctest.run_docstring_examples(task0_true, globals(), name=\"task0\", verbose=True)", "_____no_output_____" ], [ "doctest.run_docstring_examples(task0_false, globals(), name=\"task0\", verbose=True)", "_____no_output_____" ], [ "def task1():\n \"\"\"\n >>> process(\"I have no problems\")\n \"Are you saying 'no' just to be negative?\"\n >>> process(\"no\")\n 'You are being a bit negative'\n >>> process(\"no\")\n 'Why not'\n \"\"\"\n pass", "_____no_output_____" ], [ "def task2():\n \"\"\"\n >>> process(\"hmm\")\n 'I am not sure I understand you fully'\n \"\"\"\n pass", "_____no_output_____" ], [ "def task3():\n \"\"\"\n >>> process(\"no, I am not a negative person\")\n 'Is it because you are not a negative person that you came to me?'\n >>> process(\"no\")\n 'You are being a bit negative'\n \"\"\"\n pass", "_____no_output_____" ], [ "def task4():\n \"\"\"\n >>> process(\"you are repeating yourself\")\n 'What makes you think I am repeating myself?'\n >>> process(\"you are kidding me\")\n 'Does it please you to believe I am kidding you?'\n \"\"\"\n pass", "_____no_output_____" ], [ "def task5():\n \"\"\"\n >>> process(\"my wife said I am optimistic\")\n 'Tell me more about your family'\n >>> process(\"now I am sad\")\n 'Is it because you are sad that you came to me?'\n >>> process(\"maybe\")\n 'But your wife said you are optimistic?'\n \"\"\"\n pass", "_____no_output_____" ] ], [ [ "### Skrypt rozmowy\n\nTutaj znajduje się skrypt rozmowy. W słowniku 'decomposition' to szukane wyrażenia regularne, natomiast 'reassembly' to odpowiedź Elizy. ", "_____no_output_____" ] ], [ [ "script = {\n \"no\": {\"decomposition\": r\"^.*$\",\n \"reassembly\": [\n \"Are you saying 'no' just to be negative?\"]}}", "_____no_output_____" ] ], [ [ "Zastępstwa - tutaj możemy zmieniać pewne wyrażenia, żeby płynniej prowadzić rozmowę.", "_____no_output_____" ] ], [ [ "substitutions = {}", "_____no_output_____" ] ], [ [ "Script memory - tutaj tworzymy skrypt rozmowy (j.w.) 
but using the previously memorized words.", "_____no_output_____" ] ], [ [ "script_memory = {}", "_____no_output_____" ] ], [ [ "Some of the tests below involve randomness, so a test may occasionally need to be rerun for the result to come out right.", "_____no_output_____" ] ], [ [ "#doctest.run_docstring_examples(task1, globals(), name=\"task1\", verbose=True)", "_____no_output_____" ], [ "#doctest.run_docstring_examples(task2, globals(), name=\"task2\", verbose=True)\n", "_____no_output_____" ], [ "#doctest.run_docstring_examples(task3, globals(), name=\"task3\", verbose=True)\n", "_____no_output_____" ], [ "#doctest.run_docstring_examples(task4, globals(), name=\"task4\", verbose=True)\n", "_____no_output_____" ], [ "#doctest.run_docstring_examples(task4, globals(), name=\"task5\", verbose=True)", "_____no_output_____" ] ], [ [ "Here you can start a conversation with Eliza.", "_____no_output_____" ] ], [ [ "chat()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb3a5e727e2f6412654d619c82dd39d9dccf374f
51,510
ipynb
Jupyter Notebook
ML0101EN-w3.4-Clas-SVM-cancer-py-v1.ipynb
cibergus/ML-with-Python
9da70401761881cca388c7e749aad3a9d534d6b3
[ "BSD-4-Clause-UC" ]
null
null
null
ML0101EN-w3.4-Clas-SVM-cancer-py-v1.ipynb
cibergus/ML-with-Python
9da70401761881cca388c7e749aad3a9d534d6b3
[ "BSD-4-Clause-UC" ]
null
null
null
ML0101EN-w3.4-Clas-SVM-cancer-py-v1.ipynb
cibergus/ML-with-Python
9da70401761881cca388c7e749aad3a9d534d6b3
[ "BSD-4-Clause-UC" ]
null
null
null
63.047736
15,220
0.743603
[ [ [ "<a href=\"https://www.bigdatauniversity.com\"><img src=\"https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png\" width=\"400\" align=\"center\"></a>\n\n<h1 align=center><font size=\"5\"> SVM (Support Vector Machines)</font></h1>", "_____no_output_____" ], [ "In this notebook, you will use SVM (Support Vector Machines) to build and train a model using human cell records, and classify cells to whether the samples are benign or malignant.\n\nSVM works by mapping data to a high-dimensional feature space so that data points can be categorized, even when the data are not otherwise linearly separable. A separator between the categories is found, then the data is transformed in such a way that the separator could be drawn as a hyperplane. Following this, characteristics of new data can be used to predict the group to which a new record should belong.", "_____no_output_____" ], [ "<h1>Table of contents</h1>\n\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ol>\n <li><a href=\"#load_dataset\">Load the Cancer data</a></li>\n <li><a href=\"#modeling\">Modeling</a></li>\n <li><a href=\"#evaluation\">Evaluation</a></li>\n <li><a href=\"#practice\">Practice</a></li>\n </ol>\n</div>\n<br>\n<hr>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport pylab as pl\nimport numpy as np\nimport scipy.optimize as opt\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\n%matplotlib inline \nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "<h2 id=\"load_dataset\">Load the Cancer data</h2>\nThe example is based on a dataset that is publicly available from the UCI Machine Learning Repository (Asuncion and Newman, 2007)[http://mlearn.ics.uci.edu/MLRepository.html]. The dataset consists of several hundred human cell sample records, each of which contains the values of a set of cell characteristics. The fields in each record are:\n\n|Field name|Description|\n|--- |--- |\n|ID|Clump thickness|\n|Clump|Clump thickness|\n|UnifSize|Uniformity of cell size|\n|UnifShape|Uniformity of cell shape|\n|MargAdh|Marginal adhesion|\n|SingEpiSize|Single epithelial cell size|\n|BareNuc|Bare nuclei|\n|BlandChrom|Bland chromatin|\n|NormNucl|Normal nucleoli|\n|Mit|Mitoses|\n|Class|Benign or malignant|\n\n<br>\n<br>\n\nFor the purposes of this example, we're using a dataset that has a relatively small number of predictors in each record. To download the data, we will use `!wget` to download it from IBM Object Storage. \n__Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)", "_____no_output_____" ] ], [ [ "#Click here and press Shift+Enter\n!wget -O cell_samples.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/cell_samples.csv", "--2020-02-23 16:21:06-- https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/cell_samples.csv\nResolving s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)... 67.228.254.196\nConnecting to s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)|67.228.254.196|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 20675 (20K) [text/csv]\nSaving to: ‘cell_samples.csv’\n\ncell_samples.csv 100%[===================>] 20.19K --.-KB/s in 0.02s \n\n2020-02-23 16:21:06 (973 KB/s) - ‘cell_samples.csv’ saved [20675/20675]\n\n" ] ], [ [ "### Load Data From CSV File ", "_____no_output_____" ] ], [ [ "cell_df = pd.read_csv(\"cell_samples.csv\")\ncell_df.head()", "_____no_output_____" ] ], [ [ "The ID field contains the patient identifiers. The characteristics of the cell samples from each patient are contained in fields Clump to Mit. The values are graded from 1 to 10, with 1 being the closest to benign.\n\nThe Class field contains the diagnosis, as confirmed by separate medical procedures, as to whether the samples are benign (value = 2) or malignant (value = 4).\n\nLets look at the distribution of the classes based on Clump thickness and Uniformity of cell size:", "_____no_output_____" ] ], [ [ "ax = cell_df[cell_df['Class'] == 4][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='DarkBlue', label='malignant');\ncell_df[cell_df['Class'] == 2][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='Yellow', label='benign', ax=ax);\nplt.show()", "_____no_output_____" ] ], [ [ "## Data pre-processing and selection", "_____no_output_____" ], [ "Lets first look at columns data types:", "_____no_output_____" ] ], [ [ "cell_df.dtypes", "_____no_output_____" ] ], [ [ "It looks like the __BareNuc__ column includes some values that are not numerical. We can drop those rows:", "_____no_output_____" ] ], [ [ "cell_df = cell_df[pd.to_numeric(cell_df['BareNuc'], errors='coerce').notnull()]\ncell_df['BareNuc'] = cell_df['BareNuc'].astype('int')\ncell_df.dtypes", "_____no_output_____" ], [ "feature_df = cell_df[['Clump', 'UnifSize', 'UnifShape', 'MargAdh', 'SingEpiSize', 'BareNuc', 'BlandChrom', 'NormNucl', 'Mit']]\nX = np.asarray(feature_df)\nX[0:5]", "_____no_output_____" ] ], [ [ "We want the model to predict the value of Class (that is, benign (=2) or malignant (=4)). As this field can have one of only two possible values, we need to change its measurement level to reflect this.", "_____no_output_____" ] ], [ [ "cell_df['Class'] = cell_df['Class'].astype('int')\ny = np.asarray(cell_df['Class'])\ny [0:5]", "_____no_output_____" ] ], [ [ "## Train/Test dataset", "_____no_output_____" ], [ "Okay, we split our dataset into train and test set:", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)\nprint ('Train set:', X_train.shape, y_train.shape)\nprint ('Test set:', X_test.shape, y_test.shape)", "Train set: (546, 9) (546,)\nTest set: (137, 9) (137,)\n" ] ], [ [ "<h2 id=\"modeling\">Modeling (SVM with Scikit-learn)</h2>", "_____no_output_____" ], [ "The SVM algorithm offers a choice of kernel functions for performing its processing. Basically, mapping data into a higher dimensional space is called kernelling. The mathematical function used for the transformation is known as the kernel function, and can be of different types, such as:\n\n 1.Linear\n 2.Polynomial\n 3.Radial basis function (RBF)\n 4.Sigmoid\nEach of these functions has its characteristics, its pros and cons, and its equation, but as there's no easy way of knowing which function performs best with any given dataset, we usually choose different functions in turn and compare the results. 
Let's just use the default, RBF (Radial Basis Function) for this lab.", "_____no_output_____" ] ], [ [ "from sklearn import svm\nclf = svm.SVC(kernel='rbf')\nclf.fit(X_train, y_train) ", "/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n" ] ], [ [ "After being fitted, the model can then be used to predict new values:", "_____no_output_____" ] ], [ [ "yhat = clf.predict(X_test)\nyhat [0:5]", "_____no_output_____" ] ], [ [ "<h2 id=\"evaluation\">Evaluation</h2>", "_____no_output_____" ] ], [ [ "from sklearn.metrics import classification_report, confusion_matrix\nimport itertools", "_____no_output_____" ], [ "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "_____no_output_____" ], [ "# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, yhat, labels=[2,4])\nnp.set_printoptions(precision=2)\n\nprint (classification_report(y_test, yhat))\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['Benign(2)','Malignant(4)'],normalize= False, title='Confusion matrix')", " precision recall f1-score support\n\n 2 1.00 0.94 0.97 90\n 4 0.90 1.00 0.95 47\n\n micro avg 0.96 0.96 0.96 137\n macro avg 0.95 0.97 0.96 137\nweighted avg 0.97 0.96 0.96 137\n\nConfusion matrix, without normalization\n[[85 5]\n [ 0 47]]\n" ] ], [ [ "You can also easily use the __f1_score__ from sklearn library:", "_____no_output_____" ] ], [ [ "from sklearn.metrics import f1_score\nf1_score(y_test, yhat, average='weighted') ", "_____no_output_____" ] ], [ [ "Lets try jaccard index for accuracy:", "_____no_output_____" ] ], [ [ "from sklearn.metrics import jaccard_similarity_score\njaccard_similarity_score(y_test, yhat)", "_____no_output_____" ] ], [ [ "<h2 id=\"practice\">Practice</h2>\nCan you rebuild the model, but this time with a __linear__ kernel? You can use __kernel='linear'__ option, when you define the svm. 
How the accuracy changes with the new kernel function?", "_____no_output_____" ] ], [ [ "# write your code here\n", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n \nclf2 = svm.SVC(kernel='linear')\nclf2.fit(X_train, y_train) \nyhat2 = clf2.predict(X_test)\nprint(\"Avg F1-score: %.4f\" % f1_score(y_test, yhat2, average='weighted'))\nprint(\"Jaccard score: %.4f\" % jaccard_similarity_score(y_test, yhat2))\n\n-->", "_____no_output_____" ], [ "<h2>Want to learn more?</h2>\n\nIBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href=\"http://cocl.us/ML0101EN-SPSSModeler\">SPSS Modeler</a>\n\nAlso, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href=\"https://cocl.us/ML0101EN_DSX\">Watson Studio</a>\n\n<h3>Thanks for completing this lesson!</h3>\n\n<h4>Author: <a href=\"https://ca.linkedin.com/in/saeedaghabozorgi\">Saeed Aghabozorgi</a></h4>\n<p><a href=\"https://ca.linkedin.com/in/saeedaghabozorgi\">Saeed Aghabozorgi</a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p>\n\n<hr>\n\n<p>Copyright &copy; 2018 <a href=\"https://cocl.us/DX0108EN_CC\">Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href=\"https://bigdatauniversity.com/mit-license/\">MIT License</a>.</p>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb3a6c357b695d49663a8cf9164278e11df0cd44
256,373
ipynb
Jupyter Notebook
src/jupyter/python/optimisation.ipynb
vishalbelsare/tsa
203e602fe5fc95b89afb454156fc7e4faee90f2a
[ "Apache-2.0" ]
117
2017-06-30T14:29:32.000Z
2022-02-10T00:54:35.000Z
src/jupyter/python/optimisation.ipynb
vishalbelsare/tsa
203e602fe5fc95b89afb454156fc7e4faee90f2a
[ "Apache-2.0" ]
2
2017-09-01T11:42:14.000Z
2017-11-29T20:00:19.000Z
src/jupyter/python/optimisation.ipynb
vishalbelsare/tsa
203e602fe5fc95b89afb454156fc7e4faee90f2a
[ "Apache-2.0" ]
37
2017-07-05T19:51:10.000Z
2021-04-27T00:11:18.000Z
236.506458
133,071
0.620409
[ [ [ "%matplotlib notebook", "_____no_output_____" ], [ "import os, sys\nsys.path.append(os.path.abspath('../../main/python'))", "_____no_output_____" ], [ "import math\nimport time\n\nimport thalesians.tsa.evaluation as evaluation\nimport thalesians.tsa.numpyutils as npu\nimport thalesians.tsa.optimization as optimization\nimport thalesians.tsa.optimization.visual as visual", "_____no_output_____" ], [ "npu.init_warnings()", "_____no_output_____" ], [ "def sphere_func(x, y, z):\n import time\n time.sleep(1)\n return x*x + y*y + z*z", "_____no_output_____" ], [ "output = optimization.grid_search(\n sphere_func, {\n 'x': [-1., -.5, 0., .5, 1.],\n 'y': [-1., -.5, 0., .5, 1.],\n 'z': [-1., -.5, 0., .5, 1.]\n }, evaluator=evaluation.IPyParallelEvaluator(),\n optimization_id='sphere_func')", "_____no_output_____" ], [ "output", "_____no_output_____" ], [ "visual.visualize_grid_search(output, refresh_until_ready=True);", "_____no_output_____" ], [ "output", "_____no_output_____" ], [ "[x.ready for x in output.evaluation_statuses]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3a70dedbe3c50da7dd2c452b3dbbb76a673047
449,069
ipynb
Jupyter Notebook
examples/scattered_light_sim.ipynb
Liz-Strong/slvel
018e0ab028adf87c71d694a88f61a5b84c87a2f8
[ "BSD-3-Clause" ]
2
2021-02-25T22:03:50.000Z
2022-01-12T17:04:11.000Z
examples/scattered_light_sim.ipynb
Liz-Strong/slvel
018e0ab028adf87c71d694a88f61a5b84c87a2f8
[ "BSD-3-Clause" ]
null
null
null
examples/scattered_light_sim.ipynb
Liz-Strong/slvel
018e0ab028adf87c71d694a88f61a5b84c87a2f8
[ "BSD-3-Clause" ]
1
2022-01-12T22:15:37.000Z
2022-01-12T22:15:37.000Z
127.071024
92,951
0.801973
[ [ [ "%reset", "Once deleted, variables cannot be recovered. Proceed (y/[n])? y\n" ] ], [ [ "# Simulate particles translating through OAM beam \n\nLiz Strong 4/17/2020", "_____no_output_____" ] ], [ [ "import sys\nsys.path.append('../slvel')\n\nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom calc_intensity import calculate_e_field_intensity\nfrom scattering_particle import Particle\nimport scattering_sim as scatsim\nimport random", "_____no_output_____" ] ], [ [ "### make pretty plots", "_____no_output_____" ] ], [ [ "%matplotlib notebook", "_____no_output_____" ] ], [ [ "### calculate intensity", "_____no_output_____" ] ], [ [ "xval = 986 # grid x size [pixels]\nyval = 616 # grid y size [pixels]\nl = 4 # OAM azimuthal mode number\nw0 = 93.8458 # beam waist [pixels]\nintensity, intensity_shape = calculate_e_field_intensity(l=l,\n p=0,\n w0=w0,\n x=xval,\n y=yval,\n petaledbeam=True)", "_____no_output_____" ], [ "plt.figure()\nplt.imshow(intensity)\nplt.colorbar()\n\nplt.title('Intensity field')", "_____no_output_____" ] ], [ [ "### make particle to sample the intensity field", "_____no_output_____" ] ], [ [ "r = 20 # particle radius [pixels]\np1 = Particle(intensity_shape,\n particle_radius=r,\n orbit_radius=75,\n orbit_offset_x=75,\n orbit_offset_y=0,\n v=5000,\n sample_rate=100000,\n direction=-1)", "_____no_output_____" ] ], [ [ "### presum intensities particle will experience\n\nThis is slow, so calculate the file once and then save it to load the file later. ", "_____no_output_____" ] ], [ [ "#presummed_intensity = p1.calculate_sampled_intensities_throughout(Intensity_normalized)\n#np.save('psi_11_24_19_w0_93.8458.npy', presummed_intensity)\npsi = np.load('psi_11_24_19_w0_93.8458.npy')", "_____no_output_____" ] ], [ [ "### calculate intensities on particle's orbit", "_____no_output_____" ], [ "### set parameters for simulation\n\nTotal number of simulations: num_theta * num_radii * num_d * num_angvels", "_____no_output_____" ] ], [ [ "offset_x = [355, 425] # x coordinate of D [pixels]\noffset_y = [0,0] # y coordinate of D [pixels]\norbit_radius = {} # orbit radius [pixels]\nD = [np.sqrt(x**2+y**2) for x,y in zip(offset_x, offset_y)] # D [pixels]\nradius = [490, 325] # orbit radius [pixels]\nfor dist, rad in zip(D, radius): \n orbit_radius[dist] = [rad]\norbit_radius = [490, 325]\nangle = np.arctan2(offset_y, offset_x) # phi [rad]\nangular_velocities = [250, 250] # Omega [rad/s]", "_____no_output_____" ] ], [ [ "### Make time series.", "_____no_output_____" ] ], [ [ "data = scatsim.simulate_time_series(p1, \n psi, \n intensity_shape, \n offset_x, \n offset_y, \n D, \n angle, \n angular_velocities, \n orbit_radius, \n xval, \n yval)\n\ntime_keep = data[0]\nintensities_keep = data[1]\npositions_keep = data[2]\nangular_velocities_keep = data[3]\nR_keep = data[4] \nd_keep = data[5]\noffset_x_keep = data[6]\noffset_y_keep = data[7]\ntheta_keep = data[8]", "_____no_output_____" ] ], [ [ "### plot results", "_____no_output_____" ] ], [ [ "plt.figure()\nplt.imshow(intensity)\n\nfor selxn in range(2):\n plt.plot(positions_keep[selxn].T[0],positions_keep[selxn].T[1],'g')\n plt.plot(positions_keep[selxn].T[0][0],positions_keep[selxn].T[1][0],'ms')\nplt.colorbar()\n\nplt.figure()\nfor selxn in range(2):\n plt.plot(intensities_keep[selxn],'.',label='R='+str(orbit_radius[selxn]))\nplt.legend()\n", "_____no_output_____" ] ], [ [ "### add noise & concatenate into a long time series", "_____no_output_____" ] ], [ [ "timeseries_time, timeseries_intensity, 
intensities_extended = scatsim.concat_timeseries(intensities_keep, \n time_keep, \n ext_length=420)\nplt.figure()\nplt.plot(timeseries_time, timeseries_intensity)\nplt.xlabel('Time, t [sec]')\nplt.ylabel('Signal, y [summed intensity]')", "_____no_output_____" ] ], [ [ "### Save angular velocities with times to correlate them later for ML training purposes", "_____no_output_____" ] ], [ [ "# save angular velocities corresponding to each seg\nvs = scatsim.save_series_info(angular_velocities_keep, intensities_keep, intensities_extended)\n\n# save orbit radii corresponding to each seg \nRs = scatsim.save_series_info(R_keep, intensities_keep, intensities_extended)\n\n# save x offset radii corresponding to each seg \ndelta_xs = scatsim.save_series_info(offset_x_keep, intensities_keep, intensities_extended)\n\n# save y offset radii corresponding to each seg \ndelta_ys = scatsim.save_series_info(offset_y_keep, intensities_keep, intensities_extended)\n", "_____no_output_____" ] ], [ [ "### save data", "_____no_output_____" ] ], [ [ "data_to_save = np.array([timeseries_time,timeseries_intensity,angular_velocities_keep,vs,Rs,delta_xs,delta_ys])\nnp.save('example_simulated_signal.npy',data_to_save)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb3a7591b6e0885cb3126e7cbdeb76eb4a1e4568
946,322
ipynb
Jupyter Notebook
Model backlog/Models/93-openvaccine-6xconv-bigru-traintestaug-loss-5.ipynb
dimitreOliveira/COVID-19-Vaccine-Degradation-Prediction
c726c1a87aefa5dca8d1df3be79ee06ff4da99e4
[ "MIT" ]
null
null
null
Model backlog/Models/93-openvaccine-6xconv-bigru-traintestaug-loss-5.ipynb
dimitreOliveira/COVID-19-Vaccine-Degradation-Prediction
c726c1a87aefa5dca8d1df3be79ee06ff4da99e4
[ "MIT" ]
null
null
null
Model backlog/Models/93-openvaccine-6xconv-bigru-traintestaug-loss-5.ipynb
dimitreOliveira/COVID-19-Vaccine-Degradation-Prediction
c726c1a87aefa5dca8d1df3be79ee06ff4da99e4
[ "MIT" ]
1
2020-11-08T14:43:09.000Z
2020-11-08T14:43:09.000Z
369.946052
775,932
0.900606
[ [ [ "## Dependencies", "_____no_output_____" ] ], [ [ "from openvaccine_scripts import *\nimport warnings, json\nfrom sklearn.model_selection import KFold, StratifiedKFold, GroupKFold\nimport tensorflow.keras.layers as L\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras import optimizers, losses, Model\nfrom tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau\n\n\nSEED = 0\nseed_everything(SEED)\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "# Model parameters", "_____no_output_____" ] ], [ [ "config = {\n \"BATCH_SIZE\": 32,\n \"EPOCHS\": 70,\n \"LEARNING_RATE\": 1e-3,\n \"ES_PATIENCE\": 10,\n \"N_FOLDS\": 5,\n \"N_USED_FOLDS\": 5,\n \"PB_SEQ_LEN\": 107,\n \"PV_SEQ_LEN\": 130,\n}\n\nwith open('config.json', 'w') as json_file:\n json.dump(json.loads(json.dumps(config)), json_file)\n \nconfig", "_____no_output_____" ] ], [ [ "# Load data", "_____no_output_____" ] ], [ [ "database_base_path = '/kaggle/input/stanford-covid-vaccine/'\ntrain = pd.read_json(database_base_path + 'train.json', lines=True)\ntest = pd.read_json(database_base_path + 'test.json', lines=True)\n\nprint('Train samples: %d' % len(train))\ndisplay(train.head())\nprint(f'Test samples: {len(test)}')\ndisplay(test.head())", "Train samples: 2400\n" ] ], [ [ "## Data augmentation", "_____no_output_____" ] ], [ [ "def aug_data(df):\n target_df = df.copy()\n new_df = aug_df[aug_df['id'].isin(target_df['id'])]\n \n del target_df['structure']\n del target_df['predicted_loop_type']\n new_df = new_df.merge(target_df, on=['id','sequence'], how='left')\n\n df['cnt'] = df['id'].map(new_df[['id','cnt']].set_index('id').to_dict()['cnt'])\n df['log_gamma'] = 100\n df['score'] = 1.0\n \n new_df['augmented'] = True\n df['augmented'] = False\n df = df.append(new_df[df.columns])\n return df\n\n# Augmented data\naug_df = pd.read_csv('/kaggle/input/augmented-data-for-stanford-covid-vaccine/48k_augment.csv')\nprint(f'Augmented samples: {len(aug_df)}')\ndisplay(aug_df.head())\n\nprint(f\"Samples in train before augmentation: {len(train)}\")\nprint(f\"Samples in test before augmentation: {len(test)}\")\n\ntrain = aug_data(train)\ntrain.drop('index', axis=1, inplace=True)\ntrain = train.reset_index()\ntest = aug_data(test)\ntest.drop('index', axis=1, inplace=True)\ntest = test.reset_index()\n\nprint(f\"Samples in train after augmentation: {len(train)}\")\nprint(f\"Samples in test after augmentation: {len(test)}\")\n\nprint(f\"Unique id in train: {len(train['id'].unique())}\")\nprint(f\"Unique sequences in train: {len(train['sequence'].unique())}\")\nprint(f\"Unique structure in train: {len(train['structure'].unique())}\")\nprint(f\"Unique predicted_loop_type in train: {len(train['predicted_loop_type'].unique())}\")\n\nprint(f\"Unique id in test: {len(test['id'].unique())}\")\nprint(f\"Unique sequences in test: {len(test['sequence'].unique())}\")\nprint(f\"Unique structure in test: {len(test['structure'].unique())}\")\nprint(f\"Unique predicted_loop_type in test: {len(test['predicted_loop_type'].unique())}\")", "Augmented samples: 48401\n" ] ], [ [ "## Auxiliary functions", "_____no_output_____" ] ], [ [ "def get_dataset(x, y=None, sample_weights=None, labeled=True, shuffled=True, repeated=False, batch_size=32, buffer_size=-1, seed=0):\n input_map = {'inputs_seq': x['sequence'], \n 'inputs_struct': x['structure'], \n 'inputs_loop': x['predicted_loop_type'], \n 'inputs_bpps_max': x['bpps_max'], \n 'inputs_bpps_sum': x['bpps_sum'], \n 'inputs_bpps_scaled': x['bpps_scaled']}\n \n if labeled:\n output_map 
= {'output_react': y['reactivity'], \n 'output_mg_ph': y['deg_Mg_pH10'], \n 'output_ph': y['deg_pH10'], \n 'output_mg_c': y['deg_Mg_50C'], \n 'output_c': y['deg_50C']}\n if sample_weights is not None:\n dataset = tf.data.Dataset.from_tensor_slices((input_map, output_map, sample_weights))\n else:\n dataset = tf.data.Dataset.from_tensor_slices((input_map, output_map))\n \n else:\n dataset = tf.data.Dataset.from_tensor_slices((input_map))\n \n if repeated:\n dataset = dataset.repeat()\n if shuffled:\n dataset = dataset.shuffle(2048, seed=seed)\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(buffer_size)\n \n return dataset\n\n\ndef get_dataset_sampling(x, y=None, sample_weights=None, labeled=True, shuffled=True, repeated=False, batch_size=32, buffer_size=-1, seed=0):\n input_map = {'inputs_seq': x['sequence'], \n 'inputs_struct': x['structure'], \n 'inputs_loop': x['predicted_loop_type'], \n 'inputs_bpps_max': x['bpps_max'], \n 'inputs_bpps_sum': x['bpps_sum'], \n 'inputs_bpps_scaled': x['bpps_scaled']}\n \n if labeled:\n output_map = {'output_react': y['reactivity'], \n 'output_mg_ph': y['deg_Mg_pH10'], \n 'output_ph': y['deg_pH10'], \n 'output_mg_c': y['deg_Mg_50C'], \n 'output_c': y['deg_50C']}\n if sample_weights is not None:\n dataset = tf.data.Dataset.from_tensor_slices((input_map, output_map, sample_weights))\n else:\n dataset = tf.data.Dataset.from_tensor_slices((input_map, output_map))\n else:\n dataset = tf.data.Dataset.from_tensor_slices((input_map))\n \n if repeated:\n dataset = dataset.repeat()\n if shuffled:\n dataset = dataset.shuffle(2048, seed=seed)\n \n return dataset", "_____no_output_____" ] ], [ [ "# Pre-process", "_____no_output_____" ] ], [ [ "# Add bpps as features\ntrain = add_bpps_features(train, database_base_path)\ntest = add_bpps_features(test, database_base_path)\n\n\nfeature_cols = ['sequence', 'structure', 'predicted_loop_type', 'bpps_max', 'bpps_sum', 'bpps_scaled']\npred_cols = ['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C', 'deg_50C']\nencoder_list = [token2int_seq, token2int_struct, token2int_loop, None, None, None]\n\npublic_test = test.query(\"seq_length == 107\").copy()\nprivate_test = test.query(\"seq_length == 130\").copy()\n\nx_test_public = get_features_dict(public_test, feature_cols, encoder_list, public_test.index)\nx_test_private = get_features_dict(private_test, feature_cols, encoder_list, private_test.index)\n\n# To use as stratified col\ntrain['signal_to_noise_int'] = train['signal_to_noise'].astype(int)", "_____no_output_____" ] ], [ [ "# Model", "_____no_output_____" ] ], [ [ "def model_fn(hidden_dim=384, dropout=.5, pred_len=68, n_outputs=5):\n inputs_seq = L.Input(shape=(None, 1), name='inputs_seq') \n inputs_struct = L.Input(shape=(None, 1), name='inputs_struct') \n inputs_loop = L.Input(shape=(None, 1), name='inputs_loop')\n inputs_bpps_max = L.Input(shape=(None, 1), name='inputs_bpps_max')\n inputs_bpps_sum = L.Input(shape=(None, 1), name='inputs_bpps_sum')\n inputs_bpps_scaled = L.Input(shape=(None, 1), name='inputs_bpps_scaled')\n\n def _one_hot(x, num_classes):\n return K.squeeze(K.one_hot(K.cast(x, 'uint8'), num_classes=num_classes), axis=2)\n\n ohe_seq = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_seq)}, input_shape=(None, 1))(inputs_seq)\n ohe_struct = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_struct)}, input_shape=(None, 1))(inputs_struct)\n ohe_loop = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_loop)}, input_shape=(None, 1))(inputs_loop)\n \n ### Encoder 
block\n # Conv block\n conv_seq = L.Conv1D(filters=64, kernel_size=3, padding='same')(ohe_seq)\n conv_struct = L.Conv1D(filters=64, kernel_size=3, padding='same')(ohe_struct)\n conv_loop = L.Conv1D(filters=64, kernel_size=3, padding='same')(ohe_loop)\n conv_bpps_max = L.Conv1D(filters=64, kernel_size=3, padding='same')(inputs_bpps_max)\n conv_bpps_sum = L.Conv1D(filters=64, kernel_size=3, padding='same')(inputs_bpps_sum)\n conv_bpps_scaled = L.Conv1D(filters=64, kernel_size=3, padding='same')(inputs_bpps_scaled)\n \n \n x_concat = L.concatenate([conv_seq, conv_struct, conv_loop, conv_bpps_max, \n conv_bpps_sum, conv_bpps_scaled], axis=-1, name='conv_concatenate')\n\n # Recurrent block\n encoder, encoder_state_f, encoder_state_b = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, \n return_state=True, kernel_initializer='orthogonal'), \n name='Encoder_RNN')(x_concat)\n \n \n ### Decoder block\n decoder = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'), \n name='Decoder')(encoder, initial_state=[encoder_state_f, encoder_state_b])\n \n # Since we are only making predictions on the first part of each sequence, we have to truncate it\n decoder_truncated = decoder[:, :pred_len]\n \n output_react = L.Dense(1, name='output_react')(decoder_truncated)\n output_mg_ph = L.Dense(1, name='output_mg_ph')(decoder_truncated)\n output_ph = L.Dense(1, name='output_ph')(decoder_truncated)\n output_mg_c = L.Dense(1, name='output_mg_c')(decoder_truncated)\n output_c = L.Dense(1, name='output_c')(decoder_truncated)\n \n \n model = Model(inputs=[inputs_seq, inputs_struct, inputs_loop, inputs_bpps_max, inputs_bpps_sum, inputs_bpps_scaled], \n outputs=[output_react, output_mg_ph, output_ph, output_mg_c, output_c])\n\n opt = optimizers.Adam(learning_rate=config['LEARNING_RATE'])\n model.compile(optimizer=opt, loss={'output_react': MCRMSE, \n 'output_mg_ph': MCRMSE, \n 'output_ph': MCRMSE, \n 'output_mg_c': MCRMSE, \n 'output_c': MCRMSE},\n loss_weights={'output_react': 5., \n 'output_mg_ph': 5., \n 'output_ph': 1., \n 'output_mg_c': 5., \n 'output_c': 1.})\n\n return model\n\nmodel = model_fn()\nmodel.summary()", "Model: \"functional_1\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninputs_seq (InputLayer) [(None, None, 1)] 0 \n__________________________________________________________________________________________________\ninputs_struct (InputLayer) [(None, None, 1)] 0 \n__________________________________________________________________________________________________\ninputs_loop (InputLayer) [(None, None, 1)] 0 \n__________________________________________________________________________________________________\nlambda (Lambda) (None, None, 4) 0 inputs_seq[0][0] \n__________________________________________________________________________________________________\nlambda_1 (Lambda) (None, None, 3) 0 inputs_struct[0][0] \n__________________________________________________________________________________________________\nlambda_2 (Lambda) (None, None, 7) 0 inputs_loop[0][0] \n__________________________________________________________________________________________________\ninputs_bpps_max (InputLayer) [(None, None, 1)] 0 
\n__________________________________________________________________________________________________\ninputs_bpps_sum (InputLayer) [(None, None, 1)] 0 \n__________________________________________________________________________________________________\ninputs_bpps_scaled (InputLayer) [(None, None, 1)] 0 \n__________________________________________________________________________________________________\nconv1d (Conv1D) (None, None, 64) 832 lambda[0][0] \n__________________________________________________________________________________________________\nconv1d_1 (Conv1D) (None, None, 64) 640 lambda_1[0][0] \n__________________________________________________________________________________________________\nconv1d_2 (Conv1D) (None, None, 64) 1408 lambda_2[0][0] \n__________________________________________________________________________________________________\nconv1d_3 (Conv1D) (None, None, 64) 256 inputs_bpps_max[0][0] \n__________________________________________________________________________________________________\nconv1d_4 (Conv1D) (None, None, 64) 256 inputs_bpps_sum[0][0] \n__________________________________________________________________________________________________\nconv1d_5 (Conv1D) (None, None, 64) 256 inputs_bpps_scaled[0][0] \n__________________________________________________________________________________________________\nconv_concatenate (Concatenate) (None, None, 384) 0 conv1d[0][0] \n conv1d_1[0][0] \n conv1d_2[0][0] \n conv1d_3[0][0] \n conv1d_4[0][0] \n conv1d_5[0][0] \n__________________________________________________________________________________________________\nEncoder_RNN (Bidirectional) [(None, None, 768), 1774080 conv_concatenate[0][0] \n__________________________________________________________________________________________________\nDecoder (Bidirectional) (None, None, 768) 2658816 Encoder_RNN[0][0] \n Encoder_RNN[0][1] \n Encoder_RNN[0][2] \n__________________________________________________________________________________________________\ntf_op_layer_strided_slice (Tens [(None, None, 768)] 0 Decoder[0][0] \n__________________________________________________________________________________________________\noutput_react (Dense) (None, None, 1) 769 tf_op_layer_strided_slice[0][0] \n__________________________________________________________________________________________________\noutput_mg_ph (Dense) (None, None, 1) 769 tf_op_layer_strided_slice[0][0] \n__________________________________________________________________________________________________\noutput_ph (Dense) (None, None, 1) 769 tf_op_layer_strided_slice[0][0] \n__________________________________________________________________________________________________\noutput_mg_c (Dense) (None, None, 1) 769 tf_op_layer_strided_slice[0][0] \n__________________________________________________________________________________________________\noutput_c (Dense) (None, None, 1) 769 tf_op_layer_strided_slice[0][0] \n==================================================================================================\nTotal params: 4,440,389\nTrainable params: 4,440,389\nNon-trainable params: 0\n__________________________________________________________________________________________________\n" ] ], [ [ "# Training", "_____no_output_____" ] ], [ [ "AUTO = tf.data.experimental.AUTOTUNE\nskf = GroupKFold(n_splits=config['N_FOLDS'])\nhistory_list = []\n\noof = train[['id', 'SN_filter', 'signal_to_noise'] + pred_cols].copy()\noof_preds = np.zeros((len(train), 68, len(pred_cols)))\ntest_public_preds = 
np.zeros((len(public_test), config['PB_SEQ_LEN'], len(pred_cols)))\ntest_private_preds = np.zeros((len(private_test), config['PV_SEQ_LEN'], len(pred_cols)))\n\nfor fold,(train_idx, valid_idx) in enumerate(skf.split(train, train['signal_to_noise_int'], train['id'])):\n if fold >= config['N_USED_FOLDS']:\n break\n print(f'\\nFOLD: {fold+1}')\n \n # Create clean and noisy datasets\n valid_clean_idxs = np.intersect1d(train[(train['SN_filter'] == 1) & \n (train['augmented'] == False)].index, valid_idx)\n \n ### Create datasets\n# x_train = get_features_dict(train, feature_cols, encoder_list, train_idx)\n# y_train = get_targets_dict(train, pred_cols, train_idx)\n# w_train = np.log(train.iloc[train_idx]['signal_to_noise'].values+1.2)+1\n x_valid = get_features_dict(train, feature_cols, encoder_list, valid_clean_idxs)\n y_valid = get_targets_dict(train, pred_cols, valid_clean_idxs)\n w_valid = np.log(train.iloc[valid_clean_idxs]['signal_to_noise'].values+1.2)+1\n \n# train_ds = get_dataset(x_train, y_train, w_train, labeled=True, shuffled=True, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)\n valid_ds = get_dataset(x_valid, y_valid, w_valid, labeled=True, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)\n oof_ds = get_dataset(get_features_dict(train, feature_cols, encoder_list, valid_idx), labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)\n test_public_ds = get_dataset(x_test_public, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)\n test_private_ds = get_dataset(x_test_private, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)\n\n \n # Create clean and noisy datasets\n normal_idxs = np.intersect1d(train[train['augmented'] == False].index, train_idx)\n x_train_normal = get_features_dict(train, feature_cols, encoder_list, normal_idxs)\n y_train_normal = get_targets_dict(train, pred_cols, normal_idxs)\n w_train_normal = np.log(train.iloc[normal_idxs]['signal_to_noise'].values+1.2)+1\n normal_ds = get_dataset_sampling(x_train_normal, y_train_normal, w_train_normal, labeled=True, shuffled=True, \n repeated=True, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)\n \n augmented_idxs = np.intersect1d(train[train['augmented'] == True].index, train_idx)\n x_train_augmented = get_features_dict(train, feature_cols, encoder_list, augmented_idxs)\n y_train_augmented = get_targets_dict(train, pred_cols, augmented_idxs)\n w_train_augmented = np.log(train.iloc[augmented_idxs]['signal_to_noise'].values+1.2)+1\n augmented_ds = get_dataset_sampling(x_train_augmented, y_train_augmented, w_train_augmented, labeled=True, shuffled=True, \n repeated=True, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)\n\n # Resampled TF Dataset\n resampled_ds = tf.data.experimental.sample_from_datasets([normal_ds, augmented_ds], weights=[.5, .5])\n resampled_ds = resampled_ds.batch(config['BATCH_SIZE']).prefetch(AUTO)\n \n\n ### Model\n K.clear_session()\n model = model_fn()\n\n model_path = f'model_{fold}.h5'\n es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], restore_best_weights=True, verbose=1)\n rlrp = ReduceLROnPlateau(monitor='val_loss', mode='min', factor=0.1, patience=5, verbose=1)\n \n ### Train\n history = model.fit(resampled_ds,\n validation_data=valid_ds,\n callbacks=[es, rlrp],\n epochs=config['EPOCHS'],\n batch_size=config['BATCH_SIZE'],\n 
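# Descriptive note (added): steps_per_epoch must be given explicitly because normal_ds and augmented_ds\n                         # are built with repeated=True and sampled 50/50 via sample_from_datasets, so the resampled stream\n                         # never ends on its own; len(normal_idxs) // (BATCH_SIZE * .5) steps lets each non-augmented\n                         # example be drawn roughly once per epoch on average.\n                         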
steps_per_epoch=int(len(normal_idxs)//(config['BATCH_SIZE']* .5)),\n verbose=2).history\n \n history_list.append(history)\n # Save last model weights\n model.save_weights(model_path)\n\n ### Inference\n oof_ds_preds = np.array(model.predict(oof_ds)).reshape((len(pred_cols), len(valid_idx), 68)).transpose((1, 2, 0))\n oof_preds[valid_idx] = oof_ds_preds\n \n # Short sequence (public test)\n model = model_fn(pred_len=config['PB_SEQ_LEN'])\n model.load_weights(model_path)\n test_public_ds_preds = np.array(model.predict(test_public_ds)).reshape((len(pred_cols), len(public_test), \n config['PB_SEQ_LEN'])).transpose((1, 2, 0))\n test_public_preds += test_public_ds_preds * (1 / config['N_USED_FOLDS'])\n \n # Long sequence (private test)\n model = model_fn(pred_len=config['PV_SEQ_LEN'])\n model.load_weights(model_path)\n test_private_ds_preds = np.array(model.predict(test_private_ds)).reshape((len(pred_cols), len(private_test), \n config['PV_SEQ_LEN'])).transpose((1, 2, 0))\n test_private_preds += test_private_ds_preds * (1 / config['N_USED_FOLDS'])", "\nFOLD: 1\nEpoch 1/70\n120/120 - 10s - loss: 18.6620 - output_react_loss: 0.9313 - output_mg_ph_loss: 1.1720 - output_ph_loss: 1.3335 - output_mg_c_loss: 1.1383 - output_c_loss: 1.1205 - val_loss: 14.3357 - val_output_react_loss: 0.7539 - val_output_mg_ph_loss: 0.9631 - val_output_ph_loss: 0.8820 - val_output_mg_c_loss: 0.8252 - val_output_c_loss: 0.7429\nEpoch 2/70\n120/120 - 7s - loss: 15.8910 - output_react_loss: 0.8057 - output_mg_ph_loss: 0.9713 - output_ph_loss: 1.1599 - output_mg_c_loss: 0.9682 - output_c_loss: 1.0051 - val_loss: 12.9885 - val_output_react_loss: 0.6959 - val_output_mg_ph_loss: 0.8660 - val_output_ph_loss: 0.8151 - val_output_mg_c_loss: 0.7359 - val_output_c_loss: 0.6842\nEpoch 3/70\n120/120 - 7s - loss: 14.8779 - output_react_loss: 0.7727 - output_mg_ph_loss: 0.8991 - output_ph_loss: 1.1199 - output_mg_c_loss: 0.8910 - output_c_loss: 0.9443 - val_loss: 12.1584 - val_output_react_loss: 0.6567 - val_output_mg_ph_loss: 0.8121 - val_output_ph_loss: 0.7636 - val_output_mg_c_loss: 0.6836 - val_output_c_loss: 0.6326\nEpoch 4/70\n120/120 - 7s - loss: 13.7708 - output_react_loss: 0.7326 - output_mg_ph_loss: 0.8269 - output_ph_loss: 1.0171 - output_mg_c_loss: 0.8151 - output_c_loss: 0.8802 - val_loss: 11.7457 - val_output_react_loss: 0.6443 - val_output_mg_ph_loss: 0.7815 - val_output_ph_loss: 0.7434 - val_output_mg_c_loss: 0.6496 - val_output_c_loss: 0.6250\nEpoch 5/70\n120/120 - 7s - loss: 13.4283 - output_react_loss: 0.7160 - output_mg_ph_loss: 0.8037 - output_ph_loss: 1.0180 - output_mg_c_loss: 0.7914 - output_c_loss: 0.8552 - val_loss: 11.2961 - val_output_react_loss: 0.6141 - val_output_mg_ph_loss: 0.7491 - val_output_ph_loss: 0.7326 - val_output_mg_c_loss: 0.6296 - val_output_c_loss: 0.5994\nEpoch 6/70\n120/120 - 7s - loss: 12.7022 - output_react_loss: 0.6705 - output_mg_ph_loss: 0.7695 - output_ph_loss: 0.9567 - output_mg_c_loss: 0.7454 - output_c_loss: 0.8186 - val_loss: 11.0512 - val_output_react_loss: 0.5957 - val_output_mg_ph_loss: 0.7270 - val_output_ph_loss: 0.7162 - val_output_mg_c_loss: 0.6258 - val_output_c_loss: 0.5925\nEpoch 7/70\n120/120 - 7s - loss: 12.4388 - output_react_loss: 0.6636 - output_mg_ph_loss: 0.7498 - output_ph_loss: 0.9363 - output_mg_c_loss: 0.7254 - output_c_loss: 0.8091 - val_loss: 10.8438 - val_output_react_loss: 0.5891 - val_output_mg_ph_loss: 0.7226 - val_output_ph_loss: 0.7002 - val_output_mg_c_loss: 0.5999 - val_output_c_loss: 0.5856\nEpoch 8/70\n120/120 - 7s - loss: 12.1760 - 
output_react_loss: 0.6506 - output_mg_ph_loss: 0.7309 - output_ph_loss: 0.9339 - output_mg_c_loss: 0.7068 - output_c_loss: 0.8003 - val_loss: 10.6062 - val_output_react_loss: 0.5760 - val_output_mg_ph_loss: 0.7119 - val_output_ph_loss: 0.6786 - val_output_mg_c_loss: 0.5817 - val_output_c_loss: 0.5801\nEpoch 9/70\n120/120 - 7s - loss: 12.1603 - output_react_loss: 0.6538 - output_mg_ph_loss: 0.7215 - output_ph_loss: 0.9348 - output_mg_c_loss: 0.7056 - output_c_loss: 0.8209 - val_loss: 10.5839 - val_output_react_loss: 0.5778 - val_output_mg_ph_loss: 0.7026 - val_output_ph_loss: 0.6764 - val_output_mg_c_loss: 0.5865 - val_output_c_loss: 0.5729\nEpoch 10/70\n120/120 - 7s - loss: 11.8100 - output_react_loss: 0.6363 - output_mg_ph_loss: 0.7012 - output_ph_loss: 0.9129 - output_mg_c_loss: 0.6819 - output_c_loss: 0.8004 - val_loss: 10.4837 - val_output_react_loss: 0.5711 - val_output_mg_ph_loss: 0.6950 - val_output_ph_loss: 0.6708 - val_output_mg_c_loss: 0.5837 - val_output_c_loss: 0.5641\nEpoch 11/70\n120/120 - 7s - loss: 10.9700 - output_react_loss: 0.5923 - output_mg_ph_loss: 0.6573 - output_ph_loss: 0.8520 - output_mg_c_loss: 0.6260 - output_c_loss: 0.7400 - val_loss: 10.3323 - val_output_react_loss: 0.5586 - val_output_mg_ph_loss: 0.6911 - val_output_ph_loss: 0.6680 - val_output_mg_c_loss: 0.5709 - val_output_c_loss: 0.5616\nEpoch 12/70\n120/120 - 7s - loss: 11.4058 - output_react_loss: 0.6164 - output_mg_ph_loss: 0.6698 - output_ph_loss: 0.8993 - output_mg_c_loss: 0.6588 - output_c_loss: 0.7814 - val_loss: 10.1574 - val_output_react_loss: 0.5556 - val_output_mg_ph_loss: 0.6747 - val_output_ph_loss: 0.6556 - val_output_mg_c_loss: 0.5585 - val_output_c_loss: 0.5578\nEpoch 13/70\n120/120 - 7s - loss: 10.7323 - output_react_loss: 0.5757 - output_mg_ph_loss: 0.6396 - output_ph_loss: 0.8525 - output_mg_c_loss: 0.6126 - output_c_loss: 0.7401 - val_loss: 10.1492 - val_output_react_loss: 0.5509 - val_output_mg_ph_loss: 0.6762 - val_output_ph_loss: 0.6525 - val_output_mg_c_loss: 0.5606 - val_output_c_loss: 0.5578\nEpoch 14/70\n120/120 - 7s - loss: 10.7855 - output_react_loss: 0.5822 - output_mg_ph_loss: 0.6322 - output_ph_loss: 0.8766 - output_mg_c_loss: 0.6157 - output_c_loss: 0.7586 - val_loss: 10.1884 - val_output_react_loss: 0.5543 - val_output_mg_ph_loss: 0.6717 - val_output_ph_loss: 0.6526 - val_output_mg_c_loss: 0.5690 - val_output_c_loss: 0.5612\nEpoch 15/70\n120/120 - 7s - loss: 10.4015 - output_react_loss: 0.5641 - output_mg_ph_loss: 0.6083 - output_ph_loss: 0.8378 - output_mg_c_loss: 0.5936 - output_c_loss: 0.7337 - val_loss: 10.0834 - val_output_react_loss: 0.5472 - val_output_mg_ph_loss: 0.6672 - val_output_ph_loss: 0.6559 - val_output_mg_c_loss: 0.5607 - val_output_c_loss: 0.5520\nEpoch 16/70\n120/120 - 7s - loss: 10.3504 - output_react_loss: 0.5633 - output_mg_ph_loss: 0.6017 - output_ph_loss: 0.8430 - output_mg_c_loss: 0.5878 - output_c_loss: 0.7433 - val_loss: 10.1068 - val_output_react_loss: 0.5420 - val_output_mg_ph_loss: 0.6762 - val_output_ph_loss: 0.6516 - val_output_mg_c_loss: 0.5624 - val_output_c_loss: 0.5518\nEpoch 17/70\n120/120 - 8s - loss: 10.0736 - output_react_loss: 0.5406 - output_mg_ph_loss: 0.5890 - output_ph_loss: 0.8217 - output_mg_c_loss: 0.5753 - output_c_loss: 0.7268 - val_loss: 10.1781 - val_output_react_loss: 0.5478 - val_output_mg_ph_loss: 0.6766 - val_output_ph_loss: 0.6571 - val_output_mg_c_loss: 0.5683 - val_output_c_loss: 0.5577\nEpoch 18/70\n120/120 - 7s - loss: 9.9195 - output_react_loss: 0.5341 - output_mg_ph_loss: 0.5811 - output_ph_loss: 0.8086 - 
output_mg_c_loss: 0.5633 - output_c_loss: 0.7183 - val_loss: 10.0623 - val_output_react_loss: 0.5485 - val_output_mg_ph_loss: 0.6704 - val_output_ph_loss: 0.6530 - val_output_mg_c_loss: 0.5539 - val_output_c_loss: 0.5455\nEpoch 19/70\n120/120 - 7s - loss: 10.0406 - output_react_loss: 0.5371 - output_mg_ph_loss: 0.5767 - output_ph_loss: 0.8553 - output_mg_c_loss: 0.5718 - output_c_loss: 0.7574 - val_loss: 10.0322 - val_output_react_loss: 0.5447 - val_output_mg_ph_loss: 0.6705 - val_output_ph_loss: 0.6414 - val_output_mg_c_loss: 0.5528 - val_output_c_loss: 0.5512\nEpoch 20/70\n120/120 - 7s - loss: 9.7088 - output_react_loss: 0.5248 - output_mg_ph_loss: 0.5576 - output_ph_loss: 0.8132 - output_mg_c_loss: 0.5535 - output_c_loss: 0.7165 - val_loss: 9.9585 - val_output_react_loss: 0.5395 - val_output_mg_ph_loss: 0.6666 - val_output_ph_loss: 0.6394 - val_output_mg_c_loss: 0.5483 - val_output_c_loss: 0.5471\nEpoch 21/70\n120/120 - 7s - loss: 9.3199 - output_react_loss: 0.5067 - output_mg_ph_loss: 0.5355 - output_ph_loss: 0.7839 - output_mg_c_loss: 0.5258 - output_c_loss: 0.6962 - val_loss: 9.9458 - val_output_react_loss: 0.5408 - val_output_mg_ph_loss: 0.6609 - val_output_ph_loss: 0.6373 - val_output_mg_c_loss: 0.5499 - val_output_c_loss: 0.5508\nEpoch 22/70\n120/120 - 7s - loss: 9.5061 - output_react_loss: 0.5053 - output_mg_ph_loss: 0.5445 - output_ph_loss: 0.8107 - output_mg_c_loss: 0.5448 - output_c_loss: 0.7222 - val_loss: 10.0118 - val_output_react_loss: 0.5463 - val_output_mg_ph_loss: 0.6648 - val_output_ph_loss: 0.6441 - val_output_mg_c_loss: 0.5534 - val_output_c_loss: 0.5455\nEpoch 23/70\n120/120 - 7s - loss: 9.6042 - output_react_loss: 0.5123 - output_mg_ph_loss: 0.5470 - output_ph_loss: 0.8258 - output_mg_c_loss: 0.5491 - output_c_loss: 0.7364 - val_loss: 10.0097 - val_output_react_loss: 0.5418 - val_output_mg_ph_loss: 0.6665 - val_output_ph_loss: 0.6435 - val_output_mg_c_loss: 0.5554 - val_output_c_loss: 0.5475\nEpoch 24/70\n120/120 - 7s - loss: 9.1606 - output_react_loss: 0.4859 - output_mg_ph_loss: 0.5298 - output_ph_loss: 0.7883 - output_mg_c_loss: 0.5177 - output_c_loss: 0.7048 - val_loss: 10.0036 - val_output_react_loss: 0.5427 - val_output_mg_ph_loss: 0.6657 - val_output_ph_loss: 0.6437 - val_output_mg_c_loss: 0.5541 - val_output_c_loss: 0.5469\nEpoch 25/70\n120/120 - 7s - loss: 8.9026 - output_react_loss: 0.4760 - output_mg_ph_loss: 0.5109 - output_ph_loss: 0.7682 - output_mg_c_loss: 0.5026 - output_c_loss: 0.6868 - val_loss: 9.8838 - val_output_react_loss: 0.5381 - val_output_mg_ph_loss: 0.6547 - val_output_ph_loss: 0.6351 - val_output_mg_c_loss: 0.5481 - val_output_c_loss: 0.5443\nEpoch 26/70\n120/120 - 8s - loss: 9.3255 - output_react_loss: 0.4948 - output_mg_ph_loss: 0.5264 - output_ph_loss: 0.8126 - output_mg_c_loss: 0.5330 - output_c_loss: 0.7418 - val_loss: 9.9545 - val_output_react_loss: 0.5431 - val_output_mg_ph_loss: 0.6595 - val_output_ph_loss: 0.6408 - val_output_mg_c_loss: 0.5504 - val_output_c_loss: 0.5487\nEpoch 27/70\n120/120 - 7s - loss: 8.6773 - output_react_loss: 0.4650 - output_mg_ph_loss: 0.4974 - output_ph_loss: 0.7520 - output_mg_c_loss: 0.4891 - output_c_loss: 0.6677 - val_loss: 9.9582 - val_output_react_loss: 0.5437 - val_output_mg_ph_loss: 0.6597 - val_output_ph_loss: 0.6379 - val_output_mg_c_loss: 0.5512 - val_output_c_loss: 0.5470\nEpoch 28/70\n120/120 - 7s - loss: 8.8907 - output_react_loss: 0.4772 - output_mg_ph_loss: 0.4977 - output_ph_loss: 0.7789 - output_mg_c_loss: 0.5087 - output_c_loss: 0.6938 - val_loss: 9.9048 - val_output_react_loss: 
0.5392 - val_output_mg_ph_loss: 0.6572 - val_output_ph_loss: 0.6403 - val_output_mg_c_loss: 0.5472 - val_output_c_loss: 0.5462\nEpoch 29/70\n120/120 - 7s - loss: 8.7748 - output_react_loss: 0.4642 - output_mg_ph_loss: 0.4959 - output_ph_loss: 0.7773 - output_mg_c_loss: 0.4994 - output_c_loss: 0.7000 - val_loss: 9.8960 - val_output_react_loss: 0.5385 - val_output_mg_ph_loss: 0.6601 - val_output_ph_loss: 0.6359 - val_output_mg_c_loss: 0.5452 - val_output_c_loss: 0.5410\nEpoch 30/70\n\nEpoch 00030: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.\n120/120 - 7s - loss: 8.7585 - output_react_loss: 0.4608 - output_mg_ph_loss: 0.4976 - output_ph_loss: 0.7827 - output_mg_c_loss: 0.4977 - output_c_loss: 0.6948 - val_loss: 9.9865 - val_output_react_loss: 0.5436 - val_output_mg_ph_loss: 0.6649 - val_output_ph_loss: 0.6397 - val_output_mg_c_loss: 0.5514 - val_output_c_loss: 0.5475\nEpoch 31/70\n120/120 - 7s - loss: 8.6211 - output_react_loss: 0.4559 - output_mg_ph_loss: 0.4804 - output_ph_loss: 0.7786 - output_mg_c_loss: 0.4920 - output_c_loss: 0.7014 - val_loss: 9.8034 - val_output_react_loss: 0.5313 - val_output_mg_ph_loss: 0.6528 - val_output_ph_loss: 0.6298 - val_output_mg_c_loss: 0.5429 - val_output_c_loss: 0.5386\nEpoch 32/70\n120/120 - 7s - loss: 8.1961 - output_react_loss: 0.4292 - output_mg_ph_loss: 0.4607 - output_ph_loss: 0.7546 - output_mg_c_loss: 0.4621 - output_c_loss: 0.6819 - val_loss: 9.7844 - val_output_react_loss: 0.5304 - val_output_mg_ph_loss: 0.6513 - val_output_ph_loss: 0.6282 - val_output_mg_c_loss: 0.5421 - val_output_c_loss: 0.5372\nEpoch 33/70\n120/120 - 7s - loss: 7.9926 - output_react_loss: 0.4206 - output_mg_ph_loss: 0.4521 - output_ph_loss: 0.7250 - output_mg_c_loss: 0.4500 - output_c_loss: 0.6541 - val_loss: 9.7821 - val_output_react_loss: 0.5311 - val_output_mg_ph_loss: 0.6515 - val_output_ph_loss: 0.6269 - val_output_mg_c_loss: 0.5411 - val_output_c_loss: 0.5365\nEpoch 34/70\n120/120 - 7s - loss: 8.1842 - output_react_loss: 0.4307 - output_mg_ph_loss: 0.4554 - output_ph_loss: 0.7515 - output_mg_c_loss: 0.4629 - output_c_loss: 0.6874 - val_loss: 9.7633 - val_output_react_loss: 0.5303 - val_output_mg_ph_loss: 0.6507 - val_output_ph_loss: 0.6247 - val_output_mg_c_loss: 0.5392 - val_output_c_loss: 0.5378\nEpoch 35/70\n120/120 - 7s - loss: 8.0388 - output_react_loss: 0.4232 - output_mg_ph_loss: 0.4471 - output_ph_loss: 0.7272 - output_mg_c_loss: 0.4591 - output_c_loss: 0.6647 - val_loss: 9.7649 - val_output_react_loss: 0.5296 - val_output_mg_ph_loss: 0.6505 - val_output_ph_loss: 0.6259 - val_output_mg_c_loss: 0.5403 - val_output_c_loss: 0.5366\nEpoch 36/70\n120/120 - 7s - loss: 8.1013 - output_react_loss: 0.4301 - output_mg_ph_loss: 0.4475 - output_ph_loss: 0.7427 - output_mg_c_loss: 0.4594 - output_c_loss: 0.6735 - val_loss: 9.7381 - val_output_react_loss: 0.5280 - val_output_mg_ph_loss: 0.6495 - val_output_ph_loss: 0.6251 - val_output_mg_c_loss: 0.5381 - val_output_c_loss: 0.5352\nEpoch 37/70\n120/120 - 7s - loss: 8.1432 - output_react_loss: 0.4256 - output_mg_ph_loss: 0.4544 - output_ph_loss: 0.7600 - output_mg_c_loss: 0.4600 - output_c_loss: 0.6834 - val_loss: 9.7647 - val_output_react_loss: 0.5297 - val_output_mg_ph_loss: 0.6519 - val_output_ph_loss: 0.6262 - val_output_mg_c_loss: 0.5389 - val_output_c_loss: 0.5363\nEpoch 38/70\n120/120 - 7s - loss: 7.9478 - output_react_loss: 0.4177 - output_mg_ph_loss: 0.4433 - output_ph_loss: 0.7292 - output_mg_c_loss: 0.4513 - output_c_loss: 0.6568 - val_loss: 9.7804 - val_output_react_loss: 0.5295 - 
val_output_mg_ph_loss: 0.6524 - val_output_ph_loss: 0.6277 - val_output_mg_c_loss: 0.5411 - val_output_c_loss: 0.5373\nEpoch 39/70\n120/120 - 7s - loss: 8.1057 - output_react_loss: 0.4270 - output_mg_ph_loss: 0.4487 - output_ph_loss: 0.7542 - output_mg_c_loss: 0.4573 - output_c_loss: 0.6867 - val_loss: 9.7667 - val_output_react_loss: 0.5293 - val_output_mg_ph_loss: 0.6517 - val_output_ph_loss: 0.6254 - val_output_mg_c_loss: 0.5399 - val_output_c_loss: 0.5369\nEpoch 40/70\n120/120 - 7s - loss: 7.8798 - output_react_loss: 0.4133 - output_mg_ph_loss: 0.4397 - output_ph_loss: 0.7286 - output_mg_c_loss: 0.4430 - output_c_loss: 0.6712 - val_loss: 9.7637 - val_output_react_loss: 0.5294 - val_output_mg_ph_loss: 0.6509 - val_output_ph_loss: 0.6254 - val_output_mg_c_loss: 0.5401 - val_output_c_loss: 0.5363\nEpoch 41/70\n\nEpoch 00041: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.\n120/120 - 7s - loss: 7.9011 - output_react_loss: 0.4141 - output_mg_ph_loss: 0.4391 - output_ph_loss: 0.7284 - output_mg_c_loss: 0.4482 - output_c_loss: 0.6654 - val_loss: 9.7632 - val_output_react_loss: 0.5295 - val_output_mg_ph_loss: 0.6505 - val_output_ph_loss: 0.6260 - val_output_mg_c_loss: 0.5401 - val_output_c_loss: 0.5369\nEpoch 42/70\n120/120 - 7s - loss: 8.0774 - output_react_loss: 0.4249 - output_mg_ph_loss: 0.4460 - output_ph_loss: 0.7488 - output_mg_c_loss: 0.4584 - output_c_loss: 0.6823 - val_loss: 9.7590 - val_output_react_loss: 0.5295 - val_output_mg_ph_loss: 0.6501 - val_output_ph_loss: 0.6250 - val_output_mg_c_loss: 0.5399 - val_output_c_loss: 0.5366\nEpoch 43/70\n120/120 - 7s - loss: 8.1525 - output_react_loss: 0.4299 - output_mg_ph_loss: 0.4517 - output_ph_loss: 0.7632 - output_mg_c_loss: 0.4605 - output_c_loss: 0.6791 - val_loss: 9.7556 - val_output_react_loss: 0.5293 - val_output_mg_ph_loss: 0.6500 - val_output_ph_loss: 0.6254 - val_output_mg_c_loss: 0.5395 - val_output_c_loss: 0.5363\nEpoch 44/70\n120/120 - 7s - loss: 7.5864 - output_react_loss: 0.3914 - output_mg_ph_loss: 0.4279 - output_ph_loss: 0.7001 - output_mg_c_loss: 0.4300 - output_c_loss: 0.6393 - val_loss: 9.7597 - val_output_react_loss: 0.5293 - val_output_mg_ph_loss: 0.6506 - val_output_ph_loss: 0.6255 - val_output_mg_c_loss: 0.5396 - val_output_c_loss: 0.5364\nEpoch 45/70\n120/120 - 7s - loss: 8.0218 - output_react_loss: 0.4204 - output_mg_ph_loss: 0.4437 - output_ph_loss: 0.7415 - output_mg_c_loss: 0.4568 - output_c_loss: 0.6756 - val_loss: 9.7622 - val_output_react_loss: 0.5293 - val_output_mg_ph_loss: 0.6510 - val_output_ph_loss: 0.6255 - val_output_mg_c_loss: 0.5398 - val_output_c_loss: 0.5364\nEpoch 46/70\nRestoring model weights from the end of the best epoch.\n\nEpoch 00046: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.\n120/120 - 7s - loss: 8.0065 - output_react_loss: 0.4199 - output_mg_ph_loss: 0.4413 - output_ph_loss: 0.7542 - output_mg_c_loss: 0.4517 - output_c_loss: 0.6876 - val_loss: 9.7554 - val_output_react_loss: 0.5288 - val_output_mg_ph_loss: 0.6505 - val_output_ph_loss: 0.6252 - val_output_mg_c_loss: 0.5395 - val_output_c_loss: 0.5360\nEpoch 00046: early stopping\n\nFOLD: 2\nEpoch 1/70\n120/120 - 9s - loss: 18.7041 - output_react_loss: 0.9418 - output_mg_ph_loss: 1.1740 - output_ph_loss: 1.3380 - output_mg_c_loss: 1.1323 - output_c_loss: 1.1254 - val_loss: 14.1889 - val_output_react_loss: 0.7628 - val_output_mg_ph_loss: 0.9417 - val_output_ph_loss: 0.8557 - val_output_mg_c_loss: 0.8159 - val_output_c_loss: 0.7311\nEpoch 2/70\n120/120 - 7s - loss: 15.8090 - output_react_loss: 
0.8020 - output_mg_ph_loss: 0.9723 - output_ph_loss: 1.1313 - output_mg_c_loss: 0.9661 - output_c_loss: 0.9758 - val_loss: 12.7565 - val_output_react_loss: 0.6838 - val_output_mg_ph_loss: 0.8556 - val_output_ph_loss: 0.7999 - val_output_mg_c_loss: 0.7176 - val_output_c_loss: 0.6719\nEpoch 3/70\n120/120 - 7s - loss: 14.7666 - output_react_loss: 0.7613 - output_mg_ph_loss: 0.9013 - output_ph_loss: 1.0907 - output_mg_c_loss: 0.8807 - output_c_loss: 0.9594 - val_loss: 11.6737 - val_output_react_loss: 0.6494 - val_output_mg_ph_loss: 0.7781 - val_output_ph_loss: 0.7240 - val_output_mg_c_loss: 0.6412 - val_output_c_loss: 0.6064\nEpoch 4/70\n120/120 - 7s - loss: 14.0367 - output_react_loss: 0.7359 - output_mg_ph_loss: 0.8434 - output_ph_loss: 1.0366 - output_mg_c_loss: 0.8392 - output_c_loss: 0.9081 - val_loss: 11.1388 - val_output_react_loss: 0.6100 - val_output_mg_ph_loss: 0.7462 - val_output_ph_loss: 0.7010 - val_output_mg_c_loss: 0.6155 - val_output_c_loss: 0.5798\nEpoch 5/70\n120/120 - 7s - loss: 13.5933 - output_react_loss: 0.7152 - output_mg_ph_loss: 0.8208 - output_ph_loss: 1.0043 - output_mg_c_loss: 0.8051 - output_c_loss: 0.8836 - val_loss: 10.6700 - val_output_react_loss: 0.5934 - val_output_mg_ph_loss: 0.7136 - val_output_ph_loss: 0.6694 - val_output_mg_c_loss: 0.5795 - val_output_c_loss: 0.5682\nEpoch 6/70\n120/120 - 7s - loss: 13.0903 - output_react_loss: 0.6948 - output_mg_ph_loss: 0.7879 - output_ph_loss: 0.9763 - output_mg_c_loss: 0.7706 - output_c_loss: 0.8478 - val_loss: 10.3572 - val_output_react_loss: 0.5682 - val_output_mg_ph_loss: 0.6935 - val_output_ph_loss: 0.6589 - val_output_mg_c_loss: 0.5682 - val_output_c_loss: 0.5489\nEpoch 7/70\n120/120 - 7s - loss: 12.6671 - output_react_loss: 0.6752 - output_mg_ph_loss: 0.7574 - output_ph_loss: 0.9529 - output_mg_c_loss: 0.7412 - output_c_loss: 0.8448 - val_loss: 10.2552 - val_output_react_loss: 0.5658 - val_output_mg_ph_loss: 0.6869 - val_output_ph_loss: 0.6545 - val_output_mg_c_loss: 0.5589 - val_output_c_loss: 0.5426\nEpoch 8/70\n120/120 - 7s - loss: 12.5056 - output_react_loss: 0.6737 - output_mg_ph_loss: 0.7448 - output_ph_loss: 0.9397 - output_mg_c_loss: 0.7287 - output_c_loss: 0.8293 - val_loss: 10.1260 - val_output_react_loss: 0.5556 - val_output_mg_ph_loss: 0.6748 - val_output_ph_loss: 0.6586 - val_output_mg_c_loss: 0.5528 - val_output_c_loss: 0.5511\nEpoch 9/70\n120/120 - 7s - loss: 11.8299 - output_react_loss: 0.6306 - output_mg_ph_loss: 0.7107 - output_ph_loss: 0.9056 - output_mg_c_loss: 0.6863 - output_c_loss: 0.7861 - val_loss: 9.8854 - val_output_react_loss: 0.5430 - val_output_mg_ph_loss: 0.6663 - val_output_ph_loss: 0.6318 - val_output_mg_c_loss: 0.5356 - val_output_c_loss: 0.5295\nEpoch 10/70\n120/120 - 7s - loss: 11.7658 - output_react_loss: 0.6313 - output_mg_ph_loss: 0.7038 - output_ph_loss: 0.8879 - output_mg_c_loss: 0.6803 - output_c_loss: 0.8009 - val_loss: 9.8007 - val_output_react_loss: 0.5306 - val_output_mg_ph_loss: 0.6599 - val_output_ph_loss: 0.6241 - val_output_mg_c_loss: 0.5390 - val_output_c_loss: 0.5294\nEpoch 11/70\n120/120 - 7s - loss: 11.6896 - output_react_loss: 0.6242 - output_mg_ph_loss: 0.6932 - output_ph_loss: 0.9067 - output_mg_c_loss: 0.6806 - output_c_loss: 0.7925 - val_loss: 9.9170 - val_output_react_loss: 0.5340 - val_output_mg_ph_loss: 0.6708 - val_output_ph_loss: 0.6300 - val_output_mg_c_loss: 0.5478 - val_output_c_loss: 0.5235\nEpoch 12/70\n120/120 - 7s - loss: 11.2099 - output_react_loss: 0.6098 - output_mg_ph_loss: 0.6598 - output_ph_loss: 0.8649 - output_mg_c_loss: 0.6408 - 
output_c_loss: 0.7925 - val_loss: 9.8075 - val_output_react_loss: 0.5239 - val_output_mg_ph_loss: 0.6611 - val_output_ph_loss: 0.6321 - val_output_mg_c_loss: 0.5441 - val_output_c_loss: 0.5300\nEpoch 13/70\n120/120 - 7s - loss: 10.6938 - output_react_loss: 0.5731 - output_mg_ph_loss: 0.6335 - output_ph_loss: 0.8300 - output_mg_c_loss: 0.6171 - output_c_loss: 0.7452 - val_loss: 9.8230 - val_output_react_loss: 0.5225 - val_output_mg_ph_loss: 0.6686 - val_output_ph_loss: 0.6196 - val_output_mg_c_loss: 0.5438 - val_output_c_loss: 0.5286\nEpoch 14/70\n120/120 - 7s - loss: 10.8829 - output_react_loss: 0.5893 - output_mg_ph_loss: 0.6374 - output_ph_loss: 0.8737 - output_mg_c_loss: 0.6215 - output_c_loss: 0.7684 - val_loss: 9.5945 - val_output_react_loss: 0.5167 - val_output_mg_ph_loss: 0.6480 - val_output_ph_loss: 0.6168 - val_output_mg_c_loss: 0.5262 - val_output_c_loss: 0.5228\nEpoch 15/70\n120/120 - 7s - loss: 10.5563 - output_react_loss: 0.5698 - output_mg_ph_loss: 0.6172 - output_ph_loss: 0.8447 - output_mg_c_loss: 0.6046 - output_c_loss: 0.7534 - val_loss: 9.5900 - val_output_react_loss: 0.5114 - val_output_mg_ph_loss: 0.6492 - val_output_ph_loss: 0.6197 - val_output_mg_c_loss: 0.5295 - val_output_c_loss: 0.5198\nEpoch 16/70\n120/120 - 7s - loss: 10.6006 - output_react_loss: 0.5728 - output_mg_ph_loss: 0.6134 - output_ph_loss: 0.8727 - output_mg_c_loss: 0.6043 - output_c_loss: 0.7753 - val_loss: 9.5716 - val_output_react_loss: 0.5050 - val_output_mg_ph_loss: 0.6529 - val_output_ph_loss: 0.6124 - val_output_mg_c_loss: 0.5304 - val_output_c_loss: 0.5178\nEpoch 17/70\n120/120 - 7s - loss: 9.9953 - output_react_loss: 0.5398 - output_mg_ph_loss: 0.5866 - output_ph_loss: 0.7746 - output_mg_c_loss: 0.5717 - output_c_loss: 0.7300 - val_loss: 9.6279 - val_output_react_loss: 0.5120 - val_output_mg_ph_loss: 0.6550 - val_output_ph_loss: 0.6173 - val_output_mg_c_loss: 0.5317 - val_output_c_loss: 0.5169\nEpoch 18/70\n120/120 - 7s - loss: 10.2546 - output_react_loss: 0.5543 - output_mg_ph_loss: 0.5872 - output_ph_loss: 0.8499 - output_mg_c_loss: 0.5877 - output_c_loss: 0.7586 - val_loss: 9.5356 - val_output_react_loss: 0.5102 - val_output_mg_ph_loss: 0.6500 - val_output_ph_loss: 0.6062 - val_output_mg_c_loss: 0.5223 - val_output_c_loss: 0.5173\nEpoch 19/70\n120/120 - 7s - loss: 9.7753 - output_react_loss: 0.5262 - output_mg_ph_loss: 0.5685 - output_ph_loss: 0.7955 - output_mg_c_loss: 0.5555 - output_c_loss: 0.7288 - val_loss: 9.5230 - val_output_react_loss: 0.5074 - val_output_mg_ph_loss: 0.6493 - val_output_ph_loss: 0.6067 - val_output_mg_c_loss: 0.5226 - val_output_c_loss: 0.5196\nEpoch 20/70\n120/120 - 7s - loss: 9.8332 - output_react_loss: 0.5257 - output_mg_ph_loss: 0.5637 - output_ph_loss: 0.8129 - output_mg_c_loss: 0.5655 - output_c_loss: 0.7459 - val_loss: 9.5502 - val_output_react_loss: 0.5031 - val_output_mg_ph_loss: 0.6530 - val_output_ph_loss: 0.6071 - val_output_mg_c_loss: 0.5285 - val_output_c_loss: 0.5198\nEpoch 21/70\n120/120 - 8s - loss: 9.6025 - output_react_loss: 0.5149 - output_mg_ph_loss: 0.5518 - output_ph_loss: 0.7954 - output_mg_c_loss: 0.5503 - output_c_loss: 0.7221 - val_loss: 9.5704 - val_output_react_loss: 0.5083 - val_output_mg_ph_loss: 0.6563 - val_output_ph_loss: 0.6075 - val_output_mg_c_loss: 0.5241 - val_output_c_loss: 0.5197\nEpoch 22/70\n120/120 - 7s - loss: 9.4437 - output_react_loss: 0.5040 - output_mg_ph_loss: 0.5449 - output_ph_loss: 0.7835 - output_mg_c_loss: 0.5377 - output_c_loss: 0.7276 - val_loss: 9.6141 - val_output_react_loss: 0.5086 - val_output_mg_ph_loss: 
0.6529 - val_output_ph_loss: 0.6091 - val_output_mg_c_loss: 0.5360 - val_output_c_loss: 0.5178\nEpoch 23/70\n120/120 - 7s - loss: 9.4348 - output_react_loss: 0.5039 - output_mg_ph_loss: 0.5411 - output_ph_loss: 0.8024 - output_mg_c_loss: 0.5360 - output_c_loss: 0.7275 - val_loss: 9.4404 - val_output_react_loss: 0.4996 - val_output_mg_ph_loss: 0.6454 - val_output_ph_loss: 0.6029 - val_output_mg_c_loss: 0.5192 - val_output_c_loss: 0.5161\nEpoch 24/70\n120/120 - 7s - loss: 9.3357 - output_react_loss: 0.4952 - output_mg_ph_loss: 0.5310 - output_ph_loss: 0.7936 - output_mg_c_loss: 0.5351 - output_c_loss: 0.7354 - val_loss: 9.4929 - val_output_react_loss: 0.5049 - val_output_mg_ph_loss: 0.6464 - val_output_ph_loss: 0.6035 - val_output_mg_c_loss: 0.5236 - val_output_c_loss: 0.5153\nEpoch 25/70\n120/120 - 7s - loss: 9.4072 - output_react_loss: 0.5027 - output_mg_ph_loss: 0.5306 - output_ph_loss: 0.8144 - output_mg_c_loss: 0.5371 - output_c_loss: 0.7404 - val_loss: 9.4668 - val_output_react_loss: 0.5004 - val_output_mg_ph_loss: 0.6436 - val_output_ph_loss: 0.6019 - val_output_mg_c_loss: 0.5258 - val_output_c_loss: 0.5160\nEpoch 26/70\n120/120 - 7s - loss: 9.0327 - output_react_loss: 0.4804 - output_mg_ph_loss: 0.5186 - output_ph_loss: 0.7605 - output_mg_c_loss: 0.5155 - output_c_loss: 0.6999 - val_loss: 9.4631 - val_output_react_loss: 0.5056 - val_output_mg_ph_loss: 0.6409 - val_output_ph_loss: 0.5999 - val_output_mg_c_loss: 0.5226 - val_output_c_loss: 0.5173\nEpoch 27/70\n120/120 - 7s - loss: 9.1317 - output_react_loss: 0.4865 - output_mg_ph_loss: 0.5156 - output_ph_loss: 0.7951 - output_mg_c_loss: 0.5208 - output_c_loss: 0.7223 - val_loss: 9.4605 - val_output_react_loss: 0.5015 - val_output_mg_ph_loss: 0.6456 - val_output_ph_loss: 0.6039 - val_output_mg_c_loss: 0.5212 - val_output_c_loss: 0.5149\nEpoch 28/70\n\nEpoch 00028: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.\n120/120 - 7s - loss: 8.6061 - output_react_loss: 0.4616 - output_mg_ph_loss: 0.4896 - output_ph_loss: 0.7269 - output_mg_c_loss: 0.4886 - output_c_loss: 0.6801 - val_loss: 9.4604 - val_output_react_loss: 0.5014 - val_output_mg_ph_loss: 0.6448 - val_output_ph_loss: 0.6056 - val_output_mg_c_loss: 0.5216 - val_output_c_loss: 0.5155\nEpoch 29/70\n120/120 - 7s - loss: 8.6861 - output_react_loss: 0.4514 - output_mg_ph_loss: 0.4842 - output_ph_loss: 0.7617 - output_mg_c_loss: 0.5085 - output_c_loss: 0.7037 - val_loss: 9.2764 - val_output_react_loss: 0.4904 - val_output_mg_ph_loss: 0.6328 - val_output_ph_loss: 0.5950 - val_output_mg_c_loss: 0.5114 - val_output_c_loss: 0.5080\nEpoch 30/70\n120/120 - 8s - loss: 8.5555 - output_react_loss: 0.4566 - output_mg_ph_loss: 0.4775 - output_ph_loss: 0.7584 - output_mg_c_loss: 0.4829 - output_c_loss: 0.7117 - val_loss: 9.2587 - val_output_react_loss: 0.4889 - val_output_mg_ph_loss: 0.6325 - val_output_ph_loss: 0.5934 - val_output_mg_c_loss: 0.5104 - val_output_c_loss: 0.5062\nEpoch 31/70\n120/120 - 7s - loss: 8.6774 - output_react_loss: 0.4562 - output_mg_ph_loss: 0.4854 - output_ph_loss: 0.7994 - output_mg_c_loss: 0.4906 - output_c_loss: 0.7168 - val_loss: 9.2630 - val_output_react_loss: 0.4903 - val_output_mg_ph_loss: 0.6327 - val_output_ph_loss: 0.5924 - val_output_mg_c_loss: 0.5100 - val_output_c_loss: 0.5056\nEpoch 32/70\n120/120 - 7s - loss: 8.2996 - output_react_loss: 0.4371 - output_mg_ph_loss: 0.4619 - output_ph_loss: 0.7426 - output_mg_c_loss: 0.4729 - output_c_loss: 0.6976 - val_loss: 9.2490 - val_output_react_loss: 0.4884 - val_output_mg_ph_loss: 0.6323 - 
val_output_ph_loss: 0.5917 - val_output_mg_c_loss: 0.5096 - val_output_c_loss: 0.5058\nEpoch 33/70\n120/120 - 7s - loss: 8.2302 - output_react_loss: 0.4345 - output_mg_ph_loss: 0.4592 - output_ph_loss: 0.7287 - output_mg_c_loss: 0.4707 - output_c_loss: 0.6799 - val_loss: 9.2742 - val_output_react_loss: 0.4905 - val_output_mg_ph_loss: 0.6333 - val_output_ph_loss: 0.5928 - val_output_mg_c_loss: 0.5112 - val_output_c_loss: 0.5063\nEpoch 34/70\n120/120 - 7s - loss: 8.4393 - output_react_loss: 0.4409 - output_mg_ph_loss: 0.4722 - output_ph_loss: 0.7573 - output_mg_c_loss: 0.4818 - output_c_loss: 0.7075 - val_loss: 9.2675 - val_output_react_loss: 0.4886 - val_output_mg_ph_loss: 0.6345 - val_output_ph_loss: 0.5921 - val_output_mg_c_loss: 0.5108 - val_output_c_loss: 0.5060\nEpoch 35/70\n120/120 - 7s - loss: 8.1927 - output_react_loss: 0.4356 - output_mg_ph_loss: 0.4535 - output_ph_loss: 0.7411 - output_mg_c_loss: 0.4644 - output_c_loss: 0.6843 - val_loss: 9.2766 - val_output_react_loss: 0.4889 - val_output_mg_ph_loss: 0.6346 - val_output_ph_loss: 0.5920 - val_output_mg_c_loss: 0.5120 - val_output_c_loss: 0.5069\nEpoch 36/70\n120/120 - 7s - loss: 8.0792 - output_react_loss: 0.4239 - output_mg_ph_loss: 0.4482 - output_ph_loss: 0.7289 - output_mg_c_loss: 0.4633 - output_c_loss: 0.6739 - val_loss: 9.2468 - val_output_react_loss: 0.4884 - val_output_mg_ph_loss: 0.6328 - val_output_ph_loss: 0.5909 - val_output_mg_c_loss: 0.5090 - val_output_c_loss: 0.5049\nEpoch 37/70\n120/120 - 7s - loss: 8.2173 - output_react_loss: 0.4325 - output_mg_ph_loss: 0.4573 - output_ph_loss: 0.7421 - output_mg_c_loss: 0.4677 - output_c_loss: 0.6876 - val_loss: 9.2566 - val_output_react_loss: 0.4876 - val_output_mg_ph_loss: 0.6334 - val_output_ph_loss: 0.5911 - val_output_mg_c_loss: 0.5110 - val_output_c_loss: 0.5053\nEpoch 38/70\n120/120 - 7s - loss: 8.2765 - output_react_loss: 0.4340 - output_mg_ph_loss: 0.4598 - output_ph_loss: 0.7615 - output_mg_c_loss: 0.4703 - output_c_loss: 0.6946 - val_loss: 9.2461 - val_output_react_loss: 0.4895 - val_output_mg_ph_loss: 0.6312 - val_output_ph_loss: 0.5918 - val_output_mg_c_loss: 0.5091 - val_output_c_loss: 0.5052\nEpoch 39/70\n120/120 - 7s - loss: 8.5239 - output_react_loss: 0.4487 - output_mg_ph_loss: 0.4696 - output_ph_loss: 0.7726 - output_mg_c_loss: 0.4885 - output_c_loss: 0.7174 - val_loss: 9.2447 - val_output_react_loss: 0.4881 - val_output_mg_ph_loss: 0.6328 - val_output_ph_loss: 0.5904 - val_output_mg_c_loss: 0.5089 - val_output_c_loss: 0.5054\nEpoch 40/70\n120/120 - 7s - loss: 8.1807 - output_react_loss: 0.4308 - output_mg_ph_loss: 0.4533 - output_ph_loss: 0.7359 - output_mg_c_loss: 0.4651 - output_c_loss: 0.6987 - val_loss: 9.2402 - val_output_react_loss: 0.4889 - val_output_mg_ph_loss: 0.6317 - val_output_ph_loss: 0.5910 - val_output_mg_c_loss: 0.5083 - val_output_c_loss: 0.5044\nEpoch 41/70\n120/120 - 7s - loss: 7.8889 - output_react_loss: 0.4081 - output_mg_ph_loss: 0.4475 - output_ph_loss: 0.7128 - output_mg_c_loss: 0.4463 - output_c_loss: 0.6662 - val_loss: 9.2679 - val_output_react_loss: 0.4894 - val_output_mg_ph_loss: 0.6339 - val_output_ph_loss: 0.5916 - val_output_mg_c_loss: 0.5110 - val_output_c_loss: 0.5047\nEpoch 42/70\n120/120 - 7s - loss: 8.3909 - output_react_loss: 0.4383 - output_mg_ph_loss: 0.4636 - output_ph_loss: 0.7630 - output_mg_c_loss: 0.4823 - output_c_loss: 0.7072 - val_loss: 9.2643 - val_output_react_loss: 0.4886 - val_output_mg_ph_loss: 0.6333 - val_output_ph_loss: 0.5922 - val_output_mg_c_loss: 0.5114 - val_output_c_loss: 0.5054\nEpoch 
43/70\n120/120 - 7s - loss: 8.0152 - output_react_loss: 0.4266 - output_mg_ph_loss: 0.4449 - output_ph_loss: 0.7231 - output_mg_c_loss: 0.4512 - output_c_loss: 0.6788 - val_loss: 9.2678 - val_output_react_loss: 0.4902 - val_output_mg_ph_loss: 0.6330 - val_output_ph_loss: 0.5919 - val_output_mg_c_loss: 0.5109 - val_output_c_loss: 0.5053\nEpoch 44/70\n120/120 - 7s - loss: 8.0300 - output_react_loss: 0.4177 - output_mg_ph_loss: 0.4451 - output_ph_loss: 0.7334 - output_mg_c_loss: 0.4605 - output_c_loss: 0.6799 - val_loss: 9.2374 - val_output_react_loss: 0.4870 - val_output_mg_ph_loss: 0.6321 - val_output_ph_loss: 0.5909 - val_output_mg_c_loss: 0.5093 - val_output_c_loss: 0.5047\nEpoch 45/70\n120/120 - 7s - loss: 8.0941 - output_react_loss: 0.4249 - output_mg_ph_loss: 0.4472 - output_ph_loss: 0.7441 - output_mg_c_loss: 0.4621 - output_c_loss: 0.6786 - val_loss: 9.2437 - val_output_react_loss: 0.4879 - val_output_mg_ph_loss: 0.6325 - val_output_ph_loss: 0.5915 - val_output_mg_c_loss: 0.5091 - val_output_c_loss: 0.5051\nEpoch 46/70\n120/120 - 7s - loss: 8.3663 - output_react_loss: 0.4369 - output_mg_ph_loss: 0.4632 - output_ph_loss: 0.7624 - output_mg_c_loss: 0.4761 - output_c_loss: 0.7226 - val_loss: 9.2754 - val_output_react_loss: 0.4908 - val_output_mg_ph_loss: 0.6335 - val_output_ph_loss: 0.5926 - val_output_mg_c_loss: 0.5112 - val_output_c_loss: 0.5056\nEpoch 47/70\n120/120 - 7s - loss: 8.1853 - output_react_loss: 0.4311 - output_mg_ph_loss: 0.4504 - output_ph_loss: 0.7438 - output_mg_c_loss: 0.4680 - output_c_loss: 0.6940 - val_loss: 9.2431 - val_output_react_loss: 0.4891 - val_output_mg_ph_loss: 0.6306 - val_output_ph_loss: 0.5911 - val_output_mg_c_loss: 0.5097 - val_output_c_loss: 0.5048\nEpoch 48/70\n120/120 - 7s - loss: 7.8026 - output_react_loss: 0.4112 - output_mg_ph_loss: 0.4308 - output_ph_loss: 0.7094 - output_mg_c_loss: 0.4434 - output_c_loss: 0.6664 - val_loss: 9.2585 - val_output_react_loss: 0.4884 - val_output_mg_ph_loss: 0.6332 - val_output_ph_loss: 0.5911 - val_output_mg_c_loss: 0.5109 - val_output_c_loss: 0.5053\nEpoch 49/70\n\nEpoch 00049: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.\n120/120 - 7s - loss: 7.9231 - output_react_loss: 0.4126 - output_mg_ph_loss: 0.4400 - output_ph_loss: 0.7168 - output_mg_c_loss: 0.4542 - output_c_loss: 0.6723 - val_loss: 9.2774 - val_output_react_loss: 0.4886 - val_output_mg_ph_loss: 0.6347 - val_output_ph_loss: 0.5922 - val_output_mg_c_loss: 0.5126 - val_output_c_loss: 0.5060\nEpoch 50/70\n120/120 - 7s - loss: 8.2218 - output_react_loss: 0.4293 - output_mg_ph_loss: 0.4546 - output_ph_loss: 0.7545 - output_mg_c_loss: 0.4706 - output_c_loss: 0.6951 - val_loss: 9.2649 - val_output_react_loss: 0.4882 - val_output_mg_ph_loss: 0.6339 - val_output_ph_loss: 0.5911 - val_output_mg_c_loss: 0.5116 - val_output_c_loss: 0.5051\nEpoch 51/70\n120/120 - 7s - loss: 7.9993 - output_react_loss: 0.4225 - output_mg_ph_loss: 0.4381 - output_ph_loss: 0.7258 - output_mg_c_loss: 0.4544 - output_c_loss: 0.6982 - val_loss: 9.2619 - val_output_react_loss: 0.4881 - val_output_mg_ph_loss: 0.6338 - val_output_ph_loss: 0.5912 - val_output_mg_c_loss: 0.5113 - val_output_c_loss: 0.5048\nEpoch 52/70\n120/120 - 7s - loss: 7.8668 - output_react_loss: 0.4099 - output_mg_ph_loss: 0.4356 - output_ph_loss: 0.7170 - output_mg_c_loss: 0.4491 - output_c_loss: 0.6766 - val_loss: 9.2544 - val_output_react_loss: 0.4878 - val_output_mg_ph_loss: 0.6333 - val_output_ph_loss: 0.5909 - val_output_mg_c_loss: 0.5107 - val_output_c_loss: 0.5048\nEpoch 53/70\n120/120 - 7s 
- loss: 8.0500 - output_react_loss: 0.4228 - output_mg_ph_loss: 0.4430 - output_ph_loss: 0.7545 - output_mg_c_loss: 0.4543 - output_c_loss: 0.6948 - val_loss: 9.2582 - val_output_react_loss: 0.4883 - val_output_mg_ph_loss: 0.6331 - val_output_ph_loss: 0.5910 - val_output_mg_c_loss: 0.5111 - val_output_c_loss: 0.5049\nEpoch 54/70\nRestoring model weights from the end of the best epoch.\n\nEpoch 00054: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.\n120/120 - 7s - loss: 8.0914 - output_react_loss: 0.4227 - output_mg_ph_loss: 0.4466 - output_ph_loss: 0.7510 - output_mg_c_loss: 0.4595 - output_c_loss: 0.6971 - val_loss: 9.2583 - val_output_react_loss: 0.4882 - val_output_mg_ph_loss: 0.6332 - val_output_ph_loss: 0.5910 - val_output_mg_c_loss: 0.5111 - val_output_c_loss: 0.5049\nEpoch 00054: early stopping\n\nFOLD: 3\nEpoch 1/70\n120/120 - 8s - loss: 18.6840 - output_react_loss: 0.9529 - output_mg_ph_loss: 1.1547 - output_ph_loss: 1.3296 - output_mg_c_loss: 1.1392 - output_c_loss: 1.1205 - val_loss: 14.4799 - val_output_react_loss: 0.7542 - val_output_mg_ph_loss: 0.9715 - val_output_ph_loss: 0.8865 - val_output_mg_c_loss: 0.8427 - val_output_c_loss: 0.7517\nEpoch 2/70\n120/120 - 7s - loss: 15.8220 - output_react_loss: 0.8102 - output_mg_ph_loss: 0.9579 - output_ph_loss: 1.1415 - output_mg_c_loss: 0.9681 - output_c_loss: 0.9997 - val_loss: 12.8952 - val_output_react_loss: 0.6876 - val_output_mg_ph_loss: 0.8722 - val_output_ph_loss: 0.8191 - val_output_mg_c_loss: 0.7207 - val_output_c_loss: 0.6731\nEpoch 3/70\n120/120 - 7s - loss: 14.8073 - output_react_loss: 0.7667 - output_mg_ph_loss: 0.9032 - output_ph_loss: 1.0925 - output_mg_c_loss: 0.8836 - output_c_loss: 0.9476 - val_loss: 11.8052 - val_output_react_loss: 0.6511 - val_output_mg_ph_loss: 0.7911 - val_output_ph_loss: 0.7523 - val_output_mg_c_loss: 0.6452 - val_output_c_loss: 0.6160\nEpoch 4/70\n120/120 - 7s - loss: 13.9567 - output_react_loss: 0.7370 - output_mg_ph_loss: 0.8432 - output_ph_loss: 1.0324 - output_mg_c_loss: 0.8288 - output_c_loss: 0.8794 - val_loss: 11.1484 - val_output_react_loss: 0.6075 - val_output_mg_ph_loss: 0.7445 - val_output_ph_loss: 0.7161 - val_output_mg_c_loss: 0.6169 - val_output_c_loss: 0.5881\nEpoch 5/70\n120/120 - 7s - loss: 13.3895 - output_react_loss: 0.7094 - output_mg_ph_loss: 0.8109 - output_ph_loss: 0.9899 - output_mg_c_loss: 0.7877 - output_c_loss: 0.8596 - val_loss: 10.8109 - val_output_react_loss: 0.5969 - val_output_mg_ph_loss: 0.7195 - val_output_ph_loss: 0.7005 - val_output_mg_c_loss: 0.5920 - val_output_c_loss: 0.5682\nEpoch 6/70\n120/120 - 7s - loss: 13.0743 - output_react_loss: 0.7012 - output_mg_ph_loss: 0.7831 - output_ph_loss: 0.9846 - output_mg_c_loss: 0.7651 - output_c_loss: 0.8429 - val_loss: 10.5664 - val_output_react_loss: 0.5788 - val_output_mg_ph_loss: 0.7038 - val_output_ph_loss: 0.6941 - val_output_mg_c_loss: 0.5794 - val_output_c_loss: 0.5620\nEpoch 7/70\n120/120 - 7s - loss: 12.5453 - output_react_loss: 0.6699 - output_mg_ph_loss: 0.7532 - output_ph_loss: 0.9363 - output_mg_c_loss: 0.7344 - output_c_loss: 0.8214 - val_loss: 10.4351 - val_output_react_loss: 0.5705 - val_output_mg_ph_loss: 0.6952 - val_output_ph_loss: 0.6708 - val_output_mg_c_loss: 0.5770 - val_output_c_loss: 0.5505\nEpoch 8/70\n120/120 - 7s - loss: 12.4417 - output_react_loss: 0.6649 - output_mg_ph_loss: 0.7435 - output_ph_loss: 0.9389 - output_mg_c_loss: 0.7278 - output_c_loss: 0.8216 - val_loss: 10.2248 - val_output_react_loss: 0.5577 - val_output_mg_ph_loss: 0.6857 - val_output_ph_loss: 0.6683 - 
val_output_mg_c_loss: 0.5577 - val_output_c_loss: 0.5510\nEpoch 9/70\n120/120 - 7s - loss: 11.9644 - output_react_loss: 0.6397 - output_mg_ph_loss: 0.7163 - output_ph_loss: 0.9078 - output_mg_c_loss: 0.6938 - output_c_loss: 0.8076 - val_loss: 10.2859 - val_output_react_loss: 0.5580 - val_output_mg_ph_loss: 0.6936 - val_output_ph_loss: 0.6690 - val_output_mg_c_loss: 0.5632 - val_output_c_loss: 0.5427\nEpoch 10/70\n120/120 - 7s - loss: 11.8285 - output_react_loss: 0.6308 - output_mg_ph_loss: 0.7023 - output_ph_loss: 0.9060 - output_mg_c_loss: 0.6903 - output_c_loss: 0.8054 - val_loss: 10.0905 - val_output_react_loss: 0.5516 - val_output_mg_ph_loss: 0.6768 - val_output_ph_loss: 0.6517 - val_output_mg_c_loss: 0.5517 - val_output_c_loss: 0.5381\nEpoch 11/70\n120/120 - 7s - loss: 11.4954 - output_react_loss: 0.6139 - output_mg_ph_loss: 0.6835 - output_ph_loss: 0.8954 - output_mg_c_loss: 0.6665 - output_c_loss: 0.7803 - val_loss: 9.9912 - val_output_react_loss: 0.5435 - val_output_mg_ph_loss: 0.6723 - val_output_ph_loss: 0.6461 - val_output_mg_c_loss: 0.5464 - val_output_c_loss: 0.5339\nEpoch 12/70\n120/120 - 7s - loss: 11.1197 - output_react_loss: 0.6051 - output_mg_ph_loss: 0.6544 - output_ph_loss: 0.8637 - output_mg_c_loss: 0.6386 - output_c_loss: 0.7654 - val_loss: 10.1213 - val_output_react_loss: 0.5459 - val_output_mg_ph_loss: 0.6735 - val_output_ph_loss: 0.6580 - val_output_mg_c_loss: 0.5638 - val_output_c_loss: 0.5474\nEpoch 13/70\n120/120 - 7s - loss: 10.8039 - output_react_loss: 0.5838 - output_mg_ph_loss: 0.6377 - output_ph_loss: 0.8411 - output_mg_c_loss: 0.6222 - output_c_loss: 0.7445 - val_loss: 10.0795 - val_output_react_loss: 0.5445 - val_output_mg_ph_loss: 0.6799 - val_output_ph_loss: 0.6452 - val_output_mg_c_loss: 0.5557 - val_output_c_loss: 0.5335\nEpoch 14/70\n120/120 - 7s - loss: 10.6944 - output_react_loss: 0.5747 - output_mg_ph_loss: 0.6303 - output_ph_loss: 0.8529 - output_mg_c_loss: 0.6140 - output_c_loss: 0.7468 - val_loss: 9.9257 - val_output_react_loss: 0.5361 - val_output_mg_ph_loss: 0.6685 - val_output_ph_loss: 0.6436 - val_output_mg_c_loss: 0.5453 - val_output_c_loss: 0.5329\nEpoch 15/70\n120/120 - 7s - loss: 10.8432 - output_react_loss: 0.5789 - output_mg_ph_loss: 0.6320 - output_ph_loss: 0.8744 - output_mg_c_loss: 0.6261 - output_c_loss: 0.7839 - val_loss: 9.8908 - val_output_react_loss: 0.5332 - val_output_mg_ph_loss: 0.6695 - val_output_ph_loss: 0.6358 - val_output_mg_c_loss: 0.5423 - val_output_c_loss: 0.5300\nEpoch 16/70\n120/120 - 7s - loss: 10.3041 - output_react_loss: 0.5564 - output_mg_ph_loss: 0.6023 - output_ph_loss: 0.8344 - output_mg_c_loss: 0.5881 - output_c_loss: 0.7359 - val_loss: 9.8956 - val_output_react_loss: 0.5303 - val_output_mg_ph_loss: 0.6710 - val_output_ph_loss: 0.6380 - val_output_mg_c_loss: 0.5445 - val_output_c_loss: 0.5285\nEpoch 17/70\n120/120 - 7s - loss: 10.0429 - output_react_loss: 0.5407 - output_mg_ph_loss: 0.5806 - output_ph_loss: 0.8154 - output_mg_c_loss: 0.5754 - output_c_loss: 0.7442 - val_loss: 9.8483 - val_output_react_loss: 0.5339 - val_output_mg_ph_loss: 0.6638 - val_output_ph_loss: 0.6290 - val_output_mg_c_loss: 0.5415 - val_output_c_loss: 0.5237\nEpoch 18/70\n120/120 - 7s - loss: 10.1035 - output_react_loss: 0.5431 - output_mg_ph_loss: 0.5884 - output_ph_loss: 0.8180 - output_mg_c_loss: 0.5767 - output_c_loss: 0.7445 - val_loss: 9.8773 - val_output_react_loss: 0.5294 - val_output_mg_ph_loss: 0.6721 - val_output_ph_loss: 0.6355 - val_output_mg_c_loss: 0.5420 - val_output_c_loss: 0.5245\nEpoch 19/70\n120/120 - 7s - loss: 
10.1946 - output_react_loss: 0.5469 - output_mg_ph_loss: 0.5813 - output_ph_loss: 0.8610 - output_mg_c_loss: 0.5848 - output_c_loss: 0.7686 - val_loss: 9.9106 - val_output_react_loss: 0.5332 - val_output_mg_ph_loss: 0.6663 - val_output_ph_loss: 0.6413 - val_output_mg_c_loss: 0.5483 - val_output_c_loss: 0.5301\nEpoch 20/70\n120/120 - 7s - loss: 9.6298 - output_react_loss: 0.5210 - output_mg_ph_loss: 0.5547 - output_ph_loss: 0.7901 - output_mg_c_loss: 0.5475 - output_c_loss: 0.7235 - val_loss: 9.8567 - val_output_react_loss: 0.5339 - val_output_mg_ph_loss: 0.6630 - val_output_ph_loss: 0.6355 - val_output_mg_c_loss: 0.5408 - val_output_c_loss: 0.5331\nEpoch 21/70\n120/120 - 7s - loss: 9.4543 - output_react_loss: 0.5045 - output_mg_ph_loss: 0.5498 - output_ph_loss: 0.7808 - output_mg_c_loss: 0.5409 - output_c_loss: 0.6977 - val_loss: 9.7268 - val_output_react_loss: 0.5270 - val_output_mg_ph_loss: 0.6584 - val_output_ph_loss: 0.6247 - val_output_mg_c_loss: 0.5305 - val_output_c_loss: 0.5226\nEpoch 22/70\n120/120 - 7s - loss: 9.6641 - output_react_loss: 0.5150 - output_mg_ph_loss: 0.5473 - output_ph_loss: 0.8327 - output_mg_c_loss: 0.5553 - output_c_loss: 0.7435 - val_loss: 9.8306 - val_output_react_loss: 0.5321 - val_output_mg_ph_loss: 0.6651 - val_output_ph_loss: 0.6282 - val_output_mg_c_loss: 0.5380 - val_output_c_loss: 0.5266\nEpoch 23/70\n120/120 - 7s - loss: 9.4386 - output_react_loss: 0.5054 - output_mg_ph_loss: 0.5385 - output_ph_loss: 0.8036 - output_mg_c_loss: 0.5367 - output_c_loss: 0.7317 - val_loss: 9.8733 - val_output_react_loss: 0.5340 - val_output_mg_ph_loss: 0.6687 - val_output_ph_loss: 0.6349 - val_output_mg_c_loss: 0.5393 - val_output_c_loss: 0.5289\nEpoch 24/70\n120/120 - 7s - loss: 9.4568 - output_react_loss: 0.5007 - output_mg_ph_loss: 0.5392 - output_ph_loss: 0.8273 - output_mg_c_loss: 0.5371 - output_c_loss: 0.7450 - val_loss: 9.7560 - val_output_react_loss: 0.5288 - val_output_mg_ph_loss: 0.6613 - val_output_ph_loss: 0.6213 - val_output_mg_c_loss: 0.5328 - val_output_c_loss: 0.5205\nEpoch 25/70\n120/120 - 7s - loss: 9.0357 - output_react_loss: 0.4821 - output_mg_ph_loss: 0.5142 - output_ph_loss: 0.7664 - output_mg_c_loss: 0.5202 - output_c_loss: 0.6869 - val_loss: 9.7190 - val_output_react_loss: 0.5249 - val_output_mg_ph_loss: 0.6549 - val_output_ph_loss: 0.6271 - val_output_mg_c_loss: 0.5343 - val_output_c_loss: 0.5216\nEpoch 26/70\n120/120 - 7s - loss: 8.9936 - output_react_loss: 0.4762 - output_mg_ph_loss: 0.5113 - output_ph_loss: 0.7662 - output_mg_c_loss: 0.5159 - output_c_loss: 0.7105 - val_loss: 9.8164 - val_output_react_loss: 0.5303 - val_output_mg_ph_loss: 0.6648 - val_output_ph_loss: 0.6301 - val_output_mg_c_loss: 0.5365 - val_output_c_loss: 0.5285\nEpoch 27/70\n120/120 - 7s - loss: 9.1292 - output_react_loss: 0.4871 - output_mg_ph_loss: 0.5175 - output_ph_loss: 0.7856 - output_mg_c_loss: 0.5207 - output_c_loss: 0.7168 - val_loss: 9.7807 - val_output_react_loss: 0.5277 - val_output_mg_ph_loss: 0.6614 - val_output_ph_loss: 0.6273 - val_output_mg_c_loss: 0.5369 - val_output_c_loss: 0.5234\nEpoch 28/70\n120/120 - 7s - loss: 8.8171 - output_react_loss: 0.4736 - output_mg_ph_loss: 0.4975 - output_ph_loss: 0.7572 - output_mg_c_loss: 0.5013 - output_c_loss: 0.6977 - val_loss: 9.7790 - val_output_react_loss: 0.5327 - val_output_mg_ph_loss: 0.6544 - val_output_ph_loss: 0.6246 - val_output_mg_c_loss: 0.5380 - val_output_c_loss: 0.5291\nEpoch 29/70\n120/120 - 7s - loss: 8.9244 - output_react_loss: 0.4713 - output_mg_ph_loss: 0.5011 - output_ph_loss: 0.7886 - 
output_mg_c_loss: 0.5108 - output_c_loss: 0.7197 - val_loss: 9.8617 - val_output_react_loss: 0.5280 - val_output_mg_ph_loss: 0.6678 - val_output_ph_loss: 0.6242 - val_output_mg_c_loss: 0.5459 - val_output_c_loss: 0.5293\nEpoch 30/70\n\nEpoch 00030: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.\n120/120 - 7s - loss: 9.0315 - output_react_loss: 0.4740 - output_mg_ph_loss: 0.5052 - output_ph_loss: 0.8026 - output_mg_c_loss: 0.5207 - output_c_loss: 0.7292 - val_loss: 9.8758 - val_output_react_loss: 0.5313 - val_output_mg_ph_loss: 0.6731 - val_output_ph_loss: 0.6277 - val_output_mg_c_loss: 0.5400 - val_output_c_loss: 0.5260\nEpoch 31/70\n120/120 - 7s - loss: 8.6511 - output_react_loss: 0.4530 - output_mg_ph_loss: 0.4898 - output_ph_loss: 0.7775 - output_mg_c_loss: 0.4909 - output_c_loss: 0.7053 - val_loss: 9.5948 - val_output_react_loss: 0.5183 - val_output_mg_ph_loss: 0.6505 - val_output_ph_loss: 0.6143 - val_output_mg_c_loss: 0.5241 - val_output_c_loss: 0.5163\nEpoch 32/70\n120/120 - 7s - loss: 8.2944 - output_react_loss: 0.4355 - output_mg_ph_loss: 0.4616 - output_ph_loss: 0.7521 - output_mg_c_loss: 0.4751 - output_c_loss: 0.6817 - val_loss: 9.5921 - val_output_react_loss: 0.5184 - val_output_mg_ph_loss: 0.6516 - val_output_ph_loss: 0.6134 - val_output_mg_c_loss: 0.5227 - val_output_c_loss: 0.5149\nEpoch 33/70\n120/120 - 7s - loss: 8.1840 - output_react_loss: 0.4295 - output_mg_ph_loss: 0.4578 - output_ph_loss: 0.7356 - output_mg_c_loss: 0.4678 - output_c_loss: 0.6727 - val_loss: 9.5776 - val_output_react_loss: 0.5177 - val_output_mg_ph_loss: 0.6502 - val_output_ph_loss: 0.6134 - val_output_mg_c_loss: 0.5222 - val_output_c_loss: 0.5140\nEpoch 34/70\n120/120 - 7s - loss: 8.4374 - output_react_loss: 0.4459 - output_mg_ph_loss: 0.4671 - output_ph_loss: 0.7736 - output_mg_c_loss: 0.4765 - output_c_loss: 0.7158 - val_loss: 9.5765 - val_output_react_loss: 0.5184 - val_output_mg_ph_loss: 0.6486 - val_output_ph_loss: 0.6126 - val_output_mg_c_loss: 0.5226 - val_output_c_loss: 0.5161\nEpoch 35/70\n120/120 - 7s - loss: 8.2058 - output_react_loss: 0.4319 - output_mg_ph_loss: 0.4534 - output_ph_loss: 0.7502 - output_mg_c_loss: 0.4693 - output_c_loss: 0.6830 - val_loss: 9.5815 - val_output_react_loss: 0.5178 - val_output_mg_ph_loss: 0.6493 - val_output_ph_loss: 0.6125 - val_output_mg_c_loss: 0.5235 - val_output_c_loss: 0.5158\nEpoch 36/70\n120/120 - 7s - loss: 7.8926 - output_react_loss: 0.4204 - output_mg_ph_loss: 0.4386 - output_ph_loss: 0.7088 - output_mg_c_loss: 0.4455 - output_c_loss: 0.6613 - val_loss: 9.6044 - val_output_react_loss: 0.5190 - val_output_mg_ph_loss: 0.6515 - val_output_ph_loss: 0.6130 - val_output_mg_c_loss: 0.5247 - val_output_c_loss: 0.5155\nEpoch 37/70\n120/120 - 7s - loss: 8.2229 - output_react_loss: 0.4283 - output_mg_ph_loss: 0.4543 - output_ph_loss: 0.7591 - output_mg_c_loss: 0.4722 - output_c_loss: 0.6900 - val_loss: 9.5919 - val_output_react_loss: 0.5183 - val_output_mg_ph_loss: 0.6497 - val_output_ph_loss: 0.6136 - val_output_mg_c_loss: 0.5245 - val_output_c_loss: 0.5159\nEpoch 38/70\n120/120 - 7s - loss: 8.1608 - output_react_loss: 0.4285 - output_mg_ph_loss: 0.4501 - output_ph_loss: 0.7572 - output_mg_c_loss: 0.4629 - output_c_loss: 0.6962 - val_loss: 9.5908 - val_output_react_loss: 0.5187 - val_output_mg_ph_loss: 0.6501 - val_output_ph_loss: 0.6145 - val_output_mg_c_loss: 0.5233 - val_output_c_loss: 0.5159\nEpoch 39/70\n\nEpoch 00039: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.\n120/120 - 7s - loss: 8.2196 - 
output_react_loss: 0.4285 - output_mg_ph_loss: 0.4554 - output_ph_loss: 0.7525 - output_mg_c_loss: 0.4701 - output_c_loss: 0.6968 - val_loss: 9.5853 - val_output_react_loss: 0.5191 - val_output_mg_ph_loss: 0.6495 - val_output_ph_loss: 0.6126 - val_output_mg_c_loss: 0.5228 - val_output_c_loss: 0.5153\nEpoch 40/70\n120/120 - 7s - loss: 8.1960 - output_react_loss: 0.4307 - output_mg_ph_loss: 0.4530 - output_ph_loss: 0.7629 - output_mg_c_loss: 0.4655 - output_c_loss: 0.6873 - val_loss: 9.5693 - val_output_react_loss: 0.5176 - val_output_mg_ph_loss: 0.6492 - val_output_ph_loss: 0.6120 - val_output_mg_c_loss: 0.5217 - val_output_c_loss: 0.5144\nEpoch 41/70\n120/120 - 7s - loss: 7.8213 - output_react_loss: 0.4111 - output_mg_ph_loss: 0.4371 - output_ph_loss: 0.6985 - output_mg_c_loss: 0.4455 - output_c_loss: 0.6543 - val_loss: 9.5765 - val_output_react_loss: 0.5180 - val_output_mg_ph_loss: 0.6496 - val_output_ph_loss: 0.6118 - val_output_mg_c_loss: 0.5224 - val_output_c_loss: 0.5146\nEpoch 42/70\n120/120 - 7s - loss: 8.2831 - output_react_loss: 0.4342 - output_mg_ph_loss: 0.4531 - output_ph_loss: 0.7775 - output_mg_c_loss: 0.4709 - output_c_loss: 0.7145 - val_loss: 9.5763 - val_output_react_loss: 0.5182 - val_output_mg_ph_loss: 0.6492 - val_output_ph_loss: 0.6124 - val_output_mg_c_loss: 0.5224 - val_output_c_loss: 0.5149\nEpoch 43/70\n120/120 - 7s - loss: 7.9629 - output_react_loss: 0.4202 - output_mg_ph_loss: 0.4434 - output_ph_loss: 0.7291 - output_mg_c_loss: 0.4498 - output_c_loss: 0.6667 - val_loss: 9.5719 - val_output_react_loss: 0.5179 - val_output_mg_ph_loss: 0.6490 - val_output_ph_loss: 0.6114 - val_output_mg_c_loss: 0.5224 - val_output_c_loss: 0.5141\nEpoch 44/70\n120/120 - 7s - loss: 7.9561 - output_react_loss: 0.4172 - output_mg_ph_loss: 0.4433 - output_ph_loss: 0.7208 - output_mg_c_loss: 0.4507 - output_c_loss: 0.6790 - val_loss: 9.5766 - val_output_react_loss: 0.5181 - val_output_mg_ph_loss: 0.6492 - val_output_ph_loss: 0.6124 - val_output_mg_c_loss: 0.5225 - val_output_c_loss: 0.5150\nEpoch 45/70\n\nEpoch 00045: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.\n120/120 - 7s - loss: 8.2745 - output_react_loss: 0.4368 - output_mg_ph_loss: 0.4527 - output_ph_loss: 0.7622 - output_mg_c_loss: 0.4730 - output_c_loss: 0.6996 - val_loss: 9.5809 - val_output_react_loss: 0.5181 - val_output_mg_ph_loss: 0.6496 - val_output_ph_loss: 0.6120 - val_output_mg_c_loss: 0.5231 - val_output_c_loss: 0.5149\nEpoch 46/70\n120/120 - 7s - loss: 8.2236 - output_react_loss: 0.4297 - output_mg_ph_loss: 0.4510 - output_ph_loss: 0.7700 - output_mg_c_loss: 0.4687 - output_c_loss: 0.7063 - val_loss: 9.5795 - val_output_react_loss: 0.5181 - val_output_mg_ph_loss: 0.6495 - val_output_ph_loss: 0.6119 - val_output_mg_c_loss: 0.5229 - val_output_c_loss: 0.5148\nEpoch 47/70\n120/120 - 7s - loss: 7.9683 - output_react_loss: 0.4176 - output_mg_ph_loss: 0.4417 - output_ph_loss: 0.7338 - output_mg_c_loss: 0.4536 - output_c_loss: 0.6701 - val_loss: 9.5782 - val_output_react_loss: 0.5181 - val_output_mg_ph_loss: 0.6495 - val_output_ph_loss: 0.6119 - val_output_mg_c_loss: 0.5228 - val_output_c_loss: 0.5147\nEpoch 48/70\n120/120 - 7s - loss: 7.9532 - output_react_loss: 0.4140 - output_mg_ph_loss: 0.4420 - output_ph_loss: 0.7234 - output_mg_c_loss: 0.4558 - output_c_loss: 0.6709 - val_loss: 9.5771 - val_output_react_loss: 0.5181 - val_output_mg_ph_loss: 0.6494 - val_output_ph_loss: 0.6119 - val_output_mg_c_loss: 0.5227 - val_output_c_loss: 0.5147\nEpoch 49/70\n120/120 - 7s - loss: 7.9453 - output_react_loss: 
0.4201 - output_mg_ph_loss: 0.4404 - output_ph_loss: 0.7357 - output_mg_c_loss: 0.4484 - output_c_loss: 0.6650 - val_loss: 9.5781 - val_output_react_loss: 0.5182 - val_output_mg_ph_loss: 0.6494 - val_output_ph_loss: 0.6119 - val_output_mg_c_loss: 0.5227 - val_output_c_loss: 0.5148\nEpoch 50/70\nRestoring model weights from the end of the best epoch.\n\nEpoch 00050: ReduceLROnPlateau reducing learning rate to 1.0000001111620805e-07.\n120/120 - 7s - loss: 8.1491 - output_react_loss: 0.4232 - output_mg_ph_loss: 0.4498 - output_ph_loss: 0.7545 - output_mg_c_loss: 0.4663 - output_c_loss: 0.6982 - val_loss: 9.5770 - val_output_react_loss: 0.5181 - val_output_mg_ph_loss: 0.6493 - val_output_ph_loss: 0.6118 - val_output_mg_c_loss: 0.5227 - val_output_c_loss: 0.5148\nEpoch 00050: early stopping\n\nFOLD: 4\nEpoch 1/70\n120/120 - 10s - loss: 18.6160 - output_react_loss: 0.9513 - output_mg_ph_loss: 1.1496 - output_ph_loss: 1.3467 - output_mg_c_loss: 1.1258 - output_c_loss: 1.1354 - val_loss: 14.2077 - val_output_react_loss: 0.7596 - val_output_mg_ph_loss: 0.9365 - val_output_ph_loss: 0.8735 - val_output_mg_c_loss: 0.8235 - val_output_c_loss: 0.7361\nEpoch 2/70\n120/120 - 7s - loss: 16.0882 - output_react_loss: 0.8172 - output_mg_ph_loss: 0.9744 - output_ph_loss: 1.1763 - output_mg_c_loss: 0.9854 - output_c_loss: 1.0272 - val_loss: 13.1505 - val_output_react_loss: 0.7062 - val_output_mg_ph_loss: 0.8694 - val_output_ph_loss: 0.8043 - val_output_mg_c_loss: 0.7566 - val_output_c_loss: 0.6850\nEpoch 3/70\n120/120 - 7s - loss: 14.9128 - output_react_loss: 0.7736 - output_mg_ph_loss: 0.8995 - output_ph_loss: 1.1208 - output_mg_c_loss: 0.8959 - output_c_loss: 0.9468 - val_loss: 12.0275 - val_output_react_loss: 0.6527 - val_output_mg_ph_loss: 0.7944 - val_output_ph_loss: 0.7663 - val_output_mg_c_loss: 0.6800 - val_output_c_loss: 0.6261\nEpoch 4/70\n120/120 - 7s - loss: 13.9354 - output_react_loss: 0.7354 - output_mg_ph_loss: 0.8332 - output_ph_loss: 1.0599 - output_mg_c_loss: 0.8248 - output_c_loss: 0.9083 - val_loss: 11.3745 - val_output_react_loss: 0.6340 - val_output_mg_ph_loss: 0.7486 - val_output_ph_loss: 0.7111 - val_output_mg_c_loss: 0.6304 - val_output_c_loss: 0.5987\nEpoch 5/70\n120/120 - 7s - loss: 13.4695 - output_react_loss: 0.7182 - output_mg_ph_loss: 0.8055 - output_ph_loss: 1.0155 - output_mg_c_loss: 0.7921 - output_c_loss: 0.8752 - val_loss: 11.1478 - val_output_react_loss: 0.6120 - val_output_mg_ph_loss: 0.7309 - val_output_ph_loss: 0.6959 - val_output_mg_c_loss: 0.6300 - val_output_c_loss: 0.5873\nEpoch 6/70\n120/120 - 7s - loss: 12.8075 - output_react_loss: 0.6885 - output_mg_ph_loss: 0.7643 - output_ph_loss: 0.9609 - output_mg_c_loss: 0.7496 - output_c_loss: 0.8347 - val_loss: 10.8087 - val_output_react_loss: 0.5992 - val_output_mg_ph_loss: 0.7117 - val_output_ph_loss: 0.6779 - val_output_mg_c_loss: 0.6016 - val_output_c_loss: 0.5681\nEpoch 7/70\n120/120 - 7s - loss: 12.8511 - output_react_loss: 0.6851 - output_mg_ph_loss: 0.7608 - output_ph_loss: 0.9813 - output_mg_c_loss: 0.7567 - output_c_loss: 0.8568 - val_loss: 10.9287 - val_output_react_loss: 0.5993 - val_output_mg_ph_loss: 0.7053 - val_output_ph_loss: 0.7075 - val_output_mg_c_loss: 0.6235 - val_output_c_loss: 0.5807\nEpoch 8/70\n120/120 - 7s - loss: 12.4305 - output_react_loss: 0.6639 - output_mg_ph_loss: 0.7439 - output_ph_loss: 0.9476 - output_mg_c_loss: 0.7221 - output_c_loss: 0.8332 - val_loss: 10.5627 - val_output_react_loss: 0.5874 - val_output_mg_ph_loss: 0.7024 - val_output_ph_loss: 0.6783 - val_output_mg_c_loss: 0.5738 - 
val_output_c_loss: 0.5664\nEpoch 9/70\n120/120 - 7s - loss: 11.7186 - output_react_loss: 0.6314 - output_mg_ph_loss: 0.6970 - output_ph_loss: 0.9030 - output_mg_c_loss: 0.6770 - output_c_loss: 0.7888 - val_loss: 10.3313 - val_output_react_loss: 0.5789 - val_output_mg_ph_loss: 0.6777 - val_output_ph_loss: 0.6502 - val_output_mg_c_loss: 0.5699 - val_output_c_loss: 0.5484\nEpoch 10/70\n120/120 - 7s - loss: 11.9990 - output_react_loss: 0.6503 - output_mg_ph_loss: 0.6961 - output_ph_loss: 0.9452 - output_mg_c_loss: 0.6985 - output_c_loss: 0.8297 - val_loss: 10.3403 - val_output_react_loss: 0.5729 - val_output_mg_ph_loss: 0.6836 - val_output_ph_loss: 0.6516 - val_output_mg_c_loss: 0.5718 - val_output_c_loss: 0.5476\nEpoch 11/70\n120/120 - 8s - loss: 11.4446 - output_react_loss: 0.6133 - output_mg_ph_loss: 0.6731 - output_ph_loss: 0.9140 - output_mg_c_loss: 0.6625 - output_c_loss: 0.7865 - val_loss: 10.1256 - val_output_react_loss: 0.5662 - val_output_mg_ph_loss: 0.6667 - val_output_ph_loss: 0.6426 - val_output_mg_c_loss: 0.5553 - val_output_c_loss: 0.5419\nEpoch 12/70\n120/120 - 7s - loss: 11.3296 - output_react_loss: 0.6150 - output_mg_ph_loss: 0.6610 - output_ph_loss: 0.9012 - output_mg_c_loss: 0.6517 - output_c_loss: 0.7897 - val_loss: 10.2643 - val_output_react_loss: 0.5585 - val_output_mg_ph_loss: 0.6744 - val_output_ph_loss: 0.6468 - val_output_mg_c_loss: 0.5815 - val_output_c_loss: 0.5454\nEpoch 13/70\n120/120 - 7s - loss: 10.8772 - output_react_loss: 0.5917 - output_mg_ph_loss: 0.6356 - output_ph_loss: 0.8707 - output_mg_c_loss: 0.6211 - output_c_loss: 0.7649 - val_loss: 10.0129 - val_output_react_loss: 0.5509 - val_output_mg_ph_loss: 0.6591 - val_output_ph_loss: 0.6355 - val_output_mg_c_loss: 0.5581 - val_output_c_loss: 0.5367\nEpoch 14/70\n120/120 - 7s - loss: 10.7173 - output_react_loss: 0.5773 - output_mg_ph_loss: 0.6204 - output_ph_loss: 0.8689 - output_mg_c_loss: 0.6166 - output_c_loss: 0.7768 - val_loss: 10.1159 - val_output_react_loss: 0.5501 - val_output_mg_ph_loss: 0.6672 - val_output_ph_loss: 0.6477 - val_output_mg_c_loss: 0.5672 - val_output_c_loss: 0.5458\nEpoch 15/70\n120/120 - 7s - loss: 10.7579 - output_react_loss: 0.5821 - output_mg_ph_loss: 0.6212 - output_ph_loss: 0.8795 - output_mg_c_loss: 0.6145 - output_c_loss: 0.7893 - val_loss: 9.9631 - val_output_react_loss: 0.5521 - val_output_mg_ph_loss: 0.6549 - val_output_ph_loss: 0.6241 - val_output_mg_c_loss: 0.5545 - val_output_c_loss: 0.5314\nEpoch 16/70\n120/120 - 7s - loss: 10.3577 - output_react_loss: 0.5618 - output_mg_ph_loss: 0.6004 - output_ph_loss: 0.8411 - output_mg_c_loss: 0.5912 - output_c_loss: 0.7499 - val_loss: 10.0310 - val_output_react_loss: 0.5584 - val_output_mg_ph_loss: 0.6598 - val_output_ph_loss: 0.6205 - val_output_mg_c_loss: 0.5558 - val_output_c_loss: 0.5404\nEpoch 17/70\n120/120 - 7s - loss: 10.3780 - output_react_loss: 0.5609 - output_mg_ph_loss: 0.5964 - output_ph_loss: 0.8617 - output_mg_c_loss: 0.5928 - output_c_loss: 0.7659 - val_loss: 9.9621 - val_output_react_loss: 0.5447 - val_output_mg_ph_loss: 0.6567 - val_output_ph_loss: 0.6371 - val_output_mg_c_loss: 0.5564 - val_output_c_loss: 0.5359\nEpoch 18/70\n120/120 - 7s - loss: 9.7795 - output_react_loss: 0.5261 - output_mg_ph_loss: 0.5611 - output_ph_loss: 0.8101 - output_mg_c_loss: 0.5602 - output_c_loss: 0.7325 - val_loss: 9.8706 - val_output_react_loss: 0.5409 - val_output_mg_ph_loss: 0.6526 - val_output_ph_loss: 0.6174 - val_output_mg_c_loss: 0.5505 - val_output_c_loss: 0.5333\nEpoch 19/70\n120/120 - 7s - loss: 10.0173 - output_react_loss: 
0.5420 - output_mg_ph_loss: 0.5731 - output_ph_loss: 0.8491 - output_mg_c_loss: 0.5679 - output_c_loss: 0.7528 - val_loss: 9.8850 - val_output_react_loss: 0.5492 - val_output_mg_ph_loss: 0.6486 - val_output_ph_loss: 0.6194 - val_output_mg_c_loss: 0.5486 - val_output_c_loss: 0.5341\nEpoch 20/70\n120/120 - 7s - loss: 9.7287 - output_react_loss: 0.5222 - output_mg_ph_loss: 0.5544 - output_ph_loss: 0.8137 - output_mg_c_loss: 0.5584 - output_c_loss: 0.7399 - val_loss: 9.9282 - val_output_react_loss: 0.5495 - val_output_mg_ph_loss: 0.6503 - val_output_ph_loss: 0.6261 - val_output_mg_c_loss: 0.5531 - val_output_c_loss: 0.5377\nEpoch 21/70\n120/120 - 7s - loss: 9.6756 - output_react_loss: 0.5223 - output_mg_ph_loss: 0.5494 - output_ph_loss: 0.8160 - output_mg_c_loss: 0.5541 - output_c_loss: 0.7306 - val_loss: 9.7803 - val_output_react_loss: 0.5372 - val_output_mg_ph_loss: 0.6471 - val_output_ph_loss: 0.6173 - val_output_mg_c_loss: 0.5419 - val_output_c_loss: 0.5324\nEpoch 22/70\n120/120 - 7s - loss: 9.6041 - output_react_loss: 0.5153 - output_mg_ph_loss: 0.5431 - output_ph_loss: 0.8295 - output_mg_c_loss: 0.5495 - output_c_loss: 0.7348 - val_loss: 9.8749 - val_output_react_loss: 0.5498 - val_output_mg_ph_loss: 0.6483 - val_output_ph_loss: 0.6266 - val_output_mg_c_loss: 0.5448 - val_output_c_loss: 0.5335\nEpoch 23/70\n120/120 - 7s - loss: 9.6499 - output_react_loss: 0.5163 - output_mg_ph_loss: 0.5431 - output_ph_loss: 0.8441 - output_mg_c_loss: 0.5520 - output_c_loss: 0.7487 - val_loss: 9.8565 - val_output_react_loss: 0.5439 - val_output_mg_ph_loss: 0.6503 - val_output_ph_loss: 0.6180 - val_output_mg_c_loss: 0.5468 - val_output_c_loss: 0.5336\nEpoch 24/70\n120/120 - 7s - loss: 9.3495 - output_react_loss: 0.5031 - output_mg_ph_loss: 0.5259 - output_ph_loss: 0.8039 - output_mg_c_loss: 0.5329 - output_c_loss: 0.7365 - val_loss: 10.0060 - val_output_react_loss: 0.5497 - val_output_mg_ph_loss: 0.6661 - val_output_ph_loss: 0.6256 - val_output_mg_c_loss: 0.5527 - val_output_c_loss: 0.5380\nEpoch 25/70\n120/120 - 7s - loss: 8.9540 - output_react_loss: 0.4784 - output_mg_ph_loss: 0.5058 - output_ph_loss: 0.7734 - output_mg_c_loss: 0.5099 - output_c_loss: 0.7100 - val_loss: 9.8628 - val_output_react_loss: 0.5432 - val_output_mg_ph_loss: 0.6500 - val_output_ph_loss: 0.6208 - val_output_mg_c_loss: 0.5491 - val_output_c_loss: 0.5308\nEpoch 26/70\n\nEpoch 00026: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.\n120/120 - 7s - loss: 9.3507 - output_react_loss: 0.5013 - output_mg_ph_loss: 0.5188 - output_ph_loss: 0.8244 - output_mg_c_loss: 0.5361 - output_c_loss: 0.7452 - val_loss: 9.9437 - val_output_react_loss: 0.5510 - val_output_mg_ph_loss: 0.6505 - val_output_ph_loss: 0.6233 - val_output_mg_c_loss: 0.5550 - val_output_c_loss: 0.5381\nEpoch 27/70\n120/120 - 7s - loss: 8.7339 - output_react_loss: 0.4640 - output_mg_ph_loss: 0.4904 - output_ph_loss: 0.7769 - output_mg_c_loss: 0.4976 - output_c_loss: 0.6974 - val_loss: 9.6609 - val_output_react_loss: 0.5367 - val_output_mg_ph_loss: 0.6364 - val_output_ph_loss: 0.6064 - val_output_mg_c_loss: 0.5332 - val_output_c_loss: 0.5230\nEpoch 28/70\n120/120 - 8s - loss: 8.7218 - output_react_loss: 0.4632 - output_mg_ph_loss: 0.4788 - output_ph_loss: 0.7953 - output_mg_c_loss: 0.4996 - output_c_loss: 0.7188 - val_loss: 9.6605 - val_output_react_loss: 0.5368 - val_output_mg_ph_loss: 0.6365 - val_output_ph_loss: 0.6067 - val_output_mg_c_loss: 0.5328 - val_output_c_loss: 0.5226\nEpoch 29/70\n120/120 - 7s - loss: 8.4179 - output_react_loss: 0.4482 - 
output_mg_ph_loss: 0.4692 - output_ph_loss: 0.7549 - output_mg_c_loss: 0.4768 - output_c_loss: 0.6921 - val_loss: 9.6521 - val_output_react_loss: 0.5350 - val_output_mg_ph_loss: 0.6362 - val_output_ph_loss: 0.6053 - val_output_mg_c_loss: 0.5335 - val_output_c_loss: 0.5231\nEpoch 30/70\n120/120 - 7s - loss: 8.7809 - output_react_loss: 0.4647 - output_mg_ph_loss: 0.4866 - output_ph_loss: 0.7904 - output_mg_c_loss: 0.5000 - output_c_loss: 0.7338 - val_loss: 9.6346 - val_output_react_loss: 0.5345 - val_output_mg_ph_loss: 0.6350 - val_output_ph_loss: 0.6036 - val_output_mg_c_loss: 0.5324 - val_output_c_loss: 0.5214\nEpoch 31/70\n120/120 - 7s - loss: 8.6158 - output_react_loss: 0.4542 - output_mg_ph_loss: 0.4805 - output_ph_loss: 0.7909 - output_mg_c_loss: 0.4882 - output_c_loss: 0.7106 - val_loss: 9.6473 - val_output_react_loss: 0.5358 - val_output_mg_ph_loss: 0.6348 - val_output_ph_loss: 0.6050 - val_output_mg_c_loss: 0.5335 - val_output_c_loss: 0.5221\nEpoch 32/70\n120/120 - 7s - loss: 8.5007 - output_react_loss: 0.4516 - output_mg_ph_loss: 0.4651 - output_ph_loss: 0.7753 - output_mg_c_loss: 0.4870 - output_c_loss: 0.7067 - val_loss: 9.6539 - val_output_react_loss: 0.5371 - val_output_mg_ph_loss: 0.6349 - val_output_ph_loss: 0.6061 - val_output_mg_c_loss: 0.5329 - val_output_c_loss: 0.5231\nEpoch 33/70\n120/120 - 7s - loss: 8.2866 - output_react_loss: 0.4379 - output_mg_ph_loss: 0.4630 - output_ph_loss: 0.7441 - output_mg_c_loss: 0.4709 - output_c_loss: 0.6834 - val_loss: 9.6511 - val_output_react_loss: 0.5371 - val_output_mg_ph_loss: 0.6345 - val_output_ph_loss: 0.6042 - val_output_mg_c_loss: 0.5333 - val_output_c_loss: 0.5225\nEpoch 34/70\n120/120 - 7s - loss: 8.7293 - output_react_loss: 0.4604 - output_mg_ph_loss: 0.4769 - output_ph_loss: 0.8032 - output_mg_c_loss: 0.5024 - output_c_loss: 0.7279 - val_loss: 9.6316 - val_output_react_loss: 0.5350 - val_output_mg_ph_loss: 0.6340 - val_output_ph_loss: 0.6049 - val_output_mg_c_loss: 0.5317 - val_output_c_loss: 0.5225\nEpoch 35/70\n120/120 - 8s - loss: 8.2964 - output_react_loss: 0.4459 - output_mg_ph_loss: 0.4605 - output_ph_loss: 0.7388 - output_mg_c_loss: 0.4669 - output_c_loss: 0.6911 - val_loss: 9.6557 - val_output_react_loss: 0.5350 - val_output_mg_ph_loss: 0.6353 - val_output_ph_loss: 0.6062 - val_output_mg_c_loss: 0.5351 - val_output_c_loss: 0.5229\nEpoch 36/70\n120/120 - 7s - loss: 8.2189 - output_react_loss: 0.4395 - output_mg_ph_loss: 0.4531 - output_ph_loss: 0.7493 - output_mg_c_loss: 0.4652 - output_c_loss: 0.6804 - val_loss: 9.6282 - val_output_react_loss: 0.5339 - val_output_mg_ph_loss: 0.6332 - val_output_ph_loss: 0.6043 - val_output_mg_c_loss: 0.5331 - val_output_c_loss: 0.5227\nEpoch 37/70\n120/120 - 7s - loss: 8.5775 - output_react_loss: 0.4501 - output_mg_ph_loss: 0.4708 - output_ph_loss: 0.8032 - output_mg_c_loss: 0.4902 - output_c_loss: 0.7191 - val_loss: 9.6562 - val_output_react_loss: 0.5354 - val_output_mg_ph_loss: 0.6355 - val_output_ph_loss: 0.6038 - val_output_mg_c_loss: 0.5349 - val_output_c_loss: 0.5231\nEpoch 38/70\n120/120 - 7s - loss: 8.4730 - output_react_loss: 0.4513 - output_mg_ph_loss: 0.4600 - output_ph_loss: 0.7780 - output_mg_c_loss: 0.4836 - output_c_loss: 0.7199 - val_loss: 9.6546 - val_output_react_loss: 0.5350 - val_output_mg_ph_loss: 0.6344 - val_output_ph_loss: 0.6061 - val_output_mg_c_loss: 0.5357 - val_output_c_loss: 0.5230\nEpoch 39/70\n120/120 - 7s - loss: 8.4596 - output_react_loss: 0.4510 - output_mg_ph_loss: 0.4678 - output_ph_loss: 0.7828 - output_mg_c_loss: 0.4756 - output_c_loss: 0.7051 - 
val_loss: 9.6601 - val_output_react_loss: 0.5361 - val_output_mg_ph_loss: 0.6356 - val_output_ph_loss: 0.6055 - val_output_mg_c_loss: 0.5346 - val_output_c_loss: 0.5231\nEpoch 40/70\n120/120 - 7s - loss: 8.2446 - output_react_loss: 0.4359 - output_mg_ph_loss: 0.4577 - output_ph_loss: 0.7550 - output_mg_c_loss: 0.4659 - output_c_loss: 0.6921 - val_loss: 9.6603 - val_output_react_loss: 0.5364 - val_output_mg_ph_loss: 0.6357 - val_output_ph_loss: 0.6053 - val_output_mg_c_loss: 0.5343 - val_output_c_loss: 0.5229\nEpoch 41/70\n\nEpoch 00041: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.\n120/120 - 7s - loss: 8.2819 - output_react_loss: 0.4368 - output_mg_ph_loss: 0.4521 - output_ph_loss: 0.7532 - output_mg_c_loss: 0.4775 - output_c_loss: 0.6969 - val_loss: 9.6522 - val_output_react_loss: 0.5357 - val_output_mg_ph_loss: 0.6350 - val_output_ph_loss: 0.6048 - val_output_mg_c_loss: 0.5342 - val_output_c_loss: 0.5230\nEpoch 42/70\n120/120 - 8s - loss: 8.3503 - output_react_loss: 0.4433 - output_mg_ph_loss: 0.4589 - output_ph_loss: 0.7856 - output_mg_c_loss: 0.4683 - output_c_loss: 0.7119 - val_loss: 9.6408 - val_output_react_loss: 0.5349 - val_output_mg_ph_loss: 0.6343 - val_output_ph_loss: 0.6045 - val_output_mg_c_loss: 0.5335 - val_output_c_loss: 0.5227\nEpoch 43/70\n120/120 - 7s - loss: 8.1548 - output_react_loss: 0.4343 - output_mg_ph_loss: 0.4519 - output_ph_loss: 0.7305 - output_mg_c_loss: 0.4628 - output_c_loss: 0.6795 - val_loss: 9.6313 - val_output_react_loss: 0.5348 - val_output_mg_ph_loss: 0.6336 - val_output_ph_loss: 0.6038 - val_output_mg_c_loss: 0.5325 - val_output_c_loss: 0.5228\nEpoch 44/70\n120/120 - 7s - loss: 8.3227 - output_react_loss: 0.4437 - output_mg_ph_loss: 0.4545 - output_ph_loss: 0.7757 - output_mg_c_loss: 0.4715 - output_c_loss: 0.6985 - val_loss: 9.6283 - val_output_react_loss: 0.5348 - val_output_mg_ph_loss: 0.6333 - val_output_ph_loss: 0.6040 - val_output_mg_c_loss: 0.5323 - val_output_c_loss: 0.5225\nEpoch 45/70\n120/120 - 7s - loss: 8.2394 - output_react_loss: 0.4325 - output_mg_ph_loss: 0.4509 - output_ph_loss: 0.7723 - output_mg_c_loss: 0.4691 - output_c_loss: 0.7051 - val_loss: 9.6287 - val_output_react_loss: 0.5345 - val_output_mg_ph_loss: 0.6336 - val_output_ph_loss: 0.6037 - val_output_mg_c_loss: 0.5324 - val_output_c_loss: 0.5222\nEpoch 46/70\nRestoring model weights from the end of the best epoch.\n\nEpoch 00046: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.\n120/120 - 7s - loss: 8.3918 - output_react_loss: 0.4421 - output_mg_ph_loss: 0.4622 - output_ph_loss: 0.7785 - output_mg_c_loss: 0.4764 - output_c_loss: 0.7103 - val_loss: 9.6335 - val_output_react_loss: 0.5351 - val_output_mg_ph_loss: 0.6338 - val_output_ph_loss: 0.6040 - val_output_mg_c_loss: 0.5326 - val_output_c_loss: 0.5222\nEpoch 00046: early stopping\n\nFOLD: 5\nEpoch 1/70\n120/120 - 9s - loss: 18.8432 - output_react_loss: 0.9433 - output_mg_ph_loss: 1.1511 - output_ph_loss: 1.3544 - output_mg_c_loss: 1.1756 - output_c_loss: 1.1389 - val_loss: 14.2085 - val_output_react_loss: 0.7648 - val_output_mg_ph_loss: 0.9314 - val_output_ph_loss: 0.8861 - val_output_mg_c_loss: 0.8190 - val_output_c_loss: 0.7461\nEpoch 2/70\n120/120 - 7s - loss: 16.0138 - output_react_loss: 0.8061 - output_mg_ph_loss: 0.9830 - output_ph_loss: 1.1564 - output_mg_c_loss: 0.9819 - output_c_loss: 1.0026 - val_loss: 12.8593 - val_output_react_loss: 0.7009 - val_output_mg_ph_loss: 0.8436 - val_output_ph_loss: 0.7988 - val_output_mg_c_loss: 0.7301 - val_output_c_loss: 0.6877\nEpoch 
3/70\n120/120 - 7s - loss: 15.2234 - output_react_loss: 0.7824 - output_mg_ph_loss: 0.9249 - output_ph_loss: 1.1392 - output_mg_c_loss: 0.9140 - output_c_loss: 0.9777 - val_loss: 11.8569 - val_output_react_loss: 0.6642 - val_output_mg_ph_loss: 0.7737 - val_output_ph_loss: 0.7426 - val_output_mg_c_loss: 0.6595 - val_output_c_loss: 0.6269\nEpoch 4/70\n120/120 - 7s - loss: 14.1720 - output_react_loss: 0.7421 - output_mg_ph_loss: 0.8500 - output_ph_loss: 1.0449 - output_mg_c_loss: 0.8504 - output_c_loss: 0.9149 - val_loss: 11.1196 - val_output_react_loss: 0.6261 - val_output_mg_ph_loss: 0.7233 - val_output_ph_loss: 0.7041 - val_output_mg_c_loss: 0.6148 - val_output_c_loss: 0.5946\nEpoch 5/70\n120/120 - 7s - loss: 13.7264 - output_react_loss: 0.7250 - output_mg_ph_loss: 0.8270 - output_ph_loss: 1.0295 - output_mg_c_loss: 0.8119 - output_c_loss: 0.8772 - val_loss: 10.8129 - val_output_react_loss: 0.6061 - val_output_mg_ph_loss: 0.7036 - val_output_ph_loss: 0.6890 - val_output_mg_c_loss: 0.5997 - val_output_c_loss: 0.5767\nEpoch 6/70\n120/120 - 7s - loss: 13.1496 - output_react_loss: 0.6969 - output_mg_ph_loss: 0.7908 - output_ph_loss: 0.9785 - output_mg_c_loss: 0.7785 - output_c_loss: 0.8399 - val_loss: 10.6063 - val_output_react_loss: 0.5909 - val_output_mg_ph_loss: 0.6872 - val_output_ph_loss: 0.6730 - val_output_mg_c_loss: 0.5937 - val_output_c_loss: 0.5744\nEpoch 7/70\n120/120 - 7s - loss: 12.8049 - output_react_loss: 0.6782 - output_mg_ph_loss: 0.7691 - output_ph_loss: 0.9685 - output_mg_c_loss: 0.7524 - output_c_loss: 0.8384 - val_loss: 10.5435 - val_output_react_loss: 0.5766 - val_output_mg_ph_loss: 0.6927 - val_output_ph_loss: 0.6715 - val_output_mg_c_loss: 0.5912 - val_output_c_loss: 0.5698\nEpoch 8/70\n120/120 - 7s - loss: 12.7572 - output_react_loss: 0.6795 - output_mg_ph_loss: 0.7605 - output_ph_loss: 0.9872 - output_mg_c_loss: 0.7424 - output_c_loss: 0.8578 - val_loss: 10.2874 - val_output_react_loss: 0.5732 - val_output_mg_ph_loss: 0.6687 - val_output_ph_loss: 0.6655 - val_output_mg_c_loss: 0.5682 - val_output_c_loss: 0.5716\nEpoch 9/70\n120/120 - 7s - loss: 11.8830 - output_react_loss: 0.6348 - output_mg_ph_loss: 0.7144 - output_ph_loss: 0.8981 - output_mg_c_loss: 0.6910 - output_c_loss: 0.7837 - val_loss: 10.3223 - val_output_react_loss: 0.5685 - val_output_mg_ph_loss: 0.6788 - val_output_ph_loss: 0.6494 - val_output_mg_c_loss: 0.5745 - val_output_c_loss: 0.5637\nEpoch 10/70\n120/120 - 7s - loss: 11.9052 - output_react_loss: 0.6302 - output_mg_ph_loss: 0.7089 - output_ph_loss: 0.9132 - output_mg_c_loss: 0.6973 - output_c_loss: 0.8100 - val_loss: 10.0437 - val_output_react_loss: 0.5531 - val_output_mg_ph_loss: 0.6522 - val_output_ph_loss: 0.6477 - val_output_mg_c_loss: 0.5620 - val_output_c_loss: 0.5595\nEpoch 11/70\n120/120 - 7s - loss: 11.5718 - output_react_loss: 0.6247 - output_mg_ph_loss: 0.6874 - output_ph_loss: 0.8982 - output_mg_c_loss: 0.6668 - output_c_loss: 0.7794 - val_loss: 9.9481 - val_output_react_loss: 0.5540 - val_output_mg_ph_loss: 0.6503 - val_output_ph_loss: 0.6296 - val_output_mg_c_loss: 0.5501 - val_output_c_loss: 0.5465\nEpoch 12/70\n120/120 - 7s - loss: 11.3190 - output_react_loss: 0.6135 - output_mg_ph_loss: 0.6580 - output_ph_loss: 0.8829 - output_mg_c_loss: 0.6596 - output_c_loss: 0.7807 - val_loss: 10.0090 - val_output_react_loss: 0.5535 - val_output_mg_ph_loss: 0.6546 - val_output_ph_loss: 0.6358 - val_output_mg_c_loss: 0.5575 - val_output_c_loss: 0.5453\nEpoch 13/70\n120/120 - 7s - loss: 11.0159 - output_react_loss: 0.5889 - output_mg_ph_loss: 0.6531 
- output_ph_loss: 0.8692 - output_mg_c_loss: 0.6361 - output_c_loss: 0.7563 - val_loss: 9.8895 - val_output_react_loss: 0.5465 - val_output_mg_ph_loss: 0.6452 - val_output_ph_loss: 0.6321 - val_output_mg_c_loss: 0.5512 - val_output_c_loss: 0.5427\nEpoch 14/70\n120/120 - 7s - loss: 10.9381 - output_react_loss: 0.5808 - output_mg_ph_loss: 0.6465 - output_ph_loss: 0.8761 - output_mg_c_loss: 0.6326 - output_c_loss: 0.7622 - val_loss: 9.8711 - val_output_react_loss: 0.5371 - val_output_mg_ph_loss: 0.6476 - val_output_ph_loss: 0.6277 - val_output_mg_c_loss: 0.5538 - val_output_c_loss: 0.5507\nEpoch 15/70\n120/120 - 7s - loss: 10.8806 - output_react_loss: 0.5902 - output_mg_ph_loss: 0.6278 - output_ph_loss: 0.8781 - output_mg_c_loss: 0.6261 - output_c_loss: 0.7824 - val_loss: 9.8377 - val_output_react_loss: 0.5338 - val_output_mg_ph_loss: 0.6495 - val_output_ph_loss: 0.6229 - val_output_mg_c_loss: 0.5511 - val_output_c_loss: 0.5424\nEpoch 16/70\n120/120 - 7s - loss: 10.7370 - output_react_loss: 0.5765 - output_mg_ph_loss: 0.6226 - output_ph_loss: 0.8707 - output_mg_c_loss: 0.6185 - output_c_loss: 0.7784 - val_loss: 9.7408 - val_output_react_loss: 0.5333 - val_output_mg_ph_loss: 0.6409 - val_output_ph_loss: 0.6253 - val_output_mg_c_loss: 0.5412 - val_output_c_loss: 0.5386\nEpoch 17/70\n120/120 - 7s - loss: 10.2871 - output_react_loss: 0.5530 - output_mg_ph_loss: 0.5953 - output_ph_loss: 0.8416 - output_mg_c_loss: 0.5929 - output_c_loss: 0.7399 - val_loss: 9.8158 - val_output_react_loss: 0.5347 - val_output_mg_ph_loss: 0.6468 - val_output_ph_loss: 0.6233 - val_output_mg_c_loss: 0.5481 - val_output_c_loss: 0.5446\nEpoch 18/70\n120/120 - 7s - loss: 10.0054 - output_react_loss: 0.5354 - output_mg_ph_loss: 0.5822 - output_ph_loss: 0.8155 - output_mg_c_loss: 0.5732 - output_c_loss: 0.7356 - val_loss: 9.8819 - val_output_react_loss: 0.5362 - val_output_mg_ph_loss: 0.6561 - val_output_ph_loss: 0.6173 - val_output_mg_c_loss: 0.5523 - val_output_c_loss: 0.5414\nEpoch 19/70\n120/120 - 7s - loss: 9.9816 - output_react_loss: 0.5314 - output_mg_ph_loss: 0.5774 - output_ph_loss: 0.8320 - output_mg_c_loss: 0.5729 - output_c_loss: 0.7408 - val_loss: 9.7789 - val_output_react_loss: 0.5326 - val_output_mg_ph_loss: 0.6391 - val_output_ph_loss: 0.6246 - val_output_mg_c_loss: 0.5506 - val_output_c_loss: 0.5426\nEpoch 20/70\n120/120 - 7s - loss: 9.9156 - output_react_loss: 0.5311 - output_mg_ph_loss: 0.5711 - output_ph_loss: 0.8185 - output_mg_c_loss: 0.5685 - output_c_loss: 0.7434 - val_loss: 9.6630 - val_output_react_loss: 0.5267 - val_output_mg_ph_loss: 0.6328 - val_output_ph_loss: 0.6105 - val_output_mg_c_loss: 0.5438 - val_output_c_loss: 0.5360\nEpoch 21/70\n120/120 - 7s - loss: 9.6682 - output_react_loss: 0.5154 - output_mg_ph_loss: 0.5571 - output_ph_loss: 0.8037 - output_mg_c_loss: 0.5547 - output_c_loss: 0.7282 - val_loss: 9.6808 - val_output_react_loss: 0.5304 - val_output_mg_ph_loss: 0.6355 - val_output_ph_loss: 0.6175 - val_output_mg_c_loss: 0.5399 - val_output_c_loss: 0.5344\nEpoch 22/70\n120/120 - 7s - loss: 9.8875 - output_react_loss: 0.5281 - output_mg_ph_loss: 0.5621 - output_ph_loss: 0.8478 - output_mg_c_loss: 0.5675 - output_c_loss: 0.7511 - val_loss: 9.7280 - val_output_react_loss: 0.5334 - val_output_mg_ph_loss: 0.6380 - val_output_ph_loss: 0.6153 - val_output_mg_c_loss: 0.5433 - val_output_c_loss: 0.5390\nEpoch 23/70\n120/120 - 7s - loss: 9.6623 - output_react_loss: 0.5105 - output_mg_ph_loss: 0.5530 - output_ph_loss: 0.8245 - output_mg_c_loss: 0.5558 - output_c_loss: 0.7415 - val_loss: 9.6662 - 
val_output_react_loss: 0.5312 - val_output_mg_ph_loss: 0.6365 - val_output_ph_loss: 0.6104 - val_output_mg_c_loss: 0.5366 - val_output_c_loss: 0.5344\nEpoch 24/70\n120/120 - 7s - loss: 9.7520 - output_react_loss: 0.5211 - output_mg_ph_loss: 0.5500 - output_ph_loss: 0.8344 - output_mg_c_loss: 0.5615 - output_c_loss: 0.7542 - val_loss: 9.7427 - val_output_react_loss: 0.5299 - val_output_mg_ph_loss: 0.6405 - val_output_ph_loss: 0.6269 - val_output_mg_c_loss: 0.5455 - val_output_c_loss: 0.5359\nEpoch 25/70\n\nEpoch 00025: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.\n120/120 - 7s - loss: 9.0355 - output_react_loss: 0.4762 - output_mg_ph_loss: 0.5195 - output_ph_loss: 0.7767 - output_mg_c_loss: 0.5177 - output_c_loss: 0.6920 - val_loss: 9.6684 - val_output_react_loss: 0.5287 - val_output_mg_ph_loss: 0.6367 - val_output_ph_loss: 0.6168 - val_output_mg_c_loss: 0.5378 - val_output_c_loss: 0.5352\nEpoch 26/70\n120/120 - 7s - loss: 9.1590 - output_react_loss: 0.4879 - output_mg_ph_loss: 0.5142 - output_ph_loss: 0.8015 - output_mg_c_loss: 0.5233 - output_c_loss: 0.7304 - val_loss: 9.4810 - val_output_react_loss: 0.5175 - val_output_mg_ph_loss: 0.6254 - val_output_ph_loss: 0.6005 - val_output_mg_c_loss: 0.5281 - val_output_c_loss: 0.5252\nEpoch 27/70\n120/120 - 7s - loss: 8.7686 - output_react_loss: 0.4634 - output_mg_ph_loss: 0.4974 - output_ph_loss: 0.7638 - output_mg_c_loss: 0.5010 - output_c_loss: 0.6955 - val_loss: 9.4673 - val_output_react_loss: 0.5174 - val_output_mg_ph_loss: 0.6232 - val_output_ph_loss: 0.6001 - val_output_mg_c_loss: 0.5279 - val_output_c_loss: 0.5252\nEpoch 28/70\n120/120 - 7s - loss: 8.9437 - output_react_loss: 0.4749 - output_mg_ph_loss: 0.4978 - output_ph_loss: 0.7936 - output_mg_c_loss: 0.5135 - output_c_loss: 0.7189 - val_loss: 9.4635 - val_output_react_loss: 0.5166 - val_output_mg_ph_loss: 0.6231 - val_output_ph_loss: 0.6001 - val_output_mg_c_loss: 0.5279 - val_output_c_loss: 0.5253\nEpoch 29/70\n120/120 - 7s - loss: 8.6557 - output_react_loss: 0.4530 - output_mg_ph_loss: 0.4880 - output_ph_loss: 0.7661 - output_mg_c_loss: 0.4981 - output_c_loss: 0.6944 - val_loss: 9.4749 - val_output_react_loss: 0.5171 - val_output_mg_ph_loss: 0.6236 - val_output_ph_loss: 0.6023 - val_output_mg_c_loss: 0.5288 - val_output_c_loss: 0.5253\nEpoch 30/70\n120/120 - 7s - loss: 8.7051 - output_react_loss: 0.4565 - output_mg_ph_loss: 0.4898 - output_ph_loss: 0.7777 - output_mg_c_loss: 0.4983 - output_c_loss: 0.7042 - val_loss: 9.4515 - val_output_react_loss: 0.5166 - val_output_mg_ph_loss: 0.6226 - val_output_ph_loss: 0.5998 - val_output_mg_c_loss: 0.5264 - val_output_c_loss: 0.5240\nEpoch 31/70\n120/120 - 7s - loss: 8.9629 - output_react_loss: 0.4777 - output_mg_ph_loss: 0.5036 - output_ph_loss: 0.8067 - output_mg_c_loss: 0.5064 - output_c_loss: 0.7174 - val_loss: 9.4477 - val_output_react_loss: 0.5170 - val_output_mg_ph_loss: 0.6219 - val_output_ph_loss: 0.5990 - val_output_mg_c_loss: 0.5258 - val_output_c_loss: 0.5251\nEpoch 32/70\n120/120 - 7s - loss: 8.7984 - output_react_loss: 0.4575 - output_mg_ph_loss: 0.4918 - output_ph_loss: 0.7884 - output_mg_c_loss: 0.5084 - output_c_loss: 0.7214 - val_loss: 9.4337 - val_output_react_loss: 0.5158 - val_output_mg_ph_loss: 0.6212 - val_output_ph_loss: 0.5992 - val_output_mg_c_loss: 0.5252 - val_output_c_loss: 0.5236\nEpoch 33/70\n120/120 - 7s - loss: 8.4329 - output_react_loss: 0.4452 - output_mg_ph_loss: 0.4725 - output_ph_loss: 0.7501 - output_mg_c_loss: 0.4826 - output_c_loss: 0.6811 - val_loss: 9.4742 - 
val_output_react_loss: 0.5170 - val_output_mg_ph_loss: 0.6233 - val_output_ph_loss: 0.6011 - val_output_mg_c_loss: 0.5292 - val_output_c_loss: 0.5257\nEpoch 34/70\n120/120 - 7s - loss: 8.4733 - output_react_loss: 0.4497 - output_mg_ph_loss: 0.4747 - output_ph_loss: 0.7513 - output_mg_c_loss: 0.4830 - output_c_loss: 0.6848 - val_loss: 9.4546 - val_output_react_loss: 0.5158 - val_output_mg_ph_loss: 0.6233 - val_output_ph_loss: 0.5992 - val_output_mg_c_loss: 0.5270 - val_output_c_loss: 0.5248\nEpoch 35/70\n120/120 - 7s - loss: 8.6710 - output_react_loss: 0.4548 - output_mg_ph_loss: 0.4849 - output_ph_loss: 0.7827 - output_mg_c_loss: 0.4975 - output_c_loss: 0.7023 - val_loss: 9.4694 - val_output_react_loss: 0.5172 - val_output_mg_ph_loss: 0.6239 - val_output_ph_loss: 0.6015 - val_output_mg_c_loss: 0.5274 - val_output_c_loss: 0.5252\nEpoch 36/70\n120/120 - 7s - loss: 8.5291 - output_react_loss: 0.4527 - output_mg_ph_loss: 0.4738 - output_ph_loss: 0.7536 - output_mg_c_loss: 0.4881 - output_c_loss: 0.7024 - val_loss: 9.4771 - val_output_react_loss: 0.5183 - val_output_mg_ph_loss: 0.6242 - val_output_ph_loss: 0.6003 - val_output_mg_c_loss: 0.5279 - val_output_c_loss: 0.5245\nEpoch 37/70\n\nEpoch 00037: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.\n120/120 - 7s - loss: 8.6339 - output_react_loss: 0.4487 - output_mg_ph_loss: 0.4831 - output_ph_loss: 0.7811 - output_mg_c_loss: 0.4969 - output_c_loss: 0.7093 - val_loss: 9.4703 - val_output_react_loss: 0.5176 - val_output_mg_ph_loss: 0.6239 - val_output_ph_loss: 0.6007 - val_output_mg_c_loss: 0.5276 - val_output_c_loss: 0.5247\nEpoch 38/70\n120/120 - 7s - loss: 8.7885 - output_react_loss: 0.4614 - output_mg_ph_loss: 0.4892 - output_ph_loss: 0.8059 - output_mg_c_loss: 0.5014 - output_c_loss: 0.7223 - val_loss: 9.4637 - val_output_react_loss: 0.5170 - val_output_mg_ph_loss: 0.6232 - val_output_ph_loss: 0.6002 - val_output_mg_c_loss: 0.5276 - val_output_c_loss: 0.5243\nEpoch 39/70\n120/120 - 7s - loss: 8.5917 - output_react_loss: 0.4541 - output_mg_ph_loss: 0.4766 - output_ph_loss: 0.7722 - output_mg_c_loss: 0.4935 - output_c_loss: 0.6990 - val_loss: 9.4545 - val_output_react_loss: 0.5165 - val_output_mg_ph_loss: 0.6231 - val_output_ph_loss: 0.5990 - val_output_mg_c_loss: 0.5268 - val_output_c_loss: 0.5238\nEpoch 40/70\n120/120 - 7s - loss: 8.2853 - output_react_loss: 0.4366 - output_mg_ph_loss: 0.4651 - output_ph_loss: 0.7422 - output_mg_c_loss: 0.4689 - output_c_loss: 0.6898 - val_loss: 9.4478 - val_output_react_loss: 0.5163 - val_output_mg_ph_loss: 0.6224 - val_output_ph_loss: 0.5990 - val_output_mg_c_loss: 0.5263 - val_output_c_loss: 0.5237\nEpoch 41/70\n120/120 - 7s - loss: 8.5584 - output_react_loss: 0.4462 - output_mg_ph_loss: 0.4803 - output_ph_loss: 0.7702 - output_mg_c_loss: 0.4891 - output_c_loss: 0.7102 - val_loss: 9.4449 - val_output_react_loss: 0.5162 - val_output_mg_ph_loss: 0.6222 - val_output_ph_loss: 0.5991 - val_output_mg_c_loss: 0.5260 - val_output_c_loss: 0.5238\nEpoch 42/70\nRestoring model weights from the end of the best epoch.\n\nEpoch 00042: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.\n120/120 - 7s - loss: 8.5515 - output_react_loss: 0.4497 - output_mg_ph_loss: 0.4755 - output_ph_loss: 0.7782 - output_mg_c_loss: 0.4911 - output_c_loss: 0.6923 - val_loss: 9.4476 - val_output_react_loss: 0.5164 - val_output_mg_ph_loss: 0.6225 - val_output_ph_loss: 0.5988 - val_output_mg_c_loss: 0.5262 - val_output_c_loss: 0.5236\nEpoch 00042: early stopping\n" ] ], [ [ "## Model loss graph", 
"_____no_output_____" ] ], [ [ "for fold, history in enumerate(history_list):\n print(f'\\nFOLD: {fold+1}')\n min_valid_idx = np.array(history['val_loss']).argmin()\n print(f\"Train {np.array(history['loss'])[min_valid_idx]:.5f} Validation {np.array(history['val_loss'])[min_valid_idx]:.5f}\")\n\nplot_metrics_agg(history_list)", "\nFOLD: 1\nTrain 8.10129 Validation 9.73815\n\nFOLD: 2\nTrain 8.02999 Validation 9.23740\n\nFOLD: 3\nTrain 8.19604 Validation 9.56927\n\nFOLD: 4\nTrain 8.21887 Validation 9.62825\n\nFOLD: 5\nTrain 8.79840 Validation 9.43370\n" ] ], [ [ "# Post-processing", "_____no_output_____" ] ], [ [ "# Assign preds to OOF set\nfor idx, col in enumerate(pred_cols):\n val = oof_preds[:, :, idx]\n oof = oof.assign(**{f'{col}_pred': list(val)})\n \noof.to_csv('oof.csv', index=False)\n\noof_preds_dict = {}\nfor col in pred_cols:\n oof_preds_dict[col] = oof_preds[:, :, idx]\n\n# Assign values to test set\npreds_ls = []\n\nfor df, preds in [(public_test, test_public_preds), (private_test, test_private_preds)]:\n for i, uid in enumerate(df.id):\n single_pred = preds[i]\n\n single_df = pd.DataFrame(single_pred, columns=pred_cols)\n single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]\n\n preds_ls.append(single_df)\n\npreds_df = pd.concat(preds_ls)\n\n# Averaging over augmented predictions\npreds_df = pd.concat(preds_ls).groupby('id_seqpos').mean().reset_index()", "_____no_output_____" ] ], [ [ "# Model evaluation", "_____no_output_____" ] ], [ [ "y_true_dict = get_targets_dict(train, pred_cols, train.index)\ny_true = np.array([y_true_dict[col] for col in pred_cols]).transpose((1, 2, 0, 3)).reshape(oof_preds.shape)\n\ndisplay(evaluate_model(train, y_true, oof_preds, pred_cols))\ndisplay(evaluate_model(train, y_true, oof_preds, pred_cols, use_cols=['reactivity', 'deg_Mg_pH10', 'deg_Mg_50C']))", "_____no_output_____" ] ], [ [ "# Visualize test predictions", "_____no_output_____" ] ], [ [ "submission = pd.read_csv(database_base_path + 'sample_submission.csv')\nsubmission = submission[['id_seqpos']].merge(preds_df, on=['id_seqpos'])", "_____no_output_____" ] ], [ [ "# Test set predictions", "_____no_output_____" ] ], [ [ "display(submission.head(10))\ndisplay(submission.describe())\n\nsubmission.to_csv('submission.csv', index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb3a832918b243ab5728efb6134f258cb73e4ca0
10,882
ipynb
Jupyter Notebook
Lecture_Note/03. CNN Application/02.Custom-Dataset.ipynb
JoshWorld/Deep_Learning_starting_with_the_latest_papers
2784256868a0bedcf84b068a4032b75e4e3f7a64
[ "MIT" ]
81
2017-11-07T04:30:16.000Z
2022-02-05T13:47:10.000Z
Lecture_Note/03. CNN Application/02.Custom-Dataset.ipynb
JoshWorld/Deep_Learning_starting_with_the_latest_papers
2784256868a0bedcf84b068a4032b75e4e3f7a64
[ "MIT" ]
null
null
null
Lecture_Note/03. CNN Application/02.Custom-Dataset.ipynb
JoshWorld/Deep_Learning_starting_with_the_latest_papers
2784256868a0bedcf84b068a4032b75e4e3f7a64
[ "MIT" ]
25
2018-04-04T06:54:42.000Z
2022-03-25T09:29:28.000Z
28.046392
173
0.534001
[ [ [ "# 02. Custom Dataset 만들어보기\n- Dataset Generation!\n- 폴더별로 사진들이 모여있다면, 그 dataset을 우리가 원하는 형태로 바꿔봅시다!", "_____no_output_____" ] ], [ [ "import numpy as np\nimport os\nfrom scipy.misc import imread, imresize\nimport matplotlib.pyplot as plt\n%matplotlib inline \nprint (\"Package loaded\") \ncwd = os.getcwd()\nprint (\"Current folder is %s\" % (cwd) )", "Package loaded\nCurrent folder is /Users/kyle/Dropbox/workspace/Deep_Learning_starting_with_the_latest_papers/Lecture_Note/03. CNN Application\n" ], [ "# 학습할 폴더 경로 설정\npaths = {\"../../img_dataset/celebs/Arnold_Schwarzenegger\"\n , \"../../img_dataset/celebs/Junichiro_Koizumi\"\n , \"../../img_dataset/celebs/Vladimir_Putin\"\n , \"../../img_dataset/celebs/George_W_Bush\"}\ncategories = [\"Arnold\",\"Koizumi\",\"Putin\",\"Bush\"]\n# The reshape size\nimgsize = [64, 64]\n# Grayscale\nuse_gray = 1\n# Save name\ndata_name = \"custom_data\"\n\nprint (\"Your images should be at\")\nfor i, path in enumerate(paths):\n print (\" [%d/%d] %s/%s\" % (i, len(paths), cwd, path)) \n\nprint (\"Data will be saved to %s\" \n % (cwd + '/data/' + data_name + '.npz'))", "Your images should be at\n [0/4] /Users/kyle/Dropbox/workspace/Deep_Learning_starting_with_the_latest_papers/Lecture_Note/03. CNN Application/../../img_dataset/celebs/Arnold_Schwarzenegger\n [1/4] /Users/kyle/Dropbox/workspace/Deep_Learning_starting_with_the_latest_papers/Lecture_Note/03. CNN Application/../../img_dataset/celebs/Junichiro_Koizumi\n [2/4] /Users/kyle/Dropbox/workspace/Deep_Learning_starting_with_the_latest_papers/Lecture_Note/03. CNN Application/../../img_dataset/celebs/George_W_Bush\n [3/4] /Users/kyle/Dropbox/workspace/Deep_Learning_starting_with_the_latest_papers/Lecture_Note/03. CNN Application/../../img_dataset/celebs/Vladimir_Putin\nData will be saved to /Users/kyle/Dropbox/workspace/Deep_Learning_starting_with_the_latest_papers/Lecture_Note/03. 
CNN Application/data/custom_data.npz\n" ] ], [ [ "# RGB 2 GRAY", "_____no_output_____" ] ], [ [ "def rgb2gray(rgb):\n    if len(rgb.shape) == 3:\n        return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n    else:\n        # print (\"Current Image is GRAY!\")\n        return rgb", "_____no_output_____" ] ], [ [ "# LOAD Image", "_____no_output_____" ] ], [ [ "nclass = len(paths)\nvalid_exts = [\".jpg\",\".gif\",\".png\",\".tga\", \".jpeg\"]\nimgcnt = 0\nfor i, relpath in zip(range(nclass), paths):\n    path = cwd + \"/\" + relpath\n    flist = os.listdir(path)\n    for f in flist:\n        if os.path.splitext(f)[1].lower() not in valid_exts:\n            continue\n        fullpath = os.path.join(path, f)\n        currimg = imread(fullpath)\n        # Convert to grayscale \n        if use_gray:\n            grayimg = rgb2gray(currimg)\n        else:\n            grayimg = currimg\n        # Reshape\n        graysmall = imresize(grayimg, [imgsize[0], imgsize[1]])/255.\n        grayvec = np.reshape(graysmall, (1, -1))\n        # Save \n        curr_label = np.eye(nclass, nclass)[i:i+1, :]\n        # np.eye : builds an identity matrix -> used to make a one-hot label vector\n        if imgcnt == 0:\n            totalimg = grayvec\n            totallabel = curr_label\n        else:\n            totalimg = np.concatenate((totalimg, grayvec), axis=0)\n            totallabel = np.concatenate((totallabel, curr_label), axis=0)\n        imgcnt = imgcnt + 1\nprint (\"Total %d images loaded.\" % (imgcnt))", "_____no_output_____" ], [ "def print_shape(string, x):\n    print (\"Shape of '%s' is %s\" % (string, x.shape,))\n    \nrandidx = np.random.randint(imgcnt, size=imgcnt)\ntrainidx = randidx[0:int(3*imgcnt/5)]\ntestidx = randidx[int(3*imgcnt/5):imgcnt]\ntrainimg = totalimg[trainidx, :]\ntrainlabel = totallabel[trainidx, :]\ntestimg = totalimg[testidx, :]\ntestlabel = totallabel[testidx, :]\nprint_shape(\"trainimg\", trainimg)\nprint_shape(\"trainlabel\", trainlabel)\nprint_shape(\"testimg\", testimg)\nprint_shape(\"testlabel\", testlabel)", "_____no_output_____" ] ], [ [ "# Save", "_____no_output_____" ] ], [ [ "savepath = cwd + \"/data/\" + data_name + \".npz\"\nnp.savez(savepath, trainimg=trainimg, trainlabel=trainlabel\n , testimg=testimg, testlabel=testlabel, imgsize=imgsize, use_gray=use_gray)\nprint (\"Saved to %s\" % (savepath))", "_____no_output_____" ] ], [ [ "# Load to Check", "_____no_output_____" ] ], [ [ "# Load them!\ncwd = os.getcwd()\nloadpath = cwd + \"/data/\" + data_name + \".npz\"\nl = np.load(loadpath)\n\n# See what's in here\nl.files\n\n# Parse data\ntrainimg_loaded = l['trainimg']\ntrainlabel_loaded = l['trainlabel']\ntestimg_loaded = l['testimg']\ntestlabel_loaded = l['testlabel']\n\nprint (\"%d train images loaded\" % (trainimg_loaded.shape[0]))\nprint (\"%d test images loaded\" % (testimg_loaded.shape[0]))\nprint (\"Loaded from %s\" % (savepath))", "_____no_output_____" ] ], [ [ "# Plot random train images", "_____no_output_____" ] ], [ [ "# Load them!\ncwd = os.getcwd()\nloadpath = cwd + \"/data/\" + data_name + \".npz\"\nl = np.load(loadpath)\n\n# See what's in here\nl.files\n\n# Parse data\ntrainimg_loaded = l['trainimg']\ntrainlabel_loaded = l['trainlabel']\ntestimg_loaded = l['testimg']\ntestlabel_loaded = l['testlabel']\n\nprint (\"%d train images loaded\" % (trainimg_loaded.shape[0]))\nprint (\"%d test images loaded\" % (testimg_loaded.shape[0]))\nprint (\"Loaded from %s\" % (savepath))", "_____no_output_____" ] ], [ [ "# Plot random test images", "_____no_output_____" ] ], [ [ "# Do batch stuff using loaded data \nntest_loaded = testimg_loaded.shape[0]\nbatch_size = 3\nrandidx = np.random.randint(ntest_loaded, size=batch_size)\nfor i in randidx: \n    currimg = np.reshape(testimg_loaded[i, :], (imgsize[0], -1))\n    
currlabel_onehot = testlabel_loaded[i, :]\n    currlabel = np.argmax(currlabel_onehot) \n    \n    if use_gray:\n        currimg = np.reshape(testimg_loaded[i, :], (imgsize[0], -1))\n        plt.matshow(currimg, cmap=plt.get_cmap('gray'))\n        plt.colorbar()\n    else:\n        currimg = np.reshape(testimg_loaded[i, :], (imgsize[0], imgsize[1], 3))\n        plt.imshow(currimg)\n    title_string = \"[%d] %d-class\" % (i, currlabel)\n    plt.title(title_string) \n    plt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]