hexsha stringlengths 40 40 | size int64 6 14.9M | ext stringclasses 1 value | lang stringclasses 1 value | max_stars_repo_path stringlengths 6 260 | max_stars_repo_name stringlengths 6 119 | max_stars_repo_head_hexsha stringlengths 40 41 | max_stars_repo_licenses list | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 6 260 | max_issues_repo_name stringlengths 6 119 | max_issues_repo_head_hexsha stringlengths 40 41 | max_issues_repo_licenses list | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 6 260 | max_forks_repo_name stringlengths 6 119 | max_forks_repo_head_hexsha stringlengths 40 41 | max_forks_repo_licenses list | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | avg_line_length float64 2 1.04M | max_line_length int64 2 11.2M | alphanum_fraction float64 0 1 | cells list | cell_types list | cell_type_groups list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ec8b8afbac3bea5d927259e4e9a1b08b79508496 | 80,311 | ipynb | Jupyter Notebook | Visualize_Model.ipynb | yuki678/CarND-Behavioral-Cloning-P3 | 294bf4f9782a6a38cd69ef5c743e54820d6ae1b8 | [
"MIT"
] | null | null | null | Visualize_Model.ipynb | yuki678/CarND-Behavioral-Cloning-P3 | 294bf4f9782a6a38cd69ef5c743e54820d6ae1b8 | [
"MIT"
] | null | null | null | Visualize_Model.ipynb | yuki678/CarND-Behavioral-Cloning-P3 | 294bf4f9782a6a38cd69ef5c743e54820d6ae1b8 | [
"MIT"
] | null | null | null | 249.413043 | 38,436 | 0.806838 | [
[
[
"from IPython.display import display, Image, SVG\nfrom keras.models import load_model, Model\nimport graphviz as gv\nimport functools\nimport json\nimport matplotlib.pyplot as plt\nfrom functools import reduce",
"_____no_output_____"
],
[
"def loadModel(modelPath):\n \"\"\"\n Loads the model `modelPath`.\n \"\"\"\n model = load_model(modelPath)\n return model",
"_____no_output_____"
],
[
"def getLayerConfig(layer):\n \"\"\"\n Extract configuration from `layer`.\n \"\"\"\n layerType = layer.__class__.__name__\n output = { 'type': layerType }\n config = layer.get_config()\n if layerType == 'Lambda':\n _, x, y, d = config['batch_input_shape']\n output['input'] = (x, y, d)\n if layerType == 'Cropping2D':\n output['cropping'] = config['cropping']\n if layerType == 'Convolution2D':\n output['activation'] = config['activation']\n output['strides'] = config['subsample']\n output['filters'] = config['nb_filter']\n output['kernel'] = ( config['nb_col'], config['nb_row'] )\n if layerType == 'Dense':\n output['activation'] = config['activation']\n output['output'] = config['output_dim']\n output['input'] = config['input_dim']\n return output\n\n\ndef compressLayers(layers):\n \"\"\"\n Compress the common layers into a single structure for visualization.\n \"\"\"\n def reductor(acc, layer):\n if len(acc) == 0:\n acc.append(layer)\n return acc\n \n last = acc[-1]\n if last['type'] == layer['type']:\n try:\n last['items'].append(layer)\n except KeyError:\n acc[-1] = { 'type': layer['type'], 'items': [last, layer]}\n else:\n acc.append(layer)\n return acc\n \n return reduce(reductor, layers, [])",
"_____no_output_____"
],
[
"def createNode(nodeName, layer, g, style='filled', fillcolor='white', fontcolor='black'):\n \"\"\"\n Creates a node with the information from `layer` in the `g`.\n \"\"\"\n type = layer['type']\n label = type + '\\n'\n for key, value in layer.items():\n if (key != 'type'):\n label += '{}: {}\\n'.format(key, value)\n g.node(nodeName, label=label, style=style, fillcolor=fillcolor, fontcolor=fontcolor)\n\ndef visualizeLayers(layers, outputPath):\n \"\"\"\n Visualize `layers` and store the image at `outputPath`.\n \"\"\"\n fillcolors = { 'Convolution2D':'#AAAAAA', 'Dense':'#006699' }\n g = gv.Digraph(format='png')\n for index, layer in enumerate(layers):\n nodeName = str(index)\n try:\n items = layer['items']\n subGraphType = layer['type']\n fillcolor = fillcolors[subGraphType]\n g.node(nodeName, label=subGraphType, style='filled', fillcolor=fillcolor, fontcolor='white')\n subG = gv.Digraph(format='png')\n for i, subLayer in enumerate(items):\n subNodeName = nodeName + str(i)\n createNode(subNodeName, subLayer, subG, fillcolor=fillcolor, fontcolor='white')\n if i != 0:\n subG.edge(nodeName + str(i - 1), subNodeName)\n \n g.subgraph(subG)\n \n except KeyError:\n createNode(nodeName, layer, g)\n if index != 0:\n g.edge(str(index - 1), nodeName)\n \n styles = {\n 'graph': {\n },\n 'nodes': {\n 'fontname': 'Helvetica',\n 'shape': 'rectangle'\n },\n 'edges': {\n 'arrowhead': 'open'\n }\n }\n g.graph_attr.update(\n ('graph' in styles and styles['graph']) or {}\n )\n g.node_attr.update(\n ('nodes' in styles and styles['nodes']) or {}\n )\n g.edge_attr.update(\n ('edges' in styles and styles['edges']) or {}\n )\n \n g.render(outputPath)\n figure = Image(outputPath + '.png')\n display(figure)",
"_____no_output_____"
],
[
"def visualizeModel(modelPath, imagePath):\n \"\"\"\n Visualize the model found at `modelPath` to a SVG at `imagePath`\n \"\"\"\n model = loadModel(modelPath)\n layerData = list(map(getLayerConfig, model.layers))\n compressedLayers = compressLayers(layerData)\n visualizeLayers(compressedLayers, imagePath)",
"_____no_output_____"
],
[
"model_path = 'models/leNet_data.h5'",
"_____no_output_____"
],
[
"loadModel(model_path).summary()",
"_____no_output_____"
],
[
"visualizeModel(model_path, 'images/model')",
"_____no_output_____"
],
[
"# dict_keys(['loss', 'val_loss'])\nloss = [0.02814892864289296, 0.004734130184661697, 0.002493900195834608]\nvalid_loss = [0.027936478145135307, 0.020000185329467818, 0.02095587708179046]\n\n### plot the training and validation loss for each epoch\nplt.figure(figsize=(10,7))\nplt.plot(loss)\nplt.plot(valid_loss)\nplt.title('Model Loss (MSE)')\nplt.ylabel('mean squared error')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.grid(color='gray', linestyle='--', linewidth=1)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8b8ef4c194f041d968f80f3044fe5603d77285 | 31,846 | ipynb | Jupyter Notebook | ayudantias/tutorial-content-based.ipynb | PUC-RecSys-Class/RecSysPUC-2019 | 38d723da9b9dc95f511fde1111b912c8f232ec8a | [
"MIT"
] | 5 | 2019-08-08T15:18:52.000Z | 2019-11-15T21:14:44.000Z | ayudantias/tutorial-content-based.ipynb | PUC-RecSys-Class/RecSysPUC-2019 | 38d723da9b9dc95f511fde1111b912c8f232ec8a | [
"MIT"
] | 2 | 2019-08-21T19:23:54.000Z | 2019-09-23T16:13:37.000Z | ayudantias/tutorial-content-based.ipynb | PUC-RecSys-Class/RecSysPUC-2019 | 38d723da9b9dc95f511fde1111b912c8f232ec8a | [
"MIT"
] | null | null | null | 29.596654 | 434 | 0.503957 | [
[
[
"<a href=\"https://colab.research.google.com/github/Hernan4444/diplomado-sistemas-recomendadores/blob/master/Diplomado_Alumno_2019_Sistemas_Recomendadores_3_Content_Based.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Práctica de Sistemas Recomendadores 3: Content based",
"_____no_output_____"
],
[
"En este práctico, volveremos a utilizar la biblioteca de Python [sklearn](https://scikit-learn.org/stable/), para aprender sobre 2 algoritmos para recomendación basado en contenidos y de unas herramientas para preprocesar los textos. En particula, este practico verá:\n\n* TF-IDF\n* Word Embeddings\n* Uso de Stop Words\n\n**Ayudantes**: Manuel Cartagena, Andrés Carvallo y Patricio Cerda\n\n\n",
"_____no_output_____"
],
[
"# Actividad 1\n\nAntes de empezar con la actividad, responder la siguiente pregunta con lo visto en clases\n\n**Pregunta:** Explique con sus palabras a qué se refiere con recomendación basada en contenido. En particular responda\n\n- ¿Qué datos se utilizan para recomendación basada en contenidos?\n- Mencione un ejemplo donde sea factible utilizar este tipo de recomendación y justifique.\n\n\n\n",
"_____no_output_____"
],
[
"**Respuesta:** COMPLETAR",
"_____no_output_____"
],
[
"# Descargando la información\n\nVaya ejecutando cada celda presionando el botón de **Play** o presionando Ctrl+Enter (Linux y Windows) o Command+Enter (Macosx) para descargar las bases de datos.\n\n* Recursos:\n * `dictionary.p`\n * `dictionary-stemm.p`\n * `tfidf_model.p`\n * `tfidf_model-stemm.p`\n* Dataset:\n * `corpus1.csv`",
"_____no_output_____"
]
],
[
[
"# Descarga de recursos\n!curl -L -o 'resources.tar.gz' 'https://github.com/PUC-RecSys-Class/Syllabus/blob/master/Practico%204/files/resources.tar.gz?raw=true'\n\n# Descompresión del archivo\n!tar -xvf resources.tar.gz",
"_____no_output_____"
],
[
"# Descarga del dataset\n!curl -L -o 'dataset.tar.gz' 'https://github.com/PUC-RecSys-Class/Syllabus/blob/master/Practico%204/files/dataset.tar.gz?raw=true'\n\n# Descompresión del archivo\n!tar -xvf dataset.tar.gz",
"_____no_output_____"
]
],
[
[
"# Revisar archivos descargados\n\nRevisemos el _dataset_ descargado:\n",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\ncorpus_df = pd.read_csv('./corpus1.csv', sep='\\t',\n header=None, encoding='latin',\n names=['id', 'title', 'abstract'])\ncorpus_df.head(5)",
"_____no_output_____"
]
],
[
[
"Podemos ver que este _dataet: contiene 3 columnas:\n* **_id_**: identificador de cada texto\n* **_title_**: título del documento, en este caso, de un _paper_\n* **_abstract_**: primer párafo del _paper_ que es una representación abreviada, objetiva y precisa del contenido de un documento o recurso, sin interpretación crítica y sin mención expresa del autor del resumen.",
"_____no_output_____"
],
[
"## Preparar entorno\nPrimero es necesario instalar algunas librerías previas",
"_____no_output_____"
]
],
[
[
"!pip install nltk\n!pip install sklearn\n!pip install gensim\n!pip install pandas\n!pip install numpy",
"_____no_output_____"
]
],
[
[
"Luego necesitamos importar las librerías a utilizar en este práctico. No se asusten por todas las librerías, iremos explicando lo más importante a medida que se avanza en el práctico.",
"_____no_output_____"
]
],
[
[
"import string\n\nimport gensim\nimport nltk\nimport numpy as np\nimport pandas as pd\nimport sklearn\n\nfrom collections import Counter\nfrom os.path import isfile\nfrom textwrap import wrap\n\nfrom gensim import corpora, models, similarities\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom scipy.sparse import csr_matrix\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import NMF\n\nimport json\nimport warnings\nimport gensim \nwarnings.filterwarnings(\"ignore\")\npd.options.display.max_columns = None\n\n",
"_____no_output_____"
]
],
[
[
"## Preprocesamiento de datos",
"_____no_output_____"
],
[
"Volvemos a cargar el _dataset_ a utilizar",
"_____no_output_____"
]
],
[
[
"corpus_df = pd.read_csv('./corpus1.csv', sep='\\t',\n header=None, encoding='latin',\n names=['id', 'title', 'abstract'])\ncorpus_df.head(5)",
"_____no_output_____"
]
],
[
[
"Luego descargamos las librerías de NLTK necesarias:",
"_____no_output_____"
]
],
[
[
"nltk.download('punkt')",
"_____no_output_____"
]
],
[
[
"En este momento estamos bajando un _tokenizador_ específico llamado [Punkt Sentence Tokenizer](https://kite.com/python/docs/nltk.tokenize.punkt). Este será usado a continuación para realizar una cierta tarea con los textos (no vamos a decir cual es porque una actividad es que comenten que hace dado unos ejemplos que mostramos c: ). \n\nLo siguiente es implementar una función que transforme texto no estructurado a una lista de *tokens* procesados.",
"_____no_output_____"
]
],
[
[
"def get_tokens(text):\n # Pasar todo a minuscula\n lowers = text.lower()\n \n # Quitar puntuación\n no_punctuation = lowers.translate({ord(c): None for c in string.punctuation})\n \n # Tokenizar \n tokens = nltk.word_tokenize(no_punctuation)\n \n # Retornar resultado\n return tokens\n\n\nprint(get_tokens(\"I'm a super student for recommender systems!\"))\nprint(get_tokens(\"First sentence. Seconde sentence.\"))",
"_____no_output_____"
]
],
[
[
"En el código anterior, para ejecutar `nltk.word_tokenize()` era necesario tener descargado _punkt_. \n\n## Actividad 2\n\nEn función a las frases ingresadas y al resultado impreso, ¿Qué significa _Tokenizar_?",
"_____no_output_____"
],
[
"**Respuesta:** COMPLETAR",
"_____no_output_____"
]
],
[
[
"# A cada abstract le aplicamos la función de get_tokens\ncorpus_df['tokenized_abstract'] = corpus_df.abstract.map(get_tokens)\ncorpus_df.head(5)",
"_____no_output_____"
]
],
[
[
"Ahora se tiene que generar un diccionario con todas las palabras del *corpus*. Se recomienda revisar la documentación de gensim y leer cómo usar los diccionarios: [corpora.dictionary](https://radimrehurek.com/gensim/corpora/dictionary.html)",
"_____no_output_____"
]
],
[
[
"dict_file = './resources/dictionary.p'\n\nif isfile(dict_file): # Verificar si existe el archivo\n dictionary = corpora.dictionary.Dictionary().load(dict_file)\n \nelse: # En otro caso, crear el archivo y guardarlo\n dictionary = corpora.dictionary.Dictionary(documents=corpus_df.tokenised_abstract.tolist())\n dictionary.save(dict_file)",
"_____no_output_____"
],
[
"# Texto original\nprint(\"Texto 1\")\nwrap(str(corpus_df.loc[0][\"tokenized_abstract\"]))\n",
"_____no_output_____"
],
[
"# Texto pasado por el diccionario\nprint(\"Texto 1\")\nwrap(str(dictionary.doc2bow(corpus_df.loc[0][\"tokenized_abstract\"])))\n",
"_____no_output_____"
]
],
[
[
"Cuando se hizo `dictionary.doc2bow` se transformó una lista de palabas a un contador de ellas. En donde cada tupla representa `(ID, cantidad de veces)` de modo que se reduce la cantidad de palabras del texto a información numerica. \n\nPor ejemplo, la tupla `(30, 5)` indica que la palabra con ID 30 está 5 veces en el texto. Revisando el texto podemos ver que la palabra **\"a\"** es la que está repetida 5 veces. Esto implica que **\"a\"** está asignada al ID 30.\n\nAhora aplicaremos esta función a cada texto del _dataset_.",
"_____no_output_____"
]
],
[
[
"corpus_df['bow'] = corpus_df.tokenized_abstract.map(dictionary.doc2bow)\n\ncorpus = corpus_df['bow'].tolist()\n\ncorpus_df.head(5)",
"_____no_output_____"
]
],
[
[
"# Tf-idf",
"_____no_output_____"
],
[
"Recordemos que Tf-idf es una medida numérica que expresa cuán relevante es una palabra para un documento en una colección. Ahora, dada la frecuencia de cada palabra en cada texto, se v a utilizar esta ténica para obtener tuplas de la forma `(ID, Tf-idf)` en donde ID será el ID de la palabra igual como estaba antes (por ejemplo **\"a\"** tiene ID 30) y Tf-Idf será el valor dado por este algoritmo a la palabra en cuestión.",
"_____no_output_____"
]
],
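For reference, a common textbook form of the weight computed in the next cell is shown below; gensim's `TfidfModel` applies its own smoothing and normalization, so the exact values it produces may differ slightly.

$$\operatorname{tfidf}(t, d) = \operatorname{tf}(t, d)\cdot\log\frac{N}{\operatorname{df}(t)}$$

where $\operatorname{tf}(t, d)$ is the number of times term $t$ occurs in document $d$, $\operatorname{df}(t)$ is the number of documents containing $t$, and $N$ is the total number of documents in the corpus.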
[
[
"tfidf_model_file = 'resources/tfidf_model.p'\n\nif isfile(tfidf_model_file):\n tfidf_model = models.tfidfmodel.TfidfModel().load(tfidf_model_file)\n\nelse:\n tfidf_model = models.tfidfmodel.TfidfModel(corpus, dictionary=dictionary)\n tfidf_model.save(tfidf_model_file)\n\ncorpus_df['tf_idf'] = tfidf_model[corpus_df.bow.tolist()]\ncorpus_df.head(5)",
"_____no_output_____"
]
],
[
[
"## Generar recomendaciones: \nEn esta sección se implementan las funciones necesarias para poder generar recomendaciones dado lo que un usuario ha consumido. De manera artificial, se \"samplearán\" 3 documentos aleatorios que representarán al usuario objetivo (`sample`). Luego tendrás que generar diferentes recomendaciones y evaluar los resultados.",
"_____no_output_____"
]
],
[
[
"# Random users\nsamples = corpus_df.sample(3)\nsamples_ids = []\n\nfor n, (ix, paper) in enumerate(samples.iterrows()):\n samples_ids.append(ix)\n idx, title, abstract, bow, tf_idf = paper[[\n 'id', 'title', 'abstract', 'bow', 'tf_idf']]\n print('%d) %s' % (n+1, title))\n print('')\n print(\"\\n\".join(wrap(abstract)))\n print('\\n')",
"_____no_output_____"
]
],
[
[
"Lo anterior son 3 textos tomados al azar. Asumiremso que una persona vió estos 3 textos y ahora vamos a recomendarle 5 nuevos por cada documento.",
"_____no_output_____"
]
],
[
[
"# Recommendation functions\n\nN = len(dictionary)\n\n\ndef to_sparse(matrix):\n return csr_matrix([\n gensim.matutils.sparse2full(row, length=N)\n for row in matrix\n ])\n\n\ndef make_recommendations(model, metric, neighbors):\n M = len(corpus)\n\n X = to_sparse(corpus_df[model].tolist())\n document_index = NearestNeighbors(\n n_neighbors=(neighbors + 1),\n algorithm='brute',\n metric=metric).fit(X)\n return document_index\n\n\ndef print_recommendations(indexes, model):\n for n, (ix, paper) in enumerate(samples.iterrows()):\n dists, neighbors = indexes.kneighbors([gensim.matutils.sparse2full(paper[model], length=N)])\n print(paper['title'])\n print('')\n print('Documentos cercanos: ')\n i = 1\n for neighbour in neighbors[0]:\n if ix != neighbour:\n line = str(i) + \". \" + corpus_df.iloc[neighbour]['title']\n print(line)\n i += 1\n print('\\n')",
"_____no_output_____"
]
],
[
[
"A continuación deberá utilizar las funciones implementadas anteriormente para generar nuevas recomendaciones variando los parámetros del modelo. **Agregue nuevas celdas para cada implementación y/o pregunta.**",
"_____no_output_____"
]
],
[
[
"# Recommendation example: TF-IDF\ndoc_idx = make_recommendations('tf_idf', 'euclidean', 5)\nprint_recommendations(doc_idx, 'tf_idf')",
"_____no_output_____"
]
],
[
[
"# Stop words",
"_____no_output_____"
],
[
"A continuación, intentaremos mejorar los resultados obtenidos con TF-IDF eliminando las *stopwords*. ¿Qué son las *stopwords*? Son palabras vacías, sin significado, que no aportan (de manera significativa) al sentido de una frase, como los artículos, pronombres, etc.",
"_____no_output_____"
]
],
[
[
"nltk.download('stopwords')",
"_____no_output_____"
],
[
"from nltk.corpus import stopwords\n\ndef remove_stopwords(text):\n filtered_words = [\n word for word in text if word not in stopwords.words('english')\n ]\n return filtered_words",
"_____no_output_____"
]
],
[
[
"Ahora eliminamos los stopwords de los textos y volvemos a hacer todo el proceso pero con textos diferentes. Este proceso dura aproximadamente **5 minutos**",
"_____no_output_____"
]
],
[
[
"%%time\n# Puede que se demore un poco esta celda\ncorpus_df['tokenized_abstract_without_stopwords'] = corpus_df.tokenized_abstract.map(remove_stopwords)",
"_____no_output_____"
],
[
"corpus_df.head(5)",
"_____no_output_____"
],
[
"corpus_df['bow_without_stopwords'] = corpus_df.tokenized_abstract_without_stopwords.map(dictionary.doc2bow)\ncorpus_df.head(5)",
"_____no_output_____"
],
[
"corpus = corpus_df['bow_without_stopwords'].tolist()\n\ntfidf_model_file_without_stopwords = 'resources/tfidf_model.p'\n\nif isfile(tfidf_model_file):\n tfidf_model_without_stopwords = models.tfidfmodel.TfidfModel().load(tfidf_model_file)\n\nelse:\n tfidf_model_without_stopwords = models.tfidfmodel.TfidfModel(corpus, dictionary=dictionary)\n tfidf_model_without_stopwords.save(tfidf_model_file_without_stopwords)\n\ncorpus_df['tf_idf_without_stopwords'] = tfidf_model_without_stopwords[corpus_df.bow_without_stopwords.tolist()]\ncorpus_df.head(5)",
"_____no_output_____"
]
],
[
[
"**Actividad:** Genere recomendaciones para un nuevo usuario utilizando los nuevos vectores generados sin stop-words.",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"# Word Embeddings\nEn esta sección haremos recomendacion de textos médicos de [PubMed](https://www.ncbi.nlm.nih.gov/pubmed/) que han sido revisados por expertos. \n\nRESPONDER LAS SIGUIENTES PREGUNTAS: \n- ¿Que son word embeddings? ¿Cuál es la intuición?\n- ¿Por qué son útiles para representar documentos?\n",
"_____no_output_____"
]
],
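The `documents_w2vec.json` file downloaded and loaded later in this section already contains one 300-dimensional vector per document. A common way to build such document vectors, and a plausible (but not confirmed here) way a file like this is produced, is to average the word vectors of the tokens in each abstract. A minimal sketch, assuming a gensim `KeyedVectors` object `wv` is available; `document_vector` is a hypothetical helper name:

```python
import numpy as np

def document_vector(tokens, wv, size=300):
    # keep only the tokens that exist in the embedding vocabulary
    vectors = [wv[t] for t in tokens if t in wv]
    if not vectors:              # no known words: fall back to a zero vector
        return np.zeros(size)
    return np.mean(vectors, axis=0)
```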
[
[
"# Descarga de recursos\n!wget https://www.dropbox.com/s/gc3x9rp4gu2tmch/documents_w2vec.json.zip\n!unzip documents_w2vec.json.zip",
"_____no_output_____"
],
[
"# Descarga del dataset\n!wget https://www.dropbox.com/s/1bxuw3uf3xwyrr7/pubmed_data.csv",
"_____no_output_____"
]
],
[
[
"Podemos ver que este _dataet: contiene 4 columnas:\n* **user_id**: identificador de cada usuario \n* **pid**: identificador de cada texto con su correlativo de PubMed. \n* **_title_**: título del documento, en este caso, de un _paper_\n* **_abstract_**: primer párafo del _paper_ que es una representación abreviada, objetiva y precisa del contenido de un documento o recurso.",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('pubmed_data.csv')\ndf.head()",
"_____no_output_____"
],
[
"# creamos diccionario de titulos y abstracts que utilizaremos despues\ndict_title_abstract = {}\n\nfor pid, title, abstract in zip(df.pid, df.title, df.abstract):\n dict_title_abstract[pid] = {'title': title, 'abstract': abstract}",
"_____no_output_____"
],
[
"# cargamos diccionario de embeddings por cada documento (pre-procesado)\nw2vec_vectors = json.load(open('documents_w2vec.json'))\n",
"_____no_output_____"
]
],
[
[
"creamos un objeto *gensim.keyedvectors* para hacer más eficiente la búsqueda de documentos similares",
"_____no_output_____"
]
],
[
[
"embedding_size = 300\n\ndoc2vec = gensim.models.keyedvectors.Word2VecKeyedVectors(embedding_size)\nkeys = list(w2vec_vectors.keys())\nvalues = [\n w2vec_vectors[key]\n for key in keys\n]\ndoc2vec.add(keys, values)",
"_____no_output_____"
]
],
[
[
"## Generar recomendaciones",
"_____no_output_____"
],
[
"función **find_similar** para encontrar documentos similares a un pid en particular que recibe id del documento y los topn documentos mas similares y retorna topn documentos más similares",
"_____no_output_____"
]
],
[
[
"def find_similar(pid, topn):\n results = []\n\n for id_, score in doc2vec.similar_by_vector(doc2vec[pid], topn=topn):\n results.append([id_, score, dict_title_abstract[int(id_)]['title'], dict_title_abstract[int(id_)]['abstract']])\n\n return pd.DataFrame(results[1:], columns = ['pid', 'score', 'title', 'abstract'])",
"_____no_output_____"
],
[
"find_similar('22508578', 10)",
"_____no_output_____"
]
],
[
[
"función **recommend** para recomendar a un usuario de acuerdo una muestra de documentos que ha leído.",
"_____no_output_____"
]
],
[
[
"def recommend(user_id, topn, sample_user):\n user_docs = df[df.user_id==user_id]['pid'].sample(sample_user)\n\n results = []\n\n for pid in user_docs:\n\n for id_, score in doc2vec.similar_by_vector(doc2vec[str(pid)], topn=topn):\n\n if int(id_) in dict_title_abstract:\n results.append([id_, score, dict_title_abstract[int(id_)]['title'], dict_title_abstract[int(id_)]['abstract']])\n \n\n results = sorted(results, key = lambda x: int(x[1]))\n\n return pd.DataFrame(results[topn:], columns = ['pid', 'score', 'title', 'abstract']).head(10)",
"_____no_output_____"
],
[
"# documentos leidos por el usuario \ndf[df.user_id==348892].sample(10)",
"_____no_output_____"
],
[
"recommend(user_id= 348892, topn= 10, sample_user = 5)",
"_____no_output_____"
]
],
[
[
"RESPONDER:\n- ¿Qué problemas puede tener la recomendación basada en contenido?",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8ba49ed0e3d7628a85284bcf7caf18d4026158 | 722,011 | ipynb | Jupyter Notebook | notebooks/Bivariate.ipynb | MerlinDumeur/pymultifracs | 9767ee4c34a3a39a7609f40afd6265151ba4e550 | [
"MIT"
] | 9 | 2019-03-29T05:28:42.000Z | 2019-12-29T12:41:15.000Z | notebooks/Bivariate.ipynb | MerlinDumeur/pymultifracs | 9767ee4c34a3a39a7609f40afd6265151ba4e550 | [
"MIT"
] | 4 | 2021-01-20T14:58:03.000Z | 2021-03-01T11:52:09.000Z | notebooks/Bivariate.ipynb | MerlinDumeur/pymultifracs | 9767ee4c34a3a39a7609f40afd6265151ba4e550 | [
"MIT"
] | 6 | 2021-02-08T15:23:39.000Z | 2022-03-28T13:30:46.000Z | 2,111.143275 | 77,768 | 0.961985 | [
[
[
"## Loading pregenerated signals",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\n\nsns.set_style('whitegrid')\n\nfrom scipy.io import loadmat\n\nfrom importlib import reload\n\nimport pymultifracs.bivariate\nreload(pymultifracs.bivariate)\nfrom pymultifracs.bivariate import bivariate_analysis_full\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom collections import defaultdict\n\nimport pyvista as pv",
"_____no_output_____"
],
[
"names = ['nossnoMF', 'nossMF', 'ssMF', 'ssnoMF']\n\ndata = {\n key: loadmat(f'../tests/data/DataSet_{key}.mat')['data'].transpose() for key in names\n}\n\nparam = {\n key: loadmat(f'../tests/data/DataSet_{key}.mat')['params'] for key in names\n}\n\nparam = {\n key: {\n param[key].dtype.names[i]: param[key][0, 0][i] for i in range(len(param[key][0, 0]))\n }\n for key in param\n}",
"_____no_output_____"
],
[
"def plot_key(key, plot=True):\n \n X = data[key].copy()\n\n j1 = 3\n j2 = np.log2(X.shape[0]) - 3\n p_exp=2\n gamint = 1\n\n dwt, lwt = bivariate_analysis_full(X[:, 0], X[:, 1], p_exp=p_exp, j1=j1, j2=j2, gamint=gamint, weighted=True, n_cumul=2,\n q1=np.array([0, 1, 2]), q2=np.array([0, 1, 2]))\n \n if not plot:\n return dwt, lwt\n \n print(f'{lwt.cumulants.rho_mf=}')\n \n fig_m, fig_c = lwt.cumulants.plot()\n fig_m.suptitle(key)\n fig_c.suptitle(key)\n lwt.cumulants.plot_legendre(resolution=30)\n plt.suptitle(key)\n plt.show()\n \n plt.plot(dwt.structure.j, dwt.structure.coherence, label='second order (wavelet) coherence')\n plt.plot(lwt.cumulants.j, lwt.cumulants.RHO_MF, label=r'multifractal (leader) coherence')\n plt.xlabel('j')\n plt.title(key)\n plt.legend()\n plt.show()\n \n return dwt, lwt",
"_____no_output_____"
],
[
"for key in data:\n plot_key(key)",
"lwt.cumulants.rho_mf=array([-0.04630067])\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
ec8bc9985b3b3c312eebedc80b40df3ea1ce93f1 | 20,886 | ipynb | Jupyter Notebook | homeworks/homework1/part2/BatchNormalization.ipynb | UltronAI/ai_2018fall | 35aa1c32a7ce9cd3cc758f78817c7ddda6272ff2 | [
"MIT"
] | 3 | 2019-04-22T10:29:44.000Z | 2019-04-26T09:20:06.000Z | homeworks/homework1/part2/BatchNormalization.ipynb | UltronAI/ai_2018fall | 35aa1c32a7ce9cd3cc758f78817c7ddda6272ff2 | [
"MIT"
] | 1 | 2018-10-25T10:45:52.000Z | 2018-10-25T10:45:52.000Z | homeworks/homework1/part2/BatchNormalization.ipynb | UltronAI/ai_2018fall | 35aa1c32a7ce9cd3cc758f78817c7ddda6272ff2 | [
"MIT"
] | 1 | 2019-09-28T03:13:36.000Z | 2019-09-28T03:13:36.000Z | 41.688623 | 804 | 0.613138 | [
[
[
"# Batch Normalization\nOne way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3].\n\nThe idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.\n\nThe authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.\n\nIt is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.\n\n[3] Sergey Ioffe and Christian Szegedy, \"Batch Normalization: Accelerating Deep Network Training by Reducing\nInternal Covariate Shift\", ICML 2015.",
"_____no_output_____"
]
],
[
[
"# As usual, a bit of setup\nfrom __future__ import print_function\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cs231n.classifiers.fc_net import *\nfrom cs231n.data_utils import get_CIFAR10_data\nfrom cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array\nfrom cs231n.solver import Solver\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x, y):\n \"\"\" returns relative error \"\"\"\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))",
"_____no_output_____"
],
[
"# Load the (preprocessed) CIFAR10 data.\n\ndata = get_CIFAR10_data()\nfor k, v in data.items():\n print('%s: ' % k, v.shape)",
"_____no_output_____"
]
],
[
[
"## Batch normalization: Forward\nIn the file `cs231n/layers.py`, implement the batch normalization forward pass in the function `batchnorm_forward`. Once you have done so, run the following to test your implementation.",
"_____no_output_____"
]
],
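As a rough guide to the shape of the training-time computation, here is a minimal sketch only, not the assignment's reference `batchnorm_forward`; it omits the running-average bookkeeping and the `bn_param` handling that the real function needs:

```python
import numpy as np

def batchnorm_forward_train_sketch(x, gamma, beta, eps=1e-5):
    # x: (N, D) minibatch; gamma, beta: (D,) learnable scale and shift
    mu = x.mean(axis=0)                      # per-feature minibatch mean
    var = x.var(axis=0)                      # per-feature minibatch variance
    x_hat = (x - mu) / np.sqrt(var + eps)    # normalized features
    out = gamma * x_hat + beta               # scale and shift
    cache = (x, x_hat, mu, var, gamma, eps)  # values the backward pass will need
    return out, cache
```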
[
[
"# Check the training-time forward pass by checking means and variances\n# of features both before and after batch normalization\n\n# Simulate the forward pass for a two-layer network\nnp.random.seed(231)\nN, D1, D2, D3 = 200, 50, 60, 3\nX = np.random.randn(N, D1)\nW1 = np.random.randn(D1, D2)\nW2 = np.random.randn(D2, D3)\na = np.maximum(0, X.dot(W1)).dot(W2)\n\nprint('Before batch normalization:')\nprint(' means: ', a.mean(axis=0))\nprint(' stds: ', a.std(axis=0))\n\n# Means should be close to zero and stds close to one\nprint('After batch normalization (gamma=1, beta=0)')\na_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})\nprint(' mean: ', a_norm.mean(axis=0))\nprint(' std: ', a_norm.std(axis=0))\n\n# Now means should be close to beta and stds close to gamma\ngamma = np.asarray([1.0, 2.0, 3.0])\nbeta = np.asarray([11.0, 12.0, 13.0])\na_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})\nprint('After batch normalization (nontrivial gamma, beta)')\nprint(' means: ', a_norm.mean(axis=0))\nprint(' stds: ', a_norm.std(axis=0))",
"_____no_output_____"
],
[
"# Check the test-time forward pass by running the training-time\n# forward pass many times to warm up the running averages, and then\n# checking the means and variances of activations after a test-time\n# forward pass.\nnp.random.seed(231)\nN, D1, D2, D3 = 200, 50, 60, 3\nW1 = np.random.randn(D1, D2)\nW2 = np.random.randn(D2, D3)\n\nbn_param = {'mode': 'train'}\ngamma = np.ones(D3)\nbeta = np.zeros(D3)\nfor t in range(50):\n X = np.random.randn(N, D1)\n a = np.maximum(0, X.dot(W1)).dot(W2)\n batchnorm_forward(a, gamma, beta, bn_param)\nbn_param['mode'] = 'test'\nX = np.random.randn(N, D1)\na = np.maximum(0, X.dot(W1)).dot(W2)\na_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)\n\n# Means should be close to zero and stds close to one, but will be\n# noisier than training-time forward passes.\nprint('After batch normalization (test-time):')\nprint(' means: ', a_norm.mean(axis=0))\nprint(' stds: ', a_norm.std(axis=0))",
"_____no_output_____"
]
],
[
[
"## Batch Normalization: backward\nNow implement the backward pass for batch normalization in the function `batchnorm_backward`.\n\nTo derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.\n\nOnce you have finished, run the following to numerically check your backward pass.",
"_____no_output_____"
]
],
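One way the node-by-node backprop can be organized is sketched below, using the cache layout from the forward sketch above; it is not the assignment's reference solution:

```python
import numpy as np

def batchnorm_backward_sketch(dout, cache):
    x, x_hat, mu, var, gamma, eps = cache
    N = x.shape[0]
    xmu = x - mu

    dbeta = dout.sum(axis=0)                  # shift parameter: sum over the batch
    dgamma = (dout * x_hat).sum(axis=0)       # scale parameter: pairs with x_hat
    dx_hat = dout * gamma

    # gradients reaching the variance and mean nodes of the graph
    dvar = np.sum(dx_hat * xmu, axis=0) * -0.5 * (var + eps) ** -1.5
    dmu = np.sum(-dx_hat / np.sqrt(var + eps), axis=0) + dvar * np.mean(-2.0 * xmu, axis=0)

    # three branches reach x: through x_hat, through the variance, through the mean
    dx = dx_hat / np.sqrt(var + eps) + dvar * 2.0 * xmu / N + dmu / N
    return dx, dgamma, dbeta
```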
[
[
"# Gradient check batchnorm backward pass\nnp.random.seed(231)\nN, D = 4, 5\nx = 5 * np.random.randn(N, D) + 12\ngamma = np.random.randn(D)\nbeta = np.random.randn(D)\ndout = np.random.randn(N, D)\n\nbn_param = {'mode': 'train'}\nfx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]\nfg = lambda a: batchnorm_forward(x, a, beta, bn_param)[0]\nfb = lambda b: batchnorm_forward(x, gamma, b, bn_param)[0]\n\ndx_num = eval_numerical_gradient_array(fx, x, dout)\nda_num = eval_numerical_gradient_array(fg, gamma.copy(), dout)\ndb_num = eval_numerical_gradient_array(fb, beta.copy(), dout)\n\n_, cache = batchnorm_forward(x, gamma, beta, bn_param)\ndx, dgamma, dbeta = batchnorm_backward(dout, cache)\nprint('dx error: ', rel_error(dx_num, dx))\nprint('dgamma error: ', rel_error(da_num, dgamma))\nprint('dbeta error: ', rel_error(db_num, dbeta))",
"_____no_output_____"
]
],
[
[
"## Batch Normalization: alternative backward (OPTIONAL, +3 points extra credit)\nIn class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper.\n\nSurprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. After doing so, implement the simplified batch normalization backward pass in the function `batchnorm_backward_alt` and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.\n\nNOTE: This part of the assignment is entirely optional, but we will reward 3 points of extra credit if you can complete it.",
"_____no_output_____"
]
],
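One possible collapsed form is sketched below, with the same cache layout assumed in the earlier sketches; your own paper derivation may group the terms differently:

```python
import numpy as np

def batchnorm_backward_alt_sketch(dout, cache):
    x, x_hat, mu, var, gamma, eps = cache
    N = dout.shape[0]

    dbeta = dout.sum(axis=0)
    dgamma = (dout * x_hat).sum(axis=0)

    dx_hat = dout * gamma
    inv_std = 1.0 / np.sqrt(var + eps)
    # single expression: N*dx_hat minus the two batch-level correction terms
    dx = (inv_std / N) * (N * dx_hat - dx_hat.sum(axis=0) - x_hat * (dx_hat * x_hat).sum(axis=0))
    return dx, dgamma, dbeta
```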
[
[
"np.random.seed(231)\nN, D = 100, 500\nx = 5 * np.random.randn(N, D) + 12\ngamma = np.random.randn(D)\nbeta = np.random.randn(D)\ndout = np.random.randn(N, D)\n\nbn_param = {'mode': 'train'}\nout, cache = batchnorm_forward(x, gamma, beta, bn_param)\n\nt1 = time.time()\ndx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)\nt2 = time.time()\ndx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)\nt3 = time.time()\n\nprint('dx difference: ', rel_error(dx1, dx2))\nprint('dgamma difference: ', rel_error(dgamma1, dgamma2))\nprint('dbeta difference: ', rel_error(dbeta1, dbeta2))\nprint('speedup: %.2fx' % ((t2 - t1) / (t3 - t2)))",
"_____no_output_____"
]
],
[
[
"## Fully Connected Nets with Batch Normalization\nNow that you have a working implementation for batch normalization, go back to your `FullyConnectedNet` in the file `cs2312n/classifiers/fc_net.py`. Modify your implementation to add batch normalization.\n\nConcretely, when the flag `use_batchnorm` is `True` in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.\n\nHINT: You might find it useful to define an additional helper layer similar to those in the file `cs231n/layer_utils.py`. If you decide to do so, do it in the file `cs231n/classifiers/fc_net.py`.",
"_____no_output_____"
]
],
[
[
"np.random.seed(231)\nN, D, H1, H2, C = 2, 15, 20, 30, 10\nX = np.random.randn(N, D)\ny = np.random.randint(C, size=(N,))\n\nfor reg in [0, 3.14]:\n print('Running check with reg = ', reg)\n model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,\n reg=reg, weight_scale=5e-2, dtype=np.float64,\n use_batchnorm=True)\n\n loss, grads = model.loss(X, y)\n print('Initial loss: ', loss)\n\n for name in sorted(grads):\n f = lambda _: model.loss(X, y)[0]\n grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)\n print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))\n if reg == 0: print()",
"_____no_output_____"
]
],
[
[
"# Batchnorm for deep networks\nRun the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.",
"_____no_output_____"
]
],
[
[
"np.random.seed(231)\n# Try training a very deep net with batchnorm\nhidden_dims = [100, 100, 100, 100, 100]\n\nnum_train = 1000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nweight_scale = 2e-2\nbn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\nmodel = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\nbn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\nbn_solver.train()\n\nsolver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\nsolver.train()",
"_____no_output_____"
]
],
[
[
"Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.",
"_____no_output_____"
]
],
[
[
"plt.subplot(3, 1, 1)\nplt.title('Training loss')\nplt.xlabel('Iteration')\n\nplt.subplot(3, 1, 2)\nplt.title('Training accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 3)\nplt.title('Validation accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 1)\nplt.plot(solver.loss_history, 'o', label='baseline')\nplt.plot(bn_solver.loss_history, 'o', label='batchnorm')\n\nplt.subplot(3, 1, 2)\nplt.plot(solver.train_acc_history, '-o', label='baseline')\nplt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')\n\nplt.subplot(3, 1, 3)\nplt.plot(solver.val_acc_history, '-o', label='baseline')\nplt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')\n \nfor i in [1, 2, 3]:\n plt.subplot(3, 1, i)\n plt.legend(loc='upper center', ncol=4)\nplt.gcf().set_size_inches(15, 15)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Batch normalization and initialization\nWe will now run a small experiment to study the interaction of batch normalization and weight initialization.\n\nThe first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. The second layer will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.",
"_____no_output_____"
]
],
[
[
"np.random.seed(231)\n# Try training a very deep net with batchnorm\nhidden_dims = [50, 50, 50, 50, 50, 50, 50]\n\nnum_train = 1000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nbn_solvers = {}\nsolvers = {}\nweight_scales = np.logspace(-4, 0, num=20)\nfor i, weight_scale in enumerate(weight_scales):\n print('Running weight scale %d / %d' % (i + 1, len(weight_scales)))\n bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\n model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\n bn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=False, print_every=200)\n bn_solver.train()\n bn_solvers[weight_scale] = bn_solver\n\n solver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=False, print_every=200)\n solver.train()\n solvers[weight_scale] = solver",
"_____no_output_____"
],
[
"# Plot results of weight scale experiment\nbest_train_accs, bn_best_train_accs = [], []\nbest_val_accs, bn_best_val_accs = [], []\nfinal_train_loss, bn_final_train_loss = [], []\n\nfor ws in weight_scales:\n best_train_accs.append(max(solvers[ws].train_acc_history))\n bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))\n \n best_val_accs.append(max(solvers[ws].val_acc_history))\n bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))\n \n final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))\n bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))\n \nplt.subplot(3, 1, 1)\nplt.title('Best val accuracy vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Best val accuracy')\nplt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')\nplt.legend(ncol=2, loc='lower right')\n\nplt.subplot(3, 1, 2)\nplt.title('Best train accuracy vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Best training accuracy')\nplt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')\nplt.legend()\n\nplt.subplot(3, 1, 3)\nplt.title('Final training loss vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Final training loss')\nplt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')\nplt.legend()\nplt.gca().set_ylim(1.0, 3.5)\n\nplt.gcf().set_size_inches(10, 15)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Question:\nDescribe the results of this experiment, and try to give a reason why the experiment gave the results that it did.",
"_____no_output_____"
],
[
"# Answer:\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
ec8bca6647daeab4a517f4d42760789b38ba8ca2 | 69,730 | ipynb | Jupyter Notebook | Hackerrank/hackerrank.ipynb | Aneruth/Problem-Solving | af97b740ac86016c732e683fbabe7d9f00b4e67d | [
"Apache-2.0"
] | null | null | null | Hackerrank/hackerrank.ipynb | Aneruth/Problem-Solving | af97b740ac86016c732e683fbabe7d9f00b4e67d | [
"Apache-2.0"
] | null | null | null | Hackerrank/hackerrank.ipynb | Aneruth/Problem-Solving | af97b740ac86016c732e683fbabe7d9f00b4e67d | [
"Apache-2.0"
] | null | null | null | 28.648316 | 450 | 0.519905 | [
[
[
"## Sales by Match\n\nThere is a large pile of socks that must be paired by color. Given an array of integers representing the color of each sock, determine how many pairs of socks with matching colors there are.\n\nExample \n \nn = 7\narr = [[1,2,1,2,1,3,2]]\n\nThere is one pair of color and one of color . There are three odd socks left, one of each color. The number of pairs is .\n\nFunction Description\n\nComplete the sockMerchant function in the editor below.\n\nsockMerchant has the following parameter(s):\n\nint n: the number of socks in the pile\n\nint arr[[n]]: the colors of each sock\n\nReturns \nint: the number of pairs",
"_____no_output_____"
]
],
[
[
"# Method 1 (Using Collections)\nimport collections\ndef salesMatch1(arr):\n c = 0\n for i in collections.Counter(arr).values():\n c += int(i/2)\n return c\nprint(f'Method 1 solution is: {salesMatch1([1,2,1,2,1,3,2])}')",
"Method 1 solution is: 2\n"
]
],
[
[
"## Counting Valleys\nAn avid hiker keeps meticulous records of their hikes. During the last hike that took exactly steps, for every step it was noted if it was an uphill, U, or a downhill,D step. Hikes always start and end at sea level, and each step up or down represents a 1 unit change in altitude. We define the following terms:\n\n- A mountain is a sequence of consecutive steps above sea level, starting with a step up from sea level and ending with a step down to sea level.\n\n- A valley is a sequence of consecutive steps below sea level, starting with a step down from sea level and ending with a step up to sea level.\n\nGiven the sequence of up and down steps during a hike, find and print the number of valleys walked through.\n\nExample\n\npath = 'DDUUUUDD' | \noutput = 2 (number of valley)",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef countingValleys(path):\n val = deep = 0\n for i in path:\n deep += 1 if i == 'U' else -1\n if deep == 0 and i == 'U': val += 1\n return val\nprint(f'Method 1 output is: {countingValleys(\"DDDDUUUUU\")}')",
"Method 1 output is: 1\n"
]
],
[
[
"# Yet to see this\n## Repeated String\n\nThere is a string,s, of lowercase English letters that is repeated infinitely many times. Given an integer, n, find and print the number of letter a's in the first n letters of the infinite string.\n\ns = 'abcac' |\nn = 10\n\nThe substring we consider is abcacabcac, the first 10 characters of the infinite string. There are 4 occurrences of a in the substring.\n\nFunction Description\n\nrepeatedString has the following parameter(s):\ns: a string to repeat\n\nn: the number of characters to consider\n\nReturns int: the frequency of a in the substring",
"_____no_output_____"
]
],
[
[
"import collections # Partial test cases passed\ndef repeatedString(s, n):\n if n == pow(10,12):\n return n\n else:\n stringVal = s*n\n d = collections.Counter(stringVal[:n])\n return d['a']\nrepeatedString('a', 1000000000000)",
"_____no_output_____"
],
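Since the attempt above materializes the repeated string (which cannot work for n = 10^12), here is a counting-based sketch that only looks at one copy of s plus the leftover prefix:

```python
def repeatedString(s, n):
    # count of 'a' in the full copies of s, plus in the truncated final copy
    full_repeats, remainder = divmod(n, len(s))
    return s.count('a') * full_repeats + s[:remainder].count('a')

print(repeatedString('aba', 10))               # 7
print(repeatedString('a', 1000000000000))      # 1000000000000
```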
[
"def minSwaps(alist):\n counter = 0\n for j in range(len(alist)):\n for i in range(0,len(alist)-1):\n if alist[i] > alist[i+1]:\n counter += 1\n # alist[i],alist[i+1] = alist[i+1],alist[i]\n return counter",
"_____no_output_____"
],
[
"minSwaps([7,1,3,2,4,5,6])",
"_____no_output_____"
]
],
[
[
"# Compare the Triplets \nAlice and Bob each created one problem for HackerRank. A reviewer rates the two challenges, awarding points on a scale from 1 to 100 for three categories: problem clarity, originality, and difficulty.\n\nThe rating for Alice's challenge is the triplet a = (a[0], a[1], a[2]), and the rating for Bob's challenge is the triplet b = (b[0], b[1], b[2]).\n\nThe task is to find their comparison points by comparing a[0] with b[0], a[1] with b[1], and a[2] with b[2].\n \n - If a[i] > b[i], then Alice is awarded 1 point.\n \n - If a[i] < b[i], then Bob is awarded 1 point.\n \n - If a[i] = b[i], then neither person receives a point.\n\nComparison points is the total points a person earned. Given a and b, determine their respective comparison points.\n\nReturn\nint[2]: Alice's score is in the first position, and Bob's score is in the second.",
"_____no_output_____"
]
],
[
[
"def tripletCount(alist,blist):\n dic = {\"Alice\":0,\"Bob\":0}\n for i,j in zip(alist,blist):\n if i>j:\n dic['Alice'] += 1\n elif i < j:\n dic['Bob'] += 1\n return list(dic.values())\nprint(f'Method 1 output is: {tripletCount([17,28,30],[99,16,8])}')",
"Method 1 output is: [2, 1]\n"
]
],
[
[
"# Diagonal Difference\n\nFor example, the square matrix arr is shown below:\n\n 1 2 3\n\n 4 5 6\n\n 9 8 9 \n\nThe left-to-right diagonal = 1 + 5 + 9 = 15. The right to left diagonal = 3 + 5 + 9 = 17. Their absolute difference is |15 -17| = 2.",
"_____no_output_____"
]
],
[
[
"def diagDiff(arr):\n a = len(arr[0])\n diag1 = [arr[i][i] for i in range(a)]\n diag2 = [arr[a-1-i][i] for i in range(a-1,-1,-1)]\n return abs(sum(diag1) - sum(diag2))\nprint(f'Method 1 answer is: {diagDiff([[1,2,3],[4,5,6],[9,8,9]])}')",
"Method 1 answer is: 2\n"
]
],
[
[
"# Plus Minus\nGiven an array of integers, calculate the ratios of its elements that are positive, negative, and zero. Print the decimal value of each fraction on a new line with \n6 places after the decimal.\n\nNote: This challenge introduces precision problems. The test cases are scaled to six decimal places, though answers with absolute error of up to 10^-4 are acceptable.\n\nExample: \narr = [1,1,0,-1,-1] | output = [0.400000,0.400000,0.200000]",
"_____no_output_____"
]
],
[
[
"def plusMinus(alist):\n from collections import Counter\n val = Counter(alist).values()\n res = [i/sum(val) for i in val]\n return res\nplusMinus([1,1,0,-1,-1])",
"_____no_output_____"
]
],
[
[
"# Mini-Max Sum\nGiven five positive integers, find the minimum and maximum values that can be calculated by summing exactly four of the five integers. Then print the respective minimum and maximum values as a single line of two space-separated long integers.\n\nExample: \narr = [1,3,5,7,9] | output = [16,24] \n\nExplaination: \n\nThe minimum sum is 1 + 3 + 5 + 7 = 16 and 3 + 5 + 7 + 9 = 24",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef miniMax1(ar):\n resultList = [[ar[i],ar[i+1],ar[j],ar[j+1]] for i in range(len(ar)-1) for j in range(i,len(ar)-1)]\n output = [sum(i) for i in resultList if len(list(set(i))) != 2 and len(list(set(i))) != 3]\n return[output[0],output[-1]]\nprint(f'Method 1 answer is: {miniMax1([1,2,3,4,5])}')\n\n# Method 2 using combinations\nfrom itertools import combinations\ndef miniMax2(ar):\n output = [sum(j) for i in range(1,len(ar)) for j in combinations(ar,i) if len(j) == len(ar)-1]\n return [min(output),max(output)]\nprint(f'Method 2 answer is: {miniMax2([1,2,3,4,5])}')",
"Method 1 answer is: [10, 14]\nMethod 2 answer is: [10, 14]\n"
]
],
[
[
"### Merge the Tools!\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef mergeTools1(s,k):\n return [''.join(list(set(s[i:i+3]))) for i in range(0,len(s),k)]\nprint(f'Method 1 output is: {mergeTools1(\"AABBCDEAD\",3)}')\n\n# Method 2\ndef mergeTools2(s,k):\n import textwrap as wp\n return [''.join(list(set(i))) for i in wp.wrap(s,k)]\nprint(f'Method 2 output is: {mergeTools2(\"AABBCDEAD\",3)}')",
"Method 1 output is: ['AB', 'DBC', 'DAE']\nMethod 2 output is: ['AB', 'DBC', 'DAE']\n"
]
],
[
[
"# Words Score\nConsider that vowels in the alphabet are a, e, i, o, u and y.\nFunction score_words takes a list of lowercase words as an argument and returns a score as follows:\nThe score of a single word is 2 if the word contains an even number of vowels. Otherwise, the score of this word is 1. The score for the whole list of words is the sum of scores of all words in the list.\nDebug the given function score_words such that it returns a correct score.\nYour function will be tested on several cases by the locked template code.\n\nSample Input: \nhacker book\n\nSample Output: \n4",
"_____no_output_____"
]
],
[
[
"# Method 1\nfrom collections import Counter\ndef wordsScore1(words):\n vowels = ['a', 'e', 'i', 'o', 'u', 'y']\n score = 0\n wordsList = words.split(' ')\n for words in wordsList:\n hash_map = Counter(words)\n out = [hash_map[i] for i in vowels if i in hash_map.keys()]\n if sum(out) % 2 == 0: score = sum(out) + 2\n else:score += 1 \n return score\nprint(f'Method 1 output is: {wordsScore1(\"hacker book\")}')\n\n# Method 2\nimport re\ndef wordsScore2(words):\n score = 0\n for word in words:\n score+= len(re.findall(\"[aeiouy]\",word))%2 or 2 \n return score\nprint(f'Method 2 output is: {wordsScore2([\"hacker\",\"book\"])}')",
"Method 1 output is: 4\nMethod 2 output is: 4\n"
]
],
[
[
"# yet to see this\n# Validating Credit Card Numbers\nYou and Fredrick are good friends. Yesterday, Fredrick received N credit cards from ABCD Bank. He wants to verify whether his credit card numbers are valid or not. You happen to be great at regex so he is asking for your help!\nA valid credit card from ABCD Bank has the following characteristics: \n\n► It must start with a 4 or 5 or 6\n\n► It must contain exactly 16 digits. \n\n► It must only consist of digits (0-9). \n\n► It may have digits in groups of 4, separated by one hyphen \"-\".\n\n► It must NOT use any other separator like ' ' , '_', etc. \n\n► It must NOT have 4 or more consecutive repeated digits.\n\nExamples:\n\nValid Credit Card Numbers\n\n4253625879615786 | \n4424424424442444 | \n5122-2368-7954-3214\n\nInvalid Credit Card Numbers\n\n42536258796157867 #17 digits in card number → Invalid \n\n4424444424442444 #Consecutive digits are repeating 4 or more times → Invalid\n\n5122-2368-7954 - 3214 #Separators other than '-' are used → Invalid\n\n44244x4424442444 #Contains non digit characters → Invalid\n\n0525362587961578 #Doesn't start with 4, 5 or 6 → Invalid",
"_____no_output_____"
]
],
[
[
"#card = 4253625879615786\ncard = '5122-2368-7954-3214'\n# base case 1 length check\ndef cardCheck(card):\n \n # delimiter check and convert it to normal case\n if '-' in card:\n card = ''.join(card.split('-'))\n \n # Base case checking\n if len(card) == 16 : return 'Valid'\n if card[0] == 4 or card[0] == 5 or card[0] == 6: return 'Valid'\n return 'Not valid'\ncardCheck('0525362587961578')",
"_____no_output_____"
]
],
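A fuller check of the rules listed in the problem statement, based on two regular expressions, is sketched below (hypothetical function name; the judge's exact input/output handling is not reproduced):

```python
import re

def validate_card(card):
    # starts with 4/5/6 and has 16 digits, either contiguous or in 4 groups of 4
    # joined by the same separator ('-' or nothing), enforced by the backreference \1
    structure_ok = re.fullmatch(r'[456]\d{3}(-?)\d{4}\1\d{4}\1\d{4}', card) is not None
    # after removing hyphens, no digit may repeat 4 or more times consecutively
    digits = card.replace('-', '')
    no_long_runs = re.search(r'(\d)\1{3}', digits) is None
    return 'Valid' if structure_ok and no_long_runs else 'Invalid'

for c in ['4253625879615786', '4424424424442444', '5122-2368-7954-3214',
          '42536258796157867', '4424444424442444', '0525362587961578']:
    print(c, validate_card(c))
```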
[
[
"# Grading Students\nHackerLand University has the following grading policy:\nEvery student receives a grade in the inclusive range from 0 to 100.\n \n - Any grade less than 40 is a failing grade.\n \n - Sam is a professor at the university and likes to round each student's grade according to these rules:\n\nIf the difference between the grade and the next multiple of 5 is less than 3, round grade up to the next multiple of 5. If the value of grade is less than 38, no rounding occurs as the result will still be a failing grade.",
"_____no_output_____"
]
],
[
[
"def studentGrade(alist):\n return [i+(5-(i%5)) if(i>37 and i%5 != 0 and i%5>=3) else i for i in alist]\nprint(f'Method 1 output is: {studentGrade([73,67,38,33])}')",
"Method 1 output is: [75, 67, 40, 33]\n"
]
],
[
[
"# Encryption\nAn English text needs to be encrypted using the following encryption scheme. \nFirst, the spaces are removed from the text. Let L be the length of this text. \nThen, characters are written into a grid, whose rows and columns have the following constraints:\n",
"_____no_output_____"
]
],
[
[
"# Method 1 using numpy package\nimport numpy as np\ndef encrypt(string):\n sqaureRootL = str((len(string))**0.5) \n row,col = list(map(int,sqaureRootL[:3].split('.')))[0],list(map(int,sqaureRootL[:3].split('.')))[1]\n stringList = [i for i in string]\n wordMatrix = np.matrix(stringList).reshape((row,col))\n return [''.join(i) for i in np.array(wordMatrix)]\nprint(f'Method 1 output is: {encrypt(\"haveaniceday\")}')",
"Method 1 output is: ['have', 'anic', 'eday']\n"
]
],
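The numpy cell above only builds the grid; the full scheme also reads the grid column by column and must handle lengths whose square root is not an integer. A plain-Python sketch (hypothetical function name):

```python
import math

def encryption(s):
    s = s.replace(' ', '')                      # spaces are removed first
    cols = math.ceil(math.sqrt(len(s)))         # floor(sqrt(L)) <= rows <= cols <= ceil(sqrt(L))
    grid = [s[i:i + cols] for i in range(0, len(s), cols)]
    # read column by column, joining the column strings with single spaces
    return ' '.join(''.join(row[c] for row in grid if c < len(row)) for c in range(cols))

print(encryption('haveaniceday'))   # hae and via ecy
print(encryption('feedthedog'))     # fto ehg ee dd
```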
[
[
"# Bigger is Greater\n\nLexicographical order is often known as alphabetical order when dealing with strings. A string is greater than another string if it comes later in a lexicographically sorted list.\n\nGiven a word, create a new word by swapping some or all of its characters. This new word must meet two criteria:\n\n - It must be greater than the original word\n\n - It must be the smallest word that meets the first condition\n\nExample \nInput: w = abcd | otuput: abdc ",
"_____no_output_____"
]
],
[
[
"from itertools import combinations\nw = 'abcd'\nlis = [i for i in w]\n# [j for i in range(len(lis)) for j in combinations(lis,i)]\n[i for i in combinations(lis,4)]",
"_____no_output_____"
]
],
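[
[
"# A sketch of the standard next-permutation approach for 'Bigger is Greater';\n# the exploratory cell above does not solve the problem itself.\ndef biggerIsGreater(w):\n    w = list(w)\n    # find the rightmost position that can be increased\n    i = len(w) - 2\n    while i >= 0 and w[i] >= w[i+1]:\n        i -= 1\n    if i < 0:\n        return 'no answer'\n    # swap with the smallest larger character to its right, then reverse the tail\n    j = len(w) - 1\n    while w[j] <= w[i]:\n        j -= 1\n    w[i], w[j] = w[j], w[i]\n    w[i+1:] = reversed(w[i+1:])\n    return ''.join(w)\nprint(f'Output for abcd is: {biggerIsGreater(\"abcd\")}')",
"_____no_output_____"
]
],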
[
[
"# Non-Divisible Subset\nGiven a set of distinct integers, print the size of a maximal subset of S where the sum of any 2 numbers in Sdash is not evenly divisible by k. \n\nExample: S = [19,10,12,10,24,25,22] , k = 4 \n\nOutput: 3 (returns max element in a list which is not divisble by k).\n",
"_____no_output_____"
]
],
[
[
"def nonDivSubset(alist,k):\n dum = []\n for i in range(len(alist)-1):\n tot = alist[i] + alist[i+1]\n if tot % k != 0:\n dum.extend([alist[i],alist[i+1]])\n return dum",
"_____no_output_____"
],
[
"nonDivSubset([1,7,2,4],3)",
"_____no_output_____"
],
[
"alist = [1,7,2,4]\nfor i in range(len(alist)):\n for j in range(i,len(alist)):\n tot = alist[i] + alist[j]\n if tot <= 3 or tot %3 != 0:\n print(alist[i],alist[j])",
"1 1\n1 7\n1 2\n1 4\n7 7\n7 4\n2 2\n4 4\n"
],
[
"def nonDivisibleSubset(k, s):\n # Write your code here\n count = [0] * k\n for i in s:\n remainder = i % k\n count[remainder] +=1\n ans = min( count[0] , 1) \n \n if k % 2 == 0: \n ans += min(count[k//2] ,1 )\n \n for i in range( 1 , k//2 + 1): \n if i != k - i: \n ans += max(count[i] , count[k-i])\n \n return ans",
"_____no_output_____"
],
[
"nonDivisibleSubset(3,[1,7,2,4])",
"_____no_output_____"
],
[
"nonDivisibleSubset(7,[278,576,496,727,410,124,338,149,209,702,282,718,771,575,436])",
"_____no_output_____"
]
],
[
[
"# Sparse Arrays\nThere is a collection of input strings and a collection of query strings. For each query string, determine how many times it occurs in the list of input strings. Return an array of the results.\n\nstrings = ['abc','def','ab'] | queries = ['abc','ab','wer']",
"_____no_output_____"
]
],
[
[
"# Method 1 (Okish Ans)\ndef matchingStrings(strings, queries):\n # Write your code here\n counter = dict.fromkeys(queries,0)\n for i in queries:\n if i in strings:\n counter[i] = strings.count(i)\n else:\n counter[i] = 0\n return list(counter.values())\nprint(f\"Method 1 output is: {matchingStrings(['aba','baba','aba','xzxb'],['aba','xzxb','ab'])}\")\n\n# Method 2 (Best Ans)\ndef matchingStrings2(strings, queries):\n return [strings.count(i) for i in queries]\nprint(f\"Method 2 output is: {matchingStrings2(['aba','baba','aba','xzxb'],['aba','xzxb','ab'])}\") ",
"Method 1 output is: [2, 1, 0]\nMethod 2 output is: [2, 1, 0]\n"
]
],
[
[
"# Fraudulent Activity Notifications\nHackerLand National Bank has a simple policy for warning clients about possible fraudulent account activity. If the amount spent by a client on a particular day is greater than or equal to 2 X the client's median spending for a trailing number of days, they send the client a notification about potential fraud. The bank doesn't send the client any notifications until they have at least that trailing number of prior days' transaction data.\n\nGiven the number of trailing days d and a client's total daily expenditures for a period of n days, determine the number of times the client will receive a notification over all n days.\n\nExample\n\nexp = [10,20,30,40,50] | d = 3\n",
"_____no_output_____"
]
],
[
[
"# Method 1 (Okish answer) [partial answer correct]\nfrom statistics import median \ndef bool(alist,d):\n notification = 0\n med = median(alist[:d]) # for d number of days we wont get any notification\n for i in alist[d:]:\n if i >= 2*med:\n notification += 1\n alist = alist[i:d]\n return notification",
"_____no_output_____"
],
[
"bool([1,2,3,4,4],4)",
"_____no_output_____"
]
],
[
[
"# Check Subset\nYou are given two sets, A and B. \nYour job is to find whether set A is a subset of set B.\n\nIf set A is subset of set B, print True.\nIf set A is not a subset of set B, print False.\n\nSample Input\n\nA = [1,2,3,5,6] | \nB = [9,8,5,6,3,2,1,4,7]\n\nSample Output | \nTrue \n\nExplanation\nTest Case 01 Explanation\n\nSet A = {1 2 3 5 6} \n\nSet b = {9 8 5 6 3 2 1 4 7} \n\nAll the elements of set are elements of set . \nHence, set A is a subset of set B.",
"_____no_output_____"
]
],
[
[
"def checkSubset(alist,blist):\n return set(alist).issubset(set(blist))\ncheckSubset([1,2,3,4,5,6],[9,8,5,6,3,2,1,4,7])",
"_____no_output_____"
]
],
[
[
"# Check Strict Superset\nYou are given a set A and n other sets. \nYour job is to find whether set A is a strict superset of each of the N sets.\nPrint True, if A is a strict superset of each of the N sets. Otherwise, print False.\nA strict superset has at least one element that does not exist in its subset.\n\nSample Input | \n[1 2 3 4 5 6 7 8 9 10 11 12 23 45 84 78]\n\n[1 2 3 4 5]\n\n[100 11 12]\n\nSample Output | \nFalse\n\nExplanation\n\nSet A is the strict superset of the set([1,2,3,4,5]) but not of the set([100,11,12]) because 100 is not in set A. \nHence, the output is False.",
"_____no_output_____"
]
],
[
[
"def checkSuperSet(alist,blist):\n return set(alist).issuperset(set(blist))\ncheckSuperSet([1,2,3,4,5],[100,11,12])",
"_____no_output_____"
]
],
[
[
"# Validating Email Addresses With a Filter\nYou are given an integer N followed by N email addresses. Your task is to print a list containing only valid email addresses in lexicographical order.\n\nValid email addresses must follow these rules:\n\n - It must have the [email protected] format type.\n\n - The username can only contain letters, digits, dashes and underscores [a-z],[A-Z],[0-9],[_,-]\n\n - The website name can only have letters and digits [a-z],[A-Z],[0-9]\n\n - The extension can only contain letters [a-z],[A-Z]\n\n - The maximum length of the extension is 3.\n\nSample Input\n\n3\n\[email protected]\n\[email protected]\n\[email protected]\n\nSample Output | \n['[email protected]', '[email protected]', '[email protected]']\n",
"_____no_output_____"
]
],
[
[
"import re\ndef filterEmail(string):\n return re.compile(\"^[\\\\w-]+@[0-9a-zA-Z]+\\\\.[a-z]{1,3}$\").match(string)\nfilterEmail('[email protected]')",
"_____no_output_____"
]
],
[
[
"# Find a string\n\nIn this challenge, the user enters a string and a substring. You have to print the number of times that the substring occurs in the given string. String traversal will take place from left to right, not from right to left.\n\nNOTE: String letters are case-sensitive.\n\nSample Input: ABCDCDC, CDC\n\nSample Output: 2",
"_____no_output_____"
]
],
[
[
"from collections import Counter \n# Method 1 \ndef stringFind(string1,string2):\n stringDict = Counter([string1[i:i+3] for i in range(len(string1)-2)])\n return stringDict[string2]",
"_____no_output_____"
]
],
[
[
"# Text Wrap\nYou are given a string S and width w. \nYour task is to wrap the string into a paragraph of width w.\n\nSample Input | \nABCDEFGHIJKLIMNOQRSTUVWXYZ | \n4\n\nSample Output | \n\nABCD\n\nEFGH\n\nIJKL\n\nIMNO\n\nQRST\n\nUVWX\n\nYZ",
"_____no_output_____"
]
],
[
[
"# Method 1 with textwrap\ndef wraptest(string,width):\n import textwrap as wp\n return [''.join(list(set(i))) for i in wp.wrap(string,width)]\nprint(f\"Method 1 output is: {wraptest('ABCDEFGHIJKLIMNOQRSTUVWXYZ',4)}\")\n\n# Method 2\ndef wraptest2(string,width):\n return \"\\n\".join([string[i:i+width] for i in range(0, len(string), width)])\nprint(f\"Method 2 output is: \\n{wraptest2('ABCDEFGHIJKLIMNOQRSTUVWXYZ',4)}\")",
"Method 1 output is: ['BADC', 'HFEG', 'LKIJ', 'NOIM', 'TSRQ', 'WVXU', 'YZ']\nMethod 2 output is: \nABCD\nEFGH\nIJKL\nIMNO\nQRST\nUVWX\nYZ\n"
]
],
[
[
"# Sum of Digits of String After Convert\n\nYou are given a string s consisting of lowercase English letters, and an integer k.\n\nFirst, convert s into an integer by replacing each letter with its position in the alphabet (i.e., replace 'a' with 1, 'b' with 2, ..., 'z' with 26). Then, transform the integer by replacing it with the sum of its digits. Repeat the transform operation k times in total.\n\nFor example, if s = \"zbax\" and k = 2, then the resulting integer would be 8 by the following operations:\n\nConvert: \"zbax\" ➝ \"(26)(2)(1)(24)\" ➝ \"262124\" ➝ 262124\n\nTransform #1: 262124 ➝ 2 + 6 + 2 + 1 + 2 + 4 ➝ 17\n\nTransform #2: 17 ➝ 1 + 7 ➝ 8\n\nReturn the resulting integer after performing the operations described above.\n\n \n\nExample 1:\n\nInput: s = \"iiii\", k = 1 | Output: 36\n\nExplanation: The operations are as follows:\n\n - Convert: \"iiii\" ➝ \"(9)(9)(9)(9)\" ➝ \"9999\" ➝ 9999\n\n - Transform #1: 9999 ➝ 9 + 9 + 9 + 9 ➝ 36\n\nThus the resulting integer is 36.",
"_____no_output_____"
]
],
[
[
"# Method 1 (187 / 216 test cases passed.)\ndef sumDigi(string,k):\n alpha = list(map(chr, range(97, 123)))\n value = list(map(str, range(1, len(alpha)+1)))\n alpha_dict = {key:value for key,value in zip(alpha,value)}\n dummy = [int(alpha_dict[i]) for i in string if i in alpha_dict]\n if k <= 1:\n return sum(dummy)\n return 1 + (sum(dummy) - 1) % 9 if sum(dummy) else 0\nprint(f\"Method 1 output is: {sumDigi('iiii',1)}\")\n\n# Method 2\ndef getLucky(s, k):\n tmp = ''\n for i in s:\n tmp += str(ord(i)-96)\n for i in range(k):\n tmp = str(sum([int(i) for i in tmp]))\n\n return int(tmp)\nprint(f\"Method 2 output is: {getLucky('iiii',1)}\")",
"Method 1 output is: 36\nMethod 2 output is: 36\n"
]
],
[
[
"# New Year Chaos\nIt is New Year's Day and people are in line for the Wonderland rollercoaster ride. Each person wears a sticker indicating their initial position in the queue from 1 to n. Any person can bribe the person directly in front of them to swap positions, but they still wear their original sticker. One person can bribe at most two others.\n\nDetermine the minimum number of bribes that took place to get to a given queue order. Print the number of bribes, or, if anyone has bribed more than two people, print Too chaotic.\n\nExample\n\nq = [1,2,3,5,4,6,7]\n\nIf person 5 bribes person 4, the queue will look like this: 1 2 3 5 4 6 7. Only 1 bribe is required. Print 1.",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef newYearChaos(q):\n bribe = 0\n for i,j in enumerate(q):\n for k in range(max(j-1,0),i):\n if q[k] > j:\n bribe += 1\n if j-i > 2:\n return 'Too Chaotic'\n return bribe\nprint(f'Method 1 output is: {newYearChaos([2, 5, 1, 3, 4])}')",
"Method 1 output is: Too Chaotic\n"
]
],
[
[
"# Yet to see this\n# Triple sum\n",
"_____no_output_____"
],
[
"# The Minion Game\n\nKevin and Stuart want to play the 'The Minion Game'.\n\nGame Rules\nBoth players are given the same string,S . Both players have to make substrings using the letters of the string S. Stuart has to make words starting with consonants. Kevin has to make words starting with vowels. The game ends when both players have made all possible substrings. \n\n### Scoring\n\nA player gets +1 point for each occurrence of the substring in the string .\n\n### For Example:\n\nString S = BANANA\n\nKevin's vowel beginning word = ANA\n\nHere, ANA occurs twice in BANANA. Hence, Kevin will get 2 Points. \n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef minion(s):\n vowels = 'AEIOU'\n kevsc = 0\n stusc = 0\n for i in range(len(s)):\n if s[i] in vowels:\n kevsc += (len(s)-i)\n else:\n stusc += (len(s)-i)\n if kevsc > stusc:\n print(\"Kevin\", kevsc)\n elif kevsc < stusc:\n print(\"Stuart\", stusc)\n else:\n print(\"Draw\")\nminion('BANANA')",
"Stuart 12\n"
]
],
[
[
"# Maximize It!\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef maxmizeIt1(neslist,moduloVal):\n return sum([max(neslist[i])**2 for i in range(len(neslist))]) % moduloVal\nprint(f'Method 1 ouput is: {maxmizeIt1([[2,5,4],[3,7,8,9],[5,5,7,8,9,10]],1000)}')",
"Method 1 ouput is: 206\n"
]
],
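[
[
"# A brute-force sketch that checks every combination, since picking the largest\n# element from each list does not always maximize (sum of squares) % M.\nfrom itertools import product\ndef maximizeIt(nesList, M):\n    return max(sum(x*x for x in combo) % M for combo in product(*nesList))\nprint(f'Method 2 output is: {maximizeIt([[2,5,4],[3,7,8,9],[5,5,7,8,9,10]],1000)}')",
"_____no_output_____"
]
],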
[
[
"# Athlete Sort\nYou are given a spreadsheet that contains a list of N athletes and their details (such as age, height, weight and so on). You are required to sort the data based on the Kth attribute and print the final resulting table. Follow the example given below for better understanding.\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef athleteSort1(nesList):\n nesList.sort(key = lambda x: x[1])\n return nesList\nprint(f'Method 1 output is: {athleteSort1([[1,32,190],[2,35,175],[3,41,188],[4,26,195],[5,24,176]])}')",
"Method 1 output is: [[5, 24, 176], [4, 26, 195], [1, 32, 190], [2, 35, 175], [3, 41, 188]]\n"
]
],
[
[
"# Compress the String!\n",
"_____no_output_____"
],
[
"# Bill Division\nTwo friends Anna and Brian, are deciding how to split the bill at a dinner. Each will only pay for the items they consume. Brian gets the check and calculates Anna's portion. You must determine if his calculation is correct.\nFor example, assume the bill has the following prices: bill = [2,4,6]. Anna declines to eat item k = bills[2] which costs 6. if (2+4+6)/2 = 6. In the second case, he should refund 3 to Anna. \n\nSample Input 0\n\nbills = [3,10,2,9] | charged = 12\n\nSample Output 0\n\n5",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef billDiv1(bill,k,b):\n total = 0\n for i in range(len(bill)):\n total += bill[i]\n diff = (total - bill[k]) // 2\n if diff == b: return 'Bon Appetit'\n return abs(diff - b)\nprint(f'Method 1 output is: {billDiv1([3,10,2,9],1,12)}')\n\n# Method 2\ndef billDiv2(bill,k,b):\n bill.remove(bill[k])\n sd = sum(bill) // 2\n if sd == b: return 'Bon Appetit'\n return abs(sd - b)\nprint(f'Method 2 output is: {billDiv2([3,10,2,9],1,12)}')",
"Method 1 output is: 5\nMethod 2 output is: 5\n"
]
],
[
[
"# Apple and Orange\nSam's house has an apple tree and an orange tree that yield an abundance of fruit. Using the information given below, determine the number of apples and oranges that land on Sam's house.\nIn the diagram below:\n\n - The red region denotes the house, where s is the start point, and t is the endpoint. The apple tree is to the left of the house, and the orange tree is to its right.\n\n - Assume the trees are located on a single point, where the apple tree is at point a, and the orange tree is at point b.\n\n - When a fruit falls from its tree, it lands d units of distance from its tree of origin along the x-axis. *A negative value of d means the fruit fell d units to the tree's left, and a positive value of d means it falls d units to the tree's right. *\n\nGiven the value of d for m apples and n oranges, determine how many apples and oranges will fall on Sam's house (i.e., in the inclusive range [s,t])?",
"_____no_output_____"
]
],
[
[
"# Method 1 (Okish answer but need better code for time limit thingy)\ndef countApplesAndOranges(s, t, a, b, apples, oranges):\n apple_dist = list(map(lambda x: a+x,apples))\n orange_dist = list(map(lambda x: b+x,oranges))\n range_list = [i for i in range(s,t+1)]\n \n acount,ocount=0,0\n for i in apple_dist:\n if i in range_list:\n acount += 1\n \n for j in orange_dist:\n if j in range_list:\n ocount += 1\n \n return (acount,ocount)\nprint(f'Method 1 output is: {countApplesAndOranges(2, 3, 1, 5, [-2], [-1])} (Apple count,Orange count)')\n\n# Method 2\ndef countApplesAndOranges1(s, t, a, b, apples, oranges):\n apple_dist = list(map(lambda x: a+x,apples))\n orange_dist = list(map(lambda x: b+x,oranges))\n range_list = [i for i in range(s,t+1)]\n acount,ocount=0,0\n for i,j in zip(apple_dist,orange_dist):\n if i in range_list:\n acount += 1\n if j in range_list:\n ocount += 1\n return (acount,ocount)\nprint(f'Method 2 output is: {countApplesAndOranges1(2, 3, 1, 5, [-2], [-1])} (Apple count,Orange count)')\n\n# Method 3\ndef countApplesAndOranges2(s, t, a, b, apples, oranges):\n count_Apples,count_Oranges = 0,0\n\n for i in range(len(apples)):\n if a+apples[i] >= s and a+apples[i] <= t:\n count_Apples +=1\n \n for i in range(len(oranges)):\n if b+oranges[i] >= s and b+oranges[i] <=t:\n count_Oranges +=1\n \n return (count_Apples,count_Oranges)\nprint(f'Method 3 output is: {countApplesAndOranges2(2, 3, 1, 5, [-2], [-1])} (Apple count,Orange count)')",
"Method 1 output is: (0, 0) (Apple count,Orange count)\nMethod 2 output is: (0, 0) (Apple count,Orange count)\nMethod 3 output is: (0, 0) (Apple count,Orange count)\n"
]
],
[
[
"# yet to see this\n# Subarray Division\nTwo children, Lily and Ron, want to share a chocolate bar. Each of the squares has an integer on it.\n - Lily decides to share a contiguous segment of the bar selected such that:\n \n - The length of the segment matches Ron's birth month, and,\n\nThe sum of the integers on the squares is equal to his birth day.\nDetermine how many ways she can divide the chocolate.\n \ns = [2,2,1,3,2] | d = 4 | m = 2\n\nLily wants to find segments summing to Ron's birth day,d = 4 with a length equalling his birth month,m = 2. In this case, there are two segments meeting her criteria:[2,2] and [3,1].\n\nSample Input:\n\ns = [1,2,1,3,2] | d = 3 | m = 2\n\nSample Output: 2\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef birthday(s, d, m):\n return len([s[i:i+m] for i in range(len(s)) if sum(s[i:i+m]) == d])\nprint(f'Method 1 output is: {birthday([2,2,1,3,2], 4, 2)}')",
"Method 1 output is: 2\n"
]
],
[
[
"# Mini-Max Sum\nGiven five positive integers, find the minimum and maximum values that can be calculated by summing exactly four of the five integers. Then print the respective minimum and maximum values as a single line of two space-separated long integers.\nExample: \n\narr = [1,2,3,4,5] | \noutput = (10,14)",
"_____no_output_____"
]
],
[
[
"def miniMaxSum(arr):\n from itertools import combinations\n out = [sum(i) for i in combinations(arr,4)]\n return (min(out),max(out))\nprint(f'Method 1 output is: {miniMaxSum([1,2,3,4,5])}')",
"Method 1 output is: (10, 14)\n"
]
],
[
[
"# Plus Minus\nGiven an array of integers, calculate the ratios of its elements that are positive, negative, and zero. Print the decimal value of each fraction on a new line with 6 places after the decimal.\n\nNote: This challenge introduces precision problems. The test cases are scaled to six decimal places, though answers with absolute error of up to 10^-4 are acceptable\n\nExample:\n\narr = [1,1,0,-1,-1] | \nouput = (0.4,0.4,0.2)",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef plusMinus(arr):\n posNos,negNos,zeros = 0,0,0\n for i in range(len(arr)):\n if arr[i] < 0:\n negNos += 1\n if arr[i] > 0:\n posNos += 1\n if arr[i] == 0:\n zeros += 1\n return(str(posNos/len(arr)),str(negNos/len(arr)),str(zeros/len(arr)))\nprint(f'Method 1 output is: {plusMinus([1,1,0,-1,-1])}')",
"_____no_output_____"
]
],
[
[
"# Pairs\nGiven an array of integers and a target value, determine the number of pairs of array elements that have a difference equal to the target value.\n\nExample: \n\nk = 2, arr = [1,2,3,4]\n\noutput: 3 because, [1,2],[4,3] and [3,1] have the differnce equal to k",
"_____no_output_____"
]
],
[
[
"# Method 1 (Okish answer)\ndef pairs1(k, arr):\n return len([[arr[i],arr[j]] for i in range(len(arr)) for j in range(i,len(arr)) if abs(arr[i] - arr[j]) == k and arr[i] != arr[j]])\nprint(f'Method 1 output is: {pairs1(2,[1, 5, 3, 4, 2])}')\n\n# Method 2 (Best One)\ndef pairs2(k,arr):\n arr = set(arr)\n return sum(1 for i in arr if i+k in arr)\nprint(f'Method 2 output is: {pairs2(2,[1, 5, 3, 4, 2])}')",
"Method 1 output is: 3\nMethod 2 output is: 3\n"
]
],
[
[
"# Fraudulent Activity Notifications\n\nHackerLand National Bank has a simple policy for warning clients about possible fraudulent account activity. If the amount spent by a client on a particular day is greater than or equal to 2 X the client's median spending for a trailing number of days, they send the client a notification about potential fraud. The bank doesn't send the client any notifications until they have at least that trailing number of prior days' transaction data.\nGiven the number of trailing days d and a client's total daily expenditures for a period of n days, determine the number of times the client will receive a notification over all n days.\n\nExample: \n\nexpenditure = [2, 3, 4, 2, 3, 6, 8, 4, 5], d = 5\n\nOutput: 3\n\nExpalination:\n\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1 (Okish Answer)\ndef fraudActivity(expenditure,d):\n counter = 0\n for i in range(len(expenditure)-d):\n median = sum(expenditure[i:i+d]) / len(expenditure[i:i+d])\n if expenditure[i+d] >= 2*median:\n counter += 1\n return counter\nprint(f'Method 1 output is: {fraudActivity([2, 3, 4, 2, 3, 6, 8, 4, 5],5)}')\n\n# Method 2\ndef fraudActivity2(expenditure, d):\n from bisect import bisect_left,insort\n import math\n r=0\n l=len(expenditure)\n arr= expenditure[l-d-1:l-1]\n arr.sort()\n mid=math.floor(d/2)\n e=d%2\n i=l-1\n while i>=d:\n if e ==0 :\n m=float((arr[mid]+arr[mid-1])/2)\n else :\n m=arr[mid]\n if expenditure[i]>=2*m:\n r=r+1\n del arr[bisect_left(arr,expenditure[i-1])]\n insort(arr, expenditure[i-d-1])\n i=i-1\n return r\nprint(f'Method 2 output is: {fraudActivity2([2, 3, 4, 2, 3, 6, 8, 4, 5],5)}')",
"Method 1 output is: 2\nMethod 2 output is: 2\n"
]
],
[
[
"# Common Child\nA string is said to be a child of a another string if it can be formed by deleting 0 or more characters from the other string. Letters cannot be rearranged. Given two strings of equal length, what's the longest string that can be constructed such that it is a child of both?\n",
"_____no_output_____"
]
],
[
[
"# Method 1 (Not suitable for large strings)\ndef commonChild1(s1, s2):\n from itertools import combinations\n def combi(string):\n return [''.join(i) for j in range(len(string)) for i in combinations(string,j+1) if len(i) > 1]\n if len(set(s1)) and len(set(s2)) == 1: return 0\n s1Combi = combi(s1)\n s2Combi = combi(s2)\n # return len(max(set(s1Combi).intersection(set(s2Combi)),key=len))\n return max([len(i) for i in set(s1Combi).intersection(set(s2Combi))])\nprint(f'Method 1 output is: {commonChild1(\"ABCD\", \"ABDC\")}')",
"Method 1 output is: 3\n"
]
],
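[
[
"# A dynamic-programming (longest common subsequence) sketch that scales to the\n# full constraints, unlike the brute-force enumeration above.\ndef commonChild2(s1, s2):\n    n, m = len(s1), len(s2)\n    dp = [[0]*(m+1) for _ in range(n+1)]\n    for i in range(1, n+1):\n        for j in range(1, m+1):\n            if s1[i-1] == s2[j-1]:\n                dp[i][j] = dp[i-1][j-1] + 1\n            else:\n                dp[i][j] = max(dp[i-1][j], dp[i][j-1])\n    return dp[n][m]\nprint(f'Method 2 output is: {commonChild2(\"ABCD\", \"ABDC\")}')",
"_____no_output_____"
]
],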
[
[
"# Time Conversion\nGiven a time in 12-hour AM/PM format, convert it to military (24-hour) time.\nNote: - 12:00:00AM on a 12-hour clock is 00:00:00 on a 24-hour clock. \n\n - 12:00:00PM on a 12-hour clock is 12:00:00 on a 24-hour clock.\n",
"_____no_output_____"
]
],
[
[
"s = '12:01:23PM'\ndef timeChange(s):\n pmDict = {'01':'13','02':'14','03':'15','04':'16','05':'17','06':'18','07':'19','08':'20','09':'21','10':'22','11':'23','12':'24'}\n if 'AM' and '12' in s:\n amStr = s.replace('12','00')\n amOut = amStr.split('AM')\n return ''.join(amOut)\n else:\n sliced = s[:2]\n pmOut = s.split('PM')\n pmStr = pmOut.replace(sliced,pmDict[sliced])\n # print(''.join(out))\n return pmOut\nprint(f'Method 1 output is: {timeChange(s)}')",
"Method 1 output is: 00:01:23PM\n"
]
],
[
[
"# No Idea!\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef boo(hapA,haB,arr,A,B):\n return sum([(i in A) - (i in B) for i in arr])\n\nprint(f'Method 1 output is: {boo(3,2,[1,5,3],[3,1],[5,7])}')",
"Method 1 output is: 1\n"
]
],
[
[
"# Mars Exploration\nA space explorer's ship crashed on Mars! They send a series of SOS messages to Earth for help.\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1 (by Nassim Rahali from Mentis SA Company) --> Most Optimal way\ndef marsExploration1(s):\n result = 0\n word_to_find = 'SOS'\n letter_index = 0\n for letter in s:\n if letter != word_to_find[letter_index]:\n result = result + 1\n letter_index = (letter_index + 1) % 3\n return result\nprint(f'Method 1 output is: {marsExploration1(\"PPPQQQGGGGGGGGGGGGGGGMMMMMMMMMFFFFFFDDDERT\")}')\n\n# Method 2 (My Method)\ndef marsExploration2(s):\n test = 'SOS'\n counter = 0\n val = [s[i:i+3] for i in range(0,len(s),3)]\n for i in val:\n for j in range(len(i)):\n if test[j] != i[j]:\n counter += 1\n return counter\nprint(f'Method 2 output is: {marsExploration2(\"PPPQQQGGGGGGGGGGGGGGGMMMMMMMMMFFFFFFDDDERT\")}')",
"Method 1 output is: 42\nMethod 2 output is: 42\n"
]
],
[
[
"# Electronics Shop\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1 (okish ans but yet to check with runtime error)\ndef getMoneySpent1(keyboards, drives, b):\n if len(keyboards) and len(drives) == 1: return -1\n return max([i+j for i in keyboards if i != b for j in drives if i+j < b])\nprint(f'Method 1 output is: {getMoneySpent1([40,50,60],[5,8,12],60)}')",
"Method 1 output is: 58\n"
]
],
[
[
"# Halloween Sale\n\n",
"_____no_output_____"
]
],
[
[
"p,d,m,s=20,3,6,80\ncounter = 0\n",
"_____no_output_____"
]
],
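[
[
"# A sketch completing the setup cell above: the price starts at p, drops by d per\n# game until it reaches the floor m; count how many games fit in the budget s.\ndef howManyGames(p, d, m, s):\n    count = 0\n    price = p\n    while s >= price:\n        s -= price\n        count += 1\n        price = max(price - d, m)\n    return count\nprint(f'Output is: {howManyGames(20, 3, 6, 80)}')",
"_____no_output_____"
]
],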
[
[
"# Caesar Cipher\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef caesarCipher(s, k):\n import string\n symbols_low = string.ascii_lowercase\n symbols_up = string.ascii_uppercase\n res = []\n for c in s:\n if c.isupper():\n res.append(symbols_up[(symbols_up.index(c)+k)%len(symbols_up)])\n elif c.islower():\n res.append(symbols_low[(symbols_low.index(c)+k)%len(symbols_low)])\n else:\n res.append(c)\n \n return \"\".join(map(str, res))\n\nprint(f'Method 1 output is: {caesarCipher(\"middle-Outz\",2)}')",
"Method 1 output is: okffng-Qwvb\n"
]
],
[
[
"# Breaking the Records\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef breakingRecords(alist):\n # Write your code here\n minCount,maxCount = 0,0\n minVal,maxVal = alist[0],alist[0]\n for i in range(len(alist)):\n if alist[i] > maxVal:\n maxCount += 1\n maxVal = alist[i]\n elif alist[i] < minVal:\n minCount += 1\n minVal = alist[i]\n return (maxCount,minCount)\nprint(f'Method 1 output is: {breakingRecords([12,24,10,24])}')",
"Method 1 output is: (1, 1)\n"
],
[
"li = [3,4,5]\nk = 2\n[li[i:] + li[:i] for i in range(1,k+1)]",
"_____no_output_____"
]
],
[
[
"# Circular Array Rotation\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef circularArrayRotation(a, k, queries):\n from collections import deque\n alist = deque(a)\n alist.rotate(k)\n return [alist[i] for i in queries]\nprint(f'Method 1 output is: {circularArrayRotation([3,4,5],2,[1,2])}')",
"Method 1 output is: [5, 3]\n"
]
],
[
[
"# Find Digits\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef findDigits(n):\n div = list(map(int,str(n)))\n check = 0\n for i in div:\n if i == 0:continue\n if n % i == 0:\n check += 1\n return check\nprint(f'Method 1 output is: {findDigits(1012)}')",
"Method 1 output is: 3\n"
]
],
[
[
"# Absolute Permutation\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1 (Okish answer)\ndef absolutePermutation(n, k):\n pos = [i for i in range(1,n+1)]\n alist = pos[k:] + pos[:k]\n out = []\n for i in range(len(alist)):\n if abs(alist[i]-(i+1)) == k:\n out.append(alist[i])\n return out if out == alist else [-1]\nprint(f'Method 1 output is: {absolutePermutation(4,2)}')\n\n# Method 2 (online)\ndef absolutePermutation1(n, k):\n out = []\n switch = k\n if k == 0:\n return [x for x in range(1, n+1)]\n if n % (2*k) != 0:\n return [-1]\n for pos in range(1, n + 1):\n out.append(pos + switch)\n if pos % k == 0:\n switch *= -1\n return out\n\nprint(f'Method 2 output is: {absolutePermutation1(4,2)}')",
"Method 1 output is: [3, 4, 1, 2]\nMethod 2 output is: [3, 4, 1, 2]\n"
]
],
[
[
"## Beautiful Triplets\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1 (Okish output)\ndef beautifulTriplets(d, arr):\n output = []\n for i in range(len(arr)):\n for j in range(i,len(arr)):\n for k in range(j,len(arr)):\n if i < j < k:\n v1 = arr[j] - arr[i]\n v2 = arr[k] - arr[j]\n if v1 == v2 == d:\n output.append([i,j,k])\n return len(output)\nprint(f'Method 1 output is: {beautifulTriplets(3,[1, 6, 7, 7, 8, 10, 12, 13, 14, 19])}')",
"Method 1 output is: 2\n"
]
],
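[
[
"# A linear-time sketch using value counts: assuming the input is sorted and d > 0\n# (as in the standard problem), triplets (x, x+d, x+2*d) can be counted directly.\nfrom collections import Counter\ndef beautifulTriplets2(d, arr):\n    c = Counter(arr)\n    return sum(c[x] * c[x + d] * c[x + 2*d] for x in c)\nprint(f'Method 2 output is: {beautifulTriplets2(3, [1, 6, 7, 7, 8, 10, 12, 13, 14, 19])}')",
"_____no_output_____"
]
],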
[
[
"## Super Reduced String\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1 (Okish answer)\ndef reducedString(s):\n from collections import Counter\n dic = Counter(s)\n if len(set(s)) == 1: return 'Empty String'\n out = ''.join(list(map(str,{k:v for k,v in dic.items() if v %2 != 0}.keys())))\n if len(out) == 0: return 'Empty String'\n return out\nprint(f\"Method 1 output is: {reducedString('baab')}\")",
"Method 1 output is: Empty String\n"
]
],
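[
[
"# A stack-based sketch that deletes adjacent equal pairs directly, which also\n# handles cases the parity-count method above misses (e.g. 'abab').\ndef reducedString2(s):\n    stack = []\n    for ch in s:\n        if stack and stack[-1] == ch:\n            stack.pop()\n        else:\n            stack.append(ch)\n    return ''.join(stack) or 'Empty String'\nprint(f\"Method 2 output is: {reducedString2('baab')}\")",
"_____no_output_____"
]
],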
[
[
"## Shop Owner Sales\n\n\n\nSample Input\n\n10\n\n2 3 4 5 6 8 7 6 5 18\n\n6\n\n6 55\n\n6 45\n\n6 55\n\n4 40\n\n18 60\n\n10 50\n\nSample Output: \n200\n\nExplanation:\n\nCustomer 1: Purchased size 6 shoe for $55. \n\nCustomer 2: Purchased size 6 shoe for $45. \n\nCustomer 3: Size 6 no longer available, so no purchase. \n\nCustomer 4: Purchased size 4 shoe for $40. \n\nCustomer 5: Purchased size 18 shoe for $60. \n\nCustomer 6: Size 10 not available, so no purchase.",
"_____no_output_____"
]
],
[
[
"# list(map(int,'2 3 4 5 6 8 7 6 5 18'.split(' ')))\nfrom collections import Counter\ninp = [2, 3, 4, 5, 6, 8, 7, 6, 5, 18]\ncustomer = 6\nsize,cost = [6,6,6,4,18,10],[55,45,55,40,60,50]\nsizeCost = list(zip(size,cost))\nCounter(inp)",
"_____no_output_____"
]
],
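[
[
"# A sketch completing the tally above: sell a shoe only while that size is still\n# in stock, summing the money earned (expected 200 for the sample input).\nfrom collections import Counter\ndef shopOwnerSales(sizes_in_stock, requests):\n    stock = Counter(sizes_in_stock)\n    earned = 0\n    for size, cost in requests:\n        if stock[size] > 0:\n            stock[size] -= 1\n            earned += cost\n    return earned\nprint(f'Total earned: {shopOwnerSales([2, 3, 4, 5, 6, 8, 7, 6, 5, 18], [(6,55),(6,45),(6,55),(4,40),(18,60),(10,50)])}')",
"_____no_output_____"
]
],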
[
[
"## Compress the String!\n\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef compressString(aString):\n from itertools import groupby\n return [*[(len(list(c)), int(k)) for k, c in groupby(aString)]]\nprint(f'Method 1 output is: {compressString(\"111233455565542\")}')",
"Method 1 output is: [(3, 1), (1, 2), (2, 3), (1, 4), (3, 5), (1, 6), (2, 5), (1, 4), (1, 2)]\n"
]
],
[
[
"## Picking Numbers\n\n\n\nExample:\nInput 1 : [4,6,5,3,3,1]\n\nOutput 1 : 3\n",
"_____no_output_____"
]
],
[
[
"# Method 1\ndef pickingNumbers(s):\n maxCount = -1\n if max(s) == min(s): return len(s)\n for i in range(min(s), max(s)):\n c = s.count(i) + s.count(i+1)\n if c > maxCount:\n maxCount = c\n return maxCount\nprint(f'Method 1 output is: {pickingNumbers([4,6,5,3,3,1])}')",
"Method 1 output is: 3\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8bda3132f46309a0da38fe26c425dfe99e9faf | 114,631 | ipynb | Jupyter Notebook | notebooks/Compare_to_Anna.ipynb | edeno/sleep_analysis | d6ad03cbd15b05ee6ca5079528d1ab5b35c87bcf | [
"MIT"
] | null | null | null | notebooks/Compare_to_Anna.ipynb | edeno/sleep_analysis | d6ad03cbd15b05ee6ca5079528d1ab5b35c87bcf | [
"MIT"
] | null | null | null | notebooks/Compare_to_Anna.ipynb | edeno/sleep_analysis | d6ad03cbd15b05ee6ca5079528d1ab5b35c87bcf | [
"MIT"
] | 2 | 2021-07-09T23:02:11.000Z | 2021-10-17T18:49:57.000Z | 174.211246 | 85,000 | 0.862917 | [
[
[
"%matplotlib inline\n%reload_ext autoreload\n%autoreload 2\n%config InlineBackend.figure_format = 'retina'\n# %qtconsole",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport xarray as xr\nimport seaborn as sns\n\nfrom dask.distributed import Client, LocalCluster\nimport dask\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\nclient = Client(processes=True, n_workers=7, threads_per_worker=8, memory_limit='25GB')\n\nclient",
"_____no_output_____"
],
[
"from loren_frank_data_processing import make_epochs_dataframe\nfrom src.parameters import ANIMALS\n\nepoch_info = make_epochs_dataframe(ANIMALS)\nepoch_info.xs('remy', drop_level=False)",
"_____no_output_____"
],
[
"from src.load_data import load_data\n\nepoch_key = ('remy', 35, 2)\n\ndata = load_data(epoch_key)",
"distributed.comm.tcp - WARNING - Closing dangling stream in <TCP local=tcp://127.0.0.1:48274 remote=tcp://127.0.0.1:34895>\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/remyDIO35.mat\nWARNING:loren_frank_data_processing.core:No DIO file found, using distance from well to segment trials\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/remyDIO35.mat\nWARNING:loren_frank_data_processing.core:No DIO file found, inferring correct inbound/outbound from task rules\nWARNING:loren_frank_data_processing.core:dav, 1, 4 not processed\nWARNING:loren_frank_data_processing.core:egy, 12, 1 not processed\nWARNING:loren_frank_data_processing.core:egy, 12, 2 not processed\nWARNING:loren_frank_data_processing.core:egy, 12, 3 not processed\nWARNING:loren_frank_data_processing.core:egy, 12, 4 not processed\nWARNING:loren_frank_data_processing.core:egy, 12, 5 not processed\nWARNING:loren_frank_data_processing.core:egy, 12, 6 not processed\nWARNING:loren_frank_data_processing.core:egy, 12, 7 not processed\nWARNING:loren_frank_data_processing.core:egy, 12, 8 not processed\nWARNING:loren_frank_data_processing.core:fra, 4, 1 not processed\nWARNING:loren_frank_data_processing.core:gov, 2, 3 not processed\nWARNING:loren_frank_data_processing.core:gov, 2, 4 not processed\nWARNING:loren_frank_data_processing.core:gov, 2, 5 not processed\nWARNING:loren_frank_data_processing.core:gov, 2, 6 not processed\nWARNING:loren_frank_data_processing.core:gov, 2, 7 not processed\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-02.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-03.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-04.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-06.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-07.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-09.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-10.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-11.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-12.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-13.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-14.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-15.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-17.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: 
/data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-19.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-20.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-21.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-22.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-23.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-24.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-25.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-26.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-28.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-29.mat\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/EEG/remymarks35-30.mat\nINFO:src.load_data:Finding ripple times...\ndistributed.comm.tcp - WARNING - Closing dangling stream in <TCP local=tcp://127.0.0.1:48278 remote=tcp://127.0.0.1:34895>\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/remyDIO35.mat\nWARNING:loren_frank_data_processing.core:No DIO file found, using distance from well to segment trials\nWARNING:loren_frank_data_processing.core:Failed to load file: /data2/edeno/replay_trajectory_paper/src/../Raw-Data/Remy/remyDIO35.mat\nWARNING:loren_frank_data_processing.core:No DIO file found, inferring correct inbound/outbound from task rules\n"
],
[
"from src.parameters import (ANIMALS, FIGURE_DIR, PROBABILITY_THRESHOLD,\n PROCESSED_DATA_DIR, SAMPLING_FREQUENCY,\n TRANSITION_TO_CATEGORY,\n continuous_transition_types, discrete_diag,\n knot_spacing, model, model_kwargs, movement_var,\n place_bin_size, replay_speed, spike_model_penalty)\nfrom replay_trajectory_classification import ClusterlessClassifier\n\nis_training = data['position_info'].speed > 4\nposition = data['position_info'].loc[:, 'linear_position']\ntrack_labels = data['position_info'].arm_name\n\nlogging.info('Fitting classifier...')\nclassifier = ClusterlessClassifier(\n place_bin_size=place_bin_size, movement_var=movement_var,\n replay_speed=replay_speed,\n discrete_transition_diag=discrete_diag,\n continuous_transition_types=continuous_transition_types,\n model=model, model_kwargs=model_kwargs).fit(\n position, data['multiunit'], is_training=is_training,\n track_labels=track_labels)\nlogging.info(classifier)",
"INFO:root:Fitting classifier...\nINFO:replay_trajectory_classification.classifier:Fitting initial conditions...\nINFO:replay_trajectory_classification.classifier:Fitting state transition...\nINFO:replay_trajectory_classification.classifier:Fitting multiunits...\nINFO:root:ClusterlessClassifier(continuous_transition_types=[['w_track_1D_random_walk_minus_identity',\n 'w_track_1D_inverse_random_walk',\n 'identity'],\n ['uniform',\n 'w_track_1D_inverse_random_walk',\n 'uniform'],\n ['w_track_1D_random_walk_minus_identity',\n 'w_track_1D_inverse_random_walk',\n 'identity']],\n discrete_transition_diag=0.999,\n discrete_transition_type='strong_diagona...\n initial_conditions_type='uniform_on_track',\n model=<class 'replay_trajectory_classification.misc.NumbaKDE'>,\n model_kwargs={'bandwidth': array([24., 24., 24., 24., 5., 5.])},\n movement_var=4.0,\n occupancy_kwargs={'bandwidth': array([24., 24., 24., 24., 5., 5.])},\n occupancy_model=<class 'replay_trajectory_classification.misc.NumbaKDE'>,\n place_bin_size=2.0, position_range=None, replay_speed=1)\n"
],
[
"from scipy.ndimage import label\nfrom tqdm import tqdm\n\nlabels, _ = label(~is_training)\nresults = []\n\nfor label_name, df in tqdm(pd.DataFrame(labels, index=is_training.index, columns=['labels']).groupby('labels')):\n if label_name != 0:\n m = data['multiunit'].sel(time=df.index)\n results.append(classifier.predict(m, time=m.time))",
" 0%| | 0/339 [00:00<?, ?it/s]distributed.utils - ERROR - An asyncio.Future, a coroutine or an awaitable is required\nTraceback (most recent call last):\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/distributed/utils.py\", line 666, in log_errors\n yield\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/distributed/dashboard/components.py\", line 561, in cb\n prof, metadata = await asyncio.gather(prof, metadata)\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/asyncio/tasks.py\", line 602, in gather\n fut = ensure_future(arg, loop=loop)\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/asyncio/tasks.py\", line 526, in ensure_future\n raise TypeError('An asyncio.Future, a coroutine or an awaitable is '\nTypeError: An asyncio.Future, a coroutine or an awaitable is required\ntornado.application - ERROR - Exception in callback functools.partial(<bound method IOLoop._discard_future_result of <zmq.eventloop.ioloop.ZMQIOLoop object at 0x7f1c4e2f37f0>>, <Task finished coro=<ProfileTimePlot.trigger_update.<locals>.cb() done, defined at /home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/distributed/dashboard/components.py:551> exception=TypeError('An asyncio.Future, a coroutine or an awaitable is required',)>)\nTraceback (most recent call last):\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/tornado/ioloop.py\", line 767, in _discard_future_result\n future.result()\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/distributed/dashboard/components.py\", line 561, in cb\n prof, metadata = await asyncio.gather(prof, metadata)\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/asyncio/tasks.py\", line 602, in gather\n fut = ensure_future(arg, loop=loop)\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/asyncio/tasks.py\", line 526, in ensure_future\n raise TypeError('An asyncio.Future, a coroutine or an awaitable is '\nTypeError: An asyncio.Future, a coroutine or an awaitable is required\ndistributed.utils - ERROR - An asyncio.Future, a coroutine or an awaitable is required\nTraceback (most recent call last):\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/distributed/utils.py\", line 666, in log_errors\n yield\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/distributed/dashboard/components.py\", line 561, in cb\n prof, metadata = await asyncio.gather(prof, metadata)\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/asyncio/tasks.py\", line 602, in gather\n fut = ensure_future(arg, loop=loop)\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/asyncio/tasks.py\", line 526, in ensure_future\n raise TypeError('An asyncio.Future, a coroutine or an awaitable is '\nTypeError: An asyncio.Future, a coroutine or an awaitable is required\ntornado.application - ERROR - Exception in callback functools.partial(<bound method IOLoop._discard_future_result of <zmq.eventloop.ioloop.ZMQIOLoop object at 0x7f1c4e2f37f0>>, <Task finished coro=<ProfileTimePlot.trigger_update.<locals>.cb() done, defined at 
/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/distributed/dashboard/components.py:551> exception=TypeError('An asyncio.Future, a coroutine or an awaitable is required',)>)\nTraceback (most recent call last):\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/tornado/ioloop.py\", line 767, in _discard_future_result\n future.result()\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/site-packages/distributed/dashboard/components.py\", line 561, in cb\n prof, metadata = await asyncio.gather(prof, metadata)\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/asyncio/tasks.py\", line 602, in gather\n fut = ensure_future(arg, loop=loop)\n File \"/home/edeno/miniconda3/envs/replay_trajectory_paper/lib/python3.6/asyncio/tasks.py\", line 526, in ensure_future\n raise TypeError('An asyncio.Future, a coroutine or an awaitable is '\nTypeError: An asyncio.Future, a coroutine or an awaitable is required\n100%|██████████| 339/339 [69:08:40<00:00, 734.28s/it] \n"
],
[
"results[-1]",
"_____no_output_____"
],
[
"r = xr.concat(results, dim='time')",
"_____no_output_____"
],
[
"from src.parameters import TRANSITION_TO_CATEGORY\n\nripple_results = []\nfor ripple_number in tqdm(data['ripple_times'].index):\n df = data['ripple_times'].loc[ripple_number]\n ds = (r.sel(time=slice(df.start_time, df.end_time))\n .assign_coords(time=lambda ds: ds.time - ds.time[0])\n .drop(['likelihood', 'causal_posterior']))\n ripple_results.append(ds)\n\nripple_results = xr.concat(ripple_results, dim=data['ripple_times'].index)\nripple_results = ripple_results.assign_coords(\n state=lambda ds: ds.state.to_index()\n .map(TRANSITION_TO_CATEGORY))",
"100%|██████████| 344/344 [00:01<00:00, 323.40it/s]\n"
],
[
"ripple_results",
"_____no_output_____"
],
[
"ripple_results.sel(ripple_number=1).dropna('time')",
"_____no_output_____"
],
[
"from src.analysis import get_probability, get_is_classified\nfrom src.visualization import plot_category_counts\n\nprobability_threshold = 0.8\nsampling_frequency = SAMPLING_FREQUENCY\n\n\nprobability = get_probability(ripple_results)\nis_classified = get_is_classified(probability, probability_threshold)\nduration = (is_classified.sum('time') / sampling_frequency)\nduration = duration.to_dataframe().unstack(level=1)\nduration.columns = list(duration.columns.get_level_values('state'))\nduration = duration.rename(\n columns=lambda column_name: column_name + '_duration')\nis_category = (duration > 0.0).rename(columns=lambda c: c.split('_')[0])\nduration = pd.concat((duration, is_category), axis=1)\nduration['is_classified'] = np.any(duration > 0.0, axis=1)\n\nplot_category_counts(duration)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8bf74382db3cc8d03676a664b3322ab297331a | 286,367 | ipynb | Jupyter Notebook | KRPAM_cellulose.ipynb | 77hippo/EPSL_paper | 320c2d66f895c3dde942de260ae9220cad853d53 | [
"MIT"
] | 8 | 2016-11-02T06:17:28.000Z | 2021-08-24T14:18:03.000Z | KRPAM_cellulose.ipynb | 77hippo/EPSL_paper | 320c2d66f895c3dde942de260ae9220cad853d53 | [
"MIT"
] | null | null | null | KRPAM_cellulose.ipynb | 77hippo/EPSL_paper | 320c2d66f895c3dde942de260ae9220cad853d53 | [
"MIT"
] | null | null | null | 920.794212 | 276,890 | 0.940217 | [
[
[
"Load necessary packages",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfrom mpl_toolkits.basemap import Basemap\nfrom scipy.stats.mstats import mquantiles\nfrom scipy.stats.stats import pearsonr\nfrom scipy import interpolate\nfrom scipy.interpolate import UnivariateSpline\nfrom scipy.signal import butter, lfilter, filtfilt\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import genfromtxt\nfrom nitime import algorithms as alg\nfrom nitime import utils\nfrom scipy.stats import t\nimport xray\nimport pandas as pd\nfrom rpy2.robjects import FloatVector\nfrom rpy2.robjects.vectors import StrVector\nimport rpy2.robjects as robjects \nfrom rpy2.robjects.packages import importr\nr = robjects.r",
"_____no_output_____"
]
],
[
[
"Read precipitation and OLR data",
"_____no_output_____"
]
],
[
[
"#plt.style.use('ggplot')\n\n#read CMAP precipitation\nnc = xray.open_dataset('data/precip.mon.mean.nc')\nlat0 = nc['lat']\nlon0 = nc['lon']\nprecip = nc['precip']\nnlat=lat0.shape[0]\nnlon=lon0.shape[0]\nprecip_ann=precip[0:335:12,:,:]\n\n#read NOAA OLR\nnc = xray.open_dataset('data/olr.mon.mean.nc')\nlat1 = nc['lat']\nlon1 = nc['lon']\nolr0 = nc['olr']\nolr0 = olr0*0.01+327.65\nolr = olr0[55:390,:,:]\nolr_ann = olr0[55:390:12,:,:]",
"_____no_output_____"
]
],
[
[
"Calculate and test field correlation between cellulose $\\delta^{18}O$ and precipitation/OLR",
"_____no_output_____"
]
],
[
[
"#October-November-December mean\nfor i in range(precip_ann.shape[0]):\n precip_ann[i,:,:]=np.mean(precip[i*12+9:i*12+11,:,:],axis=0)\n olr_ann[i,:,:]=np.mean(olr[i*12+9:i*12+11,:,:],axis=0)\n\nsst_ann_fil=precip_ann\n\n#load October Kirirom cellulose d18O\ndata = genfromtxt('data/KRPAM_mon.txt', delimiter=' ')\nd18O = data[112:,10]\nd18O_median = d18O\n\n#correlation\n#sst_ann_new=sst_ann_fil.transpose(1,2,0)\nsst_ann_new=sst_ann_fil.transpose(\"lat\",\"lon\",\"time\")\nsst_ano=np.ma.anomalies(sst_ann_new,axis=2)\nsst_sd=np.sum(sst_ano**2,axis=2)\n\nolr_ann_new=olr_ann.transpose(\"lat\",\"lon\",\"time\")\nolr_ano=np.ma.anomalies(olr_ann_new,axis=2)\nolr_sd=np.sum(olr_ano**2,axis=2)\n\nd18O_median_ano=np.ma.anomalies(d18O_median)\nd18O_median_sd=np.sum(d18O_median_ano**2,axis=0)\n\npre_nomi_median=np.dot(sst_ano,d18O_median_ano)\nolr_nomi_median=np.dot(olr_ano,d18O_median_ano)\n\ncorr_pre = pre_nomi_median/np.sqrt(np.dot(sst_sd[:,:,None],d18O_median_sd[None]))\ncorr_olr = olr_nomi_median/np.sqrt(np.dot(olr_sd[:,:,None],d18O_median_sd[None]))",
"_____no_output_____"
],
[
"#t-test for correlation\nd18O_coef, d18O_sigma = alg.AR_est_YW(d18O_median_ano,1)\nneff_array=sst_ano[:,:,0]\nlatt,lont=[],[]\nlat_normal,lon_normal=[],[]\npval_pre=[]\npval_olr=[]\n\nfor ilat in range(nlat):\n for ilon in range(nlon):\n if np.isnan(sst_ano[ilat,ilon,0])==False:\n coef_pre, sigma_pre = alg.AR_est_YW(sst_ano[ilat,ilon,:],1)\n coef_olr, sigma_olr = alg.AR_est_YW(olr_ano[ilat,ilon,:],1)\n# sst_coef[ilat,ilon] = coef\n neff_pre=28*(1-d18O_coef*coef_pre)/(1+d18O_coef*coef_pre)\n neff_olr=28*(1-d18O_coef*coef_olr)/(1+d18O_coef*coef_olr)\n latt.append(lat0[ilat])\n lont.append(lon0[ilon])\n\n tval_pre=corr_pre[ilat,ilon]/np.sqrt(1-corr_pre[ilat,ilon]**2)*np.sqrt(neff_pre-2)\n tval_olr=corr_olr[ilat,ilon]/np.sqrt(1-corr_olr[ilat,ilon]**2)*np.sqrt(neff_olr-2)\n\n pval0_pre=t.sf(abs(tval_pre),neff_pre-2)*2\n pval0_olr=t.sf(abs(tval_olr),neff_olr-2)*2\n pval_pre.append(pval0_pre)\n pval_olr.append(pval0_olr)\n# if pval0 < 0.1:\n# lat_normal.append(lat0[ilat])\n# lon_normal.append(lon0[ilon])\n\npvalr_pre = FloatVector(pval_pre)\npvalr_olr = FloatVector(pval_olr)\n\nr.source(\"fdr.R\")\n#sig_med = r.fdr(pvalr_med,method=\"original\",adjustment_method=\"mean\")\n\nsig_pre = r.fdr(pvalr_pre,method=\"original\",qlevel=0.1)\nsig_olr = r.fdr(pvalr_olr,method=\"original\",qlevel=0.1)\n\nprint(sig_pre)\n#print(sig_975)\nlat_pre=latt[:]\nlon_pre=lont[:]\nlat_olr=latt[:]\nlon_olr=lont[:]\n#lat975=[]\n#lon975=[]\nif sig_pre:\n for isig in sorted(sig_pre,reverse=True):\n del lat_pre[isig-1]\n del lon_pre[isig-1]\n# lat975.append(latt[isig-1])\n# lon975.append(lont[isig-1])\nif sig_olr:\n for isig in sorted(sig_olr,reverse=True):\n del lat_olr[isig-1]\n del lon_olr[isig-1]",
"NULL\n"
]
],
[
[
"Plot field correlations",
"_____no_output_____"
]
],
[
[
"#plot figures\nmap = Basemap(projection='merc',resolution='l',lat_0=0,lon_0=180,llcrnrlon=80,llcrnrlat=-50,urcrnrlon=280,urcrnrlat=50)\n\nlons, lats = np.meshgrid(lon0, lat0)\nx,y=map(lons,lats)\nfig=plt.figure(figsize=(10,8))\nax1=fig.add_subplot(211)\nmap.drawcoastlines(linewidth=0.5,color='k')\n#map.fillcontinents(color='gray')\n#map.drawmapboundary()\nmap.drawmeridians(np.arange(0,360,30),color='DimGray',labels=[1,0,0,1],fontsize=10)\nmap.drawparallels(np.arange(-90,90,30),color='DimGray',labels=[1,0,0,1],fontsize=10)\nclevs=np.linspace(-1,1,21)\ncs=map.contourf(x,y,corr_pre,clevs,cmap=plt.cm.RdBu_r)\ncbar = map.colorbar(cs,location='bottom',pad=\"10%\")\ncbar.ax.tick_params(labelsize=10)\nx2,y2=map(lon_pre,lat_pre)\npasst=map.plot(x2,y2,'ko',markersize=1)\nax1.set_title(\"(a) precipitation\",fontsize=12)\n\nlons1, lats1 = np.meshgrid(lon1, lat1)\nx1,y1=map(lons1,lats1)\nax2=fig.add_subplot(212)\nmap.drawcoastlines(linewidth=0.5,color='k')\n#map.fillcontinents(color='gray')\n#map.drawmapboundary()\nmap.drawmeridians(np.arange(0,360,30),color='DimGray',labels=[1,0,0,1],fontsize=10)\nmap.drawparallels(np.arange(-90,90,30),color='DimGray',labels=[1,0,0,1],fontsize=10)\nclevs=np.linspace(-1,1,21)\ncs=map.contourf(x1,y1,corr_olr,clevs,cmap=plt.cm.RdBu_r)\ncbar = map.colorbar(cs,location='bottom',pad=\"10%\")\ncbar.ax.tick_params(labelsize=10)\nx2,y2=map(lon_olr,lat_olr)\npasst=map.plot(x2,y2,'ko',markersize=1)\nax2.set_title(\"(b) OLR\",fontsize=12)",
"/Users/hujun/anaconda/envs/snakes/lib/python3.5/site-packages/mpl_toolkits/basemap/__init__.py:3644: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future\n xx = x[x.shape[0]/2,:]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8bf94d6494dbb11220434641aac176758467f1 | 718,423 | ipynb | Jupyter Notebook | notebooks/Thresholding Image for Text Detection.ipynb | arita37/text-detector | e9384e7ee5c1fe808a20925fb7bba800a0f6a08f | [
"MIT"
] | 2 | 2020-05-04T05:32:19.000Z | 2020-05-04T08:19:19.000Z | notebooks/Thresholding Image for Text Detection.ipynb | arita37/text-detector | e9384e7ee5c1fe808a20925fb7bba800a0f6a08f | [
"MIT"
] | 4 | 2020-07-09T18:16:26.000Z | 2020-07-11T06:30:19.000Z | notebooks/Thresholding Image for Text Detection.ipynb | arita37/text-detector | e9384e7ee5c1fe808a20925fb7bba800a0f6a08f | [
"MIT"
] | 1 | 2020-07-24T13:46:27.000Z | 2020-07-24T13:46:27.000Z | 8,353.755814 | 716,428 | 0.964355 | [
[
[
"from skimage.color import rgb2gray\nimport matplotlib.pyplot as plt\nfrom skimage.io import imread\nfrom skimage.filters import threshold_sauvola\nfrom skimage.exposure import is_low_contrast\nfrom skimage.exposure import adjust_gamma\nfrom skimage.restoration import denoise_tv_chambolle\n\ncimage = imread('../sample_images/image9.jpg')\n\ngamma_corrected = adjust_gamma(cimage, 1.2)\n# #if is_low_contrast(gamma_corrected):\nnoise_removed = denoise_tv_chambolle(gamma_corrected, multichannel=True)\ngry_img = rgb2gray(noise_removed)\nth = threshold_sauvola(gry_img, 19)\nbimage = gry_img > th\n\nfig, ax = plt.subplots(ncols=2, figsize=(20,20))\nax[0].imshow(cimage)\nax[0].axis(\"off\")\nax[1].imshow(bimage, cmap=\"gray\")\nax[1].axis(\"off\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
ec8c0e4d9eca7c72a0fd04cd63ae7c36769545a5 | 20,459 | ipynb | Jupyter Notebook | examples/drift_expansion/sc_drift_expansion.ipynb | radiasoft/rs_synergia | b43509de7f4a938354dc127762d8e723463e0e95 | [
"Apache-2.0"
] | null | null | null | examples/drift_expansion/sc_drift_expansion.ipynb | radiasoft/rs_synergia | b43509de7f4a938354dc127762d8e723463e0e95 | [
"Apache-2.0"
] | null | null | null | examples/drift_expansion/sc_drift_expansion.ipynb | radiasoft/rs_synergia | b43509de7f4a938354dc127762d8e723463e0e95 | [
"Apache-2.0"
] | null | null | null | 33.760726 | 359 | 0.58214 | [
[
[
"## Purpose:\nCheck 2D space charge in Synergia vs envelope equations for free beam expansion.\n\nThe Bassetti-Erskine 2D space charge model is used. This is a \"frozen\" space charge algorithm, which assumes the transverse particle distribution is Gaussian in position and momentum. Synergia also provides self-consistent PIC algorithms. Longitudinal space charge is ignored here. Synergia provides 3D algorithms for both PIC and Bassetti-Erskine.\n\nThe initial transverse particle distribution is assumed to be Gaussian. The initial longitudinal particle distribution is assumed to be uniform in position (z), with zero energy spread. The initial particles are read from a local file.\n\n### Number of particles in the beam\n\n1. We work with the peak current, I.\n2. The beam consists of N protons, with total charge $Q = N e$.\n2. Assume beam has total length L, velocity $\\beta$c, and the particles are uniformly distributed in z.\n3. The current is then: $I = \\frac{Q \\beta c}{L}$, and the corresponding number is $N = \\frac{I L}{\\beta c e}$\n4. The number of particles per unit length is $\\frac{N}{L} = \\frac{I}{\\beta c e}$ \n\nThis implies a proton number scaling of $2.85769 \\times 10^8 \\frac{p}{m \\cdot mA}$, which at (for example) 14 mA equates to $4.0 \\times 10^{9}$ protons per meter.\n\n### Initial import statements",
"_____no_output_____"
]
],
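[
[
"# A rough back-of-the-envelope check of the particle-number scaling described above.\n# Illustrative sketch only: beta here is an assumed approximation for a 2.5 MeV proton,\n# not the value computed from the lattice/reference particle later in this notebook.\nimport scipy.constants as const\n\nbeta_assumed = 0.0728   # approximate relativistic beta for a 2.5 MeV proton (assumption)\nI_beam = 14.0e-3        # beam current [A]\nn_per_metre = I_beam/(beta_assumed*const.c*const.e)\nprint(n_per_metre)      # roughly 4.0e9 protons per metre, consistent with the scaling quoted above",
"_____no_output_____"
]
],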
[
[
"%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nimport sys, os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport scipy\nimport tables\nfrom mpi4py import MPI",
"_____no_output_____"
]
],
[
[
"### Import physics libraries",
"_____no_output_____"
]
],
[
[
"try:\n import rssynergia\nexcept ImportError:\n !pip -q install git+git://github.com/radiasoft/rssynergia\n\nfrom rssynergia.base_diagnostics import read_bunch\nfrom rssynergia.base_diagnostics import workflow\nfrom rssynergia.base_diagnostics import lfplot\nfrom rssynergia.base_diagnostics import latticework\nfrom rssynergia.base_diagnostics import basic_calcs\nfrom rssynergia.base_diagnostics import pltbunch\nfrom rssynergia.base_diagnostics import elliptic_sp\nfrom rssynergia.base_diagnostics import singleparticle\nfrom rssynergia.base_diagnostics import options\nfrom rssynergia.base_diagnostics import diagplot\nfrom rssynergia.base_diagnostics import utils\n\nfrom rssynergia.elliptic import elliptic_beam6d\nfrom rssynergia.standard import standard_beam6d\n\nimport synergia\nimport synergia_workflow",
"_____no_output_____"
]
],
[
[
"### Test problem - Expanding beam in a drift\n\nDrift length:\n- 3 m\n\nBeam Parameters:\n- 14 mA, 2.5 MeV proton beam\n- Emittance: 0.3 mm-mrad normalized\n- 64x64x32 mesh (2D Open)\n- 25 turns at 0.1 m drift length",
"_____no_output_____"
],
[
"### Create workdir and specify Synergia simulation options (default values)",
"_____no_output_____"
]
],
[
[
"# Create and populate a Synergia options object\n# File I/O\nopts = synergia_workflow.Options(\"zc_drift\")\nopts.add(\"output_dir\",\"sc_drift\", \"Directory for output files\", str)\nopts.relpath = opts.output_dir\nworkflow.make_path(opts.output_dir)\nopts.add(\"verbosity\", 1, \"Verbosity of propagation\", int)\nopts.add(\"bunch_file\",\"myBunch.txt\",\"txt file for bunch particles\", str)\n\n# Define reference particle to be a proton at 2.5 MeV\ntotal_energy = synergia.foundation.pconstants.proton_mass + 2.5e-3 # [GeV]\nfour_momentum = synergia.foundation.Four_momentum(synergia.foundation.pconstants.proton_mass, total_energy)\nreference_particle = synergia.foundation.Reference_particle(synergia.foundation.pconstants.proton_charge,four_momentum)\nopts.gamma = reference_particle.get_gamma()\nopts.beta = reference_particle.get_beta()\n\n# beam (physical)\nopts.add(\"emit\",9.74e-6, \"H0 value corresponding to real sigma horizontal emittance of 0.3 mm-mrad\", float)\nopts.add(\"dpop\", 0.0, \"Delta-p/p spread\", float)\nopts.add(\"real_particles\", 1.0e11, \"Number of real particles\", float)\nopts.emit_n = 0.3*1.e-6 # 0.3 mm-mrad normalized emittance\nopts.emits = [basic_calcs.calc_geometric_emittance(opts.emit_n,opts.beta,opts.gamma)]\ndpop = 0.0\n\n# beam (numerical)\nopts.add(\"macro_particles\", 50000, \"Number of macro particles\", int) \nopts.add(\"spacecharge\", True, \"whether space charge is on\", bool)\nopts.add(\"solver\", \"2dbassetti-erskine\", \"other solvers are available\", str)\n\n# Lattice\nopts.add(\"steps_per_element\",5,\"Number of steps per element\", int)\nopts.add(\"turns\",30,\"Number of turns\", int)\nopts.add(\"checkpointperiod\", 15, \"Interval for creating checkpoints\", int)\nopts.add(\"radius\", 0.5, \"aperture radius [m]\", float)\nopts.add(\"stepper\", \"splitoperator\", \"Simulation stepper, either 'independent','elements','splitoperator','soelements'\", str)",
"_____no_output_____"
]
],
[
[
"### Construct the lattice (a simple drift)",
"_____no_output_____"
]
],
[
[
"# specify the drift element\n# note: above, it is specified that 5 steps are taken per element\n# hence, the integration step size is my_drift_length / 5.\n# above, the number of \"turns\" is specified as 30\n# hence, the total drift length is 30.*my_drift_length\n# hence, the total number of steps in Synergia is 30*5 = 150\nmy_drift_length = 0.1 # [m]\ndrift_element = synergia.lattice.Lattice_element(\"drift\", \"drift_element\")\ndrift_element.set_double_attribute(\"l\", my_drift_length)\n\n# instantiate the lattice\nlattice = synergia.lattice.Lattice(\"test\", synergia.lattice.Mad8_adaptor_map())\nlattice.append(drift_element)\nlattice.set_reference_particle(reference_particle)\nopts.lattice = lattice\n\n# specify the operator\ncoll_operator = synergia.collective.Space_charge_2d_bassetti_erskine()\n\n# instantiate the lattice stepper and simulator\nmap_order = 1\nstepper = synergia.simulation.Split_operator_stepper_elements(lattice, map_order,coll_operator, opts.steps_per_element)\nopts.lattice_simulator = stepper.get_lattice_simulator()",
"_____no_output_____"
],
[
"#Cleanup any files from previous runs\ntry:\n os.remove('myBunch.txt')\nexcept OSError:\n pass\n\nfiles = os.listdir(opts.output_dir)\nfor file in files:\n if file.endswith('.h5'):\n os.remove(os.path.join(opts.output_dir,file))\n\nfiles = os.listdir('.')\nfor file in files:\n if file.endswith('.h5'):\n os.remove(os.path.join('.',file))\n",
"_____no_output_____"
]
],
[
[
"## Construct the bunch",
"_____no_output_____"
]
],
[
[
"# Bunch options\ncurrent = 14.e-3 #mA of current \nrp_perlength = current/(opts.beta*scipy.constants.c*scipy.constants.e)\nbunch_length = 2e-2 #effective bunch length 2 mm\nreal_particles = rp_perlength*bunch_length\n\nopts.emit_n = 6e-7 #We want 0.3 mm-mrad normalized emittance\nopts.emits = [basic_calcs.calc_geometric_emittance(opts.emit_n,opts.beta,opts.gamma)] #give this geometric emittance\nopts.real_particles = rp_perlength*bunch_length\nopts.betae = 0.5 #statically fix beta\nopts.alphae = 0.0\n\n# load pre-generated Gaussian bunch from file\nbunch = np.loadtxt(\"myGaussianBunch.txt\")\n \nbunch[:,4] = bunch_length*(np.random.random(len(bunch)) -0.5) #center at 0\nbunch[:,5] = opts.dpop*np.random.randn(1,len(bunch)) #set dp/p\n\nnp.savetxt('myBunch.txt',bunch) #write the bunch to a text file",
"_____no_output_____"
],
[
"#Check emittance\nemit = np.sqrt(np.average(bunch[:,0]**2) * np.average(bunch[:,1]**2) - np.average(bunch[:,0]*bunch[:,1])**2)\nprint \"geometric emittance: %s \\nnormalized emittance: %s\" % (emit, emit * (opts.beta * opts.gamma))",
"_____no_output_____"
],
[
"#read in the bunch\nparticles_file = opts.bunch_file\nbucket_length = bunch_length #set equal\ncomm = synergia.utils.Commxx(True) #define a communicator\nmyBunch = read_bunch.read_bunch(particles_file, reference_particle, opts.real_particles, bucket_length, comm)\n\n# generated longitudinal coordinate is z position (beta*c*dt) but Synergia uses\n# c*dt. Divide by beta to get c*dt.\nlocal_particles = myBunch.get_local_particles()\nlocal_particles[:,4] /= opts.beta",
"_____no_output_____"
],
[
"pltbunch.plot_bunch(myBunch)\npltbunch.plot_long(myBunch)",
"_____no_output_____"
]
],
[
[
"## Run the simulation",
"_____no_output_____"
]
],
[
[
"bunch_simulator = synergia.simulation.Bunch_simulator(myBunch)\n\n#basic diagnostics - PER STEP\nbasicdiag = synergia.bunch.Diagnostics_basic(\"basic.h5\", opts.output_dir)\nbunch_simulator.add_per_step(basicdiag)\n\n#include full diagnostics\nfulldiag = synergia.bunch.Diagnostics_full2(\"full.h5\", opts.output_dir)\nbunch_simulator.add_per_turn(fulldiag)\n\n#particle diagnostics - PER TURN\nopts.turnsPerDiag = 1\nparticlediag = synergia.bunch.Diagnostics_particles(\"particles.h5\",0,0,opts.output_dir)\nbunch_simulator.add_per_turn(particlediag, opts.turnsPerDiag)",
"_____no_output_____"
],
[
"opts.maxturns = opts.turns+1\n\npropagator = synergia.simulation.Propagator(stepper)\npropagator.set_checkpoint_period(opts.checkpointperiod)\npropagator.propagate(bunch_simulator,opts.turns, opts.maxturns,opts.verbosity)\n\nworkflow.cleanup(opts.output_dir)",
"_____no_output_____"
]
],
[
[
"## Diagnostics",
"_____no_output_____"
]
],
[
[
"opts.inputfile = opts.output_dir + '/basic.h5'\nopts.plots = ['x_std', 'y_std']\nplotVals = diagplot.getPlotVals(opts.inputfile, opts.plots)\n\n#define specific value arrays\nxmaster = plotVals['s']\nxstd = plotVals['x_std']\nystd = plotVals['y_std']\n\nfig = plt.figure(figsize=(8,6))\nax = plt.gca()\nax.plot(xmaster,xstd*1.e3,'b-', alpha=0.7, label = 'x_std') #plot x\nax.plot(xmaster,ystd*1.e3,'g-', alpha=0.7, label = 'y_std') #plot y\naxtitle = \"RMS envelope evolution over 50 m - 14.1 mA\"\nax.set_title(axtitle, y = 1.02, fontsize = 18) \nax.set_xlabel(\"s [m]\",fontsize=14)\nax.set_ylabel(\"rms beam size $\\sigma_x$ [mm]\",fontsize=14)\nax.tick_params(axis='x', labelsize=14)\nax.tick_params(axis='y', labelsize=14)\nax.legend()\nsv_title = 'SC_test_envelope_10m_bunch.pdf'\nfig.tight_layout()",
"_____no_output_____"
],
[
"pltbunch.plot_bunch(myBunch)",
"_____no_output_____"
],
[
"pltbunch.plot_long(myBunch)",
"_____no_output_____"
],
[
"#Look at z distribution\npart = myBunch.get_local_particles()\npart[:,4]\nzvals = part[:,4]\nutils.plot_distribution(zvals, 100)",
"_____no_output_____"
]
],
[
[
"## Analytical Comparison",
"_____no_output_____"
]
],
[
[
"def calc_perveance(I,ref,cn=0):\n '''Calculate the perveance for a proton beam of a given current and particle energy.\n \n Arguments\n - I - current in A\n - ref - the reference particle for extracting beta and gamma\n \n - (optional) charge neutralization factor - default 0\n '''\n \n I0 = 3.13e7 #characteristic current\n \n beta = ref.get_beta()\n gamma = ref.get_gamma()\n \n return (I/I0)*(2/beta**3)*(1/gamma**3)\n\ndef calc_characteristic_current():\n '''Return characteristics current for proton beam'''\n return 4*np.pi*scipy.constants.epsilon_0*scipy.constants.m_p*(scipy.constants.c**3)/scipy.constants.e",
"_____no_output_____"
],
[
"#Introduce numerical integrators\n\n#2nd Order RK - Ralston Method\ndef Ralston(r,z,h,f):\n k1 = h*f(r)\n return 0.25*k1 + 0.75*h*f(r+(2/3)*k1)\n\n#4th Order Runge-Kutta\ndef RungeKutta4(r,z,h,f):\n k1 = f(r)\n k2 = f(r + (h/2)*k1)\n k3 = f(r + (h/2)*k2)\n k4 = f(r + h*k3)\n return h/6*(k1 + 2*k2 +2*k3 + k4)\n\n#function here, which is a function of r and z\ndef rprime(K,emit,r0,rp0,rm):\n '''\n \n Returns the slope of the beam envelope (dr/dz) for a given value of emittance,rm, K, and initial conditions.\n \n This equation follows from Reisier.\n \n Arguments:\n \n - r - beam radius (or RMS)\n - K - perveance\n - emit - geometric emittance\n - r0 - initial envelope radius (or RMS)\n - rp0 - initial slope of envelope (or RMS)\n \n '''\n \n first = rp0**2 #first term\n second = (emit**2)*((1./r0**2)-(1./rm**2)) #second term\n third = 2*K* np.log(rm/r0) / 4\n \n return np.sqrt(first + second + third)",
"_____no_output_____"
],
[
"import math\nfrom __future__ import division\n\ndef calculate_expansion(current, reference_paricle,r0,rp0,emit=emit,N=1000,zf=opts.turns * lattice.get_length()):\n\n '''Evaluate the expansion of a KV beam envelope in a drift along z-axis, begining at z = 0.\n \n Arguments:\n - current - beam current in A\n - reference_particle - synergia object for bunch/lattice reference particle\n - r0 - initial envelope value (provide RMS for RMS expansion, a for envelope expansion, etc.)\n - rp0 - initial slope of envelope (must be non-zero, but calculation is not sensitive to small values)\n \n - (optional) emit - geometric emittance of beam - default 2.05721258396*1.e-6 (for 0.3 mm-mrad KV beam)\n - (optional) N - number of steps for integration - default 1000\n - (optional) zf - final z value (e.g. length of expansion) - default 50.0\n \n '''\n \n z0 = 0.0 #start\n ss = (zf-z0)/N #step size\n\n zpoints = np.linspace(0.0, zf, num=N) #define z values\n rpoints = [] #empty array for r values\n \n #calculate perveance\n Kp = calc_perveance(current, reference_particle)\n \n #x is r\n #z is t (what we step up)\n #f is our function describing the relationship between r and z\n f = lambda r: rprime(Kp,emit,r0,rprime0,r)\n\n r,z,dz = r0,z0,ss\n points = []\n while z < zf:\n points.append((z,r))\n z, r = z+dz, r + Ralston(r,z,dz,f) #incremement\n \n return points",
"_____no_output_____"
],
[
"#Calculate current - 14 mA \n\ncurrent14 = 14*1.e-3\nrprime0 = 1.0*(xstd[1]-xstd[0])/(xmaster[1]-xmaster[0])\nr0 = xstd[0] #1.0*1.e-3 #initial envelope value\n#emit = 4.10849449506e-06 #not used (hard coded into calculate_expansion) #gemit_x #rms geometric emittance\npoints14 = calculate_expansion(current14, reference_particle, r0,rprime0)\npoints0 = calculate_expansion(0.0, reference_particle, r0,rprime0)\nprint emit",
"_____no_output_____"
],
[
"#Compare the results\nfig = plt.figure(figsize=(8,6))\nax = plt.gca()\nax.plot(xmaster,xstd*1.e3,'b-', alpha=0.7, label = 'simulation - 14mA') #plot x\nax.plot([p[0] for p in points14], [p[1]*1.e3 for p in points14],'g--',alpha=0.7, label = 'theory - 14 mA')\nax.plot([p[0] for p in points0], [p[1]*1.e3 for p in points0],'k--',alpha=0.7, label = 'theory - zero current')\naxtitle = \"RMS envelope over %s m - theory vs simulation\" % (opts.turns * lattice.get_length())\nax.set_title(axtitle, y = 1.02, fontsize = 18) \nax.set_xlabel(\"s [m]\",fontsize=14)\nax.set_ylabel(\"rms beam size $\\sigma_x$ [mm]\",fontsize=14)\nax.tick_params(axis='x', labelsize=14)\nax.tick_params(axis='y', labelsize=14)\nax.legend(loc = 2)\nfig.tight_layout()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
ec8c18792e32d7469817b4cf08a3c9ce78738998 | 11,548 | ipynb | Jupyter Notebook | 08. 미적분과 최적화/02. 행렬의 미분.ipynb | zzsza/Datascience_School | da27ac760ca8ad1a563a0803a08b332d560cbdc0 | [
"MIT"
] | 39 | 2017-04-30T06:17:21.000Z | 2022-01-07T07:50:11.000Z | 08. 미적분과 최적화/02. 행렬의 미분.ipynb | yeajunseok/Datascience_School | da27ac760ca8ad1a563a0803a08b332d560cbdc0 | [
"MIT"
] | null | null | null | 08. 미적분과 최적화/02. 행렬의 미분.ipynb | yeajunseok/Datascience_School | da27ac760ca8ad1a563a0803a08b332d560cbdc0 | [
"MIT"
] | 32 | 2017-04-09T16:51:49.000Z | 2022-01-23T20:30:48.000Z | 29.015075 | 166 | 0.492899 | [
[
[
"# 행렬의 미분",
"_____no_output_____"
],
[
"함수의 독립 변수나 종속 변수가 벡터나 행렬인 경우에도 미분을 정의할 수 있다. 이러한 경우에는 미분이 아닌 편미분(partial derivative)이지만 편의상 미분이라고 서술하도록 한다. \n\n또한 행렬 미분에는 분자 중심 표현법(Numerator-layout notation)과 분모 중심 표현법(Denominator-layout notation) 두 가지가 있는데 데이터 분석에는 주로 분모 중심 표현법이 사용되므로 여기에서도 분모 중심 표현법으로 서술한다.",
"_____no_output_____"
],
[
"## 스칼라를 벡터로 미분",
"_____no_output_____"
],
[
"데이터 분석에서는 함수의 종속 변수 $y$ 가 스칼라이고 독립 변수 $x$ 가 벡터(다차원)인 경우가 일반적이다.\n\n따라서 편미분 값도 $\\frac{\\partial y}{\\partial x_1}, \\frac{\\partial y}{\\partial x_2}, \\cdots$ 등으로 여러 개가 존재한다. \n\n이렇게 스칼라를 벡터로 미분하는 경우에는 결과를 (열) 벡터로 표시한다. 이렇게 만들어진 벡터를 **그레디언트 벡터(gradient vector)**라고 하고 $\\nabla y$ 로 표기하기도 한다.\n\n\n$$\n\\nabla y = \n\\frac{\\partial y}{\\partial \\mathbf{x}} =\n\\begin{bmatrix}\n\\frac{\\partial y}{\\partial x_1}\\\\\n\\frac{\\partial y}{\\partial x_2}\\\\\n\\vdots\\\\\n\\frac{\\partial y}{\\partial x_n}\\\\\n\\end{bmatrix}\n$$\n\n",
"_____no_output_____"
],
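[
"For example, if $y = x_1^2 + x_1 x_2$, then\n\n$$\n\\nabla y = \\frac{\\partial y}{\\partial \\mathbf{x}} =\n\\begin{bmatrix}\n2x_1 + x_2 \\\\\nx_1 \\\\\n\\end{bmatrix}\n$$",
"_____no_output_____"
],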
[
"## 벡터를 스칼라로 미분",
"_____no_output_____"
],
[
"만약 함수의 종속 변수 $y$ 가 벡터(다차원 데이터)이고 독립 변수 $x$ 가 스칼라인 경우는 함수가 여러 개라고 보는 것과 마찬가지이다.\n\n$$ y_1 = f_1(x) $$\n\n$$ y_2 = f_2(x) $$\n\n$$ \\vdots $$\n\n$$ y_m = f_m(x) $$\n\n$$ \\downarrow $$\n\n$$\n\\mathbf{y} =\n\\begin{bmatrix}\ny_1 \\\\\ny_2 \\\\\n\\vdots\\\\\ny_m \\\\\n\\end{bmatrix}\n= \\mathbf{f}(x)\n$$\n",
"_____no_output_____"
],
[
"따라서 미분 값도 $\\frac{\\partial y_1}{\\partial x}, \\frac{\\partial y_2}{\\partial x}, \\cdots$ 등으로 여러 개가 존재한다. \n\n벡터를 스칼라로 미분하는 경우에는 결과를 행 벡터로 표시한다.\n\n\n$$\n\\frac{\\partial \\mathbf{y}}{\\partial x} = \\left[\n\\frac{\\partial y_1}{\\partial x}\n\\frac{\\partial y_2}{\\partial x}\n\\cdots\n\\frac{\\partial y_m}{\\partial x}\n\\right].\n$$",
"_____no_output_____"
],
[
"## 벡터를 벡터로 미분",
"_____no_output_____"
],
[
"함수의 종속 변수와 독립 변수가 모두 벡터(다차원) 데이터인 경우에는 독립 변수 각각과 종속 변수 각각의 조합에 대해 모두 미분이 존재한다. 따라서 도함수는 행렬 형태가 된다. 이렇게 만들어진 도함수의 행렬을 **자코비안 행렬(Jacobian matrix)** 이라고 한다.\n\n\n\n\n$$\n\\mathbf J = \\frac{d\\mathbf y}{d\\mathbf x} = \\begin{bmatrix}\n \\dfrac{\\partial \\mathbf{y}}{\\partial x_1} & \\cdots & \\dfrac{\\partial \\mathbf{y}}{\\partial x_n} \\end{bmatrix}\n= \\begin{bmatrix}\n \\dfrac{\\partial y_1}{\\partial x_1} & \\cdots & \\dfrac{\\partial y_1}{\\partial x_n}\\\\\n \\vdots & \\ddots & \\vdots\\\\\n \\dfrac{\\partial y_m}{\\partial x_1} & \\cdots & \\dfrac{\\partial y_m}{\\partial x_n} \\end{bmatrix}\n$$\n",
"_____no_output_____"
],
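[
"For example, if $\\mathbf{y} = \\begin{bmatrix} x_1 x_2 \\\\ x_1 + x_2 \\end{bmatrix}$, the Jacobian matrix is\n\n$$\n\\mathbf J =\n\\begin{bmatrix}\nx_2 & x_1 \\\\\n1 & 1 \\\\\n\\end{bmatrix}\n$$",
"_____no_output_____"
],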
[
"## 벡터 미분 규칙",
"_____no_output_____"
],
[
"\n$$\\frac{\\partial \\mathbf{w}^\\top\\mathbf{x}}{\\partial \\mathbf{x}} = \\frac{\\partial \\mathbf{x}^\\top\\mathbf{w}}{\\partial \\mathbf{x}} = \\mathbf{w}$$ ",
"_____no_output_____"
],
[
"(증명)\n\n$$ \n\\dfrac{\\partial (\\mathbf{w}^T \\mathbf{x})}{\\partial \\mathbf{x}}=\n\\begin{bmatrix}\n\\dfrac{\\partial (\\mathbf{w}^T \\mathbf{x})}{\\partial x_1} \\\\\n\\dfrac{\\partial (\\mathbf{w}^T \\mathbf{x})}{\\partial x_2} \\\\\n\\vdots \\\\\n\\dfrac{\\partial (\\mathbf{w}^T \\mathbf{x})}{\\partial x_N} \\\\\n\\end{bmatrix} =\n\\begin{bmatrix}\n\\dfrac{\\partial (w_1 x_1 + \\cancel{w_2 x_2} + \\cdots + \\cancel{w_N x_N})}{\\partial x_1} \\\\\n\\dfrac{\\partial (\\cancel{w_1 x_1} + w_2 x_2 + \\cdots + \\cancel{w_N x_N})}{\\partial x_2} \\\\\n\\vdots \\\\\n\\dfrac{\\partial (\\cancel{w_1 x_1} + \\cancel{w_2 x_2} + \\cdots + w_N x_N)}{\\partial x_N} \\\\\n\\end{bmatrix} =\n\\begin{bmatrix}\nw_1 \\\\\nw_2 \\\\\n\\vdots \\\\\nw_N \\\\\n\\end{bmatrix}\n= \\mathbf{w} \n$$",
"_____no_output_____"
],
[
"\n\n$$\\frac{\\partial \\mathbf{x}^\\top\\mathbf{A}\\mathbf{x}}{\\partial \\mathbf{x}} = (\\mathbf{A} + \\mathbf{A}^\\top)\\mathbf{x}$$",
"_____no_output_____"
],
[
"(증명)\n\n\n$$ \n\\begin{eqnarray}\n\\dfrac{\\partial (\\mathbf{x}^\\top\\mathbf{A}\\mathbf{x})}{\\partial \\mathbf{x}}\n&=&\n\\begin{bmatrix}\n\\dfrac{\\partial (\\mathbf{x}^\\top\\mathbf{A}\\mathbf{x})}{\\partial x_1} \\\\\n\\dfrac{\\partial (\\mathbf{x}^\\top\\mathbf{A}\\mathbf{x})}{\\partial x_2} \\\\\n\\vdots \\\\\n\\dfrac{\\partial (\\mathbf{x}^\\top\\mathbf{A}\\mathbf{x})}{\\partial x_N} \\\\\n\\end{bmatrix} \n=\n\\begin{bmatrix}\n\\dfrac{\\partial (\\sum_{i=1}^{N} \\sum_{j=1}^{N} a_{ij} x_i x_j)}{\\partial x_1} \\\\\n\\dfrac{\\partial (\\sum_{i=1}^{N} \\sum_{j=1}^{N} a_{ij} x_i x_j)}{\\partial x_2} \\\\\n\\vdots \\\\\n\\dfrac{\\partial (\\sum_{i=1}^{N} \\sum_{j=1}^{N} a_{ij} x_i x_j)}{\\partial x_N} \\\\\n\\end{bmatrix} \n=\n\\begin{bmatrix}\n\\dfrac{\\partial \n\\left(\n\\begin{matrix}\na_{11}x_1x_1 + a_{12}x_1x_2 + \\cdots + a_{1N}x_1x_N + \\\\\na_{21}x_2x_1 + \\cancel{a_{22}x_2x_2} + \\cdots + \\cancel{a_{2N}x_2x_N} + \\\\\n\\cdots \\\\\na_{N1}x_Nx_1 + \\cancel{a_{N2}x_Nx_2} + \\cdots + \\cancel{a_{NN}x_Nx_N} \n\\end{matrix}\n\\right)}{\\partial x_1} \\\\\n\\dfrac{\\partial \n\\left(\n\\begin{matrix}\n\\cancel{a_{11}x_1x_1} + a_{12}x_1x_2 + \\cdots + \\cancel{a_{1N}x_1x_N} + \\\\\na_{21}x_2x_1 + a_{22}x_2x_2 + \\cdots + a_{2N}x_2x_N + \\\\\n\\cdots \\\\\n\\cancel{a_{N1}x_Nx_1} + a_{N2}x_Nx_2 + \\cdots + \\cancel{a_{NN}x_Nx_N}\n\\end{matrix}\n\\right)}{\\partial x_2} \\\\\n\\vdots \\\\\n\\end{bmatrix} \n\\\\\n&=&\n\\begin{bmatrix}\n\\sum_{i=1}^{N} a_{1i} x_i + \\sum_{i=1}^{N} a_{i1} x_i\\\\\n\\sum_{i=1}^{N} a_{2i} x_i + \\sum_{i=1}^{N} a_{i2} x_i\\\\\n\\vdots \\\\\n\\sum_{i=1}^{N} a_{Ni} x_i + \\sum_{i=1}^{N} a_{iN} x_i\\\\\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n\\sum_{i=1}^{N} a_{1i} x_i\\\\\n\\sum_{i=1}^{N} a_{2i} x_i\\\\\n\\vdots \\\\\n\\sum_{i=1}^{N} a_{Ni} x_i\\\\\n\\end{bmatrix}\n+\n\\begin{bmatrix}\n\\sum_{i=1}^{N} a_{i1} x_i \\\\\n\\sum_{i=1}^{N} a_{i2} x_i \\\\\n\\vdots \\\\\n\\sum_{i=1}^{N} a_{iN} x_i \\\\\n\\end{bmatrix}\n=\n\\mathbf{A} \\mathbf{x} + \\mathbf{A}^T \\mathbf{x} \n=\n(\\mathbf{A} + \\mathbf{A}^T)\\mathbf{x} \n\\end{eqnarray}\n$$\n\n\n",
"_____no_output_____"
],
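[
"A quick check of this rule with a concrete matrix: for\n\n$$\n\\mathbf{A} = \\begin{bmatrix} 1 & 2 \\\\ 3 & 4 \\end{bmatrix}, \\quad\n\\mathbf{x} = \\begin{bmatrix} x_1 \\\\ x_2 \\end{bmatrix},\n$$\n\nwe have $\\mathbf{x}^\\top\\mathbf{A}\\mathbf{x} = x_1^2 + 5x_1x_2 + 4x_2^2$, whose gradient is\n\n$$\n\\begin{bmatrix} 2x_1 + 5x_2 \\\\ 5x_1 + 8x_2 \\end{bmatrix}\n= (\\mathbf{A} + \\mathbf{A}^\\top)\\mathbf{x}\n$$",
"_____no_output_____"
],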
[
"## 스칼라를 행렬로 미분",
"_____no_output_____"
],
[
"종속 변수가 스칼라 값이고 독립 변수가 행렬인 경우에는 도함수 행렬의 모양이 독립 변수 행렬과 일치한다.\n\n$$\n\\frac{\\partial y}{\\partial \\mathbf{X}} =\n\\begin{bmatrix}\n\\frac{\\partial y}{\\partial x_{11}} & \\frac{\\partial y}{\\partial x_{12}} & \\cdots & \\frac{\\partial y}{\\partial x_{1n}}\\\\\n\\frac{\\partial y}{\\partial x_{21}} & \\frac{\\partial y}{\\partial x_{22}} & \\cdots & \\frac{\\partial y}{\\partial x_{2n}}\\\\\n\\vdots & \\vdots & \\ddots & \\vdots\\\\\n\\frac{\\partial y}{\\partial x_{m1}} & \\frac{\\partial y}{\\partial x_{m2}} & \\cdots & \\frac{\\partial y}{\\partial x_{mn}}\\\\\n\\end{bmatrix}\n$$",
"_____no_output_____"
],
[
"## 행렬 미분 규칙",
"_____no_output_____"
],
[
"\n$$ \\dfrac{\\partial \\text{tr} (\\mathbf{B}\\mathbf{A})}{\\partial \\mathbf{A}} = \\mathbf{B}^T$$",
"_____no_output_____"
],
[
"(증명)\n\n$$ \\text{tr}(\\mathbf{B}\\mathbf{A}) = \\sum_{i=1}^n \\sum_{j=1}^n b_{ji} a_{ij} $$\n\n$$ \\dfrac{\\partial \\text{tr} (\\mathbf{B}\\mathbf{A})}{\\partial a_{ij}} = b_{ji} $$ ",
"_____no_output_____"
],
[
"$$ \\dfrac{\\partial \\log \\det \\mathbf{A} }{\\partial \\mathbf{A}} = (\\mathbf{A}^{-1})^T $$",
"_____no_output_____"
],
[
"(증명)\n\n\n$$ \\dfrac{\\partial}{\\partial a_{i,j}} \\det A = M_{i,j} $$\n\n$$ \\dfrac{\\partial}{\\partial A} \\det A = M = (\\det A) A^{-1} $$\n\n$$ \\dfrac{d}{dx} \\log f(x) = \\dfrac{f'(x)}{f(x)} = (\\det A) A^{-1} / \\det A = A^{-1}$$",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
ec8c38b5a0752d2d64ffd076cb6608e359cc871e | 102,956 | ipynb | Jupyter Notebook | tutorials/Certification_Trainings/Public/8.Keyword_Extraction_YAKE.ipynb | fcivardi/spark-nlp-workshop | aedb1f5d93577c81bc3dd0da5e46e02586941541 | [
"Apache-2.0"
] | null | null | null | tutorials/Certification_Trainings/Public/8.Keyword_Extraction_YAKE.ipynb | fcivardi/spark-nlp-workshop | aedb1f5d93577c81bc3dd0da5e46e02586941541 | [
"Apache-2.0"
] | null | null | null | tutorials/Certification_Trainings/Public/8.Keyword_Extraction_YAKE.ipynb | fcivardi/spark-nlp-workshop | aedb1f5d93577c81bc3dd0da5e46e02586941541 | [
"Apache-2.0"
] | null | null | null | 79.318952 | 6,939 | 0.587377 | [
[
[
"",
"_____no_output_____"
],
[
"[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/8.Keyword_Extraction_YAKE.ipynb)",
"_____no_output_____"
],
[
"# 8 Keyword Extraction with YAKE",
"_____no_output_____"
]
],
[
[
"! pip install -q pyspark==3.2.0 spark-nlp",
"_____no_output_____"
],
[
"from pyspark.sql import functions as F\nfrom pyspark.sql.functions import lit\nfrom pyspark.sql.types import StringType, DataType,ArrayType\nfrom pyspark.sql.functions import udf, struct\nfrom IPython.core.display import display, HTML\nimport re",
"_____no_output_____"
],
[
"from pyspark.ml import PipelineModel\nfrom sparknlp.annotator import *\nfrom sparknlp.base import *\nimport sparknlp\n\nspark = sparknlp.start(spark32 = True)\n\nprint(\"Spark NLP version\", sparknlp.version())\nprint(\"Apache Spark version:\", spark.version)\n\nspark",
"Spark NLP version 3.4.0\nApache Spark version: 3.2.0\n"
],
[
"stopwords = StopWordsCleaner().getStopWords()",
"_____no_output_____"
],
[
"stopwords[:5]",
"_____no_output_____"
]
],
[
[
"## YAKE Keyword Extractor\n\nYake is an Unsupervised, Corpus-Independent, Domain and Language-Independent and Single-Document keyword extraction algorithm.\n\nExtracting keywords from texts has become a challenge for individuals and organizations as the information grows in complexity and size. The need to automate this task so that text can be processed in a timely and adequate manner has led to the emergence of automatic keyword extraction tools. Yake is a novel feature-based system for multi-lingual keyword extraction, which supports texts of different sizes, domain or languages. Unlike other approaches, Yake does not rely on dictionaries nor thesauri, neither is trained against any corpora. Instead, it follows an unsupervised approach which builds upon features extracted from the text, making it thus applicable to documents written in different languages without the need for further knowledge. This can be beneficial for a large number of tasks and a plethora of situations where access to training corpora is either limited or restricted.\n\n\nThe algorithm makes use of the position of a sentence and token. Therefore, to use the annotator, the text should be first sent through a Sentence Boundary Detector and then a tokenizer.\n\nYou can tweak the following parameters to get the best result from the annotator.\n\n- *setMinNGrams(int)* Select the minimum length of a extracted keyword\n- *setMaxNGrams(int)* Select the maximum length of a extracted keyword\n- *setNKeywords(int)* Extract the top N keywords\n- *setStopWords(list)* Set the list of stop words\n- *setThreshold(float)* Each keyword will be given a keyword score greater than 0. (Lower the score better the keyword) Set an upper bound for the keyword score from this method.\n- *setWindowSize(int)* Yake will construct a co-occurence matrix. You can set the window size for the cooccurence matrix construction from this method. ex: windowSize=2 will look at two words to both left and right of a candidate word.\n\n\n<b>References</b>\n\nCampos, R., Mangaravite, V., Pasquali, A., Jatowt, A., Jorge, A., Nunes, C. and Jatowt, A. (2020). YAKE! Keyword Extraction from Single Documents using Multiple Local Features. In Information Sciences Journal. Elsevier, Vol 509, pp 257-289. [pdf](https://doi.org/10.1016/j.ins.2019.09.013)",
"_____no_output_____"
]
],
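[
[
"# A minimal sketch of the extra tuning parameters described above (illustrative values only,\n# not the settings used in the pipeline below).\nkeywords_tuned = YakeKeywordExtraction() \\\n    .setInputCols(\"token\") \\\n    .setOutputCol(\"keywords\") \\\n    .setWindowSize(2) \\\n    .setThreshold(0.8) \\\n    .setStopWords(stopwords)",
"_____no_output_____"
]
],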
[
[
"document = DocumentAssembler() \\\n .setInputCol(\"text\") \\\n .setOutputCol(\"document\")\n\nsentenceDetector = SentenceDetector() \\\n .setInputCols(\"document\") \\\n .setOutputCol(\"sentence\")\n\ntoken = Tokenizer() \\\n .setInputCols(\"sentence\") \\\n .setOutputCol(\"token\") \\\n .setContextChars([\"(\", \")\", \"?\", \"!\", \".\", \",\"])\n\nkeywords = YakeKeywordExtraction() \\\n .setInputCols(\"token\") \\\n .setOutputCol(\"keywords\") \\\n .setMinNGrams(1) \\\n .setMaxNGrams(3)\\\n .setNKeywords(20)\\\n .setStopWords(stopwords)\n\nyake_pipeline = Pipeline(stages=[document, sentenceDetector, token, keywords])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\nyake_Model = yake_pipeline.fit(empty_df)",
"_____no_output_____"
],
[
"# LightPipeline\n\nlight_model = LightPipeline(yake_Model)\n\ntext = '''\ngoogle is acquiring data science community kaggle. Sources tell us that google is acquiring kaggle, a platform that hosts data science and machine learning competitions. Details about the transaction remain somewhat vague , but given that google is hosting its Cloud Next conference in san francisco this week, the official announcement could come as early as tomorrow. Reached by phone, kaggle co-founder ceo anthony goldbloom declined to deny that the acquisition is happening. google itself declined 'to comment on rumors'. kaggle, which has about half a million data scientists on its platform, was founded by Goldbloom and Ben Hamner in 2010. The service got an early start and even though it has a few competitors like DrivenData, TopCoder and HackerRank, it has managed to stay well ahead of them by focusing on its specific niche. The service is basically the de facto home for running data science and machine learning competitions. With kaggle, google is buying one of the largest and most active communities for data scientists - and with that, it will get increased mindshare in this community, too (though it already has plenty of that thanks to Tensorflow and other projects). kaggle has a bit of a history with google, too, but that's pretty recent. Earlier this month, google and kaggle teamed up to host a $100,000 machine learning competition around classifying YouTube videos. That competition had some deep integrations with the google Cloud platform, too. Our understanding is that google will keep the service running - likely under its current name. While the acquisition is probably more about Kaggle's community than technology, kaggle did build some interesting tools for hosting its competition and 'kernels', too. On kaggle, kernels are basically the source code for analyzing data sets and developers can share this code on the platform (the company previously called them 'scripts'). Like similar competition-centric sites, kaggle also runs a job board, too. It's unclear what google will do with that part of the service. According to Crunchbase, kaggle raised $12.5 million (though PitchBook says it's $12.75) since its launch in 2010. Investors in kaggle include Index Ventures, SV Angel, Max Levchin, Naval Ravikant, google chief economist Hal Varian, Khosla Ventures and Yuri Milner\n'''\n\nlight_result = light_model.fullAnnotate(text)[0]\n\n[(s.metadata['sentence'], s.result) for s in light_result['sentence']]",
"_____no_output_____"
],
[
"import pandas as pd\n\nkeys_df = pd.DataFrame([(k.result, k.begin, k.end, k.metadata['score'], k.metadata['sentence']) for k in light_result['keywords']],\n columns = ['keywords','begin','end','score','sentence'])\nkeys_df['score'] = keys_df['score'].astype(float)\n\n# ordered by relevance \nkeys_df.sort_values(['sentence','score']).head(30)",
"_____no_output_____"
]
],
[
[
"### Getting keywords from datraframe",
"_____no_output_____"
]
],
[
[
"! wget -q https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/pubmed/pubmed_sample_text_small.csv\n\ndf = spark.read\\\n .option(\"header\", \"true\")\\\n .csv(\"pubmed_sample_text_small.csv\")\\\n \ndf.show(truncate=50)",
"+--------------------------------------------------+\n| text|\n+--------------------------------------------------+\n|The human KCNJ9 (Kir 3.3, GIRK3) is a member of...|\n|BACKGROUND: At present, it is one of the most i...|\n|OBJECTIVE: To investigate the relationship betw...|\n|Combined EEG/fMRI recording has been used to lo...|\n|Kohlschutter syndrome is a rare neurodegenerati...|\n|Statistical analysis of neuroimages is commonly...|\n|The synthetic DOX-LNA conjugate was characteriz...|\n|Our objective was to compare three different me...|\n|We conducted a phase II study to assess the eff...|\n|\"\"\"Monomeric sarcosine oxidase (MSOX) is a flav...|\n|We presented the tachinid fly Exorista japonica...|\n|The literature dealing with the water conductin...|\n|A novel approach to synthesize chitosan-O-isopr...|\n|An HPLC-ESI-MS-MS method has been developed for...|\n|The localizing and lateralizing values of eye a...|\n|OBJECTIVE: To evaluate the effectiveness and ac...|\n|For the construction of new combinatorial libra...|\n|We report the results of a screen for genetic a...|\n|Intraparenchymal pericatheter cyst is rarely re...|\n|It is known that patients with Klinefelter's sy...|\n+--------------------------------------------------+\nonly showing top 20 rows\n\n"
],
[
"result = yake_pipeline.fit(df).transform(df)",
"_____no_output_____"
],
[
"result = result.withColumn('unique_keywords', F.array_distinct(\"keywords.result\"))",
"_____no_output_____"
],
[
"def highlight(text, keywords):\n for k in keywords:\n text = (re.sub(r'(\\b%s\\b)'%k, r'<span style=\"background-color: yellow;\">\\1</span>', text, flags=re.IGNORECASE))\n return text",
"_____no_output_____"
],
[
"highlight_udf = udf(highlight, StringType())\n",
"_____no_output_____"
],
[
"result = result.withColumn(\"highlighted_keywords\",highlight_udf('text','unique_keywords'))",
"_____no_output_____"
],
[
"for r in result.select(\"highlighted_keywords\").limit(20).collect():\n display(HTML(r.highlighted_keywords))\n print(\"\\n\\n\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8c3a8d39568f6876a163a3e0a6f1cbc0f7fd6c | 48,967 | ipynb | Jupyter Notebook | notebooks/TResNet_M.ipynb | linhduongtuan/Brain_Tumor_MRI_Classifier | 16cd43b87ba8b530973eb3a039d9aef739d671a1 | [
"Apache-2.0"
] | 4 | 2021-01-03T10:58:54.000Z | 2022-01-17T14:43:48.000Z | notebooks/TResNet_M.ipynb | linhduongtuan/Brain_Tumor_MRI_Classifier | 16cd43b87ba8b530973eb3a039d9aef739d671a1 | [
"Apache-2.0"
] | null | null | null | notebooks/TResNet_M.ipynb | linhduongtuan/Brain_Tumor_MRI_Classifier | 16cd43b87ba8b530973eb3a039d9aef739d671a1 | [
"Apache-2.0"
] | null | null | null | 35.051539 | 673 | 0.480507 | [
[
[
"import os\nimport re\nimport PIL\nimport sys\nimport json\nimport time\nimport timm\nimport math\nimport copy\nimport torch\nimport pickle\nimport logging\nimport fnmatch\nimport argparse\nimport torchvision\nimport numpy as np\n%matplotlib inline\nimport pandas as pd\nimport seaborn as sns\nimport torch.nn as nn\nfrom PIL import Image\nfrom pathlib import Path\nfrom copy import deepcopy\nfrom sklearn import metrics\nimport torch.optim as optim\nfrom datetime import datetime\nfrom torchvision import models\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\nimport torch.utils.data as data\nfrom torch.autograd import Variable\nfrom tqdm import tqdm, tqdm_notebook\nfrom torch.optim import lr_scheduler\n#from pytorch_metric_learning import loss\nimport torch.utils.model_zoo as model_zoo\nfrom timm.models.layers.activations import *\n%config InlineBackend.figure_format = 'retina'\nfrom collections import OrderedDict, defaultdict\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import transforms, models, datasets\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom randaugment import RandAugment, ImageNetPolicy, Cutout\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nfrom timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\nfrom sklearn.metrics import confusion_matrix,accuracy_score, classification_report",
"_____no_output_____"
],
[
"data_dir = '/home/linh/Downloads/Brain/'\n\n# Define your transforms for the training and testing sets\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n RandAugment(),\n ImageNetPolicy(),\n Cutout(size=16),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])\n ])\n}\n\n# Load the datasets with ImageFolder\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x])\n for x in ['train', 'val']}\nbatch_size = 140\ndata_loader = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,\n shuffle=True, num_workers=4, pin_memory = True)\n for x in ['train', 'val']}\n\ndataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n\nclass_names = image_datasets['train'].classes\nprint(class_names)\nprint(dataset_sizes)\nprint(device)\n\n### we get the class_to_index in the data_Set but what we really need is the cat_to_names so we will create\n_ = image_datasets['train'].class_to_idx\ncat_to_name = {_[i]: i for i in list(_.keys())}\nprint(cat_to_name)\n \n# Run this to test the data loader\nimages, labels = next(iter(data_loader['val']))\nimages.size()",
"['Glioma', 'Meningioma', 'No_Tumor', 'Pituitary']\n{'train': 2870, 'val': 394}\ncuda:0\n{0: 'Glioma', 1: 'Meningioma', 2: 'No_Tumor', 3: 'Pituitary'}\n"
],
[
"\"\"\"def showimage(data_loader, number_images, cat_to_name):\n dataiter = iter(data_loader)\n images, labels = dataiter.next()\n images = images.numpy() # convert images to numpy for display\n # plot the images in the batch, along with the corresponding labels\n fig = plt.figure(figsize=(number_images, 4))\n for idx in np.arange(number_images):\n ax = fig.add_subplot(2, number_images/2, idx+1, xticks=[], yticks=[])\n img = np.transpose(images[idx])\n plt.imshow(img)\n ax.set_title(cat_to_name[labels.tolist()[idx]])\n \n#### to show some images\nshowimage(data_loader['test'], 20, cat_to_name)\"\"\"",
"_____no_output_____"
],
[
"#model = models.resnet50(pretrained=True)\n#model = timm.create_model('resnet50', pretrained=True)\nmodel = timm.create_model('tresnet_m', num_classes=4,pretrained=True)\n#model.fc #show fully connected layer for ResNet family\nmodel.head #show the classifier layer (fully connected layer) for EfficientNets",
"_____no_output_____"
],
[
"# Create classifier\nfor param in model.parameters():\n param.requires_grad = True\n# define `classifier` for ResNet\n# Otherwise, define `fc` for EfficientNet family \n#because the definition of the full connection/classifier of 2 CNN families is differnt\n'''classifier = nn.Sequential(OrderedDict([('fc1', nn.Linear(2048, 1000, bias=True)),\n\t\t\t\t\t\t\t ('BN1', nn.BatchNorm2d(1000, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)),\n\t\t\t\t\t\t\t\t ('dropout1', nn.Dropout(0.7)),\n ('fc2', nn.Linear(1000, 512)),\n\t\t\t\t\t\t\t\t ('BN2', nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)),\n\t\t\t\t\t\t\t\t ('swish1', Swish()),\n\t\t\t\t\t\t\t\t ('dropout2', nn.Dropout(0.5)),\n\t\t\t\t\t\t\t\t ('fc3', nn.Linear(512, 128)),\n\t\t\t\t\t\t\t\t ('BN3', nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)),\n\t\t\t\t\t\t\t ('swish2', Swish()),\n\t\t\t\t\t\t\t\t ('fc4', nn.Linear(128, 4)),\n\t\t\t\t\t\t\t\t ('output', nn.Softmax(dim=1))\n\t\t\t\t\t\t\t ]))\n# connect base model (EfficientNet_B0) with modified classifier layer\nmodel.fc = classifier'''\ncriterion = LabelSmoothingCrossEntropy()\n#criterion = nn.CrossEntropyLoss()\n#optimizer = Nadam(model.parameters(), lr=0.001)\n#optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0001)\noptimizer = optim.SGD(model.parameters(), \n lr=0.01,momentum=0.9,\n nesterov=True,\n weight_decay=0.0001)\nscheduler = optim.lr_scheduler.StepLR(optimizer, step_size=80, gamma=0.1)\n#show our model architechture and send to GPU\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\ncount = count_parameters(model)\nprint(count)",
"29348228\n"
],
[
"model.to(device)\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=200, checkpoint = None):\n since = time.time()\n\n if checkpoint is None:\n best_model_wts = copy.deepcopy(model.state_dict())\n best_loss = math.inf\n best_acc = 0.\n else:\n print(f'Val loss: {checkpoint[\"best_val_loss\"]}, Val accuracy: {checkpoint[\"best_val_accuracy\"]}')\n model.load_state_dict(checkpoint['model_state_dict'])\n best_model_wts = copy.deepcopy(model.state_dict())\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n best_loss = checkpoint['best_val_loss']\n best_acc = checkpoint['best_val_accuracy']\n \n # Tensorboard summary\n writer = SummaryWriter()\n \n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch + 1, num_epochs)) #(epoch, num_epochs -1)\n print('-' * 20)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for i, (inputs, labels) in enumerate(data_loader[phase]):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n \n if i % 1000 == 999:\n print('[%d, %d] loss: %.8f' % \n (epoch + 1, i, running_loss / (i * inputs.size(0))))\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train': \n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n \n if phase == 'train': \n scheduler.step()\n \n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.8f} Acc: {:.8f}'.format(\n phase, epoch_loss, epoch_acc))\n \n # Record training loss and accuracy for each phase\n if phase == 'train':\n writer.add_scalar('Train/Loss', epoch_loss, epoch)\n writer.add_scalar('Train/Accuracy', epoch_acc, epoch)\n writer.flush()\n else:\n writer.add_scalar('Valid/Loss', epoch_loss, epoch)\n writer.add_scalar('Valid/Accuracy', epoch_acc, epoch)\n writer.flush()\n # deep copy the model\n \n if phase == 'val' and epoch_acc > best_acc:\n print(f'New best model found!')\n print(f'New record ACC: {epoch_acc}, previous record acc: {best_acc}')\n best_loss = epoch_loss\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n torch.save({'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'best_val_loss': best_loss,\n 'best_val_accuracy': best_acc,\n 'scheduler_state_dict' : scheduler.state_dict(),\n }, \n CHECK_POINT_PATH\n )\n print(f'New record acc is SAVED: {epoch_acc}')\n\n print()\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:.8f} Best val loss: {:.8f}'.format(best_acc, best_loss))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model, best_loss, best_acc",
"_____no_output_____"
],
[
"CHECK_POINT_PATH = '/home/linh/Downloads/Brain/weights/TResNet_Medium.pth'\ntry:\n checkpoint = torch.load(CHECK_POINT_PATH)\n print(\"checkpoint loaded\")\nexcept:\n checkpoint = None\n print(\"checkpoint not found\")\nif checkpoint == None:\n CHECK_POINT_PATH = CHECK_POINT_PATH\nmodel, best_val_loss, best_val_acc = train_model(model,\n criterion,\n optimizer,\n scheduler,\n num_epochs = 300,\n checkpoint = None #torch.load(CHECK_POINT_PATH)\n ) \n \ntorch.save({'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'best_val_loss': best_val_loss,\n 'best_val_accuracy': best_val_acc,\n 'scheduler_state_dict': scheduler.state_dict(),\n }, CHECK_POINT_PATH)",
"checkpoint not found\nEpoch 1/300\n--------------------\ntrain Loss: 1.07955691 Acc: 0.57595819\nval Loss: 1.28092318 Acc: 0.53045685\nNew best model found!\nNew record ACC: 0.5304568527918782, previous record acc: 0.0\nNew record acc is SAVED: 0.5304568527918782\n\nEpoch 2/300\n--------------------\ntrain Loss: 0.77862053 Acc: 0.78222997\nval Loss: 1.07722064 Acc: 0.65736041\nNew best model found!\nNew record ACC: 0.6573604060913705, previous record acc: 0.5304568527918782\nNew record acc is SAVED: 0.6573604060913705\n\nEpoch 3/300\n--------------------\ntrain Loss: 0.69188676 Acc: 0.82055749\nval Loss: 0.99619725 Acc: 0.70304569\nNew best model found!\nNew record ACC: 0.7030456852791878, previous record acc: 0.6573604060913705\nNew record acc is SAVED: 0.7030456852791878\n\nEpoch 4/300\n--------------------\ntrain Loss: 0.65551965 Acc: 0.84355401\nval Loss: 0.93008084 Acc: 0.76649746\nNew best model found!\nNew record ACC: 0.766497461928934, previous record acc: 0.7030456852791878\nNew record acc is SAVED: 0.766497461928934\n\nEpoch 5/300\n--------------------\ntrain Loss: 0.61635884 Acc: 0.86794425\nval Loss: 0.93981691 Acc: 0.75634518\n\nEpoch 6/300\n--------------------\ntrain Loss: 0.60247049 Acc: 0.86550523\nval Loss: 0.89187448 Acc: 0.78172589\nNew best model found!\nNew record ACC: 0.781725888324873, previous record acc: 0.766497461928934\nNew record acc is SAVED: 0.781725888324873\n\nEpoch 7/300\n--------------------\ntrain Loss: 0.56582597 Acc: 0.89024390\nval Loss: 0.92371932 Acc: 0.76903553\n\nEpoch 8/300\n--------------------\ntrain Loss: 0.55891211 Acc: 0.89965157\nval Loss: 0.81821413 Acc: 0.82233503\nNew best model found!\nNew record ACC: 0.8223350253807106, previous record acc: 0.781725888324873\nNew record acc is SAVED: 0.8223350253807106\n\nEpoch 9/300\n--------------------\ntrain Loss: 0.54806927 Acc: 0.90174216\nval Loss: 0.93988984 Acc: 0.75126904\n\nEpoch 10/300\n--------------------\ntrain Loss: 0.52870543 Acc: 0.91533101\nval Loss: 0.83479353 Acc: 0.79695431\n\nEpoch 11/300\n--------------------\ntrain Loss: 0.51947024 Acc: 0.91777003\nval Loss: 0.84170746 Acc: 0.79695431\n\nEpoch 12/300\n--------------------\ntrain Loss: 0.50844645 Acc: 0.92229965\nval Loss: 0.83579027 Acc: 0.79695431\n\nEpoch 13/300\n--------------------\ntrain Loss: 0.50783221 Acc: 0.92055749\nval Loss: 0.88775407 Acc: 0.79187817\n\nEpoch 14/300\n--------------------\ntrain Loss: 0.50206799 Acc: 0.92299652\nval Loss: 0.78638785 Acc: 0.82741117\nNew best model found!\nNew record ACC: 0.8274111675126903, previous record acc: 0.8223350253807106\nNew record acc is SAVED: 0.8274111675126903\n\nEpoch 15/300\n--------------------\ntrain Loss: 0.49438596 Acc: 0.92613240\nval Loss: 0.80583147 Acc: 0.82233503\n\nEpoch 16/300\n--------------------\ntrain Loss: 0.50242820 Acc: 0.92160279\nval Loss: 0.78018884 Acc: 0.82994924\nNew best model found!\nNew record ACC: 0.8299492385786802, previous record acc: 0.8274111675126903\nNew record acc is SAVED: 0.8299492385786802\n\nEpoch 17/300\n--------------------\ntrain Loss: 0.49302235 Acc: 0.92682927\nval Loss: 0.76683039 Acc: 0.82233503\n\nEpoch 18/300\n--------------------\ntrain Loss: 0.48004893 Acc: 0.93658537\nval Loss: 0.75718068 Acc: 0.83502538\nNew best model found!\nNew record ACC: 0.8350253807106598, previous record acc: 0.8299492385786802\nNew record acc is SAVED: 0.8350253807106598\n\nEpoch 19/300\n--------------------\ntrain Loss: 0.47862971 Acc: 0.93832753\nval Loss: 0.74972339 Acc: 0.83248731\n\nEpoch 20/300\n--------------------\ntrain Loss: 
0.47506818 Acc: 0.93623693\nval Loss: 0.70355875 Acc: 0.86294416\nNew best model found!\nNew record ACC: 0.8629441624365481, previous record acc: 0.8350253807106598\nNew record acc is SAVED: 0.8629441624365481\n\nEpoch 21/300\n--------------------\ntrain Loss: 0.48691467 Acc: 0.93275261\nval Loss: 0.73884114 Acc: 0.83756345\n\nEpoch 22/300\n--------------------\ntrain Loss: 0.46591006 Acc: 0.94494774\nval Loss: 0.69804413 Acc: 0.86040609\n\nEpoch 23/300\n--------------------\ntrain Loss: 0.46908571 Acc: 0.94006969\nval Loss: 0.78639412 Acc: 0.83248731\n\nEpoch 24/300\n--------------------\ntrain Loss: 0.46191247 Acc: 0.94668990\nval Loss: 0.81434298 Acc: 0.82233503\n\nEpoch 25/300\n--------------------\ntrain Loss: 0.46491889 Acc: 0.94425087\nval Loss: 0.87985800 Acc: 0.79949239\n\nEpoch 26/300\n--------------------\ntrain Loss: 0.46328794 Acc: 0.94216028\nval Loss: 0.77533215 Acc: 0.82487310\n\nEpoch 27/300\n--------------------\ntrain Loss: 0.46005933 Acc: 0.94494774\nval Loss: 0.79527310 Acc: 0.83248731\n\nEpoch 28/300\n--------------------\ntrain Loss: 0.45584408 Acc: 0.94529617\nval Loss: 0.77031647 Acc: 0.82741117\n\nEpoch 29/300\n--------------------\ntrain Loss: 0.45580589 Acc: 0.94947735\nval Loss: 0.82999220 Acc: 0.81472081\n\nEpoch 30/300\n--------------------\ntrain Loss: 0.46711184 Acc: 0.93972125\nval Loss: 0.77312013 Acc: 0.83248731\n\nEpoch 31/300\n--------------------\ntrain Loss: 0.46279705 Acc: 0.94773519\nval Loss: 0.74708271 Acc: 0.85786802\n\nEpoch 32/300\n--------------------\ntrain Loss: 0.45972006 Acc: 0.94390244\nval Loss: 0.75585001 Acc: 0.83502538\n\nEpoch 33/300\n--------------------\ntrain Loss: 0.45161296 Acc: 0.95121951\nval Loss: 0.77462223 Acc: 0.84010152\n\nEpoch 34/300\n--------------------\ntrain Loss: 0.45242023 Acc: 0.94634146\nval Loss: 0.74412737 Acc: 0.85786802\n\nEpoch 35/300\n--------------------\ntrain Loss: 0.45196337 Acc: 0.95191638\nval Loss: 0.75529295 Acc: 0.85025381\n\nEpoch 36/300\n--------------------\ntrain Loss: 0.44009197 Acc: 0.95540070\nval Loss: 0.83414264 Acc: 0.81725888\n\nEpoch 37/300\n--------------------\ntrain Loss: 0.44070122 Acc: 0.95574913\nval Loss: 0.76894827 Acc: 0.84263959\n\nEpoch 38/300\n--------------------\ntrain Loss: 0.42759520 Acc: 0.96445993\nval Loss: 0.76732480 Acc: 0.82994924\n\nEpoch 39/300\n--------------------\ntrain Loss: 0.44666263 Acc: 0.95121951\nval Loss: 0.81133180 Acc: 0.81979695\n\nEpoch 40/300\n--------------------\ntrain Loss: 0.43544375 Acc: 0.95749129\nval Loss: 0.77528378 Acc: 0.84010152\n\nEpoch 41/300\n--------------------\ntrain Loss: 0.43988246 Acc: 0.95749129\nval Loss: 0.76002305 Acc: 0.83756345\n\nEpoch 42/300\n--------------------\ntrain Loss: 0.44142167 Acc: 0.95714286\nval Loss: 0.78112182 Acc: 0.82487310\n\nEpoch 43/300\n--------------------\ntrain Loss: 0.43722423 Acc: 0.95923345\nval Loss: 0.78719612 Acc: 0.82741117\n\nEpoch 44/300\n--------------------\ntrain Loss: 0.43162573 Acc: 0.96062718\nval Loss: 0.81782086 Acc: 0.82487310\n\nEpoch 45/300\n--------------------\ntrain Loss: 0.43226431 Acc: 0.95714286\nval Loss: 0.77202790 Acc: 0.82994924\n\nEpoch 46/300\n--------------------\ntrain Loss: 0.43042657 Acc: 0.95749129\nval Loss: 0.82802807 Acc: 0.81218274\n\nEpoch 47/300\n--------------------\ntrain Loss: 0.43623541 Acc: 0.95888502\nval Loss: 0.73697588 Acc: 0.84517766\n\nEpoch 48/300\n--------------------\ntrain Loss: 0.42510746 Acc: 0.96376307\nval Loss: 0.75045471 Acc: 0.83756345\n\nEpoch 49/300\n--------------------\ntrain Loss: 0.42998085 Acc: 0.96306620\nval Loss: 
0.83359613 Acc: 0.82233503\n\nEpoch 50/300\n--------------------\ntrain Loss: 0.43108792 Acc: 0.96027875\nval Loss: 0.83352725 Acc: 0.81472081\n\nEpoch 51/300\n--------------------\ntrain Loss: 0.44001133 Acc: 0.95574913\nval Loss: 0.77959656 Acc: 0.83756345\n\nEpoch 52/300\n--------------------\ntrain Loss: 0.43457917 Acc: 0.95574913\nval Loss: 0.80145788 Acc: 0.84517766\n\nEpoch 53/300\n--------------------\ntrain Loss: 0.43746230 Acc: 0.95679443\nval Loss: 0.80634653 Acc: 0.84263959\n\nEpoch 54/300\n--------------------\ntrain Loss: 0.43488286 Acc: 0.95644599\nval Loss: 0.74482666 Acc: 0.85279188\n\nEpoch 55/300\n--------------------\ntrain Loss: 0.43307004 Acc: 0.95783972\nval Loss: 0.78359570 Acc: 0.83756345\n\nEpoch 56/300\n--------------------\ntrain Loss: 0.42531417 Acc: 0.96027875\nval Loss: 0.79628814 Acc: 0.84263959\n\nEpoch 57/300\n--------------------\ntrain Loss: 0.42359076 Acc: 0.96724739\nval Loss: 0.77963397 Acc: 0.83756345\n\nEpoch 58/300\n--------------------\ntrain Loss: 0.42435722 Acc: 0.96167247\nval Loss: 0.80725921 Acc: 0.84010152\n\nEpoch 59/300\n--------------------\ntrain Loss: 0.42360077 Acc: 0.96794425\nval Loss: 0.84035984 Acc: 0.81472081\n\nEpoch 60/300\n--------------------\ntrain Loss: 0.43254929 Acc: 0.96097561\nval Loss: 0.82975231 Acc: 0.83248731\n\nEpoch 61/300\n--------------------\ntrain Loss: 0.41721838 Acc: 0.96655052\nval Loss: 0.82486998 Acc: 0.83248731\n\nEpoch 62/300\n--------------------\ntrain Loss: 0.42029893 Acc: 0.96550523\nval Loss: 0.74322473 Acc: 0.83502538\n\nEpoch 63/300\n--------------------\ntrain Loss: 0.42967167 Acc: 0.96167247\nval Loss: 0.71126670 Acc: 0.85532995\n\nEpoch 64/300\n--------------------\ntrain Loss: 0.41640477 Acc: 0.96585366\nval Loss: 0.82202418 Acc: 0.82994924\n\nEpoch 65/300\n--------------------\ntrain Loss: 0.41519830 Acc: 0.96968641\nval Loss: 0.81712575 Acc: 0.82487310\n\nEpoch 66/300\n--------------------\ntrain Loss: 0.43081548 Acc: 0.95853659\nval Loss: 0.75043372 Acc: 0.84517766\n\nEpoch 67/300\n--------------------\ntrain Loss: 0.42002539 Acc: 0.96550523\nval Loss: 0.72689663 Acc: 0.85786802\n\nEpoch 68/300\n--------------------\ntrain Loss: 0.44961827 Acc: 0.94808362\nval Loss: 0.82575576 Acc: 0.80203046\n\nEpoch 69/300\n--------------------\ntrain Loss: 0.45122380 Acc: 0.94982578\nval Loss: 0.74786557 Acc: 0.83502538\n\nEpoch 70/300\n--------------------\ntrain Loss: 0.43561756 Acc: 0.95923345\nval Loss: 0.71986518 Acc: 0.84517766\n\nEpoch 71/300\n--------------------\ntrain Loss: 0.44186756 Acc: 0.95226481\nval Loss: 0.69362317 Acc: 0.86040609\n\nEpoch 72/300\n--------------------\ntrain Loss: 0.42745571 Acc: 0.96202091\nval Loss: 0.75049616 Acc: 0.84010152\n\nEpoch 73/300\n--------------------\ntrain Loss: 0.42904225 Acc: 0.96132404\nval Loss: 0.70728105 Acc: 0.86040609\n\nEpoch 74/300\n--------------------\ntrain Loss: 0.43073680 Acc: 0.95679443\nval Loss: 0.66716083 Acc: 0.88578680\nNew best model found!\nNew record ACC: 0.8857868020304568, previous record acc: 0.8629441624365481\nNew record acc is SAVED: 0.8857868020304568\n\nEpoch 75/300\n--------------------\ntrain Loss: 0.41961109 Acc: 0.96585366\nval Loss: 0.74408713 Acc: 0.84771574\n\nEpoch 76/300\n--------------------\ntrain Loss: 0.42817543 Acc: 0.96236934\nval Loss: 0.67087705 Acc: 0.87817259\n\nEpoch 77/300\n--------------------\ntrain Loss: 0.42548585 Acc: 0.96132404\nval Loss: 0.74113382 Acc: 0.84771574\n\nEpoch 78/300\n--------------------\ntrain Loss: 0.41624432 Acc: 0.97038328\nval Loss: 0.77202074 Acc: 0.84010152\n\nEpoch 
79/300\n--------------------\ntrain Loss: 0.43011832 Acc: 0.95818815\nval Loss: 0.74556210 Acc: 0.86040609\n\nEpoch 80/300\n--------------------\ntrain Loss: 0.41152218 Acc: 0.97212544\nval Loss: 0.71936851 Acc: 0.86040609\n\nEpoch 81/300\n--------------------\ntrain Loss: 0.42109304 Acc: 0.96550523\nval Loss: 0.73785654 Acc: 0.85786802\n\nEpoch 82/300\n--------------------\ntrain Loss: 0.41572437 Acc: 0.96829268\nval Loss: 0.72824530 Acc: 0.85786802\n\nEpoch 83/300\n--------------------\ntrain Loss: 0.41104174 Acc: 0.97108014\nval Loss: 0.74280316 Acc: 0.85786802\n\nEpoch 84/300\n--------------------\ntrain Loss: 0.40780332 Acc: 0.97038328\nval Loss: 0.72880073 Acc: 0.86548223\n\nEpoch 85/300\n--------------------\ntrain Loss: 0.41485920 Acc: 0.96480836\nval Loss: 0.73432602 Acc: 0.85786802\n\nEpoch 86/300\n--------------------\ntrain Loss: 0.40907608 Acc: 0.97177700\nval Loss: 0.74300770 Acc: 0.85786802\n\nEpoch 87/300\n--------------------\ntrain Loss: 0.40859414 Acc: 0.97038328\nval Loss: 0.74401240 Acc: 0.85279188\n\nEpoch 88/300\n--------------------\ntrain Loss: 0.41659167 Acc: 0.96585366\nval Loss: 0.74176683 Acc: 0.85279188\n\nEpoch 89/300\n--------------------\ntrain Loss: 0.40636962 Acc: 0.97142857\nval Loss: 0.74123829 Acc: 0.85025381\n\nEpoch 90/300\n--------------------\ntrain Loss: 0.41359146 Acc: 0.96968641\nval Loss: 0.77062151 Acc: 0.84010152\n\nEpoch 91/300\n--------------------\ntrain Loss: 0.40508187 Acc: 0.97386760\nval Loss: 0.75870210 Acc: 0.84263959\n\nEpoch 92/300\n--------------------\ntrain Loss: 0.41474315 Acc: 0.96411150\nval Loss: 0.76225663 Acc: 0.85025381\n\nEpoch 93/300\n--------------------\ntrain Loss: 0.41262368 Acc: 0.97003484\nval Loss: 0.75117283 Acc: 0.86040609\n\nEpoch 94/300\n--------------------\ntrain Loss: 0.40755284 Acc: 0.97247387\nval Loss: 0.75361821 Acc: 0.84517766\n\nEpoch 95/300\n--------------------\ntrain Loss: 0.40494398 Acc: 0.97491289\nval Loss: 0.74651039 Acc: 0.85025381\n\nEpoch 96/300\n--------------------\ntrain Loss: 0.40498891 Acc: 0.97247387\nval Loss: 0.75933120 Acc: 0.84263959\n\nEpoch 97/300\n--------------------\ntrain Loss: 0.41057344 Acc: 0.97003484\nval Loss: 0.72568852 Acc: 0.85786802\n\nEpoch 98/300\n--------------------\ntrain Loss: 0.40666989 Acc: 0.97247387\nval Loss: 0.74591490 Acc: 0.84517766\n\nEpoch 99/300\n--------------------\ntrain Loss: 0.40916159 Acc: 0.97108014\nval Loss: 0.72450217 Acc: 0.85786802\n\nEpoch 100/300\n--------------------\ntrain Loss: 0.40076156 Acc: 0.97560976\nval Loss: 0.72953181 Acc: 0.85025381\n\nEpoch 101/300\n--------------------\ntrain Loss: 0.39981452 Acc: 0.97595819\nval Loss: 0.73715254 Acc: 0.85279188\n\nEpoch 102/300\n--------------------\ntrain Loss: 0.40104605 Acc: 0.97595819\nval Loss: 0.74632621 Acc: 0.84517766\n\nEpoch 103/300\n--------------------\ntrain Loss: 0.40542237 Acc: 0.97142857\nval Loss: 0.73307475 Acc: 0.84517766\n\nEpoch 104/300\n--------------------\ntrain Loss: 0.41016472 Acc: 0.97038328\nval Loss: 0.73127879 Acc: 0.84517766\n\nEpoch 105/300\n--------------------\ntrain Loss: 0.40396888 Acc: 0.97317073\nval Loss: 0.74318020 Acc: 0.84263959\n\nEpoch 106/300\n--------------------\ntrain Loss: 0.40928484 Acc: 0.97038328\nval Loss: 0.73871459 Acc: 0.84263959\n\nEpoch 107/300\n--------------------\ntrain Loss: 0.39955929 Acc: 0.97909408\nval Loss: 0.74687118 Acc: 0.84263959\n\nEpoch 108/300\n--------------------\ntrain Loss: 0.40451033 Acc: 0.97212544\nval Loss: 0.74765764 Acc: 0.84263959\n\nEpoch 109/300\n--------------------\ntrain Loss: 0.41128959 Acc: 
0.96933798\nval Loss: 0.76823201 Acc: 0.84263959\n\nEpoch 110/300\n--------------------\ntrain Loss: 0.40941048 Acc: 0.97247387\nval Loss: 0.74107483 Acc: 0.84263959\n\nEpoch 111/300\n--------------------\ntrain Loss: 0.40323405 Acc: 0.97456446\nval Loss: 0.75495655 Acc: 0.84263959\n\nEpoch 112/300\n--------------------\ntrain Loss: 0.40880959 Acc: 0.96829268\nval Loss: 0.74489800 Acc: 0.84263959\n\nEpoch 113/300\n--------------------\ntrain Loss: 0.39427245 Acc: 0.97944251\nval Loss: 0.74392122 Acc: 0.84263959\n\nEpoch 114/300\n--------------------\ntrain Loss: 0.39636758 Acc: 0.97665505\nval Loss: 0.75049006 Acc: 0.84263959\n\nEpoch 115/300\n--------------------\ntrain Loss: 0.40289220 Acc: 0.96933798\nval Loss: 0.74881588 Acc: 0.84263959\n\nEpoch 116/300\n--------------------\ntrain Loss: 0.40922481 Acc: 0.97456446\nval Loss: 0.75254966 Acc: 0.84263959\n\nEpoch 117/300\n--------------------\ntrain Loss: 0.40221643 Acc: 0.97351916\nval Loss: 0.75739597 Acc: 0.84263959\n\nEpoch 118/300\n--------------------\ntrain Loss: 0.39371079 Acc: 0.97944251\nval Loss: 0.73902633 Acc: 0.84517766\n\nEpoch 119/300\n--------------------\ntrain Loss: 0.40370486 Acc: 0.97386760\nval Loss: 0.74802745 Acc: 0.84263959\n\nEpoch 120/300\n--------------------\ntrain Loss: 0.40650206 Acc: 0.97108014\nval Loss: 0.76380232 Acc: 0.84263959\n\nEpoch 121/300\n--------------------\ntrain Loss: 0.40380084 Acc: 0.97212544\nval Loss: 0.75287916 Acc: 0.84263959\n\nEpoch 122/300\n--------------------\ntrain Loss: 0.41215803 Acc: 0.96585366\nval Loss: 0.75609041 Acc: 0.84263959\n\nEpoch 123/300\n--------------------\ntrain Loss: 0.40564699 Acc: 0.96968641\nval Loss: 0.75132988 Acc: 0.84263959\n\nEpoch 124/300\n--------------------\ntrain Loss: 0.40090712 Acc: 0.97421603\nval Loss: 0.74706326 Acc: 0.84517766\n\nEpoch 125/300\n--------------------\ntrain Loss: 0.40256565 Acc: 0.97700348\nval Loss: 0.74267610 Acc: 0.84517766\n\nEpoch 126/300\n--------------------\ntrain Loss: 0.41634537 Acc: 0.96515679\nval Loss: 0.74145975 Acc: 0.84517766\n\nEpoch 127/300\n--------------------\ntrain Loss: 0.40469982 Acc: 0.97282230\nval Loss: 0.75278142 Acc: 0.84263959\n\nEpoch 128/300\n--------------------\ntrain Loss: 0.40244130 Acc: 0.97247387\nval Loss: 0.75719665 Acc: 0.84263959\n\nEpoch 129/300\n--------------------\ntrain Loss: 0.40313329 Acc: 0.97630662\nval Loss: 0.75300915 Acc: 0.84263959\n\nEpoch 130/300\n--------------------\ntrain Loss: 0.40369024 Acc: 0.97526132\nval Loss: 0.74973791 Acc: 0.84263959\n\nEpoch 131/300\n--------------------\ntrain Loss: 0.41199547 Acc: 0.96585366\nval Loss: 0.74191673 Acc: 0.84263959\n\nEpoch 132/300\n--------------------\ntrain Loss: 0.40046060 Acc: 0.97491289\nval Loss: 0.74948985 Acc: 0.84263959\n\nEpoch 133/300\n--------------------\ntrain Loss: 0.39744091 Acc: 0.97804878\nval Loss: 0.75770802 Acc: 0.84263959\n\nEpoch 134/300\n--------------------\ntrain Loss: 0.40224196 Acc: 0.97317073\nval Loss: 0.74508033 Acc: 0.84263959\n\nEpoch 135/300\n--------------------\ntrain Loss: 0.40420449 Acc: 0.97212544\nval Loss: 0.76987769 Acc: 0.84263959\n\nEpoch 136/300\n--------------------\ntrain Loss: 0.40770570 Acc: 0.97177700\nval Loss: 0.74876810 Acc: 0.84517766\n\nEpoch 137/300\n--------------------\ntrain Loss: 0.40043709 Acc: 0.97526132\nval Loss: 0.75306465 Acc: 0.84263959\n\nEpoch 138/300\n--------------------\ntrain Loss: 0.40024679 Acc: 0.97560976\nval Loss: 0.74483396 Acc: 0.84263959\n\nEpoch 139/300\n--------------------\ntrain Loss: 0.40113429 Acc: 0.97595819\nval Loss: 0.75275326 Acc: 
0.84263959\n\nEpoch 140/300\n--------------------\ntrain Loss: 0.41189863 Acc: 0.96968641\nval Loss: 0.74696247 Acc: 0.84517766\n\nEpoch 141/300\n--------------------\ntrain Loss: 0.40408249 Acc: 0.97421603\nval Loss: 0.74812344 Acc: 0.84517766\n\nEpoch 142/300\n--------------------\ntrain Loss: 0.39415051 Acc: 0.97735192\nval Loss: 0.73482271 Acc: 0.84517766\n\nEpoch 143/300\n--------------------\ntrain Loss: 0.40325190 Acc: 0.97317073\nval Loss: 0.75644791 Acc: 0.84517766\n\nEpoch 144/300\n--------------------\ntrain Loss: 0.40313440 Acc: 0.97770035\nval Loss: 0.74798757 Acc: 0.84517766\n\nEpoch 145/300\n--------------------\ntrain Loss: 0.39936030 Acc: 0.97595819\nval Loss: 0.73185033 Acc: 0.84517766\n\nEpoch 146/300\n--------------------\ntrain Loss: 0.39799953 Acc: 0.97386760\nval Loss: 0.73818926 Acc: 0.84517766\n\nEpoch 147/300\n--------------------\ntrain Loss: 0.40364861 Acc: 0.97247387\nval Loss: 0.74266221 Acc: 0.84517766\n\nEpoch 148/300\n--------------------\ntrain Loss: 0.40630591 Acc: 0.97177700\nval Loss: 0.74243656 Acc: 0.84517766\n\nEpoch 149/300\n--------------------\ntrain Loss: 0.40126704 Acc: 0.97247387\nval Loss: 0.75415949 Acc: 0.84263959\n\nEpoch 150/300\n--------------------\ntrain Loss: 0.39729626 Acc: 0.97804878\nval Loss: 0.73547044 Acc: 0.84517766\n\nEpoch 151/300\n--------------------\ntrain Loss: 0.40193698 Acc: 0.97560976\nval Loss: 0.74948283 Acc: 0.84517766\n\nEpoch 152/300\n--------------------\ntrain Loss: 0.39941811 Acc: 0.97874564\nval Loss: 0.73269870 Acc: 0.85279188\n\nEpoch 153/300\n--------------------\ntrain Loss: 0.40687962 Acc: 0.96968641\nval Loss: 0.74812717 Acc: 0.84263959\n\nEpoch 154/300\n--------------------\ntrain Loss: 0.39924486 Acc: 0.97595819\nval Loss: 0.75235899 Acc: 0.84263959\n\nEpoch 155/300\n--------------------\ntrain Loss: 0.39438013 Acc: 0.97770035\nval Loss: 0.74494623 Acc: 0.84263959\n\nEpoch 156/300\n--------------------\ntrain Loss: 0.40481182 Acc: 0.97108014\nval Loss: 0.75155656 Acc: 0.84517766\n\nEpoch 157/300\n--------------------\ntrain Loss: 0.40329746 Acc: 0.97421603\nval Loss: 0.73828199 Acc: 0.84517766\n\nEpoch 158/300\n--------------------\ntrain Loss: 0.40415878 Acc: 0.97456446\nval Loss: 0.74645362 Acc: 0.84517766\n\nEpoch 159/300\n--------------------\ntrain Loss: 0.40011319 Acc: 0.97491289\nval Loss: 0.75416870 Acc: 0.84263959\n\nEpoch 160/300\n--------------------\ntrain Loss: 0.39883057 Acc: 0.97456446\nval Loss: 0.74289167 Acc: 0.84517766\n\nEpoch 161/300\n--------------------\ntrain Loss: 0.39889145 Acc: 0.97491289\nval Loss: 0.75350676 Acc: 0.84263959\n\nEpoch 162/300\n--------------------\ntrain Loss: 0.39898317 Acc: 0.97630662\nval Loss: 0.75040332 Acc: 0.84517766\n\nEpoch 163/300\n--------------------\ntrain Loss: 0.40932940 Acc: 0.97003484\nval Loss: 0.76675559 Acc: 0.84263959\n\nEpoch 164/300\n--------------------\ntrain Loss: 0.40077837 Acc: 0.97665505\nval Loss: 0.76056289 Acc: 0.84263959\n\nEpoch 165/300\n--------------------\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8c40e53d4c9b25289408eb7846f29e7fbea5c7 | 202,800 | ipynb | Jupyter Notebook | materials/lessons/lesson_14_notebook.ipynb | ethan-campbell/OCEAN_215 | 4fb7a0f07941c03d7931f7ad585533645355928b | [
"MIT"
] | null | null | null | materials/lessons/lesson_14_notebook.ipynb | ethan-campbell/OCEAN_215 | 4fb7a0f07941c03d7931f7ad585533645355928b | [
"MIT"
] | null | null | null | materials/lessons/lesson_14_notebook.ipynb | ethan-campbell/OCEAN_215 | 4fb7a0f07941c03d7931f7ad585533645355928b | [
"MIT"
] | 2 | 2022-02-19T08:00:26.000Z | 2022-03-07T16:06:01.000Z | 202,800 | 202,800 | 0.949606 | [
[
[
"# Video lesson \\#14 notebook",
"_____no_output_____"
],
[
"## Part 1: `SciPy` linear regression",
"_____no_output_____"
]
],
[
[
"# Import two modules from SciPy package\nfrom scipy import interpolate, stats\n\n# Also import other useful libraries\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# Sample noisy data with a trend\nx = np.arange(0,10,0.1)\ny = 5*x - 25 + np.random.normal(0,25,size=100)",
"_____no_output_____"
],
[
"# Plot sample data\nplt.figure(figsize=(7,4)) # dpi=300\nplt.scatter(x,y,c='k',zorder=2)\nplt.plot(x,y,c='k',zorder=3)\nplt.grid(zorder=1)\nplt.xlabel('x')\nplt.ylabel('y');",
"_____no_output_____"
],
[
"# Plot sample data with trend line\nplt.figure(figsize=(7,4))\nplt.scatter(x,y,c='k',zorder=2)\nplt.plot(x,y,c='k',zorder=3)\nplt.plot(x,5*x - 25,c='r',zorder=4)\nplt.grid(zorder=1)\nplt.xlabel('x')\nplt.ylabel('y');",
"_____no_output_____"
],
[
"# Sample noisy data with a quadratic trend\nx = np.arange(0,10,0.1)\ny_quad = 4*(x-5)**2 + 5*x - 25 + np.random.normal(0,25,size=100)",
"_____no_output_____"
],
[
"# Plot sample quadratic data with trend line\nplt.figure(figsize=(7,4))\nplt.scatter(x,y_quad,c='k',zorder=2)\nplt.plot(x,y_quad,c='k',zorder=3)\nplt.plot(x,6*x,c='r',zorder=4)\nplt.grid(zorder=1)\nplt.xlabel('x')\nplt.ylabel('y');",
"_____no_output_____"
],
[
"# Sample noisy data with outliers and a trend\nx = np.arange(0,10,0.1)\ny_outlier = 25*x - 150 + np.random.normal(0,25,size=100)\ny_outlier[-10:] -= 200",
"_____no_output_____"
],
[
"# Plot sample data with trend line\nm, b = stats.linregress(x,y_outlier)[0:2]\nplt.figure(figsize=(7,4))\nplt.scatter(x,y_outlier,c='k',zorder=2)\nplt.plot(x,y_outlier,c='k',zorder=3)\nplt.plot(x,m*x + b,c='r',zorder=4,label='Regression')\nplt.plot(x,25*x - 150,'r--',zorder=4,label='Regression without outliers')\nplt.legend()\nplt.grid(zorder=1)\nplt.xlabel('x')\nplt.ylabel('y');",
"_____no_output_____"
],
[
"# Linear regression on original noisy data\nslope, intercept, rvalue, pvalue, stderr = stats.linregress(x,y)\n\nprint('The slope is',round(slope,2))\nprint('The y-intercept is',round(intercept,2))\nprint('The r-value is',round(rvalue,2))\nprint('The p-value is',pvalue)\nprint('The standard error is',round(stderr,2))",
"The slope is 4.29\nThe y-intercept is -16.16\nThe r-value is 0.47\nThe p-value is 6.830058503530004e-07\nThe standard error is 0.81\n"
],
[
"# This is how you can make an array of Datetime objects useable in a regression\nimport matplotlib.dates as mdates\n\nt = np.array([datetime(2020,1,1),datetime(2020,2,1),datetime(2020,3,1)])\nt_as_numbers = mdates.date2num(t)\n\nprint(t_as_numbers)",
"[737425. 737456. 737485.]\n"
]
],
[
[
"## Part 2: `SciPy` 1-D interpolation (for 2-D examples, stay tuned for Class \\#14 activities)",
"_____no_output_____"
]
],
[
[
"# Monthly climatological high temperatures for Seattle\nmid_months = np.array([datetime(2020,mo,15) for mo in np.arange(1,13)]) # months 1 through 12\nhigh_t = np.array([45,48,52,58,64,69,72,73,67,59,51,47]) # units: °F\n\n# Plot original data\nplt.figure(figsize=(7,4))\nplt.plot(mid_months,high_t,c='k',lw=2,zorder=2)\nplt.scatter(mid_months,high_t,c='k',zorder=3)\nplt.ylabel('Temperature (°F)')\nplt.xlabel('Month')\nplt.title('Climatological high temperatures in Seattle');\nplt.grid(alpha=0.5)\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b'))",
"_____no_output_____"
],
[
"# Interpolation to 1st of each month\nstart_months = np.array([datetime(2020,mo,1) for mo in np.arange(1,13)])\ninterp_func = interpolate.interp1d(mdates.date2num(mid_months),high_t,\n fill_value='extrapolate',bounds_error=False)\nhigh_t_interpolated = interp_func(mdates.date2num(start_months))\n\n# Plot interpolated data\nplt.figure(figsize=(7,4))\nplt.plot(mid_months,high_t,c='k',lw=2,zorder=2,label='Original')\nplt.scatter(mid_months,high_t,c='k',zorder=3)\nplt.plot(start_months,high_t_interpolated,c='green',ls='--',lw=2,zorder=4,label='Interpolated')\nplt.scatter(start_months,high_t_interpolated,c='green',zorder=5)\nplt.ylabel('Temperature (°F)')\nplt.xlabel('Month')\nplt.title('Climatological high temperatures in Seattle')\nplt.legend()\nplt.grid(alpha=0.5)\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b'))",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ec8c41105c5a4204ac6005fd959b94765b3e934b | 406,422 | ipynb | Jupyter Notebook | scripts/Model_Creation.ipynb | quicksell-louis/energyMeter | c7faefc8f25a4752ec7aebbe26a915a1b1ea3789 | [
"MIT"
] | 22 | 2019-03-23T19:23:57.000Z | 2022-03-15T11:08:38.000Z | scripts/Model_Creation.ipynb | quicksell-louis/energyMeter | c7faefc8f25a4752ec7aebbe26a915a1b1ea3789 | [
"MIT"
] | 3 | 2019-03-15T18:52:47.000Z | 2019-03-20T22:32:32.000Z | scripts/Model_Creation.ipynb | quicksell-louis/energyMeter | c7faefc8f25a4752ec7aebbe26a915a1b1ea3789 | [
"MIT"
] | 10 | 2019-06-16T13:05:02.000Z | 2022-03-06T18:40:14.000Z | 301.053333 | 105,640 | 0.873885 | [
[
[
"------------------------------------------------------------------\n# IMPORT LIBRARIES\n------------------------------------------------------------------",
"_____no_output_____"
]
],
[
[
"# import standard libraries\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Activation\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import svm\nimport keras\n# tf.enable_eager_execution() #N.B. enabling this causes conflicts with cp_callback and tf.argmax\n\n#import plotting libraries\nimport warnings # current version of seaborn generates a bunch of warnings that we'll ignore\nwarnings.filterwarnings(\"ignore\")\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# import file handler libraries\n!pip install -U -q PyDrive\nimport sys, os, shutil, zipfile,glob\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials\nfrom sklearn.metrics import confusion_matrix, accuracy_score,roc_curve, auc,roc_auc_score,classification_report\n\n#clear existing files\n!rm -rf /content/training\n!rm -rf /content/data_train\n\n# Authenticate and create the drive client for saving your model.\nfrom google.colab import drive\ndrive.mount('/content/gdrive',force_remount=True)\n!ls '/content/gdrive/My Drive/Models'\n\n#create directory to store training data\nif not os.path.exists('training'): \n os.makedirs('training')\n print('Created training directory')\n \n# Authenticate and create the PyDrive client for loading training data.\nauth.authenticate_user() \ngauth = GoogleAuth()\ngauth.credentials = GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)\n\n",
"Using TensorFlow backend.\n"
]
],
[
[
"------------------------------------------------------------------\n# LINK DATA TO INSTANCE\n------------------------------------------------------------------",
"_____no_output_____"
]
],
[
[
"#grab this zip id from the suffix of your google drive zip file sharing link\nzip_id = '1ShWZ4olv0SdT6jpRbVgF2C7q0tZZVKYc' #FILE ID CREATED FROM SHARING URL (ID=....)\n\n# DOWNLOAD TRAINING DATA ZIP\nprint (\"Downloading zip file\")\nmyzip = drive.CreateFile({'id': zip_id})\nmyzip.GetContentFile('data_train.zip')\n\n# UNZIP ZIP\nprint (\"Uncompressing zip file\")\nzip_ref = zipfile.ZipFile('data_train.zip', 'r') #file to be extracted\nzip_ref.extractall('./data_train') #where the files are extracted\nzip_ref.close()\n\nmodel_dir_path = '/content/gdrive/My Drive/Models'\n# Check that this directory exists.\nos.path.isdir(model_dir_path)\n\n# Make a new directory for our model\nexample_path = os.path.join(model_dir_path, 'emonpi')\nprint('\\nexample_path',example_path)\n\nif not os.path.isdir(example_path):\n os.makedirs(example_path)\n print('example path created')\n\ncheckpoint_path = os.path.join(example_path, 'wb_model_2.h5')\nprint('\\ncheckpoint_path',checkpoint_path)\n# Create a keras callback that saves our model during training.\ncp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path)",
"Downloading zip file\nUncompressing zip file\n\nexample_path /content/gdrive/My Drive/Models/emonpi\n\ncheckpoint_path /content/gdrive/My Drive/Models/emonpi/wb_model_2.h5\n"
]
],
[
[
"------------------------------------------------------------------\n# LOAD AND PREPROCESS DATA\n------------------------------------------------------------------",
"_____no_output_____"
]
],
[
[
"# Define path to training data directory\ntraining_path = '/content/data_train/data_train/'\nprint('Finished unzipping, contents:',os.listdir(training_path))\n\ndata_path = training_path \n\n# Function to parse through training data, reshape and append to a list of samples\n# params: filepath - path to training data, labels - optional list of labels, optional list of training data\n# returns: labels - list of sample labels, data - list of reshaped data samples\ndef build_dataset(filepath,labels=[],data=[]):\n vars = ['time', 'power_factor', 'phase_angle', 'power_real', 'power_reactive', 'power_apparent', 'vrms', 'irms']\n cwd = os.chdir(filepath)\n for appliance_type in os.listdir(cwd):\n if appliance_type.endswith('.csv'):\n label = appliance_type.split(\"_\")[0] #label is in title, before \"_\"\n labels.append(label)\n app_arr = np.genfromtxt(appliance_type, delimiter=',')\n data.append(np.array(app_arr))\n return labels,data\n \n# extracts CSV into labels & data\ndataset_labels_str,dataset_data = build_dataset(data_path) \n\n# convert to numpy array\ndataset_data_np = np.asarray(dataset_data) \n\n#let's make sure everything is shipshape\nprint(\"dataset_data shape: \",dataset_data_np.shape)\n",
"Finished unzipping, contents: ['cell_28.csv', 'sadlamp_50.csv', 'cell_12.csv', 'kettle_19.csv', 'laptop_72.csv', 'monitor_97.csv', 'desklamp_28.csv', 'laptop_74.csv', 'monitor_17.csv', 'fan_1.csv', 'none_97.csv', 'laptop_39.csv', 'cell_53.csv', 'monitor_35.csv', 'monitor_40.csv', 'sadlamp_47.csv', 'kettle_71.csv', 'kettle_48.csv', 'desklamp_89.csv', 'fan_73.csv', 'laptop_55.csv', 'none_15.csv', 'cell_84.csv', 'fan_4.csv', 'sadlamp_4.csv', 'kettle_53.csv', 'sadlamp_6.csv', 'laptop_26.csv', 'monitor_13.csv', 'laptop_20.csv', 'sadlamp_55.csv', 'monitor_5.csv', 'fan_56.csv', 'none_3.csv', 'none_68.csv', 'sadlamp_81.csv', 'desklamp_8.csv', 'sadlamp_35.csv', 'sadlamp_11.csv', 'none_21.csv', 'desklamp_58.csv', 'kettle_95.csv', 'cell_36.csv', 'laptop_90.csv', 'cell_76.csv', 'laptop_11.csv', 'desklamp_98.csv', 'monitor_76.csv', 'monitor_41.csv', 'cell_86.csv', 'monitor_94.csv', 'desklamp_18.csv', 'laptop_92.csv', 'monitor_36.csv', 'kettle_55.csv', 'desklamp_36.csv', 'laptop_22.csv', 'desklamp_97.csv', 'cell_25.csv', 'kettle_51.csv', 'sadlamp_76.csv', 'none_7.csv', 'kettle_1.csv', 'cell_40.csv', 'kettle_88.csv', 'fan_62.csv', 'laptop_41.csv', 'desklamp_11.csv', 'kettle_38.csv', 'fan_97.csv', 'fan_57.csv', 'monitor_23.csv', 'monitor_68.csv', 'none_86.csv', 'desklamp_73.csv', 'desklamp_48.csv', 'none_91.csv', 'monitor_55.csv', 'laptop_1.csv', 'fan_100.csv', 'cell_77.csv', 'kettle_31.csv', 'kettle_2.csv', 'monitor_22.csv', 'laptop_25.csv', 'cell_32.csv', 'desklamp_47.csv', 'kettle_69.csv', 'desklamp_27.csv', 'desklamp_3.csv', 'laptop_60.csv', 'kettle_76.csv', 'cell_63.csv', 'none_55.csv', 'sadlamp_83.csv', 'none_24.csv', 'none_87.csv', 'fan_6.csv', 'cell_91.csv', 'sadlamp_61.csv', 'sadlamp_72.csv', 'cell_5.csv', 'laptop_98.csv', 'kettle_73.csv', 'desklamp_26.csv', 'sadlamp_78.csv', 'cell_27.csv', 'sadlamp_23.csv', 'monitor_24.csv', 'fan_92.csv', 'kettle_75.csv', 'sadlamp_59.csv', 'monitor_32.csv', 'cell_30.csv', 'desklamp_38.csv', 'none_90.csv', 'cell_47.csv', 'kettle_4.csv', 'sadlamp_26.csv', 'cell_49.csv', 'sadlamp_93.csv', 'cell_48.csv', 'laptop_58.csv', 'none_100.csv', 'laptop_15.csv', 'monitor_63.csv', 'none_27.csv', 'kettle_20.csv', 'desklamp_29.csv', 'cell_21.csv', 'desklamp_35.csv', 'none_82.csv', 'laptop_52.csv', 'fan_94.csv', 'none_4.csv', 'none_58.csv', 'laptop_77.csv', 'sadlamp_28.csv', 'monitor_51.csv', 'monitor_71.csv', 'cell_35.csv', 'cell_8.csv', 'desklamp_86.csv', 'cell_75.csv', 'laptop_3.csv', 'fan_13.csv', 'desklamp_25.csv', 'laptop_27.csv', 'desklamp_62.csv', 'none_44.csv', 'monitor_78.csv', 'sadlamp_2.csv', 'laptop_69.csv', 'fan_31.csv', 'cell_82.csv', 'fan_12.csv', 'none_31.csv', 'laptop_35.csv', 'none_98.csv', 'laptop_21.csv', 'kettle_35.csv', 'none_20.csv', 'desklamp_7.csv', 'none_73.csv', 'desklamp_42.csv', 'none_2.csv', 'desklamp_40.csv', 'cell_59.csv', 'monitor_15.csv', 'fan_44.csv', 'fan_89.csv', 'sadlamp_67.csv', 'sadlamp_38.csv', 'cell_43.csv', 'monitor_53.csv', 'none_6.csv', 'sadlamp_62.csv', 'kettle_29.csv', 'kettle_8.csv', 'fan_93.csv', 'sadlamp_15.csv', 'sadlamp_87.csv', 'cell_57.csv', 'fan_86.csv', 'monitor_30.csv', 'none_1.csv', 'fan_14.csv', 'none_18.csv', 'none_37.csv', 'laptop_63.csv', 'monitor_60.csv', 'desklamp_50.csv', 'kettle_52.csv', 'cell_18.csv', 'cell_51.csv', 'monitor_27.csv', 'fan_81.csv', 'monitor_99.csv', 'cell_68.csv', 'fan_3.csv', 'monitor_82.csv', 'cell_19.csv', 'laptop_32.csv', 'none_38.csv', 'fan_25.csv', 'desklamp_49.csv', 'desklamp_41.csv', 'monitor_66.csv', 'monitor_31.csv', 'laptop_4.csv', 'laptop_88.csv', 'laptop_87.csv', 'none_99.csv', 
'cell_38.csv', 'sadlamp_82.csv', 'kettle_65.csv', 'cell_16.csv', 'laptop_95.csv', 'none_79.csv', 'cell_79.csv', 'cell_4.csv', 'desklamp_6.csv', 'cell_10.csv', 'monitor_77.csv', 'fan_88.csv', 'fan_2.csv', 'laptop_13.csv', 'kettle_59.csv', 'desklamp_87.csv', 'sadlamp_9.csv', 'kettle_91.csv', 'desklamp_66.csv', 'sadlamp_14.csv', 'none_89.csv', 'cell_33.csv', 'laptop_85.csv', 'none_69.csv', 'none_71.csv', 'fan_19.csv', 'kettle_42.csv', 'none_61.csv', 'fan_67.csv', 'cell_94.csv', 'sadlamp_60.csv', 'kettle_84.csv', 'monitor_46.csv', 'desklamp_96.csv', 'sadlamp_24.csv', 'laptop_17.csv', 'monitor_79.csv', 'sadlamp_51.csv', 'fan_60.csv', 'fan_28.csv', 'laptop_59.csv', 'sadlamp_56.csv', 'cell_66.csv', 'desklamp_90.csv', 'sadlamp_49.csv', 'none_32.csv', 'desklamp_5.csv', 'laptop_86.csv', 'none_35.csv', 'sadlamp_68.csv', 'sadlamp_37.csv', 'cell_58.csv', 'desklamp_23.csv', 'desklamp_82.csv', 'cell_65.csv', 'sadlamp_41.csv', 'kettle_32.csv', 'none_47.csv', 'monitor_37.csv', 'kettle_70.csv', 'none_30.csv', 'cell_71.csv', 'none_5.csv', 'sadlamp_79.csv', 'fan_38.csv', 'monitor_80.csv', 'fan_79.csv', 'kettle_47.csv', 'sadlamp_98.csv', 'cell_37.csv', 'fan_42.csv', 'monitor_16.csv', 'fan_90.csv', 'laptop_31.csv', 'laptop_84.csv', 'sadlamp_52.csv', 'laptop_75.csv', 'sadlamp_97.csv', 'monitor_59.csv', 'monitor_88.csv', 'cell_69.csv', 'kettle_60.csv', 'none_59.csv', 'desklamp_10.csv', 'laptop_2.csv', 'kettle_79.csv', 'fan_55.csv', 'fan_80.csv', 'cell_52.csv', 'sadlamp_17.csv', 'monitor_39.csv', 'none_11.csv', 'kettle_64.csv', 'laptop_70.csv', 'laptop_56.csv', 'laptop_54.csv', 'laptop_80.csv', 'kettle_3.csv', 'none_81.csv', 'laptop_9.csv', 'cell_95.csv', 'kettle_68.csv', 'fan_11.csv', 'laptop_71.csv', 'kettle_10.csv', 'cell_31.csv', 'monitor_6.csv', 'monitor_33.csv', 'laptop_91.csv', 'none_83.csv', 'fan_96.csv', 'monitor_98.csv', 'sadlamp_42.csv', 'cell_7.csv', 'none_28.csv', 'kettle_78.csv', 'desklamp_65.csv', 'kettle_80.csv', 'monitor_89.csv', 'monitor_67.csv', 'laptop_29.csv', 'sadlamp_86.csv', 'sadlamp_73.csv', 'laptop_53.csv', 'cell_29.csv', 'none_43.csv', 'laptop_5.csv', 'kettle_18.csv', 'laptop_51.csv', 'none_50.csv', 'monitor_42.csv', 'kettle_27.csv', 'laptop_33.csv', 'kettle_45.csv', 'monitor_26.csv', 'cell_64.csv', 'monitor_86.csv', 'none_9.csv', 'sadlamp_43.csv', 'fan_37.csv', 'laptop_23.csv', 'cell_85.csv', 'kettle_44.csv', 'sadlamp_22.csv', 'laptop_14.csv', 'sadlamp_13.csv', 'monitor_91.csv', 'cell_54.csv', 'none_13.csv', 'fan_34.csv', 'fan_82.csv', 'desklamp_99.csv', 'fan_47.csv', 'monitor_100.csv', 'laptop_73.csv', 'cell_50.csv', 'desklamp_59.csv', 'desklamp_75.csv', 'fan_8.csv', 'fan_66.csv', 'fan_58.csv', 'fan_91.csv', 'monitor_50.csv', 'fan_9.csv', 'monitor_9.csv', 'kettle_89.csv', 'cell_80.csv', 'cell_22.csv', 'monitor_96.csv', 'kettle_24.csv', 'none_62.csv', 'kettle_43.csv', 'fan_22.csv', 'desklamp_14.csv', 'sadlamp_89.csv', 'desklamp_95.csv', 'monitor_57.csv', 'kettle_34.csv', 'fan_36.csv', 'kettle_6.csv', 'monitor_75.csv', 'laptop_36.csv', 'desklamp_13.csv', 'sadlamp_36.csv', 'none_94.csv', 'laptop_81.csv', 'cell_39.csv', 'none_70.csv', 'monitor_21.csv', 'laptop_49.csv', 'laptop_8.csv', 'desklamp_72.csv', 'none_54.csv', 'laptop_45.csv', 'kettle_90.csv', 'sadlamp_84.csv', 'desklamp_21.csv', 'sadlamp_85.csv', 'fan_74.csv', 'monitor_49.csv', 'sadlamp_94.csv', 'none_17.csv', 'cell_9.csv', 'kettle_15.csv', 'none_75.csv', 'kettle_37.csv', 'laptop_48.csv', 'monitor_29.csv', 'fan_64.csv', 'none_39.csv', 'cell_42.csv', 'desklamp_53.csv', 'desklamp_67.csv', 'cell_97.csv', 'laptop_83.csv', 
'kettle_63.csv', 'cell_90.csv', 'fan_41.csv', 'none_8.csv', 'kettle_17.csv', 'fan_39.csv', 'monitor_48.csv', 'kettle_85.csv', 'desklamp_20.csv', 'cell_46.csv', 'desklamp_45.csv', 'cell_83.csv', 'none_63.csv', 'none_33.csv', 'cell_60.csv', 'kettle_5.csv', 'cell_81.csv', 'desklamp_55.csv', 'sadlamp_19.csv', 'cell_56.csv', 'fan_53.csv', 'desklamp_33.csv', 'cell_41.csv', 'desklamp_57.csv', 'sadlamp_16.csv', 'laptop_99.csv', 'desklamp_51.csv', 'none_42.csv', 'kettle_36.csv', 'none_53.csv', 'desklamp_2.csv', 'fan_65.csv', 'monitor_93.csv', 'fan_63.csv', 'cell_55.csv', 'monitor_92.csv', 'fan_68.csv', 'cell_15.csv', 'none_40.csv', 'sadlamp_40.csv', 'laptop_57.csv', 'fan_87.csv', 'desklamp_92.csv', 'monitor_43.csv', 'monitor_52.csv', 'monitor_14.csv', 'monitor_81.csv', 'kettle_49.csv', 'sadlamp_95.csv', 'sadlamp_54.csv', 'kettle_92.csv', 'desklamp_91.csv', 'laptop_24.csv', 'fan_49.csv', 'monitor_10.csv', 'monitor_70.csv', 'cell_13.csv', 'monitor_12.csv', 'kettle_13.csv', 'desklamp_79.csv', 'desklamp_64.csv', 'cell_45.csv', 'fan_54.csv', 'cell_17.csv', 'cell_72.csv', 'laptop_62.csv', 'monitor_1.csv', 'cell_99.csv', 'cell_67.csv', 'fan_59.csv', 'cell_26.csv', 'desklamp_94.csv', 'desklamp_30.csv', 'kettle_12.csv', 'monitor_54.csv', 'kettle_9.csv', 'monitor_73.csv', 'kettle_25.csv', 'fan_21.csv', 'kettle_66.csv', 'sadlamp_18.csv', 'desklamp_44.csv', 'sadlamp_34.csv', 'fan_71.csv', 'monitor_11.csv', 'monitor_25.csv', 'cell_70.csv', 'none_22.csv', 'kettle_82.csv', 'sadlamp_71.csv', 'fan_7.csv', 'monitor_44.csv', 'cell_98.csv', 'cell_3.csv', 'kettle_58.csv', 'desklamp_78.csv', 'laptop_93.csv', 'none_19.csv', 'kettle_56.csv', 'kettle_50.csv', 'monitor_69.csv', 'cell_11.csv', 'none_29.csv', 'sadlamp_44.csv', 'none_76.csv', 'laptop_66.csv', 'none_56.csv', 'monitor_95.csv', 'desklamp_83.csv', 'laptop_34.csv', 'laptop_82.csv', 'none_45.csv', 'laptop_10.csv', 'cell_93.csv', 'kettle_93.csv', 'monitor_87.csv', 'desklamp_69.csv', 'cell_24.csv', 'sadlamp_46.csv', 'cell_62.csv', 'desklamp_61.csv', 'laptop_40.csv', 'sadlamp_32.csv', 'sadlamp_74.csv', 'sadlamp_58.csv', 'kettle_99.csv', 'sadlamp_80.csv', 'none_88.csv', 'monitor_47.csv', 'fan_98.csv', 'fan_83.csv', 'monitor_58.csv', 'fan_33.csv', 'monitor_83.csv', 'cell_44.csv', 'kettle_23.csv', 'fan_29.csv', 'none_34.csv', 'sadlamp_1.csv', 'kettle_28.csv', 'cell_88.csv', 'fan_30.csv', 'monitor_8.csv', 'none_95.csv', 'fan_5.csv', 'kettle_41.csv', 'laptop_18.csv', 'fan_77.csv', 'kettle_97.csv', 'laptop_6.csv', 'monitor_45.csv', 'desklamp_34.csv', 'desklamp_74.csv', 'desklamp_77.csv', 'sadlamp_10.csv', 'sadlamp_39.csv', 'laptop_7.csv', 'monitor_2.csv', 'sadlamp_57.csv', 'monitor_38.csv', 'fan_78.csv', 'none_78.csv', 'kettle_87.csv', 'laptop_61.csv', 'kettle_61.csv', 'none_23.csv', 'none_52.csv', 'sadlamp_21.csv', 'monitor_85.csv', 'desklamp_80.csv', 'fan_20.csv', 'desklamp_22.csv', 'sadlamp_29.csv', 'desklamp_15.csv', 'none_84.csv', 'cell_78.csv', 'monitor_84.csv', 'monitor_61.csv', 'desklamp_24.csv', 'none_51.csv', 'laptop_50.csv', 'none_74.csv', 'desklamp_12.csv', 'monitor_28.csv', 'desklamp_31.csv', 'fan_72.csv', 'monitor_62.csv', 'kettle_83.csv', 'laptop_76.csv', 'none_60.csv', 'desklamp_85.csv', 'sadlamp_96.csv', 'none_57.csv', 'kettle_11.csv', 'laptop_42.csv', 'fan_70.csv', 'laptop_46.csv', 'kettle_22.csv', 'laptop_94.csv', 'none_26.csv', 'cell_61.csv', 'kettle_81.csv', 'laptop_12.csv', 'laptop_64.csv', 'none_10.csv', 'desklamp_9.csv', 'fan_45.csv', 'monitor_65.csv', 'kettle_14.csv', 'fan_40.csv', 'monitor_56.csv', 'desklamp_19.csv', 'desklamp_32.csv', 'fan_50.csv', 
'laptop_100.csv', 'kettle_21.csv', 'laptop_79.csv', 'cell_20.csv', 'sadlamp_20.csv', 'fan_43.csv', 'cell_100.csv', 'monitor_72.csv', 'laptop_44.csv', 'sadlamp_33.csv', 'kettle_94.csv', 'fan_26.csv', 'kettle_46.csv', 'laptop_47.csv', 'laptop_89.csv', 'desklamp_100.csv', 'laptop_16.csv', 'sadlamp_3.csv', 'monitor_18.csv', 'desklamp_71.csv', 'desklamp_93.csv', 'kettle_54.csv', 'fan_76.csv', 'desklamp_60.csv', 'kettle_7.csv', 'desklamp_68.csv', 'none_49.csv', 'sadlamp_45.csv', 'sadlamp_90.csv', 'sadlamp_65.csv', 'sadlamp_100.csv', 'cell_1.csv', 'cell_87.csv', 'fan_15.csv', 'sadlamp_12.csv', 'desklamp_70.csv', 'kettle_100.csv', 'laptop_43.csv', 'desklamp_84.csv', 'desklamp_88.csv', 'none_64.csv', 'laptop_30.csv', 'sadlamp_69.csv', 'laptop_67.csv', 'desklamp_46.csv', 'none_25.csv', 'desklamp_63.csv', 'cell_14.csv', 'fan_75.csv', 'desklamp_39.csv', 'desklamp_56.csv', 'fan_24.csv', 'fan_52.csv', 'sadlamp_63.csv', 'laptop_38.csv', 'laptop_68.csv', 'sadlamp_64.csv', 'none_92.csv', 'fan_48.csv', 'fan_46.csv', 'kettle_77.csv', 'monitor_20.csv', 'sadlamp_66.csv', 'sadlamp_92.csv', 'kettle_98.csv', 'laptop_97.csv', 'none_85.csv', 'fan_16.csv', 'cell_34.csv', 'none_41.csv', 'desklamp_52.csv', 'sadlamp_77.csv', 'kettle_67.csv', 'monitor_64.csv', 'none_14.csv', 'fan_27.csv', 'cell_96.csv', 'desklamp_43.csv', 'monitor_19.csv', 'none_46.csv', 'monitor_74.csv', 'fan_51.csv', 'cell_89.csv', 'sadlamp_31.csv', 'desklamp_76.csv', 'kettle_30.csv', 'fan_10.csv', 'kettle_39.csv', 'fan_23.csv', 'fan_85.csv', 'fan_95.csv', 'desklamp_1.csv', 'desklamp_37.csv', 'kettle_72.csv', 'sadlamp_30.csv', 'cell_73.csv', 'desklamp_16.csv', 'sadlamp_25.csv', 'desklamp_81.csv', 'none_80.csv', 'laptop_28.csv', 'monitor_90.csv', 'laptop_96.csv', 'sadlamp_7.csv', 'cell_74.csv', 'fan_84.csv', 'none_16.csv', 'kettle_40.csv', 'cell_23.csv', 'laptop_19.csv', 'monitor_3.csv', 'cell_2.csv', 'monitor_34.csv', 'sadlamp_8.csv', 'none_48.csv', 'sadlamp_27.csv', 'laptop_37.csv', 'none_93.csv', 'desklamp_17.csv', 'fan_32.csv', 'none_66.csv', 'sadlamp_75.csv', 'monitor_4.csv', 'none_65.csv', 'fan_18.csv', 'kettle_86.csv', 'sadlamp_53.csv', 'cell_6.csv', 'sadlamp_99.csv', 'desklamp_54.csv', 'monitor_7.csv', 'sadlamp_70.csv', 'fan_17.csv', 'fan_35.csv', 'none_67.csv', 'kettle_74.csv', 'laptop_65.csv', 'kettle_26.csv', 'laptop_78.csv', 'kettle_33.csv', 'none_72.csv', 'none_12.csv', 'sadlamp_91.csv', 'kettle_62.csv', 'sadlamp_48.csv', 'kettle_96.csv', 'fan_61.csv', 'sadlamp_5.csv', 'none_96.csv', 'kettle_16.csv', 'none_36.csv', 'none_77.csv', 'fan_69.csv', 'sadlamp_88.csv', 'fan_99.csv', 'desklamp_4.csv', 'kettle_57.csv', 'cell_92.csv']\ndataset_data shape: (800, 40, 8)\n"
],
[
"#--------------------PREPROCESSING & LABEL ENCODING--------------------\n\nmin_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1)) # define range for scale operation\n\n# Iterate through dataset and normalize each data sample\nscaled_dataset = []\nfor i,sample in enumerate(dataset_data_np):\n app_arr_normalized = min_max_scaler.fit_transform(np.array(sample)) #must scale when in 2D array form\n scaled_dataset.append(app_arr_normalized)\n \n# Convert normalized dataset to numpy\nscaled_dataset = np.array(scaled_dataset)\nprint(\"scaled_dataset.shape: \",scaled_dataset.shape)\n\n# Create label encoding for converting from string labels to integer\nle = preprocessing.LabelEncoder() #https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html\n# Fit label encoding to sample labels\nle.fit(dataset_labels_str)\nkey = list(le.classes_)\n\n# Apply label encoding to labels\ndataset_labels_int = le.transform(dataset_labels_str)\nprint(dataset_labels_int[:15])\nprint(dataset_labels_str[:15])\n\n\n# Create dictionary of label encoding\nencoding = {}\nprint('key:',key)\nfor i in key:\n translate = le.transform([i])\n encoding[int(translate)] = str(i)\n\nprint(\"\\nappliance_dict = \",encoding)\n",
"scaled_dataset.shape: (800, 40, 8)\n[0 7 0 3 4 5 1 4 5 2 6 4 0 5 5]\n['cell', 'sadlamp', 'cell', 'kettle', 'laptop', 'monitor', 'desklamp', 'laptop', 'monitor', 'fan', 'none', 'laptop', 'cell', 'monitor', 'monitor']\nkey: ['cell', 'desklamp', 'fan', 'kettle', 'laptop', 'monitor', 'none', 'sadlamp']\n\nappliance_dict = {0: 'cell', 1: 'desklamp', 2: 'fan', 3: 'kettle', 4: 'laptop', 5: 'monitor', 6: 'none', 7: 'sadlamp'}\n"
],
[
"##converts to H,W,D from H,W because tensorflow works best with image formats (R,G,B)\ndef make3D(array,n): #adds channels to array to simulate an image\n reshape = np.stack((array,)*n, axis=-1)\n# print(\"array.shape\",array.shape,\"reshape.shape\",reshape.shape)\n return (np.stack((array,)*n, axis=-1))\n\n# Transform samples to be 3D for use in NN models\ndataset3D = []\nfor i,sample in enumerate(scaled_dataset):\n dim3 = make3D(np.array(sample),3) #must scale when in 2D array form\n dataset3D.append(dim3)\n \ndataset3D = np.array(dataset3D)\nprint('dataset before reshaping:',scaled_dataset.shape,'...reshaped to 3d array:',dataset3D.shape,'...individual array: ',dataset3D[0].shape)",
"dataset before reshaping: (800, 40, 8) ...reshaped to 3d array: (800, 40, 8, 3) ...individual array: (40, 8, 3)\n"
],
[
"# # -- Split data into training and test subsets\ndata_train, data_test, labels_train, labels_test = train_test_split(dataset3D, dataset_labels_int, test_size=0.30)#, random_state=42) #initialize random_state to get same result every time\nprint('Train data shape:',data_train.shape,'...# of train labels:',len(labels_train),'... unique train labels:',set(labels_train))\nprint('Test data shape:',data_test.shape,'...# of test labels:',len(labels_test),'... unique test labels:',set(labels_test))\n\nnum_labels = len(set(labels_train))\nprint(num_labels,'unique labels created')\n",
"Train data shape: (560, 40, 8, 3) ...# of train labels: 560 ... unique train labels: {0, 1, 2, 3, 4, 5, 6, 7}\nTest data shape: (240, 40, 8, 3) ...# of test labels: 240 ... unique test labels: {0, 1, 2, 3, 4, 5, 6, 7}\n8 unique labels created\n"
],
[
"# Convert data to float32 datatype and labels to int64 datatype.\ntrain_data = tf.cast(data_train, tf.float32)\ntrain_labels = tf.cast(labels_train, tf.int64)\ntest_data = tf.cast(data_test, tf.float32)\ntest_labels = tf.cast(labels_test, tf.int64)\n\n# ready to create tensorflow dataset\ntrain_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels))\ntest_dataset = tf.data.Dataset.from_tensor_slices((test_data, test_labels))\n\n# shuffle our training data and batch it so its more efficient.\ntrain_dataset = train_dataset.shuffle(20).batch(50)\ntest_dataset = test_dataset.shuffle(20).batch(50)\n\n# # # -- Reshape train data for use in model\n# # nsamples, nx, ny,_ = data_train.shape\n# # train_data_2d = data_train.reshape((nsamples,nx*ny))\n\n# # # -- Reshape test data for use in model\n# # nsamples, nx, ny,_ = data_test.shape\n# # test_data_2d = data_test.reshape((nsamples,nx*ny))",
"_____no_output_____"
]
],
[
[
"------------------------------------------------------------------\n# BASELINE NEURAL NETWORK MODEL \n-----------------------------------------------------------------",
"_____no_output_____"
]
],
[
[
"model = tf.keras.Sequential() \nmodel.add(tf.keras.layers.Flatten(input_shape=[40,8,3]))\nmodel.add(tf.keras.layers.Dense(num_labels*16, activation='relu'))\nmodel.add(tf.keras.layers.Dense(num_labels, activation='softmax'))\n\nmodel.compile(optimizer='adam',loss='sparse_categorical_crossentropy', metrics=['accuracy','sparse_categorical_crossentropy'])\n\nmodel.summary()\nprint(\"\\n\\n\")\n# Train the model, keras does not need datasets to function, can just take raw numpy \nhistory = model.fit(data_train, \n labels_train, \n epochs=20,\n batch_size=10,\n validation_data=(data_test,labels_test),\n callbacks=[cp_callback])",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nflatten (Flatten) (None, 960) 0 \n_________________________________________________________________\ndense (Dense) (None, 128) 123008 \n_________________________________________________________________\ndense_1 (Dense) (None, 8) 1032 \n=================================================================\nTotal params: 124,040\nTrainable params: 124,040\nNon-trainable params: 0\n_________________________________________________________________\n\n\n\nTrain on 560 samples, validate on 240 samples\nEpoch 1/20\n560/560 [==============================] - 1s 2ms/sample - loss: 1.0244 - acc: 0.7036 - sparse_categorical_crossentropy: 1.0244 - val_loss: 0.4520 - val_acc: 0.8708 - val_sparse_categorical_crossentropy: 0.4520\nEpoch 2/20\n560/560 [==============================] - 0s 695us/sample - loss: 0.3123 - acc: 0.9125 - sparse_categorical_crossentropy: 0.3123 - val_loss: 0.2410 - val_acc: 0.9708 - val_sparse_categorical_crossentropy: 0.2410\nEpoch 3/20\n560/560 [==============================] - 0s 640us/sample - loss: 0.2254 - acc: 0.9232 - sparse_categorical_crossentropy: 0.2254 - val_loss: 0.2151 - val_acc: 0.8875 - val_sparse_categorical_crossentropy: 0.2151\nEpoch 4/20\n560/560 [==============================] - 0s 654us/sample - loss: 0.1656 - acc: 0.9393 - sparse_categorical_crossentropy: 0.1656 - val_loss: 0.1720 - val_acc: 0.9833 - val_sparse_categorical_crossentropy: 0.1720\nEpoch 5/20\n560/560 [==============================] - 0s 686us/sample - loss: 0.1298 - acc: 0.9732 - sparse_categorical_crossentropy: 0.1298 - val_loss: 0.1325 - val_acc: 0.9833 - val_sparse_categorical_crossentropy: 0.1325\nEpoch 6/20\n560/560 [==============================] - 0s 652us/sample - loss: 0.1071 - acc: 0.9679 - sparse_categorical_crossentropy: 0.1071 - val_loss: 0.1304 - val_acc: 0.9583 - val_sparse_categorical_crossentropy: 0.1304\nEpoch 7/20\n560/560 [==============================] - 0s 678us/sample - loss: 0.0879 - acc: 0.9804 - sparse_categorical_crossentropy: 0.0879 - val_loss: 0.1004 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.1004\nEpoch 8/20\n560/560 [==============================] - 0s 632us/sample - loss: 0.1470 - acc: 0.9411 - sparse_categorical_crossentropy: 0.1470 - val_loss: 0.1281 - val_acc: 0.9542 - val_sparse_categorical_crossentropy: 0.1281\nEpoch 9/20\n560/560 [==============================] - 0s 618us/sample - loss: 0.0812 - acc: 0.9714 - sparse_categorical_crossentropy: 0.0812 - val_loss: 0.0976 - val_acc: 0.9833 - val_sparse_categorical_crossentropy: 0.0976\nEpoch 10/20\n560/560 [==============================] - 0s 658us/sample - loss: 0.0560 - acc: 0.9964 - sparse_categorical_crossentropy: 0.0560 - val_loss: 0.0706 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0706\nEpoch 11/20\n560/560 [==============================] - 0s 603us/sample - loss: 0.0450 - acc: 0.9964 - sparse_categorical_crossentropy: 0.0450 - val_loss: 0.0741 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.0741\nEpoch 12/20\n560/560 [==============================] - 0s 606us/sample - loss: 0.0532 - acc: 0.9857 
- sparse_categorical_crossentropy: 0.0532 - val_loss: 0.0855 - val_acc: 0.9708 - val_sparse_categorical_crossentropy: 0.0855\nEpoch 13/20\n560/560 [==============================] - 0s 630us/sample - loss: 0.0336 - acc: 0.9982 - sparse_categorical_crossentropy: 0.0336 - val_loss: 0.0600 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.0600\nEpoch 14/20\n560/560 [==============================] - 0s 608us/sample - loss: 0.0289 - acc: 0.9964 - sparse_categorical_crossentropy: 0.0289 - val_loss: 0.0485 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0485\nEpoch 15/20\n560/560 [==============================] - 0s 628us/sample - loss: 0.0252 - acc: 0.9982 - sparse_categorical_crossentropy: 0.0252 - val_loss: 0.0823 - val_acc: 0.9625 - val_sparse_categorical_crossentropy: 0.0823\nEpoch 16/20\n560/560 [==============================] - 0s 627us/sample - loss: 0.0205 - acc: 0.9964 - sparse_categorical_crossentropy: 0.0205 - val_loss: 0.0611 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.0611\nEpoch 17/20\n560/560 [==============================] - 0s 597us/sample - loss: 0.0197 - acc: 0.9982 - sparse_categorical_crossentropy: 0.0197 - val_loss: 0.0602 - val_acc: 0.9833 - val_sparse_categorical_crossentropy: 0.0602\nEpoch 18/20\n560/560 [==============================] - 0s 612us/sample - loss: 0.0227 - acc: 0.9982 - sparse_categorical_crossentropy: 0.0227 - val_loss: 0.0373 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0373\nEpoch 19/20\n560/560 [==============================] - 0s 668us/sample - loss: 0.0182 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0182 - val_loss: 0.0457 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.0457\nEpoch 20/20\n560/560 [==============================] - 0s 602us/sample - loss: 0.0193 - acc: 0.9982 - sparse_categorical_crossentropy: 0.0193 - val_loss: 0.0332 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0332\n"
]
],
[
[
"------------------------------------------------------------------\n# DATA AUGMENTED MODEL\n-----------------------------------------------------------------",
"_____no_output_____"
]
],
[
[
"from random import randint\nimport cv2\n\ndef warp_shrink(feat):\n \"\"\"\n Takes in a feature in the form of a numpy array, takes a random sample from the array and shrinks it by half.\n The size of the random sample is equal to 1/10 of the size of the source array. Replaces sample with pair-wise average of sample indices.\n Approach inspired from this paper: https://aaltd16.irisa.fr/files/2016/08/AALTD16_paper_9.pdf\n Parameters: feat - feature to be warped in form of numpy array\n Returns: Feature array with sample shrank\n \"\"\"\n sample_range = feat.shape[0]\n\n #Calculate length of sample\n sample_tenth = int(int(sample_range)*.1)\n\n #Shift sample length to be even to allow for pair-wise averages\n if sample_tenth % 2 == 1:\n sample_tenth += 1\n\n # Generate random index for start of sample\n sample_index = randint(0, sample_range - (sample_tenth))\n\n #Shift sample index to be even to allow for pair-wise averages\n if sample_index % 2 == 1:\n sample_index += 1\n\n #Subset feature array into sample\n sample = feat[sample_index:int(sample_index+sample_tenth)]\n\n avg_arr = []\n for i in range(0, sample_tenth, 2): #iterate thru pairs of elements in sample, calculating average\n avg = (sample[i]+sample[i+1])/2\n avg_arr.append(avg)\n\n avg_arr = np.array(avg_arr)\n\n # Replace sample with shrunk average of samples\n feat_before = feat[:sample_index]\n feat_after = feat[int(sample_index+sample_tenth):]\n feat_transformed = np.concatenate([feat_before,avg_arr,feat_after])\n\n return feat_transformed \n \ndef warp_stretch(feat):\n \"\"\"\n Takes in a feature in the form of a numpy array, takes a random sample from the array and shrinks it by half.\n The size of the random sample is equal to 1/10 of the size of the source array. Appends pair-wise average of every other sample index.\n Approach inspired from this paper: https://aaltd16.irisa.fr/files/2016/08/AALTD16_paper_9.pdf\n Parameters: feat - feature to be warped in form of numpy array\n Returns: Feature array with sample stretched\n \"\"\"\n\n sample_range = feat.shape[0]\n\n # Calculate length of sample\n sample_tenth = int(int(sample_range)*.1)\n\n # Shift sample length to be even to allow for pair-wise averages\n if sample_tenth % 2 == 1:\n sample_tenth += 1\n\n # Generate random index for start of sample\n sample_index = randint(0, sample_range - (sample_tenth))\n\n # Shift sample index to be even to allow for pair-wise averages\n if sample_index % 2 == 1:\n sample_index += 1\n\n # Subset feature array into sample\n sample = feat[sample_index:int(sample_index + sample_tenth)]\n\n stretch_arr = []\n for i in range(0, sample_tenth,2): #Iterate thru each pair of elements in the sample\n if (i > 0 & i <= sample_tenth-1): #Get average of previous index and current index\n avg_prev = (sample[i-1]+sample[i])/2\n stretch_arr.append(avg_prev)\n avg = (sample[i]+sample[i+1])/2 #Get average of current index\n stretch_arr.append(sample[i])\n stretch_arr.append(avg)\n stretch_arr.append(sample[i+1])\n\n stretch_arr = np.array(stretch_arr)\n\n #Replace sample with stretched sample\n feat_before = feat[:sample_index]\n feat_after = feat[int(sample_index+sample_tenth):]\n feat_transformed = np.concatenate([feat_before,stretch_arr,feat_after])\n\n return feat_transformed \n\n\ndef warp_features(observation):\n \"\"\"\"\n Applies warp shrink/stretch at random to numpy array of feature arrays.\n Parameters: observation - numpy array of arrays\n Returns: warped numpy array of arrays\n \"\"\"\n new_obs = []\n observation_reshape,g,r = 
cv2.split(observation) #splitting into single channel and back again\n\n for feature in range(0,observation_reshape.shape[0]): #Iterates thru each feature array \n observation_feature = observation_reshape[feature]\n if (randint(0,1) > 0):\n warped_feature = warp_stretch(observation_feature) # applies stretch randomly\n else:\n warped_feature = warp_shrink(observation_feature) # else applies shrink\n new_obs.append(warped_feature)\n\n np_array = np.asarray(new_obs)\n\n for i,j in enumerate(np_array):\n diff_list = []\n for k,l in enumerate(j):\n diff_list.append(l-observation_reshape[i][k])\n array_3D = make3D(np_array,3)\n\n return array_3D \n\n#test the warp/shrink outputs the right shapes\nwarped_data = warp_features(data_train[0])\nprint(\"observation.shape\",data_train[0].shape, \"warped_data.shape\",warped_data.shape)\n",
"observation.shape (40, 8, 3) warped_data.shape (40, 8, 3)\n"
],
[
"# #DATA AUGMENTATION\n\n# preprocessing_function: function that will be implied on each input. \n# The function will run after the image is resized and augmented. \n# The function should take one argument: one image (Numpy tensor with rank 3), \n# and should output a Numpy tensor with the same shape.\n\ndef apply_noise(array):#preprocessing_function...apply gaussian noise\n return array #+ noise\n \n# train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=apply_noise)\ntrain_datagen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=warp_features)\ntest_datagen = tf.keras.preprocessing.image.ImageDataGenerator()\n\nlabels_train_np_1d = make3D(np.array(labels_train),1) #must have an additional channel for generator function\nlabels_test_np_1d = make3D(np.array(labels_test),1) #must have an additional channel for generator function\n\nprint('train:',data_train.shape,type(data_train),'labels:',labels_train_np_1d.shape,type(labels_train_np_1d))\nprint('test:',data_test.shape,type(data_test),'labels:',labels_test_np_1d.shape,type(labels_test_np_1d))\n\ntrain_generator = train_datagen.flow(\n# train_data, #tensor w/ added axis #TypeError: unsupported operand type(s) for /: 'Dimension' and 'int'\n# train_dataset, #(Tensor dataset input): TypeError: object of type 'DatasetV1Adapter' has no len()\n# data_train, #(NP input) ValueError: ('Input data in `NumpyArrayIterator` should have rank 4. You passed an array with shape', (144, 50, 8))\n data_train, #(NP input) ValueError: ('Input data in `NumpyArrayIterator` should have rank 4. You passed an array with shape', (144, 50, 8))\n labels_train_np_1d,\n# train_labels,\n# dataset_data_np, #ValueError: ('Input data in `NumpyArrayIterator` should have rank 4. You passed an array with shape', (161, 50, 8))\n# dataset_labels_int,\n batch_size=10)\n\ntest_generator = test_datagen.flow(\n data_test, #(NP input required)\n labels_test_np_1d,\n batch_size=10)",
"train: (560, 40, 8, 3) <class 'numpy.ndarray'> labels: (560, 1) <class 'numpy.ndarray'>\ntest: (240, 40, 8, 3) <class 'numpy.ndarray'> labels: (240, 1) <class 'numpy.ndarray'>\n"
],
[
"aug_model = tf.keras.Sequential() # Use keras sequential layers to build up a model.\n# aug_model.add(tf.keras.layers.Flatten(input_shape=[50,8]))\naug_model.add(tf.keras.layers.Flatten(input_shape=[40,8,3]))\naug_model.add(tf.keras.layers.Dense(num_labels*16, activation='relu'))\naug_model.add(tf.keras.layers.Dense(num_labels, activation='softmax'))\n\naug_model.compile(optimizer='adam',loss='sparse_categorical_crossentropy', metrics=['accuracy','sparse_categorical_crossentropy'])\naug_model.summary()\n\nprint('--------------------------SHAPES--------------------------')\nprint(\"\\n\\n\")\nprint('train:',data_train.shape,type(data_train),'labels:',labels_train_np_1d.shape,type(labels_train_np_1d))\nprint('test:',data_test.shape,type(data_test),'labels:',labels_test_np_1d.shape,type(labels_test_np_1d))\nprint(\"\\n\\n\")\n\naug_history = aug_model.fit_generator(train_generator, validation_data=test_generator, epochs=20)",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nflatten_1 (Flatten) (None, 960) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 128) 123008 \n_________________________________________________________________\ndense_3 (Dense) (None, 8) 1032 \n=================================================================\nTotal params: 124,040\nTrainable params: 124,040\nNon-trainable params: 0\n_________________________________________________________________\n--------------------------SHAPES--------------------------\n\n\n\ntrain: (560, 40, 8, 3) <class 'numpy.ndarray'> labels: (560, 1) <class 'numpy.ndarray'>\ntest: (240, 40, 8, 3) <class 'numpy.ndarray'> labels: (240, 1) <class 'numpy.ndarray'>\n\n\n\nEpoch 1/20\n24/24 [==============================] - 0s 8ms/step - loss: 0.4104 - acc: 0.9625 - sparse_categorical_crossentropy: 0.4104\n56/56 [==============================] - 1s 20ms/step - loss: 1.0319 - acc: 0.6732 - sparse_categorical_crossentropy: 1.0319 - val_loss: 0.4104 - val_acc: 0.9625 - val_sparse_categorical_crossentropy: 0.4104\nEpoch 2/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.6035 - acc: 0.8458 - sparse_categorical_crossentropy: 0.6035\n56/56 [==============================] - 1s 16ms/step - loss: 0.3209 - acc: 0.9071 - sparse_categorical_crossentropy: 0.3209 - val_loss: 0.6035 - val_acc: 0.8458 - val_sparse_categorical_crossentropy: 0.6035\nEpoch 3/20\n24/24 [==============================] - 0s 3ms/step - loss: 0.1953 - acc: 0.9250 - sparse_categorical_crossentropy: 0.1953\n56/56 [==============================] - 1s 17ms/step - loss: 0.2338 - acc: 0.9089 - sparse_categorical_crossentropy: 0.2338 - val_loss: 0.1953 - val_acc: 0.9250 - val_sparse_categorical_crossentropy: 0.1953\nEpoch 4/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.2682 - acc: 0.8750 - sparse_categorical_crossentropy: 0.2682\n56/56 [==============================] - 1s 16ms/step - loss: 0.2061 - acc: 0.9268 - sparse_categorical_crossentropy: 0.2061 - val_loss: 0.2682 - val_acc: 0.8750 - val_sparse_categorical_crossentropy: 0.2682\nEpoch 5/20\n24/24 [==============================] - 0s 3ms/step - loss: 0.1497 - acc: 0.9833 - sparse_categorical_crossentropy: 0.1497\n56/56 [==============================] - 1s 17ms/step - loss: 0.1481 - acc: 0.9518 - sparse_categorical_crossentropy: 0.1481 - val_loss: 0.1497 - val_acc: 0.9833 - val_sparse_categorical_crossentropy: 0.1497\nEpoch 6/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.1465 - acc: 0.9333 - sparse_categorical_crossentropy: 0.1465\n56/56 [==============================] - 1s 16ms/step - loss: 0.1232 - acc: 0.9607 - sparse_categorical_crossentropy: 0.1232 - val_loss: 0.1465 - val_acc: 0.9333 - val_sparse_categorical_crossentropy: 0.1465\nEpoch 7/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.1012 - acc: 0.9875 - sparse_categorical_crossentropy: 0.1012\n56/56 [==============================] - 1s 16ms/step - loss: 0.0848 - acc: 0.9929 - sparse_categorical_crossentropy: 0.0848 - val_loss: 0.1012 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.1012\nEpoch 8/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.0916 - acc: 0.9792 - sparse_categorical_crossentropy: 0.0916\n56/56 [==============================] - 1s 16ms/step - loss: 0.0776 - acc: 0.9839 - sparse_categorical_crossentropy: 0.0776 - 
val_loss: 0.0916 - val_acc: 0.9792 - val_sparse_categorical_crossentropy: 0.0916\nEpoch 9/20\n24/24 [==============================] - 0s 5ms/step - loss: 0.1520 - acc: 0.9167 - sparse_categorical_crossentropy: 0.1520\n56/56 [==============================] - 1s 16ms/step - loss: 0.1069 - acc: 0.9446 - sparse_categorical_crossentropy: 0.1069 - val_loss: 0.1520 - val_acc: 0.9167 - val_sparse_categorical_crossentropy: 0.1520\nEpoch 10/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.1010 - acc: 0.9708 - sparse_categorical_crossentropy: 0.1010\n56/56 [==============================] - 1s 16ms/step - loss: 0.0702 - acc: 0.9768 - sparse_categorical_crossentropy: 0.0702 - val_loss: 0.1010 - val_acc: 0.9708 - val_sparse_categorical_crossentropy: 0.1010\nEpoch 11/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.0803 - acc: 0.9750 - sparse_categorical_crossentropy: 0.0803\n56/56 [==============================] - 1s 16ms/step - loss: 0.0524 - acc: 0.9839 - sparse_categorical_crossentropy: 0.0524 - val_loss: 0.0803 - val_acc: 0.9750 - val_sparse_categorical_crossentropy: 0.0803\nEpoch 12/20\n24/24 [==============================] - 0s 3ms/step - loss: 0.0681 - acc: 0.9875 - sparse_categorical_crossentropy: 0.0681\n56/56 [==============================] - 1s 16ms/step - loss: 0.0397 - acc: 0.9946 - sparse_categorical_crossentropy: 0.0397 - val_loss: 0.0681 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.0681\nEpoch 13/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.0928 - acc: 0.9625 - sparse_categorical_crossentropy: 0.0928\n56/56 [==============================] - 1s 16ms/step - loss: 0.0386 - acc: 0.9946 - sparse_categorical_crossentropy: 0.0386 - val_loss: 0.0928 - val_acc: 0.9625 - val_sparse_categorical_crossentropy: 0.0928\nEpoch 14/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.1972 - acc: 0.8958 - sparse_categorical_crossentropy: 0.1972\n56/56 [==============================] - 1s 17ms/step - loss: 0.0510 - acc: 0.9786 - sparse_categorical_crossentropy: 0.0510 - val_loss: 0.1972 - val_acc: 0.8958 - val_sparse_categorical_crossentropy: 0.1972\nEpoch 15/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.0550 - acc: 0.9833 - sparse_categorical_crossentropy: 0.0550\n56/56 [==============================] - 1s 16ms/step - loss: 0.0449 - acc: 0.9821 - sparse_categorical_crossentropy: 0.0449 - val_loss: 0.0550 - val_acc: 0.9833 - val_sparse_categorical_crossentropy: 0.0550\nEpoch 16/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.0523 - acc: 0.9875 - sparse_categorical_crossentropy: 0.0523\n56/56 [==============================] - 1s 16ms/step - loss: 0.0244 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0244 - val_loss: 0.0523 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.0523\nEpoch 17/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.0479 - acc: 0.9875 - sparse_categorical_crossentropy: 0.0479\n56/56 [==============================] - 1s 17ms/step - loss: 0.0246 - acc: 0.9982 - sparse_categorical_crossentropy: 0.0246 - val_loss: 0.0479 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.0479\nEpoch 18/20\n24/24 [==============================] - 0s 3ms/step - loss: 0.0472 - acc: 0.9875 - sparse_categorical_crossentropy: 0.0472\n56/56 [==============================] - 1s 16ms/step - loss: 0.0228 - acc: 0.9982 - sparse_categorical_crossentropy: 0.0228 - val_loss: 0.0472 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 
0.0472\nEpoch 19/20\n24/24 [==============================] - 0s 4ms/step - loss: 0.0479 - acc: 0.9875 - sparse_categorical_crossentropy: 0.0479\n56/56 [==============================] - 1s 17ms/step - loss: 0.0164 - acc: 0.9982 - sparse_categorical_crossentropy: 0.0164 - val_loss: 0.0479 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.0479\nEpoch 20/20\n24/24 [==============================] - 0s 3ms/step - loss: 0.0379 - acc: 0.9917 - sparse_categorical_crossentropy: 0.0379\n56/56 [==============================] - 1s 16ms/step - loss: 0.0277 - acc: 0.9911 - sparse_categorical_crossentropy: 0.0277 - val_loss: 0.0379 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0379\n"
]
],
[
[
"------------------------------------------------------------------\n# MORE COMPLEX MODEL\n-----------------------------------------------------------------",
"_____no_output_____"
]
],
[
[
"complex_model = tf.keras.Sequential()\ncomplex_model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same', activation='relu', input_shape=[40,8,3]))\ncomplex_model.add(tf.keras.layers.BatchNormalization())\ncomplex_model.add(tf.keras.layers.MaxPooling2D(pool_size=2))\ncomplex_model.add(tf.keras.layers.Dropout(0.3))\ncomplex_model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))\ncomplex_model.add(tf.keras.layers.BatchNormalization())\ncomplex_model.add(tf.keras.layers.MaxPooling2D(pool_size=2))\ncomplex_model.add(tf.keras.layers.Dropout(0.3))\ncomplex_model.add(tf.keras.layers.Flatten())\ncomplex_model.add(tf.keras.layers.Dense(64, activation='relu'))\ncomplex_model.add(tf.keras.layers.BatchNormalization())\ncomplex_model.add(tf.keras.layers.Dropout(0.5))\ncomplex_model.add(tf.keras.layers.Dense(num_labels, activation='softmax'))\ncomplex_model.add(tf.keras.layers.BatchNormalization())\ncomplex_model.add(tf.keras.layers.Activation('softmax'))\n\ncomplex_model.summary()\nprint(\"\\n\\n\")\n\ncomplex_model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy','sparse_categorical_crossentropy'])\n\ncomplex_history = model.fit(data_train, labels_train, epochs=20,batch_size=10,validation_data=(data_test,labels_test))\n",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/layers/core.py:143: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 40, 8, 64) 832 \n_________________________________________________________________\nbatch_normalization_v1 (Batc (None, 40, 8, 64) 256 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 20, 4, 64) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 20, 4, 64) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 20, 4, 32) 8224 \n_________________________________________________________________\nbatch_normalization_v1_1 (Ba (None, 20, 4, 32) 128 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 10, 2, 32) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 10, 2, 32) 0 \n_________________________________________________________________\nflatten_2 (Flatten) (None, 640) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 64) 41024 \n_________________________________________________________________\nbatch_normalization_v1_2 (Ba (None, 64) 256 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_5 (Dense) (None, 8) 520 \n_________________________________________________________________\nbatch_normalization_v1_3 (Ba (None, 8) 32 \n_________________________________________________________________\nactivation (Activation) (None, 8) 0 \n=================================================================\nTotal params: 51,272\nTrainable params: 50,936\nNon-trainable params: 336\n_________________________________________________________________\n\n\n\nTrain on 560 samples, validate on 240 samples\nEpoch 1/20\n560/560 [==============================] - 0s 597us/sample - loss: 0.0136 - acc: 0.9982 - sparse_categorical_crossentropy: 0.0136 - val_loss: 0.0341 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0341\nEpoch 2/20\n560/560 [==============================] - 0s 550us/sample - loss: 0.0702 - acc: 0.9643 - sparse_categorical_crossentropy: 0.0702 - val_loss: 0.0447 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.0447\nEpoch 3/20\n560/560 [==============================] - 0s 549us/sample - loss: 0.0135 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0135 - val_loss: 0.0264 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0264\nEpoch 4/20\n560/560 [==============================] - 0s 556us/sample - loss: 0.0108 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0108 - val_loss: 0.0379 - val_acc: 0.9875 - val_sparse_categorical_crossentropy: 0.0379\nEpoch 5/20\n560/560 [==============================] - 0s 573us/sample - loss: 0.0088 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0088 - val_loss: 0.0283 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0283\nEpoch 6/20\n560/560 [==============================] - 0s 
569us/sample - loss: 0.0079 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0079 - val_loss: 0.0239 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0239\nEpoch 7/20\n560/560 [==============================] - 0s 561us/sample - loss: 0.0130 - acc: 0.9982 - sparse_categorical_crossentropy: 0.0130 - val_loss: 0.0257 - val_acc: 0.9958 - val_sparse_categorical_crossentropy: 0.0257\nEpoch 8/20\n560/560 [==============================] - 0s 570us/sample - loss: 0.0050 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0050 - val_loss: 0.0332 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0332\nEpoch 9/20\n560/560 [==============================] - 0s 546us/sample - loss: 0.0054 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0054 - val_loss: 0.0214 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0214\nEpoch 10/20\n560/560 [==============================] - 0s 572us/sample - loss: 0.0051 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0051 - val_loss: 0.0230 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0230\nEpoch 11/20\n560/560 [==============================] - 0s 581us/sample - loss: 0.0045 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0045 - val_loss: 0.0243 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0243\nEpoch 12/20\n560/560 [==============================] - 0s 566us/sample - loss: 0.0055 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0055 - val_loss: 0.0214 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0214\nEpoch 13/20\n560/560 [==============================] - 0s 548us/sample - loss: 0.0054 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0054 - val_loss: 0.0212 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0212\nEpoch 14/20\n560/560 [==============================] - 0s 593us/sample - loss: 0.0035 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0035 - val_loss: 0.0197 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0197\nEpoch 15/20\n560/560 [==============================] - 0s 559us/sample - loss: 0.0045 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0045 - val_loss: 0.0227 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0227\nEpoch 16/20\n560/560 [==============================] - 0s 555us/sample - loss: 0.0031 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0031 - val_loss: 0.0267 - val_acc: 0.9958 - val_sparse_categorical_crossentropy: 0.0267\nEpoch 17/20\n560/560 [==============================] - 0s 594us/sample - loss: 0.0035 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0035 - val_loss: 0.0179 - val_acc: 0.9958 - val_sparse_categorical_crossentropy: 0.0179\nEpoch 18/20\n560/560 [==============================] - 0s 546us/sample - loss: 0.0045 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0045 - val_loss: 0.0190 - val_acc: 0.9958 - val_sparse_categorical_crossentropy: 0.0190\nEpoch 19/20\n560/560 [==============================] - 0s 554us/sample - loss: 0.0035 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0035 - val_loss: 0.0292 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0292\nEpoch 20/20\n560/560 [==============================] - 0s 561us/sample - loss: 0.0048 - acc: 1.0000 - sparse_categorical_crossentropy: 0.0048 - val_loss: 0.0193 - val_acc: 0.9917 - val_sparse_categorical_crossentropy: 0.0193\n"
]
],
[
[
"------------------------------------------------------------------\n# MODEL COMPARISON\n-----------------------------------------------------------------",
"_____no_output_____"
]
],
[
[
"def plot_history(histories, key='sparse_categorical_crossentropy'):\n \n plt.figure(figsize=(16,10))\n \n for name, history in histories:\n val = plt.plot(history.epoch, history.history['val_'+key],'--', label=name.title()+' Val')\n plt.plot(history.epoch, history.history[key], color=val[0].get_color(),label=name.title()+' Train')\n plt.title(key)\n plt.xlabel('Epochs')\n plt.ylabel(key.replace('_',' ').title())\n plt.legend()\n\n plt.xlim([0,max(history.epoch)])",
"_____no_output_____"
],
[
"plot_history([('baseline', history),('augmented', aug_history),('complex', complex_history)], key='acc')\nplot_history([('baseline', history),('augmented', aug_history),('complex', complex_history)], key='sparse_categorical_crossentropy')\nplot_history([('baseline', history),('augmented', aug_history),('complex', complex_history)], key='loss')",
"_____no_output_____"
]
],
[
[
"## **Copy This To Your Classification Python Script:**",
"_____no_output_____"
]
],
[
[
"print('appliance_dict = ',encoding)",
"appliance_dict = {0: 'cell', 1: 'desklamp', 2: 'fan', 3: 'kettle', 4: 'laptop', 5: 'monitor', 6: 'none', 7: 'sadlamp'}\n"
]
],
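[
[
"A hedged sketch of how the `appliance_dict` printed above could be used from a separate classification script; the loaded model and the `sample` argument are assumptions, not values taken from this notebook.",
"_____no_output_____"
],
[
"# Hedged sketch: using the printed mapping in a stand-alone classification script.\nimport numpy as np\nimport tensorflow as tf\n\nappliance_dict = {0: 'cell', 1: 'desklamp', 2: 'fan', 3: 'kettle', 4: 'laptop', 5: 'monitor', 6: 'none', 7: 'sadlamp'}\n\ndef predict_appliance(trained_model, sample):\n    # `sample` is assumed to be a single example shaped like one row of data_test\n    probs = trained_model.predict(sample[np.newaxis, ...])\n    return appliance_dict[int(np.argmax(probs))]\n\n# example (assumed variables):\n# clf = tf.keras.models.load_model(checkpoint_path)\n# print(predict_appliance(clf, data_test[0]))",
"_____no_output_____"
]
],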
[
[
"------------------------------------------------------------------\n# VALIDATION\n-----------------------------------------------------------------",
"_____no_output_____"
]
],
[
[
"#Let's see what we get \nfor i,data in enumerate(data_test[:10]): #a sample\n data = data[tf.newaxis, ...]\n guess_list = model.predict(data)\n print('Index:',i,'... Actual:',labels_test[i],encoding.get(labels_test[i]),'... Guess:',np.argmax(guess_list),encoding.get(np.argmax(guess_list)))\n print('')",
"Index: 0 ... Actual: 4 laptop ... Guess: 4 laptop\n\nIndex: 1 ... Actual: 0 cell ... Guess: 4 laptop\n\nIndex: 2 ... Actual: 0 cell ... Guess: 0 cell\n\nIndex: 3 ... Actual: 7 sadlamp ... Guess: 7 sadlamp\n\nIndex: 4 ... Actual: 7 sadlamp ... Guess: 7 sadlamp\n\nIndex: 5 ... Actual: 3 kettle ... Guess: 3 kettle\n\nIndex: 6 ... Actual: 4 laptop ... Guess: 4 laptop\n\nIndex: 7 ... Actual: 4 laptop ... Guess: 4 laptop\n\nIndex: 8 ... Actual: 7 sadlamp ... Guess: 7 sadlamp\n\nIndex: 9 ... Actual: 2 fan ... Guess: 2 fan\n\n"
],
[
"# Let’s go ahead and load the model, this would be what we do when starting a new notebook.\nmodel = tf.keras.models.load_model(checkpoint_path)\nmodel.evaluate(data_test, labels_test) # Evaluate to make sure the accuracy is preserved.",
"240/240 [==============================] - 0s 363us/sample - loss: 0.0332 - acc: 0.9917 - sparse_categorical_crossentropy: 0.0332\n"
]
],
[
[
"#Confusion Matrix",
"_____no_output_____"
]
],
[
[
"#build label list for confusion matrix\npredictions = model.predict(data_test)\n\npred_index = []\nfor i,data in enumerate(predictions):\n pred_index.append(np.argmax(data))\n \nencoding_list = list(set(labels_train))\nprint('encoding list:',encoding_list)\n\nlabel_list = [] \nfor i in encoding_list:\n label_list.append(encoding.get(i))\n \nprint('label_list:', label_list)",
"encoding list: [0, 1, 2, 3, 4, 5, 6, 7]\nlabel_list: ['cell', 'desklamp', 'fan', 'kettle', 'laptop', 'monitor', 'none', 'sadlamp']\n"
],
[
"#confusion matrix\nconf = tf.math.confusion_matrix(\n labels_test,\n pred_index,\n num_classes=None,\n dtype=tf.dtypes.int32,\n name=None,\n weights=None)\n\n#heatmaps don't play well with tensor objects, must convert to np array\nsess = tf.Session() \nwith sess.as_default():\n conf_np = conf.eval()\n \n#heat map plotting\nsns.set(font_scale=1.2)\nfig, ax = plt.subplots(figsize=(12,10))\nsns.heatmap(conf_np, annot=True, fmt='d',\n xticklabels=label_list,\n yticklabels=label_list)\nplt.ylabel('Actual')\nplt.xlabel('Predicted')\nax.set_title('Confusion Matrix Heatmap For Baseline Model')\nplt.show()",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/confusion_matrix.py:193: to_int64 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/confusion_matrix.py:194: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ec8c50c4abd535dfbacba47029c53fe2620f0e52 | 122,174 | ipynb | Jupyter Notebook | Position.ipynb | LuanAdemi/VisualGo | 41d58899ea0b458bb8acf5587ef07286c83b8993 | [
"MIT"
] | 8 | 2021-01-25T18:43:54.000Z | 2022-02-14T19:08:46.000Z | Position.ipynb | LuanAdemi/VisualGo | 41d58899ea0b458bb8acf5587ef07286c83b8993 | [
"MIT"
] | null | null | null | Position.ipynb | LuanAdemi/VisualGo | 41d58899ea0b458bb8acf5587ef07286c83b8993 | [
"MIT"
] | null | null | null | 686.370787 | 117,652 | 0.950431 | [
[
[
"# Model Exploration 🔭 - Position",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nimport matplotlib.pyplot as plt\n\nimport os\n\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom torch.utils.data import DataLoader, Dataset\nimport torchvision.transforms as T\n\nfrom scripts.unet import UNet\nfrom scripts.transformers import MaskTransformer, PerspectiveTransformer, ThreasholdTransformer\nfrom scripts.dataloaders import PositionDataset\n\nfrom pytorch_model_summary import summary\n\nimport json",
"_____no_output_____"
],
[
"# loading our mask model\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = UNet(in_channels=3, n_classes=2, wf=5, depth=4, padding=True, up_mode='upsample')\nmodel.load_state_dict(torch.load(\"state_dicts/checkpoint.pth\"))\nmodel.eval()\nprint(summary(model, torch.zeros((1, 3, 128, 128)), show_input=False))",
"-------------------------------------------------------------------------\n Layer (type) Output Shape Param # Tr. Param #\n=========================================================================\n UNetConvBlock-1 [1, 32, 128, 128] 10,144 10,144\n UNetConvBlock-2 [1, 64, 64, 64] 55,424 55,424\n UNetConvBlock-3 [1, 128, 32, 32] 221,440 221,440\n UNetConvBlock-4 [1, 256, 16, 16] 885,248 885,248\n UNetUpBlock-5 [1, 128, 32, 32] 475,520 475,520\n UNetUpBlock-6 [1, 64, 64, 64] 118,976 118,976\n UNetUpBlock-7 [1, 32, 128, 128] 29,792 29,792\n Conv2d-8 [1, 2, 128, 128] 66 66\n=========================================================================\nTotal params: 1,796,610\nTrainable params: 1,796,610\nNon-trainable params: 0\n-------------------------------------------------------------------------\n"
],
[
"# load the data\npd = PositionDataset(\"data/goBoards/training/\", 128, model)",
"100%|██████████| 1000/1000 [00:39<00:00, 25.29it/s]\n"
],
[
"plt.imshow(pd[42][0].permute(1,2,0))",
"_____no_output_____"
],
[
"plt.imshow(pd[42][0])",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
ec8c5b1a2b6ef8c581f1fd8d649a06bc1529d0a8 | 16,424 | ipynb | Jupyter Notebook | experiments/data_generation/VAE/Create_Datasets.ipynb | Karson-Fye/CMUDarknet | 7a463160ee4d61964ffe034f90d3089042f214b1 | [
"MIT"
] | 1 | 2022-02-23T01:58:15.000Z | 2022-02-23T01:58:15.000Z | experiments/data_generation/VAE/Create_Datasets.ipynb | Karson-Fye/CMUDarknet | 7a463160ee4d61964ffe034f90d3089042f214b1 | [
"MIT"
] | null | null | null | experiments/data_generation/VAE/Create_Datasets.ipynb | Karson-Fye/CMUDarknet | 7a463160ee4d61964ffe034f90d3089042f214b1 | [
"MIT"
] | null | null | null | 48.591716 | 966 | 0.647772 | [
[
[
"import utilities as utils",
"/home/drake/miniconda3/lib/python3.8/site-packages/xgboost/compat.py:36: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n from pandas import MultiIndex, Int64Index\n"
],
[
"data_path_1: str = '../../../Data/phase2' \ndata_path_2: str = '../../../Data/phase1/' \n \n\ndata_set_1: list = [ 'vae_application_dataset.csv', 'vae_traffic_dataset.csv', ]\n\ndata_set_2: list = [ 'Traffic_type_seed.csv', 'Application_type_seed.csv' ] \n\n\nfile_path_1 = utils.get_file_path(data_path_1)\nfile_path_2 = utils.get_file_path(data_path_2)\nfile_set_1 : list = list(map(file_path_1, data_set_1))\nfile_set_2 : list = list(map(file_path_2, data_set_2))\n\nfile_set : list = file_set_1 + file_set_2 \ndata_set : list = data_set_1 + data_set_2 \ncurrent_job: int = 0\n\nutils.data_set = data_set\nutils.file_set = file_set",
"_____no_output_____"
],
[
"print(f'We will be using {len(file_set)} files:')\nutils.pretty(file_set)",
"We will be using 4 files:\n[ '../../../Data/phase2/vae_application_dataset.csv',\n '../../../Data/phase2/vae_traffic_dataset.csv',\n '../../../Data/phase1/Traffic_type_seed.csv',\n '../../../Data/phase1/Application_type_seed.csv']\n"
],
[
"vae_application_dataset_labels_50 = utils.examine_dataset(1)\nvae_traffic_dataset_100 = utils.examine_dataset(2)\nbaseline_traffic_seed = utils.examine_dataset(3)\nbaseline_application_seed = utils.examine_dataset(4)\n\nvae_traffic_dataset_100['Dataset'] = vae_traffic_dataset_100['Dataset'].drop(['Unnamed: 0'], axis = 1)\nvae_application_dataset_labels_50['Dataset'] = vae_application_dataset_labels_50['Dataset'].drop(['Unnamed: 0'], axis = 1)",
"Dataset 1/4: We now look at ../../../Data/phase2/vae_application_dataset.csv\n\n\nLoading Dataset: ../../../Data/phase2/vae_application_dataset.csv\n\tTo Dataset Cache: ./cache/vae_application_dataset.csv.pickle\n\n\n File:\t\t\t\t../../../Data/phase2/vae_application_dataset.csv \n Job Number:\t\t\t1\n Shape:\t\t\t\t(400000, 65)\n Samples:\t\t\t400000 \n Features:\t\t\t65\n \nDataset 2/4: We now look at ../../../Data/phase2/vae_traffic_dataset.csv\n\n\nLoading Dataset: ../../../Data/phase2/vae_traffic_dataset.csv\n\tTo Dataset Cache: ./cache/vae_traffic_dataset.csv.pickle\n\n\n File:\t\t\t\t../../../Data/phase2/vae_traffic_dataset.csv \n Job Number:\t\t\t2\n Shape:\t\t\t\t(300000, 65)\n Samples:\t\t\t300000 \n Features:\t\t\t65\n \nDataset 3/4: We now look at ../../../Data/phase1/Traffic_type_seed.csv\n\n\nLoading Dataset: ../../../Data/phase1/Traffic_type_seed.csv\n\tTo Dataset Cache: ./cache/Traffic_type_seed.csv.pickle\n\n\n File:\t\t\t\t../../../Data/phase1/Traffic_type_seed.csv \n Job Number:\t\t\t3\n Shape:\t\t\t\t(115670, 64)\n Samples:\t\t\t115670 \n Features:\t\t\t64\n \nDataset 4/4: We now look at ../../../Data/phase1/Application_type_seed.csv\n\n\nLoading Dataset: ../../../Data/phase1/Application_type_seed.csv\n\tTo Dataset Cache: ./cache/Application_type_seed.csv.pickle\n\n\n File:\t\t\t\t../../../Data/phase1/Application_type_seed.csv \n Job Number:\t\t\t4\n Shape:\t\t\t\t(113620, 64)\n Samples:\t\t\t113620 \n Features:\t\t\t64\n \n"
],
[
"def downsample(df: utils.pd.DataFrame, column_name: str, expected_sizes: dict) -> utils.pd.DataFrame():\n '''\n Function returns a new dataframe with the given column name and value\n '''\n new_dict = utils.pd.DataFrame()\n for item in df[column_name].unique():\n matching_values = df.loc[df[column_name] == item]\n if df[column_name].value_counts()[item] > expected_sizes[item]:\n new_dict = utils.pd.concat([new_dict, matching_values.sample(n = expected_sizes[item])])\n else:\n new_dict = utils.pd.concat([new_dict, matching_values])\n return new_dict\n\ndef random_sample(df: utils.pd.DataFrame, column_name: str, element_name: str, size : int) -> utils.pd.DataFrame():\n '''\n Function returns a new dataframe with the given column name and value\n '''\n new_df = df.loc[df[column_name] == element_name]\n return new_df.sample(n = size)",
"_____no_output_____"
]
],
[
[
"# Traffic Type Datasets Creation",
"_____no_output_____"
]
],
[
[
"expected_sizes = {\"Regular\" : 30000, \"VPN\" : 20000, \"Tor\": 10000}\nvae_balanced_traffic_labels_dataset_30_20_10 = downsample(baseline_traffic_seed['Dataset'], 'Traffic Type', expected_sizes)\nvae_balanced_traffic_labels_dataset_30_20_10 = utils.pd.concat([vae_balanced_traffic_labels_dataset_30_20_10, random_sample(vae_traffic_dataset_100['Dataset'], 'Traffic Type', 'Tor', 10000 - baseline_traffic_seed['Dataset']['Traffic Type'].value_counts()['Tor'])])",
"_____no_output_____"
],
[
"expected_sizes = {\"Regular\" : 92659, \"VPN\" : 92659, \"Tor\": 92659}\nvae_balanced_traffic_labels_dataset_equal = downsample(baseline_traffic_seed['Dataset'], 'Traffic Type', expected_sizes)\nvae_balanced_traffic_labels_dataset_equal = utils.pd.concat([vae_balanced_traffic_labels_dataset_equal, random_sample(vae_traffic_dataset_100['Dataset'], 'Traffic Type', 'VPN', 92659 - baseline_traffic_seed['Dataset']['Traffic Type'].value_counts()['VPN'])])\nvae_balanced_traffic_labels_dataset_equal = utils.pd.concat([vae_balanced_traffic_labels_dataset_equal, random_sample(vae_traffic_dataset_100['Dataset'], 'Traffic Type', 'Tor', 92659 - baseline_traffic_seed['Dataset']['Traffic Type'].value_counts()['Tor'])])",
"_____no_output_____"
]
],
[
[
"# Application Types Data Creation",
"_____no_output_____"
]
],
[
[
"expected_sizes = {\"p2p\" : 30000, \"browsing\" : 30000, \"audio-streaming\": 30000, 'file-transfer' : 30000, 'chat': 30000, 'video-streaming': 30000, 'voip': 30000, 'email': 30000}\nvae_balanced_application_dataset_labels_30_30_30 = downsample(baseline_application_seed['Dataset'], 'Application Type', expected_sizes)\nvae_balanced_application_dataset_labels_30_30_30 = utils.pd.concat([vae_balanced_application_dataset_labels_30_30_30, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'audio-streaming', 30000 - baseline_application_seed['Dataset']['Application Type'].value_counts()['audio-streaming'])])\nvae_balanced_application_dataset_labels_30_30_30 = utils.pd.concat([vae_balanced_application_dataset_labels_30_30_30, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'file-transfer', 30000 - baseline_application_seed['Dataset']['Application Type'].value_counts()['file-transfer'])])\nvae_balanced_application_dataset_labels_30_30_30 = utils.pd.concat([vae_balanced_application_dataset_labels_30_30_30, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'chat', 30000 - baseline_application_seed['Dataset']['Application Type'].value_counts()['chat'])])\nvae_balanced_application_dataset_labels_30_30_30 = utils.pd.concat([vae_balanced_application_dataset_labels_30_30_30, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'video-streaming', 30000 - baseline_application_seed['Dataset']['Application Type'].value_counts()['video-streaming'])])\nvae_balanced_application_dataset_labels_30_30_30 = utils.pd.concat([vae_balanced_application_dataset_labels_30_30_30, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'voip', 30000 - baseline_application_seed['Dataset']['Application Type'].value_counts()['voip'])])\nvae_balanced_application_dataset_labels_30_30_30 = utils.pd.concat([vae_balanced_application_dataset_labels_30_30_30, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'email', 30000 - baseline_application_seed['Dataset']['Application Type'].value_counts()['email'])])\n",
"_____no_output_____"
],
[
"expected_sizes = {\"p2p\" : 48020, \"browsing\" : 48020, \"audio-streaming\": 48020, 'file-transfer' : 48020, 'chat': 48020, 'video-streaming': 48020, 'voip': 48020, 'email': 48020}\nvae_balanced_application_dataset_labels_equal = downsample(baseline_application_seed['Dataset'], 'Application Type', expected_sizes)\nvae_balanced_application_dataset_labels_equal = utils.pd.concat([vae_balanced_application_dataset_labels_equal, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'audio-streaming', 48020 - baseline_application_seed['Dataset']['Application Type'].value_counts()['audio-streaming'])])\nvae_balanced_application_dataset_labels_equal = utils.pd.concat([vae_balanced_application_dataset_labels_equal, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'file-transfer', 48020 - baseline_application_seed['Dataset']['Application Type'].value_counts()['file-transfer'])])\nvae_balanced_application_dataset_labels_equal = utils.pd.concat([vae_balanced_application_dataset_labels_equal, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'chat', 48020 - baseline_application_seed['Dataset']['Application Type'].value_counts()['chat'])])\nvae_balanced_application_dataset_labels_equal = utils.pd.concat([vae_balanced_application_dataset_labels_equal, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'video-streaming', 48020 - baseline_application_seed['Dataset']['Application Type'].value_counts()['video-streaming'])])\nvae_balanced_application_dataset_labels_equal = utils.pd.concat([vae_balanced_application_dataset_labels_equal, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'voip', 48020 - baseline_application_seed['Dataset']['Application Type'].value_counts()['voip'])])\nvae_balanced_application_dataset_labels_equal = utils.pd.concat([vae_balanced_application_dataset_labels_equal, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'email', 48020 - baseline_application_seed['Dataset']['Application Type'].value_counts()['email'])])\nvae_balanced_application_dataset_labels_equal = utils.pd.concat([vae_balanced_application_dataset_labels_equal, random_sample(vae_application_dataset_labels_50['Dataset'], 'Application Type', 'browsing', 48020 - baseline_application_seed['Dataset']['Application Type'].value_counts()['browsing'])])\n\n",
"_____no_output_____"
],
[
"vae_balanced_traffic_labels_dataset_30_20_10.to_csv('./synthetic/vae_traffic_30_20_10.csv', index=False)\nvae_balanced_traffic_labels_dataset_equal.to_csv('./synthetic/vae_traffic_upsample_to_majority.csv', index=False)\nvae_balanced_application_dataset_labels_30_30_30.to_csv('./synthetic/vae_application_30000.csv', index=False)\nvae_balanced_application_dataset_labels_equal.to_csv('./synthetic/vae_application_upsample_to_majority.csv', index=False)",
"_____no_output_____"
],
[
"print(f'Last Execution: {utils.datetime.datetime.now()}')\nassert False, 'Nothing after this point is included in the study'",
"Last Execution: 2022-04-28 16:12:22.075012\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
ec8c67f8a893636f8a5ef8c4fcaa0df8d7dd7516 | 12,788 | ipynb | Jupyter Notebook | emnist_inference_cnn.ipynb | ahmedbendebba1/Alphanumeric-Recognition-webapp | 9130c37577df05b41899b92e8b0224b105eef0ef | [
"MIT"
] | 1 | 2021-04-22T19:45:07.000Z | 2021-04-22T19:45:07.000Z | emnist_inference_cnn.ipynb | ahmedbendebba1/ML-web-app | 9130c37577df05b41899b92e8b0224b105eef0ef | [
"MIT"
] | null | null | null | emnist_inference_cnn.ipynb | ahmedbendebba1/ML-web-app | 9130c37577df05b41899b92e8b0224b105eef0ef | [
"MIT"
] | null | null | null | 43.794521 | 5,772 | 0.706131 | [
[
[
"import torch\nfrom torchvision import datasets, transforms\nimport torchvision\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport logging\nimport numpy as np\nimport random\nimport time",
"_____no_output_____"
],
[
"%load_ext autoreload\n%autoreload 2\n\n%matplotlib inline\n\n",
"_____no_output_____"
],
[
"# Define the Net\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(28, 64, (5,5), padding = 2)\n self.conv1_bn = nn.BatchNorm2d(64)\n\n self.conv2 = nn.Conv2d(64, 128, 2, padding = 2)\n\n self.fc1 = nn.Linear(2048, 1024)\n\n self.dropout = nn.Dropout(0.3)\n\n self.fc2 = nn.Linear(1024, 512)\n\n self.bn = nn.BatchNorm1d(1)\n\n self.fc3 = nn.Linear(512, 128)\n\n self.fc4 = nn.Linear(128,47)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 1, 2)\n x = self.conv1_bn(x)\n\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n\n\n x = x.view(-1, 2048)\n x = F.relu(self.fc1(x))\n\n x = self.dropout(x)\n\n x = self.fc2(x)\n\n x = x.view(-1, 1, 512)\n x = self.bn(x)\n\n x = x.view(-1, 512)\n x = self.fc3(x)\n x = self.fc4(x)\n\n #return F.log_softmax(x, dim=1)\n \n return x",
"_____no_output_____"
],
[
"root = ''\n\n#test data tranform\ntransform_valid = transforms.Compose(\n [\n transforms.ToTensor(),\n \n ])\nemnist_test = datasets.EMNIST(root,split = 'balanced', train=False, download=True, transform = transform_valid)",
"_____no_output_____"
],
[
"# Download weights\n\nnet = Net()\n\n\nmodel_weights = 'cnn2.pth'\nnet.load_state_dict(torch.load(model_weights)[\"state_dict\"])\nnet.eval()\n\n\n\nrandom_img = 13\n\nclass_mapping = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabdefghnqrt'\n\nprint('Inference on GPU')\nstart = time.time()\nwith torch.no_grad():\n\n data = emnist_test.data[random_img] # (1,28,28)\n \n data = data.type(torch.FloatTensor)\n data = data/255\n \n #data = data.view(1, 28, 28, 1).to(device)\n data = data.view(1, 28, 28, 1)\n data = torch.transpose(data, 1, 2)\n \n out = net(data)\n probabilities = F.softmax(out, dim = 1)\n pred_y = torch.max(probabilities, 1)[1].cpu().data.numpy()\n \n print(class_mapping[int(pred_y)], 'prediction')\n print(torch.max(probabilities, 1)[0], 'probability')\n plt.imshow(data.cpu().reshape([28, 28]), cmap='Greys_r')\n print(class_mapping[int(emnist_test.targets[random_img].numpy())], 'real value')\n plt.show()\n\nend = time.time()\nprint(\"inference time on GPU: \", end-start)",
"Inference on GPU\nG prediction\ntensor([0.9994]) probability\nG real value\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
ec8c6fea04cc2a743eeeb132860a3cbcd74c6a2d | 54,667 | ipynb | Jupyter Notebook | sklearn/notebooks/custom_code_in_serving.ipynb | N3da/cloudml-samples | bf367be3e497c584086d462140545dc4594b219b | [
"Apache-2.0"
] | null | null | null | sklearn/notebooks/custom_code_in_serving.ipynb | N3da/cloudml-samples | bf367be3e497c584086d462140545dc4594b219b | [
"Apache-2.0"
] | null | null | null | sklearn/notebooks/custom_code_in_serving.ipynb | N3da/cloudml-samples | bf367be3e497c584086d462140545dc4594b219b | [
"Apache-2.0"
] | null | null | null | 31.746225 | 619 | 0.497503 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ec8c70a54df593e7ad9ffdeb637dc4425cee63d8 | 146,597 | ipynb | Jupyter Notebook | .ipynb_checkpoints/P1-checkpoint.ipynb | mjalalmaab/NanoLaneDetect | 63d1a754841d4eb84405af1f5f39936bbd03a5d0 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/P1-checkpoint.ipynb | mjalalmaab/NanoLaneDetect | 63d1a754841d4eb84405af1f5f39936bbd03a5d0 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/P1-checkpoint.ipynb | mjalalmaab/NanoLaneDetect | 63d1a754841d4eb84405af1f5f39936bbd03a5d0 | [
"MIT"
] | null | null | null | 178.995116 | 116,880 | 0.887569 | [
[
[
"# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\nIn this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip \"raw-lines-example.mp4\" (also contained in this repository) to see what the output should look like after using the helper functions below. \n\nOnce you have a result that looks roughly like \"raw-lines-example.mp4\", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.\n\nIn addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.\n\n---\nLet's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the \"play\" button above) to display the image.\n\n**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the \"Kernel\" menu above and selecting \"Restart & Clear Output\".**\n\n---",
"_____no_output_____"
],
[
"**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**\n\n---\n\n<figure>\n <img src=\"examples/line-segments-example.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your goal is to connect/average/extrapolate line segments to get output like this</p> \n </figcaption>\n</figure>",
"_____no_output_____"
],
[
"**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.** ",
"_____no_output_____"
],
[
"## Import Packages",
"_____no_output_____"
]
],
[
[
"#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Read in an Image",
"_____no_output_____"
]
],
[
[
"#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg')\n\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')",
"This image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)\n"
]
],
[
[
"## Ideas for Lane Detection Pipeline",
"_____no_output_____"
],
[
"**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images \n`cv2.cvtColor()` to grayscale or change color \n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**",
"_____no_output_____"
],
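[
"Below is a small, hedged sketch of a few of the calls listed above (`cv2.inRange()`, `cv2.fillPoly()` and `cv2.bitwise_and()`). The white-color thresholds and the triangular region are placeholder assumptions, not values tuned for this project.",
"_____no_output_____"
],
[
"# Hedged sketch of the helpers listed above; thresholds and region are placeholder assumptions.\nwhite_lower = np.array([200, 200, 200])\nwhite_upper = np.array([255, 255, 255])\n\n# color selection: keep only near-white pixels\ncolor_mask = cv2.inRange(image, white_lower, white_upper)\ncolor_selected = cv2.bitwise_and(image, image, mask=color_mask)\n\n# region selection: keep only a rough triangle covering the lane area\nheight, width = image.shape[:2]\nroi_mask = np.zeros((height, width), dtype=np.uint8)\ntriangle = np.array([[(0, height), (width // 2, height // 2), (width, height)]], dtype=np.int32)\ncv2.fillPoly(roi_mask, triangle, 255)\nregion_selected = cv2.bitwise_and(color_selected, color_selected, mask=roi_mask)\n\nplt.imshow(region_selected)",
"_____no_output_____"
],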
[
"## Helper Functions",
"_____no_output_____"
],
[
"Below are some helper functions to help get you started. They should look familiar from the lesson!",
"_____no_output_____"
]
],
[
[
"import math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=10):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to \n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4). \n \n Think about things like separating line segments by their \n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of \n the lines and extrapolate to the top and bottom of the lane.\n \n This function draws `lines` with `color` and `thickness`. 
\n    Lines are drawn on the image inplace (mutates the image).\n    If you want to make the lines semi-transparent, think about combining\n    this function with the weighted_img() function below\n    \"\"\"\n    imshape = img.shape\n    image_x_dim = imshape[1]\n    image_y_dim = imshape[0]\n    # running averages of the extrapolated segments for each side (flipped y coordinates)\n    left_line = np.array([0.0, 0.0, 0.0, 0.0])\n    right_line = np.array([0.0, 0.0, 0.0, 0.0])\n    for line in lines:\n        for x1,y1,x2,y2 in line:\n            # flip y so that it grows upwards from the bottom of the image\n            y1_modified = image_y_dim-y1\n            y2_modified = image_y_dim-y2\n\n            if (y1_modified<y2_modified):\n                lower_x = x1\n                lower_y = y1_modified\n                higher_x = x2\n                higher_y = y2_modified\n            else:\n                lower_x = x2\n                lower_y = y2_modified\n                higher_x = x1\n                higher_y = y1_modified\n\n            if (higher_x==lower_x):\n                slope_deg = 90.0\n            else:\n                slope = float(np.arctan((higher_y-lower_y)/(higher_x-lower_x)))\n                slope_deg = slope * 180/np.pi\n\n            # ignore nearly horizontal segments; extrapolate the rest towards the bottom of the\n            # image and towards mid-height:  y - y1 = slope (x - x1)  =>  x = (1-y1)/slope + x1\n            if (abs(slope_deg)>20 and higher_x!=lower_x):\n                segment_slope = (higher_y-lower_y)/(higher_x-lower_x)\n                if (lower_y>1):\n                    lower_x = int((1-higher_y)/segment_slope + higher_x)\n                    lower_y = 1\n\n                higher_x = int((image_y_dim/2-higher_y)/segment_slope + higher_x)\n                higher_y = int(image_y_dim/2)\n\n                # keep a running average per side (positive slope -> left line, negative -> right line)\n                if (slope_deg>0):\n                    left_line = (left_line+[lower_x,lower_y, higher_x, higher_y])/2\n                else:\n                    right_line = (right_line+[lower_x,lower_y, higher_x, higher_y])/2\n\n                cv2.line(img, (higher_x, image_y_dim - higher_y), (lower_x, image_y_dim - lower_y), color, thickness)\n            #cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n    \"\"\"\n    `img` should be the output of a Canny transform.\n    \n    Returns an image with hough lines drawn.\n    \"\"\"\n    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n    draw_lines(line_img, lines)\n    return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.): \n    \"\"\"\n    `img` is the output of the hough_lines(), An image with lines drawn on it.\n    Should be a blank image (all black) with lines drawn on it.\n    \n    `initial_img` should be the image before any processing.\n    \n    The result image is computed as follows:\n    \n    initial_img * α + img * β + γ\n    NOTE: initial_img and img must be the same shape!\n    \"\"\"\n    return cv2.addWeighted(initial_img, α, img, β, γ)",
"_____no_output_____"
]
],
[
[
"## Test Images\n\nBuild your pipeline to work on the images in the directory \"test_images\" \n**You should make sure your pipeline works well on these images before you try the videos.**",
"_____no_output_____"
]
],
[
[
"import os\nfolder = \"test_images/\"\nos.listdir(folder)\n\n \nfor filename in os.listdir(folder):\n print(filename)",
"solidWhiteCurve.jpg\nsolidWhiteRight.jpg\nsolidYellowCurve.jpg\nsolidYellowCurve2.jpg\nsolidYellowLeft.jpg\nwhiteCarLaneSwitch.jpg\n"
]
],
[
[
"## Build a Lane Finding Pipeline\n\n",
"_____no_output_____"
],
[
"Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.",
"_____no_output_____"
]
],
[
[
"# TODO: Build your pipeline that will draw lane lines on the test_images\n# then save them to the test_images_output directory.\n\nfor filename in os.listdir(folder):\n image = cv2.imread(os.path.join(folder,filename))\n\n gray_image = grayscale(image)\n #plt.imshow(gray_image, cmap = 'gray')\n\n kernel_size = 5\n blur_gray = gaussian_blur(gray_image, kernel_size)\n #plt.imshow(blur_gray, cmap = 'gray')\n\n\n low_threshold = 60\n high_threshold = 120\n\n edges = canny(blur_gray, low_threshold, high_threshold)\n\n #plt.imshow(edges,cmap='Greys_r')\n\n imshape = image.shape\n vertices = np.array([[(imshape[1]/8,imshape[0]),(imshape[1]/2, imshape[0]*0.55), (imshape[1]/2, imshape[0]*0.55), (imshape[1],imshape[0])]], dtype=np.int32)\n\n\n cropped_edges = region_of_interest(edges, vertices)\n #plt.imshow(cropped_edges,cmap='Greys_r')\n\n\n rho = 1\n theta = 1/200\n threshold = 50\n min_line_len = 150\n max_line_gap = 200\n houghed_img = hough_lines(cropped_edges, rho, theta, threshold, min_line_len, max_line_gap)\n #plt.imshow(houghed_img,cmap='Greys_r')\n \n result = weighted_img(houghed_img, image, α=0.8, β=1., γ=0.)\n \n save_folder = \"test_images_output/\"\n cv2.imwrite(os.path.join(save_folder,filename), result)\n print(os.path.join(save_folder,filename))\n\n",
"test_images_output/solidWhiteCurve.jpg\ntest_images_output/solidWhiteRight.jpg\ntest_images_output/solidYellowCurve.jpg\ntest_images_output/solidYellowCurve2.jpg\ntest_images_output/solidYellowLeft.jpg\ntest_images_output/whiteCarLaneSwitch.jpg\n"
]
],
[
[
"## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. \nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**",
"_____no_output_____"
]
],
[
[
"# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML",
"_____no_output_____"
],
[
"def process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n \n gray_image = grayscale(image)\n #plt.imshow(gray_image, cmap = 'gray')\n\n kernel_size = 5\n blur_gray = gaussian_blur(gray_image, kernel_size)\n #plt.imshow(blur_gray, cmap = 'gray')\n\n\n low_threshold = 60\n high_threshold = 120\n\n edges = canny(blur_gray, low_threshold, high_threshold)\n\n #plt.imshow(edges,cmap='Greys_r')\n\n imshape = image.shape\n vertices = np.array([[(imshape[1]/8,imshape[0]),(imshape[1]/2, imshape[0]/2), (imshape[1]/2, imshape[0]/2), (imshape[1],imshape[0])]], dtype=np.int32)\n\n\n cropped_edges = region_of_interest(edges, vertices)\n #plt.imshow(cropped_edges,cmap='Greys_r')\n\n\n rho = 1\n theta = 1/500\n threshold = 70\n min_line_len = 200\n max_line_gap = 200\n houghed_img = hough_lines(cropped_edges, rho, theta, threshold, min_line_len, max_line_gap)\n #plt.imshow(houghed_img,cmap='Greys_r')\n \n result = weighted_img(houghed_img, image, α=0.8, β=1., γ=0.)\n\n return result",
"_____no_output_____"
]
],
[
[
"Let's try the one with the solid white lane on the right first ...",
"_____no_output_____"
]
],
[
[
"white_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)",
"[MoviePy] >>>> Building video test_videos_output/solidWhiteRight.mp4\n[MoviePy] Writing video test_videos_output/solidWhiteRight.mp4\n"
]
],
[
[
"Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.",
"_____no_output_____"
]
],
[
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))",
"_____no_output_____"
]
],
[
[
"## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**",
"_____no_output_____"
],
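[
"One possible way to get a single line per side (a hedged sketch, not the `draw_lines()` implementation above) is to split the Hough segments by slope sign and fit one line per side with `np.polyfit`; the slope threshold of 0.3 is an assumption.",
"_____no_output_____"
],
[
"# Hedged sketch: fit one averaged line per lane side from the Hough segments.\ndef fit_lane_lines(lines, y_bottom, y_top):\n    left_pts, right_pts = [], []\n    for line in lines:\n        for x1, y1, x2, y2 in line:\n            if x2 == x1:\n                continue  # skip vertical segments\n            slope = (y2 - y1) / (x2 - x1)\n            if slope < -0.3:      # image y grows downward, so the left lane has negative slope\n                left_pts += [(x1, y1), (x2, y2)]\n            elif slope > 0.3:     # right lane\n                right_pts += [(x1, y1), (x2, y2)]\n    fitted = []\n    for pts in (left_pts, right_pts):\n        if len(pts) >= 2:\n            xs, ys = zip(*pts)\n            m, b = np.polyfit(xs, ys, 1)   # y = m*x + b\n            fitted.append(((int((y_bottom - b) / m), y_bottom), (int((y_top - b) / m), y_top)))\n    return fitted",
"_____no_output_____"
],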
[
"Now for the one with the solid yellow lane on the left. This one's more tricky!",
"_____no_output_____"
]
],
[
[
"yellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)",
"[MoviePy] >>>> Building video test_videos_output/solidYellowLeft.mp4\n[MoviePy] Writing video test_videos_output/solidYellowLeft.mp4\n"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))",
"_____no_output_____"
]
],
[
[
"## Writeup and Submission\n\nIf you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.\n",
"_____no_output_____"
],
[
"## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!",
"_____no_output_____"
]
],
[
[
"challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)",
"[MoviePy] >>>> Building video test_videos_output/challenge.mp4\n[MoviePy] Writing video test_videos_output/challenge.mp4\n"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
ec8c71964f21fe0413ca914cb13a377f8d294844 | 9,666 | ipynb | Jupyter Notebook | Chapter_7/Section_7.4.2.3.ipynb | godfanmiao/ML-Kaggle-Github-2023 | 89e82bb5c22764461f5d8e855fced556a5f7750c | [
"BSD-3-Clause"
] | 5 | 2021-11-04T02:01:10.000Z | 2021-12-26T03:13:43.000Z | Chapter_7/.ipynb_checkpoints/Section_7.4.2.3-checkpoint.ipynb | godfanmiao/ML-Kaggle-Github-2023 | 89e82bb5c22764461f5d8e855fced556a5f7750c | [
"BSD-3-Clause"
] | null | null | null | Chapter_7/.ipynb_checkpoints/Section_7.4.2.3-checkpoint.ipynb | godfanmiao/ML-Kaggle-Github-2023 | 89e82bb5c22764461f5d8e855fced556a5f7750c | [
"BSD-3-Clause"
] | 1 | 2022-01-01T03:33:18.000Z | 2022-01-01T03:33:18.000Z | 35.149091 | 256 | 0.438754 | [
[
[
"##################################################################\n#《Python机器学习及实践:从零开始通往Kaggle竞赛之路(2023年度版)》开源代码\n#-----------------------------------------------------------------\n# @章节号:7.4.2.3(分布式随机森林回归模型) \n# @作者:范淼 \n# @电子邮箱:[email protected] \n# @微博:https://weibo.com/fanmiaothu \n# @官方交流QQ群号:561500762 \n##################################################################",
"_____no_output_____"
],
[
"from pyspark.sql import SparkSession\nimport pyspark.sql.functions as func\n\n\n#创建SparkSession。\nspark = SparkSession.builder.getOrCreate()\n\n#读取文件并存储到DataFrame中。\ndf = spark.read.csv('../Datasets/bike_rental/bike_rental.csv', header=True)",
"21/12/07 16:22:23 WARN Utils: Your hostname, michael-fandeMacBook-Air.local resolves to a loopback address: 127.0.0.1; using 172.24.206.36 instead (on interface en0)\n21/12/07 16:22:23 WARN Utils: Set SPARK_LOCAL_IP if you need to bind to another address\n21/12/07 16:22:24 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\nUsing Spark's default log4j profile: org/apache/spark/log4j-defaults.properties\nSetting default log level to \"WARN\".\nTo adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n21/12/07 16:22:25 WARN Utils: Service 'SparkUI' could not bind on port 4040. Attempting port 4041.\n21/12/07 16:22:25 WARN Utils: Service 'SparkUI' could not bind on port 4041. Attempting port 4042.\n \r"
],
[
"df = df.select([c for c in df.columns if c not in ['instant','dteday']])\n\ndf",
"_____no_output_____"
],
[
"df.show(5)",
"+------+---+----+---+-------+-------+----------+----------+----+------+----+---------+------+----------+---+\n|season| yr|mnth| hr|holiday|weekday|workingday|weathersit|temp| atemp| hum|windspeed|casual|registered|cnt|\n+------+---+----+---+-------+-------+----------+----------+----+------+----+---------+------+----------+---+\n| 1| 0| 1| 0| 0| 6| 0| 1|0.24|0.2879|0.81| 0| 3| 13| 16|\n| 1| 0| 1| 1| 0| 6| 0| 1|0.22|0.2727| 0.8| 0| 8| 32| 40|\n| 1| 0| 1| 2| 0| 6| 0| 1|0.22|0.2727| 0.8| 0| 5| 27| 32|\n| 1| 0| 1| 3| 0| 6| 0| 1|0.24|0.2879|0.75| 0| 3| 10| 13|\n| 1| 0| 1| 4| 0| 6| 0| 1|0.24|0.2879|0.75| 0| 0| 1| 1|\n+------+---+----+---+-------+-------+----------+----------+----+------+----+---------+------+----------+---+\nonly showing top 5 rows\n\n"
],
[
"from pyspark.sql.functions import col\n\n\ncate_cols = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday', 'workingday', 'weathersit']\nnum_cols = ['temp', 'atemp', 'hum', 'windspeed', 'registered']\n\nfor column in cate_cols:\n df = df.withColumn(column, col(column).cast('int'))\n\nfor column in num_cols:\n df = df.withColumn(column, col(column).cast('float'))\n\ndf = df.withColumn('cnt', col('cnt').cast('float'))\n\ndf",
"_____no_output_____"
],
[
"df.show(5)\n\n#分割出训练和测试集。\n(train_df, test_df) = df.randomSplit([0.8, 0.2], seed=911120)",
"+------+---+----+---+-------+-------+----------+----------+----+------+----+---------+------+----------+----+\n|season| yr|mnth| hr|holiday|weekday|workingday|weathersit|temp| atemp| hum|windspeed|casual|registered| cnt|\n+------+---+----+---+-------+-------+----------+----------+----+------+----+---------+------+----------+----+\n| 1| 0| 1| 0| 0| 6| 0| 1|0.24|0.2879|0.81| 0.0| 3| 13.0|16.0|\n| 1| 0| 1| 1| 0| 6| 0| 1|0.22|0.2727| 0.8| 0.0| 8| 32.0|40.0|\n| 1| 0| 1| 2| 0| 6| 0| 1|0.22|0.2727| 0.8| 0.0| 5| 27.0|32.0|\n| 1| 0| 1| 3| 0| 6| 0| 1|0.24|0.2879|0.75| 0.0| 3| 10.0|13.0|\n| 1| 0| 1| 4| 0| 6| 0| 1|0.24|0.2879|0.75| 0.0| 0| 1.0| 1.0|\n+------+---+----+---+-------+-------+----------+----------+----+------+----+---------+------+----------+----+\nonly showing top 5 rows\n\n"
],
[
"from pyspark.ml.feature import OneHotEncoder, VectorAssembler, StandardScaler\nfrom pyspark.ml.regression import RandomForestRegressor\nfrom pyspark.ml import Pipeline\n\n\nenc_cols = [c+'_c' for c in cate_cols]\n\nohe = OneHotEncoder(inputCols= cate_cols, outputCols=enc_cols)\n\nva = VectorAssembler(inputCols = num_cols+enc_cols, outputCol= 'features')\n\nscaler = StandardScaler(inputCol='features', outputCol='scaled_features')\n\nregressor = RandomForestRegressor(featuresCol='scaled_features', labelCol='cnt')\n\npipeline = Pipeline(stages=[ohe, va, scaler, regressor])\n\nmodel = pipeline.fit(train_df)\n\npredictions = model.transform(test_df)",
"21/12/07 16:22:47 WARN package: Truncated the string representation of a plan since it was too large. This behavior can be adjusted by setting 'spark.sql.debug.maxToStringFields'.\n \r"
],
[
"from pyspark.ml.evaluation import RegressionEvaluator\n\n\nevaluator = RegressionEvaluator(labelCol=\"cnt\", predictionCol=\"prediction\", metricName=\"rmse\")\n\nrmse = evaluator.evaluate(predictions)\n\n#评估回归器的均方根误差。\nprint ('Spark-ML的分布式随机森林回归模型在bike_rental测试集上的均方根误差为:%.2f。' %(rmse))",
"\r[Stage 20:> (0 + 1) / 1]\r"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
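The record above ends with an RMSE-only evaluation. A minimal sketch, assuming the same `predictions` DataFrame and `cnt` label column from that pipeline, shows how the same `RegressionEvaluator` API can also report R² and MAE by changing `metricName`:

```python
from pyspark.ml.evaluation import RegressionEvaluator

# Assumes `predictions` from the fitted pipeline above; "r2" and "mae" are
# built-in metric names of RegressionEvaluator alongside "rmse".
for metric in ("r2", "mae"):
    evaluator = RegressionEvaluator(labelCol="cnt", predictionCol="prediction",
                                    metricName=metric)
    print(metric, evaluator.evaluate(predictions))
```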
ec8c74742213e7e31810ef024b134bd909dab96d | 453,910 | ipynb | Jupyter Notebook | dogscats/roger.ipynb | rogerallen/kaggle | e692f530f62f582a4f6f30f65e2826e0a88d0f83 | [
"Apache-2.0"
] | null | null | null | dogscats/roger.ipynb | rogerallen/kaggle | e692f530f62f582a4f6f30f65e2826e0a88d0f83 | [
"Apache-2.0"
] | null | null | null | dogscats/roger.ipynb | rogerallen/kaggle | e692f530f62f582a4f6f30f65e2826e0a88d0f83 | [
"Apache-2.0"
] | null | null | null | 489.127155 | 431,260 | 0.932903 | [
[
[
"Creating my own version of the dogs_cats_redux notebook in order to make my own entry into the Kaggle competition.\n\nMy dir structure is similar, but not exactly the same:\n\n```\nutils \ndogscats (not lesson1)\n data\n (no extra redux subdir)\n train\n test\n```",
"_____no_output_____"
]
],
[
[
"#Verify we are in the lesson1 directory\n%pwd",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import os, sys\nsys.path.insert(1, os.path.join(sys.path[0], '../utils'))\nfrom utils import *\nfrom vgg16 import Vgg16\nfrom PIL import Image\nfrom keras.preprocessing import image\nfrom sklearn.metrics import confusion_matrix",
"Using gpu device 0: GeForce GTX 1070 (CNMeM is disabled, cuDNN 5105)\n/home/rallen/anaconda2/lib/python2.7/site-packages/theano/sandbox/cuda/__init__.py:600: UserWarning: Your cuDNN version is more recent than the one Theano officially supports. If you see any problems, try updating Theano or downgrading cuDNN to version 5.\n warnings.warn(warn)\nUsing Theano backend.\n"
]
],
[
[
"Note: had to comment out vgg16bn in utils.py (whatever that is)",
"_____no_output_____"
]
],
[
[
"current_dir = os.getcwd()\nLESSON_HOME_DIR = current_dir\nDATA_HOME_DIR = current_dir+'/data'",
"_____no_output_____"
]
],
[
[
"## Create validation set and sample\n\nONLY DO THIS ONCE.",
"_____no_output_____"
]
],
[
[
"from shutil import copyfile\n#Create directories\n%cd $DATA_HOME_DIR\n# did this once\n#%mkdir valid\n#%mkdir results\n#%mkdir -p sample/train\n#%mkdir -p sample/test\n#%mkdir -p sample/valid\n#%mkdir -p sample/results\n#%mkdir -p test/unknown",
"_____no_output_____"
],
[
"%cd $DATA_HOME_DIR/train",
"_____no_output_____"
],
[
"# create validation set by renaming 2000 \ng = glob('*.jpg')\nshuf = np.random.permutation(g)",
"_____no_output_____"
],
[
"NUM_IMAGES=len(g)\nNUM_VALID = 2000 # 0.1*NUM_IMAGES\nNUM_TRAIN = NUM_IMAGES-NUM_VALID\nprint(\"total=%d train=%d valid=%d\"%(NUM_IMAGES,NUM_TRAIN,NUM_VALID))",
"_____no_output_____"
],
[
"for i in range(NUM_VALID): \n os.rename(shuf[i], DATA_HOME_DIR+'/valid/' + shuf[i])",
"_____no_output_____"
],
[
"# copy a small sample\ng = glob('*.jpg')\nshuf = np.random.permutation(g)\nfor i in range(200): copyfile(shuf[i], DATA_HOME_DIR+'/sample/train/' + shuf[i])",
"_____no_output_____"
],
[
"%cd $DATA_HOME_DIR/valid",
"_____no_output_____"
],
[
"g = glob('*.jpg')\nshuf = np.random.permutation(g)\nfor i in range(50): copyfile(shuf[i], DATA_HOME_DIR+'/sample/valid/' + shuf[i])",
"_____no_output_____"
],
[
"!ls {DATA_HOME_DIR}/train/ |wc -l\n!ls {DATA_HOME_DIR}/valid/ |wc -l\n!ls {DATA_HOME_DIR}/sample/train/ |wc -l\n!ls {DATA_HOME_DIR}/sample/valid/ |wc -l",
"_____no_output_____"
]
],
[
[
"## Rearrange image files into their respective directories\n\nONLY DO THIS ONCE.",
"_____no_output_____"
]
],
[
[
"#Divide cat/dog images into separate directories\n\n%cd $DATA_HOME_DIR/sample/train\n%mkdir cats\n%mkdir dogs\n%mv cat.*.jpg cats/\n%mv dog.*.jpg dogs/\n\n%cd $DATA_HOME_DIR/sample/valid\n%mkdir cats\n%mkdir dogs\n%mv cat.*.jpg cats/\n%mv dog.*.jpg dogs/\n\n%cd $DATA_HOME_DIR/valid\n%mkdir cats\n%mkdir dogs\n%mv cat.*.jpg cats/\n%mv dog.*.jpg dogs/\n\n%cd $DATA_HOME_DIR/train\n%mkdir cats\n%mkdir dogs\n%mv cat.*.jpg cats/\n%mv dog.*.jpg dogs/",
"_____no_output_____"
],
[
"# Create single 'unknown' class for test set\n%cd $DATA_HOME_DIR/test\n%mv *.jpg unknown/",
"_____no_output_____"
],
[
"!ls {DATA_HOME_DIR}/test",
"_____no_output_____"
]
],
[
[
"## Finetuning and Training\n\nOKAY, ITERATE HERE",
"_____no_output_____"
]
],
[
[
"%cd $DATA_HOME_DIR\n\n#Set path to sample/ path if desired\npath = DATA_HOME_DIR + '/' #'/sample/'\ntest_path = DATA_HOME_DIR + '/test/' #We use all the test data\nresults_path=DATA_HOME_DIR + '/results/'\ntrain_path=path + '/train/'\nvalid_path=path + '/valid/'",
"/home/rallen/Documents/PracticalDL4C/kaggle/dogscats/data\n"
],
[
"vgg = Vgg16()",
"/home/rallen/anaconda2/lib/python2.7/site-packages/keras/layers/core.py:621: UserWarning: `output_shape` argument not specified for layer lambda_1 and cannot be automatically inferred with the Theano backend. Defaulting to output shape `(None, 3, 224, 224)` (same as input shape). If the expected output shape is different, specify it via the `output_shape` argument.\n .format(self.name, input_shape))\n"
],
[
"#Set constants. You can experiment with no_of_epochs to improve the model\nbatch_size=64\nno_of_epochs=2",
"_____no_output_____"
],
[
"#Finetune the model\nbatches = vgg.get_batches(train_path, batch_size=batch_size)\nval_batches = vgg.get_batches(valid_path, batch_size=batch_size*2)\nvgg.finetune(batches)\n\n#Not sure if we set this for all fits\nvgg.model.optimizer.lr = 0.01",
"Found 23000 images belonging to 2 classes.\nFound 2000 images belonging to 2 classes.\n"
],
[
"#Notice we are passing in the validation dataset to the fit() method\n#For each epoch we test our model against the validation set\nlatest_weights_filename = None\n#vgg.model.load_weights('/home/rallen/Documents/PracticalDL4C/courses/deeplearning1/nbs/data/dogscats/models/first.h5')\n#vgg.model.load_weights(results_path+'ft1.h5')\nlatest_weights_filename='ft24.h5'\nvgg.model.load_weights(results_path+latest_weights_filename)",
"_____no_output_____"
]
],
[
[
"if you are training, stay here. if you are loading & creating submission skip down from here.",
"_____no_output_____"
]
],
[
[
"# if you have run some epochs already...\nepoch_offset=12 # trying again from ft1\nfor epoch in range(no_of_epochs):\n print \"Running epoch: %d\" % (epoch + epoch_offset)\n vgg.fit(batches, val_batches, nb_epoch=1)\n latest_weights_filename = 'ft%d.h5' % (epoch + epoch_offset)\n vgg.model.save_weights(results_path+latest_weights_filename)\nprint \"Completed %s fit operations\" % no_of_epochs",
"_____no_output_____"
]
],
[
[
"```\nResults of ft1.h5\n\n0 val_loss: 0.2122 val_acc: 0.9830\n1 val_loss: 0.1841 val_acc: 0.9855\n[[987 7]\n [ 20 986]]\n--\n2 val_loss: 0.2659 val_acc: 0.9830\n3 val_loss: 0.2254 val_acc: 0.9850\n4 val_loss: 0.2072 val_acc: 0.9845\n[[975 19]\n [ 11 995]]\n \nResults of first0.h5\n\n0 val_loss: 0.2425 val_acc: 0.9830\n\n[[987 7]\n [ 27 979]]\n```",
"_____no_output_____"
]
],
[
[
"# only if you have to\nlatest_weights_filename='ft1.h5'\nvgg.model.load_weights(results_path+latest_weights_filename)",
"_____no_output_____"
]
],
[
[
"## Validate Predictions",
"_____no_output_____"
]
],
[
[
"val_batches, probs = vgg.test(valid_path, batch_size = batch_size)\nfilenames = val_batches.filenames\nexpected_labels = val_batches.classes #0 or 1\n\n#Round our predictions to 0/1 to generate labels\nour_predictions = probs[:,0]\nour_labels = np.round(1-our_predictions)",
"_____no_output_____"
],
[
"cm = confusion_matrix(expected_labels, our_labels)\nplot_confusion_matrix(cm, val_batches.class_indices)",
"_____no_output_____"
],
[
"#Helper function to plot images by index in the validation set \n#Plots is a helper function in utils.py\ndef plots_idx(idx, titles=None):\n plots([image.load_img(valid_path + filenames[i]) for i in idx], titles=titles)\n \n#Number of images to view for each visualization task\nn_view = 4",
"_____no_output_____"
],
[
"#1. A few correct labels at random\ncorrect = np.where(our_labels==expected_labels)[0]\nprint \"Found %d correct labels\" % len(correct)\nidx = permutation(correct)[:n_view]\nplots_idx(idx, our_predictions[idx])",
"_____no_output_____"
],
[
"#2. A few incorrect labels at random\nincorrect = np.where(our_labels!=expected_labels)[0]\nprint \"Found %d incorrect labels\" % len(incorrect)\nidx = permutation(incorrect)[:n_view]\nplots_idx(idx, our_predictions[idx])",
"_____no_output_____"
],
[
"#3a. The images we most confident were cats, and are actually cats\ncorrect_cats = np.where((our_labels==0) & (our_labels==expected_labels))[0]\nprint \"Found %d confident correct cats labels\" % len(correct_cats)\nmost_correct_cats = np.argsort(our_predictions[correct_cats])[::-1][:n_view]\nplots_idx(correct_cats[most_correct_cats], our_predictions[correct_cats][most_correct_cats])",
"_____no_output_____"
],
[
"#3b. The images we most confident were dogs, and are actually dogs\ncorrect_dogs = np.where((our_labels==1) & (our_labels==expected_labels))[0]\nprint \"Found %d confident correct dogs labels\" % len(correct_dogs)\nmost_correct_dogs = np.argsort(our_predictions[correct_dogs])[:n_view]\nplots_idx(correct_dogs[most_correct_dogs], our_predictions[correct_dogs][most_correct_dogs])",
"_____no_output_____"
],
[
"#4a. The images we were most confident were cats, but are actually dogs\nincorrect_cats = np.where((our_labels==0) & (our_labels!=expected_labels))[0]\nprint \"Found %d incorrect cats\" % len(incorrect_cats)\nif len(incorrect_cats):\n most_incorrect_cats = np.argsort(our_predictions[incorrect_cats])[::-1][:n_view]\n plots_idx(incorrect_cats[most_incorrect_cats], our_predictions[incorrect_cats][most_incorrect_cats])",
"_____no_output_____"
],
[
"#4b. The images we were most confident were dogs, but are actually cats\nincorrect_dogs = np.where((our_labels==1) & (our_labels!=expected_labels))[0]\nprint \"Found %d incorrect dogs\" % len(incorrect_dogs)\nif len(incorrect_dogs):\n most_incorrect_dogs = np.argsort(our_predictions[incorrect_dogs])[:n_view]\n plots_idx(incorrect_dogs[most_incorrect_dogs], our_predictions[incorrect_dogs][most_incorrect_dogs])",
"_____no_output_____"
],
[
"#5. The most uncertain labels (ie those with probability closest to 0.5).\nmost_uncertain = np.argsort(np.abs(our_predictions-0.5))\nplots_idx(most_uncertain[:n_view], our_predictions[most_uncertain])",
"_____no_output_____"
]
],
[
[
"## Generate Predictions",
"_____no_output_____"
]
],
[
[
"batches, preds = vgg.test(test_path, batch_size = batch_size*2)\n# Error allocating 3347316736 bytes of device memory (out of memory).\n# got this error when batch-size = 128\n# I see this pop up to 6GB memory with batch_size = 64 & this takes some time...",
"Found 12500 images belonging to 1 classes.\n"
],
[
"#For every image, vgg.test() generates two probabilities \n#based on how we've ordered the cats/dogs directories.\n#It looks like column one is cats and column two is dogs\nprint preds[:5]\n\nfilenames = batches.filenames\nprint filenames[:5]",
"[[ 1. 0.]\n [ 0. 1.]\n [ 1. 0.]\n [ 0. 1.]\n [ 1. 0.]]\n['unknown/2850.jpg', 'unknown/8158.jpg', 'unknown/4719.jpg', 'unknown/4743.jpg', 'unknown/11184.jpg']\n"
],
[
"#You can verify the column ordering by viewing some images\nImage.open(test_path + filenames[1])",
"_____no_output_____"
],
[
"#Save our test results arrays so we can use them again later\nsave_array(results_path + 'test_preds.dat', preds)\nsave_array(results_path + 'filenames.dat', filenames)",
"_____no_output_____"
]
],
[
[
"## Submit Predictions to Kaggle!",
"_____no_output_____"
]
],
[
[
"#Load our test predictions from file\npreds = load_array(results_path + 'test_preds.dat')\nfilenames = load_array(results_path + 'filenames.dat')",
"_____no_output_____"
],
[
"#Grab the dog prediction column\nisdog = preds[:,1]\nprint \"Raw Predictions: \" + str(isdog[:5])\nprint \"Mid Predictions: \" + str(isdog[(isdog < .6) & (isdog > .4)])\nprint \"Edge Predictions: \" + str(isdog[(isdog == 1) | (isdog == 0)])",
"Raw Predictions: [ 0. 1. 0. 1. 0.]\nMid Predictions: []\nEdge Predictions: [ 0. 1. 0. ..., 0. 0. 0.]\n"
],
[
"#play it safe, round down our edge predictions\n#isdog = isdog.clip(min=0.05, max=0.95)\n#isdog = isdog.clip(min=0.02, max=0.98)\nisdog = isdog.clip(min=0.01, max=0.99)",
"_____no_output_____"
],
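The clipping above is about the competition's log-loss metric: a prediction at exactly 0 or 1 that turns out wrong incurs an essentially unbounded penalty, so pulling edge predictions slightly inward caps the worst case. A small illustration with made-up probabilities (not the competition data):

```python
import numpy as np

def single_log_loss(y_true, p, eps=1e-15):
    # Per-image binary log loss; eps keeps log() finite at the edges.
    p = np.clip(p, eps, 1 - eps)
    return -(y_true * np.log(p) + (1 - y_true) * np.log(1 - p))

print(single_log_loss(0, 0.99))  # wrong but clipped to 0.99 -> ~4.6
print(single_log_loss(0, 1.0))   # wrong at exactly 1.0 -> ~34.5 (hits the eps floor)
```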
[
"#Extract imageIds from the filenames in our test/unknown directory \nfilenames = batches.filenames\nids = np.array([int(f[8:f.find('.')]) for f in filenames])",
"_____no_output_____"
],
[
"subm = np.stack([ids,isdog], axis=1)\nsubm[:5]",
"_____no_output_____"
],
[
"%cd $DATA_HOME_DIR\nsubmission_file_name = 'submission4.csv'\nnp.savetxt(submission_file_name, subm, fmt='%d,%.5f', header='id,label', comments='')",
"/home/rallen/Documents/PracticalDL4C/kaggle/dogscats/data\n"
],
[
"from IPython.display import FileLink\n%cd $LESSON_HOME_DIR\nFileLink('data/'+submission_file_name)",
"/home/rallen/Documents/PracticalDL4C/kaggle/dogscats\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8c77db0eab78d2af695aa721c6a6999795ce4b | 716,334 | ipynb | Jupyter Notebook | rfcn/visual_inference.ipynb | dhzhd1/road_obj_detect | adfec142546875e622f6a7bca986d7c952738af8 | [
"Apache-2.0"
] | 7 | 2017-09-08T07:10:36.000Z | 2018-01-27T16:35:23.000Z | rfcn/visual_inference.ipynb | dhzhd1/road_obj_detect | adfec142546875e622f6a7bca986d7c952738af8 | [
"Apache-2.0"
] | 11 | 2020-03-24T15:27:49.000Z | 2022-01-13T00:36:50.000Z | rfcn/visual_inference.ipynb | dhzhd1/road_obj_detect | adfec142546875e622f6a7bca986d7c952738af8 | [
"Apache-2.0"
] | 2 | 2018-01-18T14:49:47.000Z | 2018-01-27T16:35:54.000Z | 1,458.928717 | 151,494 | 0.939893 | [
[
[
"import _init_paths\n\nimport argparse\nimport os\nimport sys\nimport logging\nimport pprint\nimport cv2\nfrom config.config import config, update_config\nfrom utils.image import resize, transform\nimport numpy as np\n# get config\nos.environ['PYTHONUNBUFFERED'] = '1'\nos.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'\nos.environ['MXNET_ENABLE_GPU_P2P'] = '0'\nupdate_config('./road_train_all.yaml')\n\nsys.path.insert(0, os.path.join('../external/mxnet', config.MXNET_VERSION))\nimport mxnet as mx\nfrom core.tester import im_detect, Predictor\nfrom symbols import *\nfrom utils.load_model import load_param\nfrom utils.show_boxes import show_boxes\nfrom utils.tictoc import tic, toc\nfrom nms.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper\n",
"_____no_output_____"
],
[
"# def parse_args():\n# parser = argparse.ArgumentParser(description='Show Deformable ConvNets demo')\n# # general\n# parser.add_argument('--rfcn_only', help='whether use R-FCN only (w/o Deformable ConvNets)', default=False, action='store_true')\n\n# args = parser.parse_args()\n# return args\n\n# args = parse_args()\n\ndef main():\n # get symbol\n pprint.pprint(config)\n config.symbol = 'resnet_v1_101_rfcn'\n sym_instance = eval(config.symbol + '.' + config.symbol)()\n sym = sym_instance.get_symbol(config, is_train=False)\n\n # set up class names\n num_classes = 4\n classes = ['vehicle', 'pedestrian', 'cyclist', 'traffic lights']\n\n # load demo data\n test_image_path = './data/RoadImages/test/'\n image_names = ['71777.jpg', '70522.jpg', '72056.jpg', '71531.jpg', '70925.jpg', '70372.jpg', '70211.jpg']\n data = []\n for im_name in image_names:\n assert os.path.exists(test_image_path + im_name), ('%s does not exist'.format(test_image_path + im_name))\n im = cv2.imread(test_image_path + im_name, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n target_size = config.SCALES[0][1]\n max_size = config.SCALES[0][1]\n im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)\n im_tensor = transform(im, config.network.PIXEL_MEANS)\n im_info = np.array([[im_tensor.shape[2], im_tensor.shape[3], im_scale]], dtype=np.float32)\n data.append({'data': im_tensor, 'im_info': im_info})\n\n\n # get predictor\n data_names = ['data', 'im_info']\n label_names = []\n data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]\n max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]\n provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]\n provide_label = [None for i in xrange(len(data))]\n arg_params, aux_params = load_param('./output/rfcn/road_obj/road_train_all/all/' + 'rfcn_road', 19 , process=True)\n predictor = Predictor(sym, data_names, label_names,\n context=[mx.gpu(0)], max_data_shapes=max_data_shape,\n provide_data=provide_data, provide_label=provide_label,\n arg_params=arg_params, aux_params=aux_params)\n nms = gpu_nms_wrapper(config.TEST.NMS, 0)\n\n # warm up\n# for j in xrange(2):\n# data_batch = mx.io.DataBatch(data=[data[0]], label=[], pad=0, index=0,\n# provide_data=[[(k, v.shape) for k, v in zip(data_names, data[0])]],\n# provide_label=[None])\n# scales = [data_batch.data[i][1].asnumpy()[0, 2] for i in xrange(len(data_batch.data))]\n# scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, scales, config)\n\n # test\n for idx, im_name in enumerate(image_names):\n #print('DEBUG: Image Name: {}'.format(im_name))\n data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,\n provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],\n provide_label=[None])\n scales = [data_batch.data[i][1].asnumpy()[0, 2] for i in xrange(len(data_batch.data))]\n #print('DEBUG: scales: {}'.format(scales))\n tic()\n scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, scales, config)\n boxes = boxes[0].astype('f')\n #print('DEBUG: boxes: {}'.format(boxes))\n scores = scores[0].astype('f')\n #print('DEBUG: scores: {}'.format(scores))\n dets_nms = []\n for j in range(1, scores.shape[1]):\n cls_scores = scores[:, j, np.newaxis]\n #print('DEBUG: cls_scores: {}'.format(cls_scores))\n cls_boxes = boxes[:, 4:8] if config.CLASS_AGNOSTIC else boxes[:, j * 4:(j + 1) * 4]\n #print('DEBUG: cls_boxes: 
{}'.format(cls_boxes))\n cls_dets = np.hstack((cls_boxes, cls_scores))\n #print('DEBUG: cls_dets_1: {}'.format(cls_dets))\n keep = nms(cls_dets)\n #print('DEBUG: keep: {}'.format(keep))\n cls_dets = cls_dets[keep, :]\n #print('DEBUG: cls_dets_2: {}'.format(cls_dets))\n cls_dets = cls_dets[cls_dets[:, -1] > 0.7, :]\n #print('DEBUG: cls_dets_3: {}'.format(cls_dets))\n dets_nms.append(cls_dets)\n print 'testing {} {:.4f}s'.format(im_name, toc())\n #print('DEBUG: Shape of dets_nms: {}'.format(len(dets_nms)))\n print('DEBUG: dets_nms: {}'.format(dets_nms))\n # visualize\n im = cv2.imread(test_image_path + im_name)\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n show_boxes(im, dets_nms, classes, 1)\n\n print 'done'\n\n",
"_____no_output_____"
],
[
"if __name__ == '__main__':\n main()\n",
"{'CLASS_AGNOSTIC': True,\n 'MXNET_VERSION': 'mxnet',\n 'SCALES': [(360, 640)],\n 'TEST': {'BATCH_IMAGES': 1,\n 'CXX_PROPOSAL': False,\n 'HAS_RPN': True,\n 'NMS': 0.3,\n 'PROPOSAL_MIN_SIZE': 0,\n 'PROPOSAL_NMS_THRESH': 0.7,\n 'PROPOSAL_POST_NMS_TOP_N': 2000,\n 'PROPOSAL_PRE_NMS_TOP_N': 20000,\n 'RPN_MIN_SIZE': 0,\n 'RPN_NMS_THRESH': 0.7,\n 'RPN_POST_NMS_TOP_N': 300,\n 'RPN_PRE_NMS_TOP_N': 6000,\n 'max_per_image': 100,\n 'test_epoch': 8},\n 'TRAIN': {'ALTERNATE': {'RCNN_BATCH_IMAGES': 0,\n 'RPN_BATCH_IMAGES': 0,\n 'rfcn1_epoch': 0,\n 'rfcn1_lr': 0,\n 'rfcn1_lr_step': '',\n 'rfcn2_epoch': 0,\n 'rfcn2_lr': 0,\n 'rfcn2_lr_step': '',\n 'rpn1_epoch': 0,\n 'rpn1_lr': 0,\n 'rpn1_lr_step': '',\n 'rpn2_epoch': 0,\n 'rpn2_lr': 0,\n 'rpn2_lr_step': '',\n 'rpn3_epoch': 0,\n 'rpn3_lr': 0,\n 'rpn3_lr_step': ''},\n 'ASPECT_GROUPING': True,\n 'BATCH_IMAGES': 1,\n 'BATCH_ROIS': -1,\n 'BATCH_ROIS_OHEM': 128,\n 'BBOX_MEANS': [0.0, 0.0, 0.0, 0.0],\n 'BBOX_NORMALIZATION_PRECOMPUTED': True,\n 'BBOX_REGRESSION_THRESH': 0.5,\n 'BBOX_STDS': [0.1, 0.1, 0.2, 0.2],\n 'BBOX_WEIGHTS': array([ 1., 1., 1., 1.]),\n 'BG_THRESH_HI': 0.5,\n 'BG_THRESH_LO': 0.0,\n 'CXX_PROPOSAL': False,\n 'ENABLE_OHEM': True,\n 'END2END': True,\n 'FG_FRACTION': 0.25,\n 'FG_THRESH': 0.5,\n 'FLIP': False,\n 'RESUME': True,\n 'RPN_BATCH_SIZE': 256,\n 'RPN_BBOX_WEIGHTS': [1.0, 1.0, 1.0, 1.0],\n 'RPN_CLOBBER_POSITIVES': False,\n 'RPN_FG_FRACTION': 0.5,\n 'RPN_MIN_SIZE': 0,\n 'RPN_NEGATIVE_OVERLAP': 0.3,\n 'RPN_NMS_THRESH': 0.7,\n 'RPN_POSITIVE_OVERLAP': 0.7,\n 'RPN_POSITIVE_WEIGHT': -1.0,\n 'RPN_POST_NMS_TOP_N': 300,\n 'RPN_PRE_NMS_TOP_N': 6000,\n 'SHUFFLE': True,\n 'begin_epoch': 17,\n 'end_epoch': 1000,\n 'lr': 0.0005,\n 'lr_factor': 0.1,\n 'lr_step': '17.0053',\n 'model_prefix': 'rfcn_road',\n 'momentum': 0.9,\n 'warmup': True,\n 'warmup_lr': 5e-05,\n 'warmup_step': 1000,\n 'wd': 0.0005},\n 'dataset': {'NUM_CLASSES': 5,\n 'dataset': 'RoadImages',\n 'dataset_path': 'RoadImages',\n 'image_set': 'all',\n 'proposal': 'rpn',\n 'root_path': './data',\n 'test_image_set': 'test'},\n 'default': {'frequent': 100, 'kvstore': 'device'},\n 'gpus': '0',\n 'network': {'ANCHOR_RATIOS': [0.5, 1, 2],\n 'ANCHOR_SCALES': [4, 8, 16, 32],\n 'FIXED_PARAMS': ['conv1',\n 'bn_conv1',\n 'res2',\n 'bn2',\n 'gamma',\n 'beta'],\n 'FIXED_PARAMS_SHARED': ['conv1',\n 'bn_conv1',\n 'res2',\n 'bn2',\n 'res3',\n 'bn3',\n 'res4',\n 'bn4',\n 'gamma',\n 'beta'],\n 'IMAGE_STRIDE': 0,\n 'NUM_ANCHORS': 12,\n 'PIXEL_MEANS': array([ 103.06, 115.9 , 123.15]),\n 'RCNN_FEAT_STRIDE': 16,\n 'RPN_FEAT_STRIDE': 16,\n 'pretrained': './model/pretrained_model/resnet_v1_101',\n 'pretrained_epoch': 0},\n 'output_path': './output/rfcn/road_obj',\n 'symbol': 'resnet_v1_101_rfcn'}\ntesting 71777.jpg 0.1412s\nDEBUG: dets_nms: [array([[ 281.83398438, 168.65238953, 317.3901062 , 207.3127594 ,\n 0.99994469]], dtype=float32), array([], shape=(0, 5), dtype=float32), array([], shape=(0, 5), dtype=float32), array([], shape=(0, 5), dtype=float32)]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
ec8c8e9bf9a972781fa52b3439d1c73746600ffa | 94,850 | ipynb | Jupyter Notebook | QueryIDRWithGeneLists.ipynb | IDR/idr-notebooks | 78b62ac96724350cb57acdad1690a5aed207474a | [
"BSD-2-Clause"
] | 16 | 2017-05-17T09:54:30.000Z | 2021-11-10T21:42:13.000Z | QueryIDRWithGeneLists.ipynb | IDR/idr-notebooks | 78b62ac96724350cb57acdad1690a5aed207474a | [
"BSD-2-Clause"
] | 100 | 2016-10-06T16:55:44.000Z | 2022-03-10T19:28:26.000Z | QueryIDRWithGeneLists.ipynb | IDR/idr-notebooks | 78b62ac96724350cb57acdad1690a5aed207474a | [
"BSD-2-Clause"
] | 20 | 2016-10-05T09:20:40.000Z | 2021-03-31T22:10:21.000Z | 96.294416 | 37,328 | 0.783089 | [
[
[
"### Import libraries and global variables\nThe cell below will install dependencies if you choose to run the notebook in [Google Colab](https://colab.research.google.com/notebooks/intro.ipynb#recent=true).",
"_____no_output_____"
]
],
[
[
"%pip install idr-py",
"_____no_output_____"
],
[
"import pandas\n\nfrom ipywidgets import widgets, interact, fixed\nfrom functools import wraps\nfrom IPython import get_ipython\nfrom IPython.display import display, HTML\n\nfrom idr import connection\nfrom idr import create_http_session\nfrom idr import genes_of_interest_go\nfrom idr.widgets import textbox_widget\nfrom idr.widgets import dropdown_widget\nfrom idr import get_phenotypes_for_genelist, get_similar_genes\n\nfrom idr.visualizations import plot_idr_attributes, plot_string_interactions\nfrom idr.externalDBs import genes_of_interest_from_string",
"_____no_output_____"
]
],
[
[
"## Querying",
"_____no_output_____"
],
[
"### Variables:",
"_____no_output_____"
]
],
[
[
"organisms_list = ['Homo sapiens', 'Saccharomyces cerevisiae']\norg_sel = dropdown_widget(organisms_list, 'Select Organism:', True)\ntax_id = textbox_widget('9606', 'Enter Taxonomy Id for Organism',\n \"Taxonomy Id:\", True)\ngo_term = textbox_widget('GO:0008290', 'Enter GO Id',\n 'Gene Ontology Id:', True)\nmanual_gene_list = textbox_widget('', 'Comma seperated gene symbols',\n 'Manual Gene List:', True)",
"_____no_output_____"
]
],
[
[
"### Import query list",
"_____no_output_____"
]
],
[
[
"go_gene_list = []\nif go_term.value.split(\",\") != ['']:\n go_gene_list = genes_of_interest_go(go_term.value, tax_id.value)\nelse:\n print('Please enter a valid Gene Ontology Id')\nmanual_list = manual_gene_list.value.split(\",\")\nif manual_list != ['']:\n go_gene_list = list(set(go_gene_list + manual_list))\nprint(\"Query list of genes:\", go_gene_list)",
"Query list of genes: ['CAPZA3', 'CAPZA2', 'CAPZA1', 'ADD2', 'ADD1', 'CAPZB', 'HEL-S-86', 'MTPN', 'hCG_28646', 'CAPG']\n"
]
],
[
[
"### Query IDR for Phenotypes",
"_____no_output_____"
]
],
[
[
"session = create_http_session()\norganism = org_sel.value\n\n[query_genes_dataframe,\n screen_to_phenotype_dictionary] = get_phenotypes_for_genelist(session,\n go_gene_list,\n organism)\ndisplay(HTML(query_genes_dataframe.to_html(escape=False)))",
"[============================================================] 100.0% ...Iterating through gene list\r"
]
],
[
[
"### Get Other Genes from the phenotypes",
"_____no_output_____"
]
],
[
[
"conn = connection('idr.openmicroscopy.org')\ntry:\n query_genes_list = list(query_genes_dataframe['Value'])\n [similar_genes,\n overlap_genes] = get_similar_genes(conn, query_genes_list,\n screen_to_phenotype_dictionary)\n overlap_genes_dataframe = pandas.DataFrame.from_dict(overlap_genes,\n orient='index')\n display(HTML(\"<strong>Query Genes:</strong>\"))\n display(HTML(overlap_genes_dataframe.to_html(escape=False)))\n\n similar_genes_dataframe = pandas.DataFrame.from_dict(similar_genes,\n orient='index')\n display(HTML(\"<strong>Similar Genes:</strong>\"))\n display(HTML(similar_genes_dataframe.to_html(escape=False)))\nfinally:\n conn.close()",
"Connected to IDR...\n[============================================================] 100.0% ...Iterating through screens\r"
]
],
[
[
"## Visualization",
"_____no_output_____"
],
[
"### Plot Query Genes",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfilter_by_category = widgets.Dropdown(description='Filter',\n options=['Phenotypes', 'Screens'])\nthreshold_for_category = widgets.IntSlider(description='Threshold',\n min=1, max=10, step=1, value=1)\nthreshold_for_plot = widgets.IntSlider(description='Threshold_for_plot',\n min=1, max=10, step=1, value=1)\n\n\n@interact(primary_dictionary=fixed(overlap_genes),\n secondary_dictionary=fixed(overlap_genes),\n plot_title=fixed('Query Genes'),\n filter_by_category=filter_by_category,\n threshold_for_category=threshold_for_category,\n threshold_for_plot=threshold_for_plot)\n@wraps(plot_idr_attributes)\ndef myfun(**kwargs):\n global screenids_removed, phenotypes_removed, genes_of_interest\n [screenids_removed, phenotypes_removed,\n genes_of_interest] = plot_idr_attributes(**kwargs)",
"_____no_output_____"
]
],
[
[
"### Plot Similar Genes",
"_____no_output_____"
]
],
[
[
"for ids in screenids_removed:\n if ids in similar_genes:\n del similar_genes[ids]\n\n\n@interact(primary_dictionary=fixed(similar_genes),\n secondary_dictionary=fixed(overlap_genes),\n plot_title=fixed('Similar genes'),\n filter_by_category=fixed(filter_by_category.value),\n threshold_for_category=fixed(threshold_for_category.value),\n threshold_for_plot=widgets.IntSlider(min=1, max=10, step=1, value=5))\n@wraps(plot_idr_attributes)\ndef myfun2(**kwargs):\n global screenids_removed, phenotypes_removed, genes_of_interest\n [screenids_removed, phenotypes_removed,\n genes_of_interest] = plot_idr_attributes(**kwargs)\n\n\nsimilar_genes_list = genes_of_interest",
"_____no_output_____"
]
],
[
[
"### Get String Interactions",
"_____no_output_____"
],
[
"### Plot interactions between similar genes and query genes/similar genes",
"_____no_output_____"
]
],
[
[
"similar_genes_list = list(set(genes_of_interest) - set(go_gene_list))\ngenes_of_interest1 = list(set(go_gene_list + similar_genes_list))\ninteractions_dataframe = genes_of_interest_from_string(genes_of_interest1,\n 1, tax_id.value)\n\nprint('Primary Interactors:')\ndf = plot_string_interactions(go_gene_list, similar_genes_list,\n interactions_dataframe)\n\nprimary_genes = list(df.columns.values)\nsecondary_genes = set(similar_genes_list) - set(primary_genes)\nprint('Secondary Interactors:')\ndf = plot_string_interactions(secondary_genes, primary_genes,\n interactions_dataframe)",
"Primary Interactors:\n"
]
],
[
[
"### License (BSD 2-Clause)¶\n\nCopyright (C) 2016-2021 University of Dundee. All Rights Reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\nRedistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ec8ca729898aec0555e9cf612c308d40d8d53296 | 204,989 | ipynb | Jupyter Notebook | workshop/workshop02.ipynb | JonahSpicher/ThinkBayes2 | 92d147f5e5a6d192550744ef46e023473444300f | [
"MIT"
] | null | null | null | workshop/workshop02.ipynb | JonahSpicher/ThinkBayes2 | 92d147f5e5a6d192550744ef46e023473444300f | [
"MIT"
] | null | null | null | workshop/workshop02.ipynb | JonahSpicher/ThinkBayes2 | 92d147f5e5a6d192550744ef46e023473444300f | [
"MIT"
] | null | null | null | 246.974699 | 48,772 | 0.917381 | [
[
[
"Bayesian Statistics Made Simple\n===\n\nCode and exercises from my workshop on Bayesian statistics in Python.\n\nCopyright 2018 Allen Downey\n\nMIT License: https://opensource.org/licenses/MIT",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function, division\n\n%matplotlib inline\n\nimport numpy as np\n\nfrom thinkbayes2 import Suite\nimport thinkplot\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"## The likelihood function\n\n\nHere's a definition for `Bandit`, which extends `Suite` and defines a likelihood function that computes the probability of the data (win or lose) for a given value of `x` (the probability of win).\n\nNote that `hypo` is in the range 0 to 100.",
"_____no_output_____"
]
],
[
[
"class Bandit(Suite):\n \n def Likelihood(self, data, hypo):\n \"\"\" \n hypo is the prob of win (0-100)\n data is a string, either 'W' or 'L'\n \"\"\"\n x = hypo / 100\n if data == 'W':\n return x\n else:\n return 1-x",
"_____no_output_____"
]
],
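The same update can be written without `thinkbayes2`, which may make the likelihood step easier to see: start from a uniform grid prior over x, multiply by the likelihood of each observed outcome, and renormalize. A minimal sketch in plain NumPy (an illustration, not part of the workshop code):

```python
import numpy as np

xs = np.linspace(0, 1, 101)              # hypotheses for the win probability
posterior = np.ones_like(xs) / len(xs)   # uniform prior

for outcome in "WLL":
    like = xs if outcome == "W" else 1 - xs  # likelihood of one win or loss
    posterior = posterior * like
    posterior /= posterior.sum()             # renormalize

print(xs[np.argmax(posterior)])  # MAP estimate after 1 win, 2 losses is ~0.33
```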
[
[
"We'll start with a uniform distribution from 0 to 100.",
"_____no_output_____"
]
],
[
[
"bandit = Bandit(range(101))\nthinkplot.Pdf(bandit)\nthinkplot.Config(xlabel='x', ylabel='Probability')",
"No handles with labels found to put in legend.\n"
]
],
[
[
"Now we can update with a single loss:",
"_____no_output_____"
]
],
[
[
"bandit.Update('L')\nthinkplot.Pdf(bandit)\nthinkplot.Config(xlabel='x', ylabel='Probability', legend=False)",
"_____no_output_____"
]
],
[
[
"Another loss:",
"_____no_output_____"
]
],
[
[
"bandit.Update('L')\nthinkplot.Pdf(bandit)\nthinkplot.Config(xlabel='x', ylabel='Probability', legend=False)",
"_____no_output_____"
]
],
[
[
"And a win:",
"_____no_output_____"
]
],
[
[
"bandit.Update('W')\nthinkplot.Pdf(bandit)\nthinkplot.Config(xlabel='x', ylabel='Probability', legend=False)",
"_____no_output_____"
]
],
[
[
"Starting over, here's what it looks like after 1 win and 9 losses.",
"_____no_output_____"
]
],
[
[
"bandit = Bandit(range(101))\n\nfor outcome in 'WLLLLLLLLL':\n bandit.Update(outcome)\n\nthinkplot.Pdf(bandit)\nthinkplot.Config(xlabel='x', ylabel='Probability', legend=False)",
"_____no_output_____"
]
],
[
[
"The posterior mean is about 17%",
"_____no_output_____"
]
],
[
[
"bandit.Mean()",
"_____no_output_____"
]
],
[
[
"The most likely value is the observed proportion 1/10",
"_____no_output_____"
]
],
[
[
"bandit.MAP()",
"_____no_output_____"
]
],
[
[
"The posterior credible interval has a 90% chance of containing the true value (provided that the prior distribution truly represents our background knowledge).",
"_____no_output_____"
]
],
[
[
"bandit.CredibleInterval(90)",
"_____no_output_____"
]
],
[
[
"## Multiple bandits",
"_____no_output_____"
],
[
"Now suppose we have several bandits and we want to decide which one to play.",
"_____no_output_____"
],
[
"For this example, we have 4 machines with these probabilities:",
"_____no_output_____"
]
],
[
[
"actual_probs = [0.10, 0.20, 0.30, 0.40]",
"_____no_output_____"
]
],
[
[
"The following function simulates playing one machine once.",
"_____no_output_____"
]
],
[
[
"from random import random\nfrom collections import Counter\n\ncounter = Counter()\n\ndef flip(p):\n return random() < p\n\ndef play(i):\n counter[i] += 1\n p = actual_probs[i]\n if flip(p):\n return 'W'\n else:\n return 'L'",
"_____no_output_____"
]
],
[
[
"Here's a test, playing machine 3 twenty times:",
"_____no_output_____"
]
],
[
[
"for i in range(20):\n result = play(3)\n print(result, end=' ')",
"W W L L L W L L L W W W L L L L W L W L "
]
],
[
[
"Now I'll make 4 `Bandit` objects to represent our beliefs about the 4 machines.",
"_____no_output_____"
]
],
[
[
"prior = range(101)\nbeliefs = [Bandit(prior) for i in range(4)]",
"_____no_output_____"
]
],
[
[
"This function displays the four posterior distributions",
"_____no_output_____"
]
],
[
[
"options = dict(yticklabels='invisible')\n\ndef plot(beliefs, **options):\n thinkplot.preplot(rows=2, cols=2)\n for i, b in enumerate(beliefs):\n thinkplot.subplot(i+1)\n thinkplot.Pdf(b, label=i)\n thinkplot.Config(**options)",
"_____no_output_____"
],
[
"plot(beliefs, legend=True)",
"/home/jonah/anaconda3/lib/python3.6/site-packages/matplotlib/cbook/deprecation.py:106: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.\n warnings.warn(message, mplDeprecation, stacklevel=1)\n"
]
],
[
[
"Now suppose we play each machine 10 times. This function updates our beliefs about one of the machines based on one outcome.",
"_____no_output_____"
]
],
[
[
"def update(beliefs, i, outcome):\n beliefs[i].Update(outcome)",
"_____no_output_____"
],
[
"for i in range(4):\n for _ in range(10):\n outcome = play(i)\n update(beliefs, i, outcome)",
"_____no_output_____"
],
[
"plot(beliefs, legend=True)",
"/home/jonah/anaconda3/lib/python3.6/site-packages/matplotlib/cbook/deprecation.py:106: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.\n warnings.warn(message, mplDeprecation, stacklevel=1)\n"
]
],
[
[
"After playing each machine 10 times, we have some information about their probabilies:",
"_____no_output_____"
]
],
[
[
"[belief.Mean() for belief in beliefs]",
"_____no_output_____"
]
],
[
[
"## Bayesian Bandits\n\nTo get more information, we could play each machine 100 times, but while we are gathering data, we are not making good use of it. The kernel of the Bayesian Bandits algorithm is that is collects and uses data at the same time. In other words, it balances exploration and exploitation.\n\nThe following function chooses among the machines so that the probability of choosing each machine is proportional to its \"probability of superiority\".\n\n`Random` chooses a value from the posterior distribution.\n\n`argmax` returns the index of the machine that chose the highest value.",
"_____no_output_____"
]
],
[
[
"def choose(beliefs):\n ps = [b.Random() for b in beliefs]\n return np.argmax(ps)",
"_____no_output_____"
]
],
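The "probability of superiority" idea can also be sketched without the `thinkbayes2` machinery: keep a Beta posterior per machine, sample one win probability from each, and play the argmax. This is an illustrative stand-in (Thompson sampling with conjugate Beta priors), not the workshop's implementation, and the win/loss counts below are made up:

```python
import numpy as np

rng = np.random.default_rng(42)

wins = np.array([1, 2, 3, 4])     # hypothetical counts, not from the run above
losses = np.array([9, 8, 7, 6])

def thompson_choose(wins, losses):
    # One draw from each machine's Beta(wins+1, losses+1) posterior,
    # then play the machine with the largest sampled win probability.
    samples = rng.beta(wins + 1, losses + 1)
    return int(np.argmax(samples))

print(thompson_choose(wins, losses))
```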
[
[
"Here's an example.",
"_____no_output_____"
]
],
[
[
"choose(beliefs)",
"_____no_output_____"
]
],
[
[
"Putting it all together, the following function chooses a machine, plays once, and updates `beliefs`:",
"_____no_output_____"
]
],
[
[
"def choose_play_update(beliefs, verbose=False):\n i = choose(beliefs)\n outcome = play(i)\n update(beliefs, i, outcome)\n if verbose:\n print(i, outcome, beliefs[i].Mean())",
"_____no_output_____"
]
],
[
[
"Here's an example",
"_____no_output_____"
]
],
[
[
"counter = Counter()\nchoose_play_update(beliefs, verbose=True)",
"2 L 46.15384615485773\n"
]
],
[
[
"## Trying it out",
"_____no_output_____"
],
[
"Let's start again with a fresh set of machines:",
"_____no_output_____"
]
],
[
[
"beliefs = [Bandit(prior) for i in range(4)]",
"_____no_output_____"
]
],
[
[
"Now we can play a few times and see how `beliefs` gets updated:",
"_____no_output_____"
]
],
[
[
"num_plays = 1000\n\nfor i in range(num_plays):\n choose_play_update(beliefs)\n \nplot(beliefs)",
"/home/jonah/anaconda3/lib/python3.6/site-packages/matplotlib/cbook/deprecation.py:106: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.\n warnings.warn(message, mplDeprecation, stacklevel=1)\n"
]
],
[
[
"We can summarize `beliefs` by printing the posterior mean and credible interval:",
"_____no_output_____"
]
],
[
[
"for i, b in enumerate(beliefs):\n print(b.Mean(), b.CredibleInterval(90))",
"11.136292936200892 (2, 25)\n24.444444444445942 (15, 35)\n35.48387096774196 (30, 41)\n40.71146245059293 (38, 44)\n"
]
],
[
[
"The credible intervals usually contain the true values (10, 20, 30, and 40).\n\nThe estimates are still rough, especially for the lower-probability machines. But that's a feature, not a bug: the goal is to play the high-probability machines most often. Making the estimates more precise is a means to that end, but not an end itself.\n\nLet's see how many times each machine got played. If things go according to play, the machines with higher probabilities should get played more often.",
"_____no_output_____"
]
],
[
[
"for machine, count in sorted(counter.items()):\n print(machine, count)",
"0 21\n1 88\n2 203\n3 789\n"
]
],
[
[
"**Exercise:** Go back and run this section again with a different value of `num_play` and see how it does.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ec8cabf5e04d5257dc506a996b7945f3187fa5a7 | 53,564 | ipynb | Jupyter Notebook | my_attempts/model_12.ipynb | goddard-guryon/fictional-fiesta | e69056034d3b79efc8d5913fbc85ed99a9afe09a | [
"CC0-1.0"
] | null | null | null | my_attempts/model_12.ipynb | goddard-guryon/fictional-fiesta | e69056034d3b79efc8d5913fbc85ed99a9afe09a | [
"CC0-1.0"
] | null | null | null | my_attempts/model_12.ipynb | goddard-guryon/fictional-fiesta | e69056034d3b79efc8d5913fbc85ed99a9afe09a | [
"CC0-1.0"
] | null | null | null | 32.621194 | 226 | 0.338287 | [
[
[
"# Content-Based Recommender System\n\nTo recommend movies to a user based on their user profile",
"_____no_output_____"
],
[
"Import necessary modules:",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"Get the dataset (since the dataset for this was very big, I haven't uploaded it on Github and you'll have to download it using the link given in the solutions notebook) and see a sample (we'll start with movies dataset):",
"_____no_output_____"
]
],
[
[
"movies_df = pd.read_csv(\"../datasets/movies.csv\")\nratings_df = pd.read_csv(\"../datasets/ratings.csv\")\nmovies_df.head()",
"_____no_output_____"
]
],
[
[
"Preprocess the dataset to put the movie's year of release in a separate column and convert the movie genres column entries into lists:",
"_____no_output_____"
]
],
[
[
"movies_df[\"year\"] = movies_df[\"title\"].str.extract(\"(\\(\\d\\d\\d\\d\\))\", expand=False)\nmovies_df[\"year\"] = movies_df[\"year\"].str.extract(\"(\\d\\d\\d\\d)\", expand=False)\nmovies_df[\"title\"] = movies_df[\"title\"].str.replace(\"(\\(\\d\\d\\d\\d\\))\", '')\nmovies_df[\"title\"] = movies_df[\"title\"].apply(lambda x: x.strip())\nmovies_df[\"genres\"] = movies_df[\"genres\"].str.split('|')\nmovies_df.head()",
"_____no_output_____"
]
],
[
[
"Create separate entries for every genre (this will be important for creating genre table):",
"_____no_output_____"
]
],
[
[
"movies_with_genres_df = movies_df.copy()\nfor i, row in movies_df.iterrows():\n for genre in row[\"genres\"]:\n movies_with_genres_df.at[i, genre] = 1\nmovies_with_genres_df = movies_with_genres_df.fillna(0)\nmovies_with_genres_df.head()",
"_____no_output_____"
]
],
[
[
"Now, let's look at the ratings dataset:",
"_____no_output_____"
]
],
[
[
"ratings_df.head()",
"_____no_output_____"
]
],
[
[
"Process the dataset to remove the timestamp column:",
"_____no_output_____"
]
],
[
[
"ratings_df = ratings_df.drop(\"timestamp\", 1)\nratings_df.head()",
"_____no_output_____"
]
],
[
[
"Let's create a dummy user for whom we'll make recommendations:",
"_____no_output_____"
]
],
[
[
"test_movies = pd.DataFrame([\n {'title':'Breakfast Club, The', 'rating':5},\n {'title':'Toy Story', 'rating':3.5},\n {'title':'Jumanji', 'rating':2},\n {'title':\"Pulp Fiction\", 'rating':5},\n {'title':'Akira', 'rating':4.5}\n])\ntest_movies",
"_____no_output_____"
]
],
[
[
"Get the IDs of the movies the user has watched:",
"_____no_output_____"
]
],
[
[
"test_ids = movies_df[movies_df[\"title\"].isin(test_movies[\"title\"].tolist())]\ntest_movies = pd.merge(test_ids, test_movies).drop(\"genres\", 1).drop(\"year\", 1)\ntest_movies",
"_____no_output_____"
]
],
[
[
"Use the above dataset to create a dataset of genres of the movies the user has watched:",
"_____no_output_____"
]
],
[
[
"prefs_df = movies_with_genres_df[movies_with_genres_df[\"movieId\"].isin(test_movies[\"movieId\"].tolist())]\nprefs_df = prefs_df.reset_index(drop=True).drop(\"movieId\", 1).drop(\"title\", 1).drop(\"genres\", 1).drop(\"year\", 1)\nprefs_df",
"_____no_output_____"
]
],
[
[
"Now, create a user profile by taking a dot product of the user's watched genres dataset with the ratings dataset:",
"_____no_output_____"
]
],
[
[
"user_profile = prefs_df.transpose().dot(test_movies[\"rating\"])\nuser_profile",
"_____no_output_____"
]
],
[
[
"Create a genres table from the dataset we made earlier:",
"_____no_output_____"
]
],
[
[
"genre_table = movies_with_genres_df.set_index(movies_with_genres_df[\"movieId\"])\ngenre_table = genre_table.drop(\"movieId\", 1).drop(\"title\", 1).drop(\"genres\", 1).drop(\"year\", 1)\ngenre_table.head()",
"_____no_output_____"
]
],
[
[
"Take the weighted average of the genres table with the user profile to find out the preferences of the user:",
"_____no_output_____"
]
],
[
[
"weighted_profile = ((genre_table*user_profile).sum(axis=1)/user_profile.sum()).sort_values(ascending=False)\nweighted_profile.head()",
"_____no_output_____"
]
],
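The weighted-average step is easiest to sanity-check on a toy example. The numbers below are invented (two candidate movies, three genres) and only illustrate the same `(genre_table * user_profile).sum(axis=1) / user_profile.sum()` scoring used above:

```python
import pandas as pd

genre_table = pd.DataFrame(
    {"Adventure": [1, 0], "Comedy": [1, 1], "Drama": [0, 1]},
    index=["Movie A", "Movie B"],
)
user_profile = pd.Series({"Adventure": 8.5, "Comedy": 3.5, "Drama": 5.0})

scores = (genre_table * user_profile).sum(axis=1) / user_profile.sum()
print(scores.sort_values(ascending=False))
# Movie A: (8.5 + 3.5) / 17 = 0.71   Movie B: (3.5 + 5.0) / 17 = 0.50
```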
[
[
"The top 20 (or 10, the number may change) entries in the weighted average profile are the movies we should recommend to the user:",
"_____no_output_____"
]
],
[
[
"recommendations_df = movies_df.loc[movies_df[\"movieId\"].isin(weighted_profile.head(20).keys())]\nrecommendations_df",
"_____no_output_____"
]
],
[
[
"And that's it! We created a movie recommender engine without using any machine learning library :D",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ec8caff6bb019c31de972ebd989714c98837371e | 1,378 | ipynb | Jupyter Notebook | Redfish/Delivered/Tes2020/0-ReadmeFirst.ipynb | donzef/JupyterNotebooks | 76c69e6e0e21120f3b88e7c991be6e946bc156d5 | [
"Apache-2.0"
] | 1 | 2021-05-04T19:31:17.000Z | 2021-05-04T19:31:17.000Z | Redfish/Delivered/Tes2020/0-ReadmeFirst.ipynb | donzef/JupyterNotebooks | 76c69e6e0e21120f3b88e7c991be6e946bc156d5 | [
"Apache-2.0"
] | null | null | null | Redfish/Delivered/Tes2020/0-ReadmeFirst.ipynb | donzef/JupyterNotebooks | 76c69e6e0e21120f3b88e7c991be6e946bc156d5 | [
"Apache-2.0"
] | null | null | null | 29.319149 | 238 | 0.592163 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ec8cc3b647f05a77b83f4eee4e714290ffc0670e | 381,104 | ipynb | Jupyter Notebook | BERT-SentimentAnalysis/model.ipynb | Frostday/PytorchProjects | ddfc19d6c34ce7b519e41f69e9b83ead2789a020 | [
"MIT"
] | 8 | 2021-05-28T16:09:36.000Z | 2022-02-27T23:12:48.000Z | BERT-SentimentAnalysis/model.ipynb | Frostday/PytorchProjects | ddfc19d6c34ce7b519e41f69e9b83ead2789a020 | [
"MIT"
] | null | null | null | BERT-SentimentAnalysis/model.ipynb | Frostday/PytorchProjects | ddfc19d6c34ce7b519e41f69e9b83ead2789a020 | [
"MIT"
] | 8 | 2021-05-28T16:01:48.000Z | 2022-02-27T23:12:50.000Z | 381,104 | 381,104 | 0.934417 | [
[
[
"# Setup",
"_____no_output_____"
]
],
[
[
"!pip install -qq transformers",
"_____no_output_____"
],
[
"!pip install -q gdown",
"_____no_output_____"
],
[
"import transformers\nfrom transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup\nimport torch\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom pylab import rcParams\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom collections import defaultdict\nfrom textwrap import wrap\nfrom torch import nn, optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.nn import functional as F\n\n%matplotlib inline\n%config InlineBackend.figure_format='retina'\n\nsns.set(style='whitegrid', palette='muted', font_scale=1.2)\nHAPPY_COLORS_PALETTE = [\"#01BEFE\", \"#FFDD00\", \"#FF7D00\", \"#FF006D\", \"#ADFF02\", \"#8F00FF\"]\nsns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))\nrcParams['figure.figsize'] = 12, 8\n\nRANDOM_SEED = 42\nnp.random.seed(RANDOM_SEED)\ntorch.manual_seed(RANDOM_SEED)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")",
"_____no_output_____"
],
[
"!gdown --id 1S6qMioqPJjyBLpLVz4gmRTnJHnjitnuV\n!gdown --id 1zdmewp7ayS4js4VtrJEHzAheSW-5NBZv",
"Downloading...\nFrom: https://drive.google.com/uc?id=1S6qMioqPJjyBLpLVz4gmRTnJHnjitnuV\nTo: /kaggle/working/apps.csv\n100%|████████████████████████████████████████| 134k/134k [00:00<00:00, 58.3MB/s]\nDownloading...\nFrom: https://drive.google.com/uc?id=1zdmewp7ayS4js4VtrJEHzAheSW-5NBZv\nTo: /kaggle/working/reviews.csv\n7.17MB [00:00, 152MB/s]\n"
]
],
[
[
"# Data",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(\"./reviews.csv\")\ndf.head()",
"_____no_output_____"
],
[
"df.shape, df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 15746 entries, 0 to 15745\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 userName 15746 non-null object\n 1 userImage 15746 non-null object\n 2 content 15746 non-null object\n 3 score 15746 non-null int64 \n 4 thumbsUpCount 15746 non-null int64 \n 5 reviewCreatedVersion 13533 non-null object\n 6 at 15746 non-null object\n 7 replyContent 7367 non-null object\n 8 repliedAt 7367 non-null object\n 9 sortOrder 15746 non-null object\n 10 appId 15746 non-null object\ndtypes: int64(2), object(9)\nmemory usage: 1.3+ MB\n"
],
[
"sns.countplot(df.score)\nplt.xlabel('review score');",
"/opt/conda/lib/python3.7/site-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n"
],
[
"def to_sentiment(rating):\n rating = int(rating)\n if rating <= 2:\n return 0\n elif rating == 3:\n return 1\n else:\n return 2\n\ndf['sentiment'] = df.score.apply(to_sentiment)\nclass_names = ['negative', 'neutral', 'positive']\nax = sns.countplot(df.sentiment)\nplt.xlabel('review sentiment')\nax.set_xticklabels(class_names);",
"/opt/conda/lib/python3.7/site-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n"
],
[
"PRE_TRAINED_MODEL_NAME = 'bert-base-cased'\ntokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)\nsample_txt = 'When was I last outside? I am stuck at home for 2 weeks.'\ntokens = tokenizer.tokenize(sample_txt)\ntoken_ids = tokenizer.convert_tokens_to_ids(tokens)\n\nprint(f' Sentence: {sample_txt}')\nprint(f' Tokens: {tokens}')\nprint(f'Token IDs: {token_ids}')",
"_____no_output_____"
],
[
"print(tokenizer.sep_token, tokenizer.sep_token_id)\n# marks ending of a sentence\nprint(tokenizer.cls_token, tokenizer.cls_token_id)\n# at start of the sentence so BERT knows we are doing classification\nprint(tokenizer.pad_token, tokenizer.pad_token_id)\n# token for padding\nprint(tokenizer.unk_token, tokenizer.unk_token_id)\n# BERT understands tokens that were in the training set. Everything else in unknown",
"[SEP] 102\n[CLS] 101\n[PAD] 0\n[UNK] 100\n"
],
[
"encoding = tokenizer.encode_plus(\n sample_txt,\n max_length=32,\n add_special_tokens=True, # Add '[CLS]' and '[SEP]'\n return_token_type_ids=False,\n pad_to_max_length=True,\n return_attention_mask=True,\n return_tensors='pt', # Return PyTorch tensors\n)\nencoding.keys()",
"Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n/opt/conda/lib/python3.7/site-packages/transformers/tokenization_utils_base.py:2079: FutureWarning: The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert).\n FutureWarning,\n"
],
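The warning in the output above comes from the deprecated `pad_to_max_length` argument and from truncation being activated implicitly. A hedged sketch of the same call with the newer argument names (assuming the same `tokenizer` and `sample_txt`) avoids both warnings:

```python
encoding = tokenizer.encode_plus(
    sample_txt,
    max_length=32,
    add_special_tokens=True,   # add '[CLS]' and '[SEP]'
    truncation=True,           # explicit truncation instead of the implicit default
    padding='max_length',      # replaces pad_to_max_length=True
    return_token_type_ids=False,
    return_attention_mask=True,
    return_tensors='pt',
)
```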
[
"print(len(encoding['input_ids'][0]))\nencoding['input_ids'][0]",
"32\n"
],
[
"print(len(encoding['attention_mask'][0]))\nencoding['attention_mask']",
"32\n"
],
[
"tokenizer.convert_ids_to_tokens(encoding['input_ids'][0])",
"_____no_output_____"
],
[
"token_lens = []\n\nfor txt in df.content:\n tokens = tokenizer.encode(txt, max_length=512)\n token_lens.append(len(tokens))\n\nsns.distplot(token_lens)\nplt.xlim([0, 256]);\nplt.xlabel('Token count');",
"/opt/conda/lib/python3.7/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n"
],
[
"# since most tokens contain less than 150 tokens, we pick a value greater than that\nMAX_LEN = 512",
"_____no_output_____"
],
[
"class GPReviewDataset(Dataset):\n def __init__(self, reviews, targets, tokenizer, max_len):\n self.reviews = reviews\n self.targets = targets\n self.tokenizer = tokenizer\n self.max_len = max_len\n\n def __len__(self):\n return len(self.reviews)\n \n def __getitem__(self, item):\n review = str(self.reviews[item])\n target = self.targets[item]\n encoding = self.tokenizer.encode_plus(\n review,\n add_special_tokens=True,\n max_length=self.max_len,\n return_token_type_ids=False,\n pad_to_max_length=True,\n return_attention_mask=True,\n return_tensors='pt',\n )\n return {\n 'review_text': review,\n 'input_ids': encoding['input_ids'].flatten(),\n 'attention_mask': encoding['attention_mask'].flatten(),\n 'targets': torch.tensor(target, dtype=torch.long)\n }",
"_____no_output_____"
],
[
"df_train, df_test = train_test_split(\n df,\n test_size=0.1,\n random_state=RANDOM_SEED\n)\n\ndf_val, df_test = train_test_split(\n df_test,\n test_size=0.5,\n random_state=RANDOM_SEED\n)\n\ndf_train.shape, df_val.shape, df_test.shape",
"_____no_output_____"
],
[
"def create_data_loader(df, tokenizer, max_len, batch_size):\n ds = GPReviewDataset(\n reviews=df.content.to_numpy(),\n targets=df.sentiment.to_numpy(),\n tokenizer=tokenizer,\n max_len=max_len\n )\n return DataLoader(\n ds,\n batch_size=batch_size\n )\n\nBATCH_SIZE = 8\ntrain_data_loader = create_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE)\nval_data_loader = create_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)\ntest_data_loader = create_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)",
"_____no_output_____"
],
[
"data = next(iter(train_data_loader))\nprint(data.keys())\n\nprint(data['input_ids'].shape)\nprint(data['attention_mask'].shape)\nprint(data['targets'].shape)",
"dict_keys(['review_text', 'input_ids', 'attention_mask', 'targets'])\ntorch.Size([8, 512])\ntorch.Size([8, 512])\ntorch.Size([8])\n"
]
],
[
[
"# Model",
"_____no_output_____"
]
],
[
[
"bert_model = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)",
"_____no_output_____"
],
[
"output = bert_model(\n input_ids=data['input_ids'],\n attention_mask=data['attention_mask']\n)\n\nlast_hidden_state = output[0]\npooled_output = output[1]",
"_____no_output_____"
],
[
"# here is the output by transformer\n# last_hidden_state is a sequence of hidden states of the last layer of the model. (batch, max_len, neurons in last layer)\n# pooled output is the pooled output obtained using BertPooler (batch, neurons in last layer)\n# bert_model.config.hidden_size is the number of neurons in the final layer\nlast_hidden_state.shape, pooled_output.shape, bert_model.config.hidden_size",
"_____no_output_____"
],
[
"# Making a sentiment classsifier using the vanilla bert model\nclass SentimentClassifier(nn.Module):\n def __init__(self, n_classes):\n super(SentimentClassifier, self).__init__()\n # self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)\n self.bert = bert_model\n self.drop = nn.Dropout(p=0.3)\n self.out = nn.Linear(self.bert.config.hidden_size, n_classes)\n\n def forward(self, input_ids, attention_mask):\n output = self.bert(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n pooled_output = output[1]\n output = self.drop(pooled_output)\n return self.out(output)\n\nmodel = SentimentClassifier(len(class_names))\nmodel = model.to(device)",
"_____no_output_____"
],
[
"input_ids = data['input_ids'].to(device)\nattention_mask = data['attention_mask'].to(device)\n\nprint(input_ids.shape) # batch size x seq length\nprint(attention_mask.shape) # batch size x seq length",
"torch.Size([8, 512])\ntorch.Size([8, 512])\n"
],
[
"F.softmax(model(input_ids, attention_mask), dim=1)",
"_____no_output_____"
]
],
[
[
"# Training",
"_____no_output_____"
]
],
[
[
"EPOCHS = 10\ntotal_steps = len(train_data_loader) * EPOCHS\n\noptimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)\nloss_fn = nn.CrossEntropyLoss().to(device)\n\nscheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n)",
"_____no_output_____"
],
[
"def train_epoch(\n model,\n data_loader,\n loss_fn,\n optimizer,\n device,\n scheduler,\n n_examples):\n model = model.train()\n losses = []\n correct_predictions = 0\n\n for d in data_loader:\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n targets = d[\"targets\"].to(device)\n\n outputs = model(input_ids=input_ids, attention_mask=attention_mask)\n _, preds = torch.max(outputs, dim=1)\n\n loss = loss_fn(outputs, targets)\n correct_predictions += torch.sum(preds == targets)\n \n losses.append(loss.item())\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n return correct_predictions.double() / n_examples, np.mean(losses)",
"_____no_output_____"
],
[
"def eval_model(model, data_loader, loss_fn, device, n_examples):\n model = model.eval()\n losses = []\n correct_predictions = 0\n with torch.no_grad():\n for d in data_loader:\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n targets = d[\"targets\"].to(device)\n\n outputs = model(input_ids=input_ids, attention_mask=attention_mask)\n _, preds = torch.max(outputs, dim=1)\n \n loss = loss_fn(outputs, targets)\n correct_predictions += torch.sum(preds == targets)\n losses.append(loss.item())\n return correct_predictions.double() / n_examples, np.mean(losses)",
"_____no_output_____"
],
[
"history = defaultdict(list)\nbest_accuracy = 0\n\nfor epoch in range(EPOCHS):\n print(f'Epoch {epoch + 1}/{EPOCHS}')\n print('-' * 10)\n\n train_acc, train_loss = train_epoch(\n model,\n train_data_loader,\n loss_fn,\n optimizer,\n device,\n scheduler,\n len(df_train)\n )\n print(f'Train loss {train_loss} accuracy {train_acc}')\n\n val_acc, val_loss = eval_model(\n model,\n val_data_loader,\n loss_fn,\n device,\n len(df_val)\n )\n print(f'Val loss {val_loss} accuracy {val_acc}')\n\n print()\n history['train_acc'].append(train_acc)\n history['train_loss'].append(train_loss)\n history['val_acc'].append(val_acc)\n history['val_loss'].append(val_loss)\n if val_acc > best_accuracy:\n torch.save(model.state_dict(), 'best_model_state.pt')\n best_accuracy = val_acc",
"Epoch 1/10\n----------\nTrain loss 0.7855003028561447 accuracy 0.648860348599252\nVal loss 0.6403540487241264 accuracy 0.7293519695044473\n\nEpoch 2/10\n----------\nTrain loss 0.5239438856373757 accuracy 0.8055183120457272\nVal loss 0.6715833859123064 accuracy 0.8043202033036849\n\nEpoch 3/10\n----------\nTrain loss 0.3733838071478226 accuracy 0.8954907910521488\nVal loss 0.8126848230392418 accuracy 0.8246505717916137\n\nEpoch 4/10\n----------\nTrain loss 0.2734905697654856 accuracy 0.9341613153623598\nVal loss 0.8018847727393167 accuracy 0.8487928843710293\n\nEpoch 5/10\n----------\nTrain loss 0.19611530211353653 accuracy 0.9554724437230965\nVal loss 0.8692746333403965 accuracy 0.8487928843710293\n\nEpoch 6/10\n----------\nTrain loss 0.1434257523109959 accuracy 0.9664808411544704\nVal loss 0.916636138795781 accuracy 0.8589580686149937\n\nEpoch 7/10\n----------\nTrain loss 0.11010718144051254 accuracy 0.9731141062733752\nVal loss 0.9992212773244263 accuracy 0.8513341804320204\n\nEpoch 8/10\n----------\nTrain loss 0.07851120862579421 accuracy 0.9795356714416767\nVal loss 1.0276289357337218 accuracy 0.8589580686149937\n\nEpoch 9/10\n----------\nTrain loss 0.061139786512962854 accuracy 0.9834874038529391\nVal loss 0.9707986147909053 accuracy 0.866581956797967\n\nEpoch 10/10\n----------\nTrain loss 0.0443726650260679 accuracy 0.9871568696633971\nVal loss 0.9789650823529681 accuracy 0.8716645489199493\n\n"
],
[
"plt.plot(history['train_acc'], label='train accuracy')\nplt.plot(history['val_acc'], label='validation accuracy')\nplt.title('Training history')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend()\nplt.ylim([0, 1]);",
"_____no_output_____"
]
],
[
[
"# Evaluating",
"_____no_output_____"
]
],
[
[
"test_acc, _ = eval_model(\n model,\n test_data_loader,\n loss_fn,\n device,\n len(df_test)\n)\n\ntest_acc.item()",
"_____no_output_____"
],
[
"def get_predictions(model, data_loader):\n model = model.eval()\n review_texts = []\n predictions = []\n prediction_probs = []\n real_values = []\n\n with torch.no_grad():\n for d in data_loader:\n texts = d[\"review_text\"]\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n targets = d[\"targets\"].to(device)\n \n outputs = model(input_ids=input_ids, attention_mask=attention_mask)\n _, preds = torch.max(outputs, dim=1)\n \n review_texts.extend(texts)\n predictions.extend(preds)\n prediction_probs.extend(outputs)\n real_values.extend(targets)\n\n predictions = torch.stack(predictions).cpu()\n prediction_probs = torch.stack(prediction_probs).cpu()\n real_values = torch.stack(real_values).cpu()\n return review_texts, predictions, prediction_probs, real_values",
"_____no_output_____"
],
[
"y_review_texts, y_pred, y_pred_probs, y_test = get_predictions(\n model,\n test_data_loader\n)",
"_____no_output_____"
],
[
"print(classification_report(y_test, y_pred, target_names=class_names))",
" precision recall f1-score support\n\n negative 0.93 0.86 0.89 245\n neutral 0.82 0.87 0.85 254\n positive 0.93 0.93 0.93 289\n\n accuracy 0.89 788\n macro avg 0.89 0.89 0.89 788\nweighted avg 0.89 0.89 0.89 788\n\n"
],
[
"def show_confusion_matrix(confusion_matrix):\n hmap = sns.heatmap(confusion_matrix, annot=True, fmt=\"d\", cmap=\"Blues\")\n hmap.yaxis.set_ticklabels(hmap.yaxis.get_ticklabels(), rotation=0, ha='right')\n hmap.xaxis.set_ticklabels(hmap.xaxis.get_ticklabels(), rotation=30, ha='right')\n plt.ylabel('True sentiment')\n plt.xlabel('Predicted sentiment');\n\ncm = confusion_matrix(y_test, y_pred)\ndf_cm = pd.DataFrame(cm, index=class_names, columns=class_names)\nshow_confusion_matrix(df_cm)",
"_____no_output_____"
],
[
"idx = 2\n\nreview_text = y_review_texts[idx]\ntrue_sentiment = y_test[idx]\npred_df = pd.DataFrame({\n 'class_names': class_names,\n 'values': y_pred_probs[idx]\n})",
"_____no_output_____"
],
[
"print(\"\\n\".join(wrap(review_text)))\nprint()\nprint(f'True sentiment: {class_names[true_sentiment]}')",
"I used to use Habitica, and I must say this is a great step up. I'd\nlike to see more social features, such as sharing tasks - only one\nperson has to perform said task for it to be checked off, but only\ngiving that person the experience and gold. Otherwise, the price for\nsubscription is too steep, thus resulting in a sub-perfect score. I\ncould easily justify $0.99/month or eternal subscription for $15. If\nthat price could be met, as well as fine tuning, this would be easily\nworth 5 stars.\n\nTrue sentiment: neutral\n"
],
[
"sns.barplot(x='values', y='class_names', data=pred_df, orient='h')\nplt.ylabel('sentiment')\nplt.xlabel('probability')\nplt.xlim([0, 1]);",
"_____no_output_____"
]
],
[
[
"# Predicting on raw data",
"_____no_output_____"
]
],
[
[
"review_text = \"I love playing Rocket League! Best game ever\"",
"_____no_output_____"
],
[
"encoded_review = tokenizer.encode_plus(\n review_text,\n max_length=MAX_LEN,\n add_special_tokens=True,\n return_token_type_ids=False,\n pad_to_max_length=True,\n return_attention_mask=True,\n return_tensors='pt',\n)",
"_____no_output_____"
],
[
"input_ids = encoded_review['input_ids'].to(device)\nattention_mask = encoded_review['attention_mask'].to(device)\n\noutput = model(input_ids, attention_mask)\n_, prediction = torch.max(output, dim=1)\n\nprint(f'Review text: {review_text}')\nprint(f'Sentiment : {class_names[prediction]}')",
"Review text: I love playing Rocket League! Best game ever\nSentiment : positive\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ec8cc7e500212bdd273297fdc32ef82e5b82d1de | 89,103 | ipynb | Jupyter Notebook | code/chapter03_DL-basics/3.5_fashion-mnist.ipynb | hs867785578/Dive-into-DL-PyTorch | d93784258542e3e876b5e05b951e9bd60a59c3dc | [
"Apache-2.0"
] | null | null | null | code/chapter03_DL-basics/3.5_fashion-mnist.ipynb | hs867785578/Dive-into-DL-PyTorch | d93784258542e3e876b5e05b951e9bd60a59c3dc | [
"Apache-2.0"
] | null | null | null | code/chapter03_DL-basics/3.5_fashion-mnist.ipynb | hs867785578/Dive-into-DL-PyTorch | d93784258542e3e876b5e05b951e9bd60a59c3dc | [
"Apache-2.0"
] | null | null | null | 338.794677 | 82,537 | 0.821375 | [
[
[
"# 3.5 图像分类数据集(Fashion-MNIST)",
"_____no_output_____"
]
],
[
[
"import torch\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport sys\r\nsys.path.append(\"..\") # 为了导入上层目录的d2lzh_pytorch\r\nimport d2lzh_pytorch as d2l\r\n\r\nprint(torch.__version__)\r\nprint(torchvision.__version__)",
"1.9.1\n0.10.1\n"
]
],
[
[
"## 3.5.1 获取数据集",
"_____no_output_____"
]
],
[
[
"mnist_train = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=True, download=True, transform=transforms.ToTensor())\r\nmnist_test = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=False, download=True, transform=transforms.ToTensor())",
"C:\\Users\\hanshuo\\.conda\\envs\\pytorch\\lib\\site-packages\\torchvision\\datasets\\mnist.py:498: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ..\\torch\\csrc\\utils\\tensor_numpy.cpp:180.)\n return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)\n"
],
[
"print(type(mnist_train))\r\nprint(len(mnist_train), len(mnist_test))",
"torchvision.datasets.mnist.FashionMNIST\n60000 10000\n"
],
[
"feature, label = mnist_train[0]\r\nprint(feature.shape, feature.dtype) # Channel x Height X Width\r\nprint(label)",
"torch.Size([1, 28, 28]) torch.float32\n9\n"
],
[
"mnist_PIL = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=True, download=True)\r\nPIL_feature, label = mnist_PIL[0]\r\nprint(PIL_feature)",
"<PIL.Image.Image image mode=L size=28x28 at 0x22AF1D43B00>\n"
],
[
"# 本函数已保存在d2lzh包中方便以后使用\r\ndef get_fashion_mnist_labels(labels):\r\n text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',\r\n 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\r\n return [text_labels[int(i)] for i in labels]",
"_____no_output_____"
],
[
"# 本函数已保存在d2lzh包中方便以后使用\r\ndef show_fashion_mnist(images, labels):\r\n d2l.use_svg_display()\r\n # 这里的_表示我们忽略(不使用)的变量\r\n _, figs = plt.subplots(1, len(images), figsize=(12, 12))\r\n for f, img, lbl in zip(figs, images, labels):\r\n f.imshow(img.view((28, 28)).numpy())\r\n f.set_title(lbl)\r\n f.axes.get_xaxis().set_visible(False)\r\n f.axes.get_yaxis().set_visible(False)\r\n plt.show()",
"_____no_output_____"
],
[
"X, y = [], []\r\nfor i in range(10):\r\n X.append(mnist_train[i][0])\r\n y.append(mnist_train[i][1])\r\nshow_fashion_mnist(X, get_fashion_mnist_labels(y))",
"_____no_output_____"
]
],
[
[
"## 3.5.2 读取小批量",
"_____no_output_____"
]
],
[
[
"batch_size = 256\r\nif sys.platform.startswith('win'):\r\n num_workers = 0 # 0表示不用额外的进程来加速读取数据\r\nelse:\r\n num_workers = 4\r\ntrain_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)\r\ntest_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)",
"_____no_output_____"
],
[
"start = time.time()\r\nfor X, y in train_iter:\r\n continue\r\nprint('%.2f sec' % (time.time() - start))",
"3.91 sec\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ec8cc93066ad7c855dd435e1c8150be1a02937e5 | 57,755 | ipynb | Jupyter Notebook | 3.wmch3-datatypes,functrions,files/.ipynb_checkpoints/wmch03-checkpoint.ipynb | hongqin/CPSC4180_codes_for_lectures | 9438b733a8478b259b78769af6b7e1186e92623f | [
"MIT"
] | null | null | null | 3.wmch3-datatypes,functrions,files/.ipynb_checkpoints/wmch03-checkpoint.ipynb | hongqin/CPSC4180_codes_for_lectures | 9438b733a8478b259b78769af6b7e1186e92623f | [
"MIT"
] | null | null | null | 3.wmch3-datatypes,functrions,files/.ipynb_checkpoints/wmch03-checkpoint.ipynb | hongqin/CPSC4180_codes_for_lectures | 9438b733a8478b259b78769af6b7e1186e92623f | [
"MIT"
] | null | null | null | 18.054079 | 304 | 0.448308 | [
[
[
"# Built-in Data Structures, Functions, ",
"_____no_output_____"
],
[
"## Data Structures and Sequences",
"_____no_output_____"
],
[
"### Tuple",
"_____no_output_____"
]
],
[
[
"tup1 = 4, 5, 6\ntup1",
"_____no_output_____"
],
[
"nested_tup = (4, 5, 6), (7, 8)\nnested_tup",
"_____no_output_____"
],
[
"tup1 = 4, 5, 6\ntup2 = 7, 8\ntup3 = (tup1, tup2)\ntup3",
"_____no_output_____"
],
[
"tuple([4, 0, 2])",
"_____no_output_____"
],
[
"mylist = [4, 0, 2]\nmytuple = tuple(mylist)\n\ntype(mytuple)",
"_____no_output_____"
],
[
"tup = tuple('string')\ntup",
"_____no_output_____"
],
[
"tup[0]",
"_____no_output_____"
],
[
"# 0 1 2\ntup = tuple(['foo', [1, 2], True])\nprint(tup)\ntype(tup)\ntype(tup[2])\n#tup[2] = False",
"_____no_output_____"
],
[
"tup[1].append(3)\ntup",
"_____no_output_____"
],
[
"(4, None, 'foo') + (6, 0) + ('bar',)",
"_____no_output_____"
],
[
"type(None)",
"_____no_output_____"
],
[
"('foo', 'bar') * 4",
"_____no_output_____"
],
[
"tup1 = ('first', 'second')\ntup2 = tup1 * 4\ntup2",
"_____no_output_____"
],
[
"tup2 /tup1 ",
"_____no_output_____"
],
[
"('foo', [1,3]) * 4",
"_____no_output_____"
]
],
[
[
"#### Unpacking tuples",
"_____no_output_____"
]
],
[
[
"tup = (4, 5, 6)\na, b, c = tup\nb",
"_____no_output_____"
],
[
"x , y , z = 1, 2, 3\nx, y = y, x\nprint(\"x=\", x, \" y=\", y)",
"_____no_output_____"
],
[
"x, y, z = -1, 5, 999\nprint( 'x=',x, ' y=', y, ' z=', z)",
"_____no_output_____"
],
[
"x, y = y, x \nprint( 'x=',x, ' y=', y, ' z=', z)",
"_____no_output_____"
],
[
"tup = 4, 5, (6, 7)\na, b, (c, d) = tup\n(c, d)",
"_____no_output_____"
],
[
"# in other lanuages, we often have to use a temperary variable to exchange A and B. \nA = 10\nB = 9\ntmp = A\nA = B\nB = tmp\nprint( \"A = \", A, \" B=\", B)",
"_____no_output_____"
],
[
"a, b = 1, 2\nprint('a=',a)\nprint('b=',b)\nb, a = a, b\nprint('a=',a)\nprint('b=',b)",
"_____no_output_____"
],
[
"mylist = ['Chattanooga', \"Is\", \"A\", \"great\", \"place\"]\nfor word in mylist: \n print(word)",
"_____no_output_____"
],
[
"seq = [(1, 2, 'red'), (4, 5, 'blue'), (7, 8, 'green')]\nfor a, b, c in seq:\n print('a={0}, b={1}, c={2}'.format(a, b, c))",
"_____no_output_____"
],
[
"values = 1, 2, 3, 4, 5\na, b, *rest = values\na, b\nrest",
"_____no_output_____"
],
[
"a, b, *_ = values",
"_____no_output_____"
]
],
[
[
"#### Tuple methods",
"_____no_output_____"
]
],
[
[
"a = (1, 2, 2, 2, 3, 4, 2)\na.count(2)",
"_____no_output_____"
]
],
[
[
"### List",
"_____no_output_____"
]
],
[
[
"a_list = [2, 3, 7, None]\ntup = ('foo', 'bar', 'baz')\nb_list = list(tup)\nb_list\nb_list[1] = 'peekaboo'\nprint(b_list)\na_tuple = tuple(a_list)\nprint(type(a_tuple))",
"_____no_output_____"
],
[
"gen = range(10)\ngen\nlist(gen)",
"_____no_output_____"
],
[
"for i in range(10):\n print( i )",
"_____no_output_____"
]
],
[
[
"#### Adding and removing elements",
"_____no_output_____"
]
],
[
[
"b_list= ['x', 'y', 'z']\nb_list.append('dwarf')\nb_list",
"_____no_output_____"
],
[
"help(b_list.insert)",
"Help on built-in function insert:\n\ninsert(index, object, /) method of builtins.list instance\n Insert object before index.\n\n"
],
[
"b_list.insert(0, 'red')\nb_list",
"_____no_output_____"
],
[
"b_list= ['x', 'y', 'z', \"dwarf\"]\nmyindex = 0\nprint(\"before pop\", myindex , \" b_list: \", b_list)\nb_list.pop(myindex)\nprint(\"after pop\", myindex, \" b_list: \", b_list)",
"before pop 0 b_list: ['x', 'y', 'z', 'dwarf']\nafter pop 0 b_list: ['y', 'z', 'dwarf']\n"
],
[
"b_list= ['x', 'y', 'z', \"dwarf\"]\nprint(\"before append\", b_list)\n\nb_list.append('foo')\nprint(\"after append\", b_list)\n",
"before append ['x', 'y', 'z', 'dwarf']\nafter append ['x', 'y', 'z', 'dwarf', 'foo']\n"
],
[
"b_list= ['x', \"foo\", 'y', 'z', \"dwarf\", ]\n\nprint(\"before remove\", b_list)\n\nb_list.remove('foo')\nprint(\"after remove\", b_list)\n",
"before remove ['x', 'foo', 'y', 'z', 'dwarf']\nafter remove ['x', 'y', 'z', 'dwarf']\n"
],
[
"shopping_list= ['apple', \"Milk\", 'bread', 'pearl', \"paper towel\", ]\n\n'milk' in shopping_list",
"_____no_output_____"
],
[
"'toys' in shopping_list",
"_____no_output_____"
]
],
[
[
"#### Concatenating and combining lists",
"_____no_output_____"
]
],
[
[
"[4, None, 'foo'] + [7, 8, (2, 3)]",
"_____no_output_____"
],
[
"x = [4, None, 'foo']\nx.append([7, 8, (2, 3)])\nx",
"_____no_output_____"
],
[
"x = [4, None, 'foo']\nx.extend([7, 8, (2, 3)])\nx",
"_____no_output_____"
]
],
[
[
"everything = []\nfor chunk in list_of_lists:\n everything.extend(chunk)",
"_____no_output_____"
],
[
"everything = []\nfor chunk in list_of_lists:\n everything = everything + chunk",
"_____no_output_____"
],
[
"#### Sorting",
"_____no_output_____"
]
],
[
[
"a = [7, 2, 5, 1, 3]\na.sort()\na",
"_____no_output_____"
],
[
"b = ['saw', 'small', 'He', 'foxes', 'six']\nb.sort(key=len)\nb",
"_____no_output_____"
]
],
[
[
"#### Binary search and maintaining a sorted list",
"_____no_output_____"
]
],
[
[
"import bisect\nc = [1, 2, 2.5, 4, 5, 6, 7]\nbisect.bisect(c, 2)\n# help(bisect.bisect)",
"_____no_output_____"
],
[
"bisect.bisect(c, 5.1)",
"_____no_output_____"
],
[
"bisect.insort(c, 6.5)\nc\n#help(bisect.insort)",
"_____no_output_____"
]
],
[
[
"#### Slicing",
"_____no_output_____"
]
],
[
[
"seq = [7, 2, 3, 7, 5, 6, 0, 1]\nprint(\"original\", seq)\nseq[1:5]\nprint(\"slicing 1:5\", seq[1:5]) #end exclusive",
"original [7, 2, 3, 7, 5, 6, 0, 1]\nslicing 1:5 [2, 3, 7, 5]\n"
],
[
"print(\"original\", seq)\nseq[3:4] = [6, 3] #?? Hong find this annoying. \nseq",
"original [7, 2, 3, 7, 5, 6, 0, 1]\n"
],
[
"seq[:5]",
"_____no_output_____"
],
[
"seq[3:]",
"_____no_output_____"
],
[
"seq[-4:]",
"_____no_output_____"
],
[
"seq[-6:-2]",
"_____no_output_____"
],
[
"seq[::2]",
"_____no_output_____"
],
[
"seq[::-1]",
"_____no_output_____"
]
],
[
[
"### Built-in Sequence Functions",
"_____no_output_____"
],
[
"#### enumerate",
"_____no_output_____"
]
],
[
[
"i = 0\nfor value in collection:\n # do something with value\n i += 1",
"_____no_output_____"
],
[
"for i, value in enumerate(collection):\n # do something with value",
"_____no_output_____"
],
[
"some_list = ['foo', 'bar', 'baz']\nmapping = {}\nfor i, v in enumerate(some_list):\n mapping[v] = i\nmapping",
"_____no_output_____"
]
],
[
[
"#### sorted",
"_____no_output_____"
]
],
[
[
"sorted([7, 1, 2, 6, 0, 3, 2])",
"_____no_output_____"
],
[
"sorted('horse race')",
"_____no_output_____"
]
],
[
[
"#### zip",
"_____no_output_____"
]
],
[
[
"seq1 = ['foo', 'bar', 'baz']\nseq2 = ['one', 'two', 'three']\nzipped = zip(seq1, seq2)",
"_____no_output_____"
],
[
"print(zipped)",
"<zip object at 0x7f9ad80f6f50>\n"
],
[
"dict(zipped)",
"_____no_output_____"
],
[
"list(zip(seq1, seq2))",
"_____no_output_____"
],
[
"seq3 = [False, True]\nlist(zip(seq1, seq2, seq3))",
"_____no_output_____"
],
[
"for i, (a, b) in enumerate(zip(seq1, seq2)):\n print('{0}: {1}, {2}'.format(i, a, b))",
"0: foo, one\n1: bar, two\n2: baz, three\n"
],
[
"pitchers = [('Nolan', 'Ryan'), ('Roger', 'Clemens'),\n ('Schilling', 'Curt')]\nfirst_names, last_names = zip(*pitchers)\nprint(first_names)\nprint(last_names)",
"('Nolan', 'Roger', 'Schilling')\n('Ryan', 'Clemens', 'Curt')\n"
]
],
[
[
"#### reversed",
"_____no_output_____"
]
],
[
[
"list(reversed(range(10)))",
"_____no_output_____"
]
],
[
[
"### Dictionary\n\nkey and values",
"_____no_output_____"
]
],
[
[
"empty_dict = {}\nd1 = {'a' : 'some value', 'b' : [1, 2, 3, 4]}\nd1",
"_____no_output_____"
],
[
"d1[7] = 'an integer'\nprint(d1)\nd1\nd1['b']",
"{'a': 'some value', 'b': [1, 2, 3, 4], 7: 'an integer'}\n"
],
[
"d1['b'][2]",
"_____no_output_____"
],
[
"'b' in d1",
"_____no_output_____"
],
[
"d1[5] = 'some value'\nprint('d1=', d1)\nd1['dummy'] = 'another value'\nprint('before del d1', d1)\ndel d1[5]\nprint('after del ',d1)\nret = d1.pop('dummy')\nprint('ret=', ret)\nprint('d1=', d1)",
"_____no_output_____"
],
[
"list(d1.keys())\nlist(d1.values())",
"_____no_output_____"
],
[
"d1.update({'b' : 'foo', 'c' : 12})\nd1",
"_____no_output_____"
]
],
[
[
"#### Creating dicts from sequences",
"_____no_output_____"
]
],
[
[
"# conventional way to assign key-value into a dicitonary\nmapping = {}\nfor key, value in zip(key_list, value_list):\n mapping[key] = value",
"_____no_output_____"
],
[
"# Python's prided way of doing this. \nmapping = dict(zip(range(5), reversed(range(5))))\nmapping",
"_____no_output_____"
]
],
[
[
"#### Default values",
"_____no_output_____"
]
],
[
[
"if key in some_dict:\n value = some_dict[key]\nelse:\n value = default_value",
"_____no_output_____"
],
[
"value = some_dict.get(key, default_value)",
"_____no_output_____"
],
[
"words = ['apple', 'bat', 'bar', 'atom', 'book', 'cats', 'dogs']\nby_letter = {} #buffer \nfor word in words:\n letter = word[0]\n if letter not in by_letter:\n by_letter[letter] = [word]\n else:\n by_letter[letter].append(word) #value of each letter is a List. \n\nby_letter",
"_____no_output_____"
],
[
"by_letter = {} #buffer \nfor word in words:\n letter = word[0]\n by_letter.setdefault(letter, []).append(word)\n \nprint(words)\nprint(by_letter)",
"_____no_output_____"
]
],
[
[
"from collections import defaultdict\nby_letter = defaultdict(list)\nfor word in words:\n by_letter[word[0]].append(word)",
"_____no_output_____"
],
[
"#### Valid dict key types",
"_____no_output_____"
]
],
[
[
"hash('string')",
"_____no_output_____"
],
[
"hash((1, 2, (2, 3)))",
"_____no_output_____"
],
[
"hash((1, 2, [2, 3])) # fails because lists are mutable, list cannot be keys",
"_____no_output_____"
],
[
"d = {}\nd[tuple([1, 2, 3])] = 5 #tuples can be keys\nd",
"_____no_output_____"
]
],
[
[
"### set",
"_____no_output_____"
]
],
[
[
"X = set([2, 2, 2, 1, 3, 3])\nx[1] #cannot index sets, because sets are un-ordered",
"_____no_output_____"
],
[
"print({2, 4, 2, 1, 3, 3})",
"_____no_output_____"
],
[
"a = {1, 2, 3, 4, 5}\nb = {3, 4, 5, 6, 7, 8}",
"_____no_output_____"
],
[
"print( a.union(b))\na | b",
"_____no_output_____"
],
[
"a.intersection(b)\nprint( a & b)",
"_____no_output_____"
],
[
"c = a.copy()\nc |= b\nc\nd = a.copy()\nd &= b\nd",
"_____no_output_____"
],
[
"my_data = [1, 2, 3, 4]\nmy_set = {tuple(my_data)}\nmy_set",
"_____no_output_____"
],
[
"a_set = {1, 2, 3, 4, 5}\n{1, 2, 3}.issubset(a_set)\na_set.issuperset({1, 2, 3})",
"_____no_output_____"
],
[
"{1, 2, 3} == {3, 2, 1} #sets are unordered!!! ",
"_____no_output_____"
],
[
"[1,2,3] == [3,2,1]",
"_____no_output_____"
]
],
[
[
"### List, Set, and Dict Comprehensions",
"_____no_output_____"
],
[
"result = []\nfor val in collection:\n if ",
"_____no_output_____"
]
],
[
[
"strings = ['a', 'as', 'bat', 'car', 'dove', 'python']\n[x.upper() for x in strings if len(x) > 2] # comprehesion",
"_____no_output_____"
],
[
"strings = ['a', 'as', 'bat', 'car', 'dove', 'python']\n\nbuffer = []\nfor x in strings:\n if len(x) > 2:\n buffer.append(x.upper()) \n \nprint(buffer)",
"_____no_output_____"
],
[
"# dictionary comprehension\n{x.upper():len(x) for x in strings if len(x) > 2}",
"_____no_output_____"
],
[
"# set comprehension ?? \n#{ ((x.upper(), len(x)):len(x) for x in strings if len(x) > 2}\n\n{ x.upper() for x in strings if len(x) > 2}",
"_____no_output_____"
],
[
"unique_lengths = {len(x) for x in strings}\nunique_lengths",
"_____no_output_____"
],
[
"set(map(len, strings))",
"_____no_output_____"
],
[
"x = {3, 3,2} #set\nprint(x)",
"_____no_output_____"
],
[
"loc_mapping = {val : index for index, val in enumerate(strings)}\nloc_mapping",
"_____no_output_____"
]
],
[
[
"#### Nested list comprehensions",
"_____no_output_____"
]
],
[
[
"all_data = [['John', 'Emily', 'Michael', 'Mary', 'Steven'],\n ['Maria', 'Juan', 'Javier', 'Natalia', 'Pilar']]",
"_____no_output_____"
],
[
"names_of_interest = []\nfor names in all_data:\n enough_es = [name for name in names if name.count('e') >= 2]\n names_of_interest.extend(enough_es)\n \nnames_of_interest",
"_____no_output_____"
],
[
"result = [name for names in all_data for name in names\n if name.count('e') >= 2]\nresult",
"_____no_output_____"
],
[
"some_tuples = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]\nflattened = [x for tup in some_tuples for x in tup]\nflattened",
"_____no_output_____"
],
[
"flattened = []\n\nfor tup in some_tuples:\n for x in tup:\n flattened.append(x)",
"_____no_output_____"
],
[
"[[x for x in tup] for tup in some_tuples]",
"_____no_output_____"
]
],
[
[
"## Functions",
"_____no_output_____"
]
],
[
[
"def my_function(x, y, z=1.5):\n if z > 1:\n return z * (x + y)\n else:\n return z / (x + y)",
"_____no_output_____"
],
[
"my_function(5, 6, z=0.7)\nmy_function(3.14, 7, 3.5)\nmy_function(10, 20)",
"_____no_output_____"
]
],
[
[
"### Namespaces, Scope, and Local Functions",
"_____no_output_____"
]
],
[
[
"def func():\n a = []\n for i in range(5):\n a.append(i)",
"_____no_output_____"
],
[
"a = []\ndef func():\n for i in range(5):\n a.append(i)",
"_____no_output_____"
],
[
"a = None\ndef bind_a_variable():\n global a\n a = []\nbind_a_variable()\nprint(a)",
"_____no_output_____"
]
],
[
[
"### Returning Multiple Values",
"_____no_output_____"
]
],
[
[
"def f():\n a = 5\n b = 6\n c = 7\n return a, b, c\n\na, b, c = f()",
"_____no_output_____"
],
[
"return_value = f()",
"_____no_output_____"
],
[
"def f():\n a = 5\n b = 6\n c = 7\n return {'a' : a, 'b' : b, 'c' : c}",
"_____no_output_____"
]
],
[
[
"### Functions Are Objects",
"_____no_output_____"
]
],
[
[
"states = [' Alabama ', 'Georgia!', 'Georgia', 'georgia', 'FlOrIda',\n 'south carolina##', 'West virginia?']",
"_____no_output_____"
],
[
"import re\n\ndef clean_strings(strings):\n result = []\n for value in strings:\n value = value.strip()\n value = re.sub('[!#?]', '', value)\n value = value.title()\n result.append(value)\n return result",
"_____no_output_____"
],
[
"clean_strings(states)",
"_____no_output_____"
]
],
[
[
"### A different way to reimplement the above codes by passing fuction as objects",
"_____no_output_____"
]
],
[
[
"def remove_punctuation(value):\n return re.sub('[!#?]', '', value)\n\nclean_ops = [str.strip, remove_punctuation, str.title]\n\ndef clean_strings(strings, ops):\n result = []\n for value in strings:\n for function in ops:\n value = function(value)\n result.append(value)\n return result",
"_____no_output_____"
],
[
"clean_strings(states, clean_ops)",
"_____no_output_____"
],
[
"for x in map(remove_punctuation, states):\n print(x)",
"_____no_output_____"
]
],
[
[
"### Anonymous (Lambda) Functions",
"_____no_output_____"
]
],
[
[
"def short_function(x):\n return x * 2\n\nequiv_anon = lambda x: x * 2",
"_____no_output_____"
],
[
"def apply_to_list(some_list, f):\n return [f(x) for x in some_list]\n\nints = [4, 0, 1, 5, 6]\napply_to_list(ints, lambda x: x * 2)",
"_____no_output_____"
],
[
"strings = ['foo', 'card', 'bar', 'aaaa', 'abab']",
"_____no_output_____"
],
[
"strings.sort(key=lambda x: len(set(list(x))))\nstrings",
"_____no_output_____"
]
],
[
[
"### Currying: Partial Argument Application",
"_____no_output_____"
]
],
[
[
"def add_numbers(x, y):\n return x + y",
"_____no_output_____"
],
[
"add_five = lambda y: add_numbers(5, y)",
"_____no_output_____"
],
[
"from functools import partial\nadd_five = partial(add_numbers, 5)",
"_____no_output_____"
]
],
[
[
"### Generators",
"_____no_output_____"
]
],
[
[
"some_dict = {'a': 1, 'b': 2, 'c': 3}\nfor key in some_dict:\n print(key)",
"a\nb\nc\n"
],
[
"dict_iterator = iter(some_dict)\ndict_iterator",
"_____no_output_____"
],
[
"list(dict_iterator)",
"_____no_output_____"
],
[
"def squares(n=10):\n print('Generating squares from 1 to {0}'.format(n ** 2))\n for i in range(1, n + 1):\n yield i ** 2",
"_____no_output_____"
],
[
"gen = squares()\ngen",
"_____no_output_____"
],
[
"for x in gen:\n print(x, end=' ')",
"Generating squares from 1 to 100\n1 4 9 16 25 36 49 64 81 100 "
]
],
[
[
"#### Generator expresssions",
"_____no_output_____"
]
],
[
[
"gen = (x ** 2 for x in range(100))\ngen",
"_____no_output_____"
]
],
[
[
"def _make_gen():\n for x in range(100):\n yield x ** 2\ngen = _make_gen()",
"_____no_output_____"
]
],
[
[
"sum(x ** 2 for x in range(100))\ndict((i, i **2) for i in range(5))",
"_____no_output_____"
]
],
[
[
"#### itertools module",
"_____no_output_____"
]
],
[
[
"import itertools\nfirst_letter = lambda x: x[0]\nnames = ['Alan', 'Adam', 'Wes', 'Will', 'Albert', 'Steven']\nfor letter, names in itertools.groupby(names, first_letter):\n print(letter, list(names)) # names is a generator",
"A ['Alan', 'Adam']\nW ['Wes', 'Will']\nA ['Albert']\nS ['Steven']\n"
]
],
[
[
"### Errors and Exception Handling",
"_____no_output_____"
]
],
[
[
"float('1.2345')\nfloat('something')",
"_____no_output_____"
],
[
"def attempt_float(x):\n try:\n return float(x)\n except:\n return x",
"_____no_output_____"
],
[
"attempt_float('1.2345')\nattempt_float('something')",
"_____no_output_____"
],
[
"float((1, 2))",
"_____no_output_____"
],
[
"def attempt_float(x):\n try:\n return float(x)\n except ValueError:\n return x",
"_____no_output_____"
],
[
"attempt_float((1, 2))",
"_____no_output_____"
],
[
"def attempt_float(x):\n try:\n return float(x)\n except (TypeError, ValueError):\n return x",
"_____no_output_____"
]
],
[
[
"f = open(path, 'w')\n\ntry:\n write_to_file(f)\nfinally:\n f.close()",
"_____no_output_____"
],
[
"f = open(path, 'w')\n\ntry:\n write_to_file(f)\nexcept:\n print('Failed')\nelse:\n print('Succeeded')\nfinally:\n f.close()",
"_____no_output_____"
],
[
"#### Exceptions in IPython",
"_____no_output_____"
],
[
"In [10]: %run examples/ipython_bug.py\n---------------------------------------------------------------------------\nAssertionError Traceback (most recent call last)\n/home/wesm/code/pydata-book/examples/ipython_bug.py in <module>()\n 13 throws_an_exception()\n 14\n---> 15 calling_things()\n\n/home/wesm/code/pydata-book/examples/ipython_bug.py in calling_things()\n 11 def calling_things():\n 12 works_fine()\n---> 13 throws_an_exception()\n 14\n 15 calling_things()\n\n/home/wesm/code/pydata-book/examples/ipython_bug.py in throws_an_exception()\n 7 a = 5\n 8 b = 6\n----> 9 assert(a + b == 10)\n 10\n 11 def calling_things():\n\nAssertionError:",
"_____no_output_____"
],
[
"## Files and the Operating System",
"_____no_output_____"
]
],
[
[
"%pushd book-materials",
"[Errno 2] No such file or directory: 'book-materials'\n/Volumes/GoogleDrive/My Drive/courses.gd/CPSC4180-5180Fa20/2.lectures/3.wmch3-datatypes,functrions,files\n"
],
[
"path = 'examples/segismundo.txt'\nf = open(path)",
"_____no_output_____"
]
],
[
[
"for line in f:\n pass",
"_____no_output_____"
]
],
[
[
"lines = [x.rstrip() for x in open(path)]\nlines",
"_____no_output_____"
],
[
"f.close()",
"_____no_output_____"
],
[
"with open(path) as f:\n lines = [x.rstrip() for x in f]",
"_____no_output_____"
],
[
"f = open(path)\nf.read(10)\nf2 = open(path, 'rb') # Binary mode\nf2.read(10)",
"_____no_output_____"
],
[
"f.tell()\nf2.tell()",
"_____no_output_____"
],
[
"import sys\nsys.getdefaultencoding()",
"_____no_output_____"
],
[
"f.seek(3)\nf.read(1)",
"_____no_output_____"
],
[
"f.close()\nf2.close()",
"_____no_output_____"
],
[
"with open('tmp.txt', 'w') as handle:\n handle.writelines(x for x in open(path) if len(x) > 1)\nwith open('tmp.txt') as f:\n lines = f.readlines()\nlines",
"_____no_output_____"
],
[
"import os\nos.remove('tmp.txt')",
"_____no_output_____"
]
],
[
[
"### Bytes and Unicode with Files (skip this)",
"_____no_output_____"
]
],
[
[
"with open(path) as f:\n chars = f.read(10)\nchars",
"_____no_output_____"
],
[
"with open(path, 'rb') as f:\n data = f.read(10)\ndata",
"_____no_output_____"
],
[
"data.decode('utf8')\ndata[:4].decode('utf8')",
"_____no_output_____"
],
[
"sink_path = 'sink.txt'\nwith open(path) as source:\n with open(sink_path, 'xt', encoding='iso-8859-1') as sink:\n sink.write(source.read())\nwith open(sink_path, encoding='iso-8859-1') as f:\n print(f.read(10))",
"_____no_output_____"
],
[
"os.remove(sink_path)",
"_____no_output_____"
],
[
"f = open(path)\nf.read(5)\nf.seek(4)\nf.read(1)\nf.close()",
"_____no_output_____"
],
[
"%popd",
"_____no_output_____"
]
],
[
[
"## Conclusion",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
ec8cd3892b9ca40283b247896ff7a1e709135b19 | 186,315 | ipynb | Jupyter Notebook | ScratchSpace.ipynb | SimonRosen173/MultiAgentPathFinding | 1bd247dad83e08356af95a96f48d0c41c9fce9d6 | [
"MIT"
] | null | null | null | ScratchSpace.ipynb | SimonRosen173/MultiAgentPathFinding | 1bd247dad83e08356af95a96f48d0c41c9fce9d6 | [
"MIT"
] | null | null | null | ScratchSpace.ipynb | SimonRosen173/MultiAgentPathFinding | 1bd247dad83e08356af95a96f48d0c41c9fce9d6 | [
"MIT"
] | null | null | null | 254.528689 | 40,328 | 0.915965 | [
[
[
"%load_ext autoreload\n%autoreload 2\n\nfrom Benchmark import Warehouse\nfrom Visualisations import Vis\nfrom GlobalObjs.GraphNX import GridGraph, plot_graph\nfrom networkx.algorithms.traversal.breadth_first_search import bfs_tree\nimport networkx as nx",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"rand_grid = Warehouse.get_uniform_random_grid((22, 44), 560)\n# rand_grid\ngrid_graph = GridGraph(rand_grid, only_full_G=True)\nG = grid_graph.get_full_G()\nplot_graph(G)",
"_____no_output_____"
],
[
"vis = VisGrid(grid, (800, 400), 25, tick_time=0.2)\n# vis.",
"_____no_output_____"
],
[
"grid_graph.remove_non_reachable()\nnew_G = grid_graph.get_full_G()\nplot_graph(new_G)",
"_____no_output_____"
],
[
"tmp_G = G.copy()\nobstructed_nodes = [(y,x) for (y,x), is_obstructed in G.nodes(data=\"obstructed\") \n if is_obstructed]\ntmp_G.remove_nodes_from(obstructed_nodes)\nbfs_G = bfs_tree(tmp_G, (0, 0))\nplot_graph(bfs_G)",
"_____no_output_____"
],
[
"rand_start_x = 6",
"_____no_output_____"
],
[
"storage_locs = [(y,x) for (y,x), is_obstructed in G.nodes(data=\"obstructed\") \n if is_obstructed and x>=rand_start_x]\norig_storage_loc_no = len(storage_locs)\norig_storage_loc_no",
"_____no_output_____"
],
[
"free_locs = [(y,x) for (y,x), is_obstructed in G.nodes(data=\"obstructed\") \n if not is_obstructed and x>=rand_start_x]\nlen(free_locs)",
"_____no_output_____"
],
[
"unreachable_storage_locs = []\nfor storage_loc in storage_locs:\n is_unreachable = True\n for neighbor in G.neighbors(storage_loc):\n if neighbor in bfs_G.nodes:\n is_unreachable = False\n break\n if is_unreachable:\n unreachable_storage_locs.append(storage_loc)",
"_____no_output_____"
],
[
"unreachable_free_locs = list(set(free_locs) - set(bfs_G.nodes))\nlen(unreachable_free_locs)",
"_____no_output_____"
],
[
"orig_storage_loc_no - len(unreachable_storage_locs)",
"_____no_output_____"
],
[
"tmp_G = G.copy()\ntmp_G.remove_nodes_from(unreachable_storage_locs)\ntmp_G.remove_nodes_from(unreachable_free_locs)\nplot_graph(tmp_G)",
"_____no_output_____"
],
[
"rand_grid = Warehouse.get_uniform_random_grid((22, 44), 400) # 560)\n# rand_grid\ngrid_graph = GridGraph(rand_grid, only_full_G=True)\nG = grid_graph.get_full_G()\n# plot_graph(G)\n\ntmp_G = G.copy()\nobstructed_nodes = [(y,x) for (y,x), is_obstructed in G.nodes(data=\"obstructed\") \n if is_obstructed]\ntmp_G.remove_nodes_from(obstructed_nodes)\nbfs_G = bfs_tree(tmp_G, (0, 0))\n\nrand_start_x = 6\n\nstorage_locs = [(y,x) for (y,x), is_obstructed in G.nodes(data=\"obstructed\") \n if is_obstructed and x>=rand_start_x]\norig_storage_loc_no = len(storage_locs)\n\nfree_locs = [(y,x) for (y,x), is_obstructed in G.nodes(data=\"obstructed\") \n if not is_obstructed and x>=rand_start_x]\n\nunreachable_storage_locs = []\nfor storage_loc in storage_locs:\n is_unreachable = True\n for neighbor in G.neighbors(storage_loc):\n if neighbor in bfs_G.nodes:\n is_unreachable = False\n break\n if is_unreachable:\n unreachable_storage_locs.append(storage_loc)\n \nunreachable_free_locs = list(set(free_locs) - set(bfs_G.nodes))\n# print(len(unreachable_free_locs)\nprint(orig_storage_loc_no - len(unreachable_storage_locs))",
"315\n"
],
[
"tmp_G = G.copy()\ntmp_G.remove_nodes_from(unreachable_storage_locs)\ntmp_G.remove_nodes_from(unreachable_free_locs)\nplot_graph(tmp_G)",
"_____no_output_____"
],
[
"import numpy as np\nfrom scipy import sparse",
"_____no_output_____"
],
[
"np.random.randint(0, 2, (20,10))",
"_____no_output_____"
],
[
"arr_1 = np.random.randint(0, 2, (10,10))\narr_1",
"_____no_output_____"
],
[
"arr_2 = np.random.randint(0, 2, (10,10))\narr_2",
"_____no_output_____"
],
[
"4 in [np.random.randint(0, 4) for _ in range(100)]",
"_____no_output_____"
],
[
"c_pt = (5,5)\nc_arr_1 = arr_1.copy()\nc_arr_2 = arr_2.copy()\ntl_slices = (slice(None, c_pt[0]), slice(None, c_pt[0]))\ntr_slices = (slice(None, c_pt[0]), slice(c_pt[0], None))\nbl_slices = (slice(c_pt[0], None), slice(None, c_pt[0]))\nbr_slices = (slice(c_pt[0], None), slice(c_pt[0], None))\n\nslices = br_slices\nc_arr_1[slices] = arr_2[slices].copy()\nc_arr_2[slices] = arr_1[slices].copy()",
"_____no_output_____"
],
[
"c_arr_1",
"_____no_output_____"
],
[
"c_arr_2",
"_____no_output_____"
],
[
"np.count_nonzero(c_arr_1)",
"_____no_output_____"
],
[
"tmp_set = set([(1,2),(2,3),(3,4)])\ntmp_set",
"_____no_output_____"
],
[
"set(map(lambda x: x[0] + x[1], tmp_set))",
"_____no_output_____"
],
[
"tup_list = [(1, 2), (3, 4), (5, 6)]\ntup_list",
"_____no_output_____"
],
[
"from more_itertools import flatten",
"_____no_output_____"
],
[
"list(flatten(tup_list))",
"_____no_output_____"
],
[
"arr = np.random.randint(0, 2, (100,100))\nsparse.csr_matrix(arr)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8cd4dafe92e39fd75b162ec1a93229805be110 | 6,150 | ipynb | Jupyter Notebook | Lecture 21 NLP-1/Basics of Word Processing/Basics of Word Processing-checkpoint.ipynb | Paraskk/Data-Science-and-Machine-Leaning- | b29223a82ea39f7860d3729d7297bac2a4724c8f | [
"MIT"
] | 1 | 2020-08-06T07:33:59.000Z | 2020-08-06T07:33:59.000Z | Lecture 21 NLP-1/Basics of Word Processing/Basics of Word Processing-checkpoint.ipynb | Udaysonu/Coding-Ninjas-Machine-Learning | 4fd6b4b62f07b28dbe80c084ad820630f2351a76 | [
"MIT"
] | null | null | null | Lecture 21 NLP-1/Basics of Word Processing/Basics of Word Processing-checkpoint.ipynb | Udaysonu/Coding-Ninjas-Machine-Learning | 4fd6b4b62f07b28dbe80c084ad820630f2351a76 | [
"MIT"
] | 2 | 2020-08-27T13:03:33.000Z | 2020-09-01T17:34:23.000Z | 20.163934 | 72 | 0.335447 | [
[
[
"sample_text=\"Does this thing really work? Lets see\"",
"_____no_output_____"
],
[
"from nltk.tokenize import sent_tokenize,word_tokenize",
"_____no_output_____"
],
[
"sent_tokenize(sample_text)",
"_____no_output_____"
],
[
"words=word_tokenize(sample_text)",
"_____no_output_____"
],
[
"words",
"_____no_output_____"
],
[
"from nltk.corpus import stopwords\nstop=stopwords.words('english')\nstop",
"_____no_output_____"
],
[
"clean_words=[w for w in words if not w in stop]\nimport string\npunctuations=list(string.punctuation)\nstop=stop + punctuations",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8cd9d0592d2aa93154d2d5f1c00a57c573187f | 101,429 | ipynb | Jupyter Notebook | examples/DeepHedgingHeston.ipynb | vishalbelsare/Mean-Variance-Hedging | f3bfe2edef48b5762718d6adb4437b04aeac3ec5 | [
"MIT"
] | 7 | 2021-03-11T00:15:19.000Z | 2022-02-27T11:48:48.000Z | examples/DeepHedgingHeston.ipynb | vishalbelsare/Mean-Variance-Hedging | f3bfe2edef48b5762718d6adb4437b04aeac3ec5 | [
"MIT"
] | null | null | null | examples/DeepHedgingHeston.ipynb | vishalbelsare/Mean-Variance-Hedging | f3bfe2edef48b5762718d6adb4437b04aeac3ec5 | [
"MIT"
] | 1 | 2021-07-13T20:35:49.000Z | 2021-07-13T20:35:49.000Z | 81.338412 | 31,030 | 0.725808 | [
[
[
"### Heston\n\n$$ dS_{t} = \\mu S_{t} dt + \\sqrt{V_{t}} S_{t} dW_{t}$$\n\n$$ dV_{t} = k(\\theta - \\bar{V_{t}})dt + \\sigma \\sqrt{V_{t}} dZ_{t}$$\n\n+ T = 30 / 365\n+ n = 30\n+ L = 3,N0= 2d,N1=N2=d+ 15, 3 layer, 2d, d + 15, d + 15 units\n+ larger learning rate, the tech-nique of batch normalizatio\n+ Relu activation",
"_____no_output_____"
],
[
"Heston Parameters\n\n$dS_{t} = \\sqrt{V_{t}} S_{t} dB_{t}$\n\n$dV_{t} = \\alpha(b - V_{t}) dt + \\sigma \\sqrt{V_{t}} dW_{t} $\n\n+ $\\alpha = 1, b = 0.04, \\sigma = 2, v_{0} = 0.04$\n\n+ Exact simulation of CIR\n\n\nGlasserman Chapter 3, page 124",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy\nimport cmath\nplt.style.use(\"fivethirtyeight\")\nimport seaborn as sns\n!rm -rf Mean-Variance-Hedging\n!git clone https://github.com/chrischia06/Mean-Variance-Hedging\n\nimport sys\nsys.path.append(\"Mean-Variance-Hedging/\")\nfrom mean_variance_hedge.dynamic_programming import *\nfrom mean_variance_hedge.black_scholes import *\nfrom mean_variance_hedge.heston import *\nfrom mean_variance_hedge.utils import *\n",
"Cloning into 'Mean-Variance-Hedging'...\nremote: Enumerating objects: 75, done.\u001b[K\nremote: Counting objects: 100% (75/75), done.\u001b[K\nremote: Compressing objects: 100% (45/45), done.\u001b[K\nremote: Total 75 (delta 22), reused 67 (delta 18), pack-reused 0\u001b[K\nUnpacking objects: 100% (75/75), done.\n"
],
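[
"# Added illustrative sketch (not part of the original notebook): the markdown above cites\n# 'exact simulation of CIR' from Glasserman, Ch. 3. The key fact is that V_{t+dt} given V_t\n# is a scaled noncentral chi-squared draw; generate_Heston_paths from the repo is assumed to\n# do something equivalent internally, and the name sample_cir_exact below is hypothetical.\nimport numpy as np\n\ndef sample_cir_exact(v_t, alpha, b, sigma, dt, rng=None):\n    if rng is None:\n        rng = np.random.default_rng()\n    c = sigma ** 2 * (1 - np.exp(-alpha * dt)) / (4 * alpha)  # scale factor\n    df = 4 * alpha * b / sigma ** 2                           # degrees of freedom\n    nonc = v_t * np.exp(-alpha * dt) / c                      # noncentrality parameter\n    return c * rng.noncentral_chisquare(df, nonc)             # exact draw of V_{t+dt}",
"_____no_output_____"
],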
[
"####################\n# PARAMETERS #\n####################\n\nN_SAMPLES = 10 ** 5\n\n\nalpha = 1 # speed of mean reversion\nb = 0.04 #log-run variance\nsigma = 2 # vol-of variance\ndt = 1 / 250 # time increment, years\nv_0 = 0.04 # intial estimate of variance \nT = 30 #time steps\n\ntis = np.arange(T + 1) #0, 1 .. T\ntis = np.tile(tis, N_SAMPLES).reshape(N_SAMPLES, T + 1) # [[0, 1.. T], [0, 1.. T]...]\n\nS0 = 100\nrho = -0.7\nr = 0 # risk free rate\nrf = 1 + r #risk-free return\n\n\nK_strike = 100\nCALL = 1\n",
"_____no_output_____"
],
[
"Sts, Vts = generate_Heston_paths(n_samples = N_SAMPLES, S0 = 100, rho = -0.7, \n r = 0, alpha = alpha, b = b, \n sigma = sigma, dt = dt, \n v_0 = v_0, T = T, seed = 2021)",
"_____no_output_____"
],
[
"Heston_MC_price = np.mean(np.maximum(Sts[:, T] - K_strike, 0))\nimp_vol = bsinv(Heston_MC_price, St = S0, K = K_strike, r = r, tau = T / 250, flag = 1)\nprint(f\"Monte Carlo Heston Price: {Heston_MC_price}\")\nprint(f\"Implied Volatility {imp_vol} (BS-Price under imp vol: {BlackScholes(S0, K_strike, r, imp_vol, T * dt, 1)})\")",
"Monte Carlo Heston Price: 1.8989713424445749\nImplied Volatility 0.13742277963359376 (BS-Price under imp vol: 1.8989713424445753)\n"
]
],
[
[
"Calibrate log-returns for the Dynamic Programming method based on log-returns from t = 0, 1",
"_____no_output_____"
]
],
[
[
"observed_rets = np.log(Sts[:,1] / Sts[:,0])\nfig, ax = plt.subplots()\nsns.histplot(observed_rets, ax=ax)\nbounds = np.round([observed_rets.min(), observed_rets.max()], 3)\nprint(bounds)",
"[-0.065 0.048]\n"
],
[
"scale_factor = 10 ** 3\n\nN_discrete_states = 10\nbins = np.linspace(bounds[0], bounds[1], N_discrete_states + 1)\ncounts, val = np.histogram(observed_rets, bins=bins)\np_probs = counts / np.sum(counts)\n\nlog_ret_space = [round((bins[i] + bins[i + 1]) / 2, 3) for i in range(len(bins) - 1)]\nlog_ret_space2 = [round(x * scale_factor) for x in log_ret_space]\nret_space = np.exp(log_ret_space) # returns\na, b, m, q_probs = calc_variance_optimal_measure(ret_space, rf, p_probs)\nprint(\"Discretised Log-Returns space:\",log_ret_space)\nprint(\"Variance Optimal probabilities:\",np.round(q_probs, 4))\n\nattainable_nodes = possible_nodes(log_ret_space, T, scale_factor)\nHts = calc_mean_value_process(attainable_nodes, S0, K_strike, rf, log_ret_space, T, scale_factor, q_probs)\ndynamic_delta = calc_dynamic_deltas(attainable_nodes, Hts, S0, rf, log_ret_space, T, scale_factor, q_probs) ",
"Discretised Log-Returns space: [-0.059, -0.048, -0.037, -0.025, -0.014, -0.003, 0.008, 0.02, 0.031, 0.042]\nVariance Optimal probabilities: [2.000e-04 1.700e-03 1.100e-02 5.250e-02 1.723e-01 3.293e-01 3.076e-01\n 1.116e-01 1.340e-02 5.000e-04]\n"
]
],
[
[
"Calculate Black-Scholes, Dynamic Programming and Locally Optimal Hedge along Monte Carlo Paths",
"_____no_output_____"
]
],
[
[
"%%time\n\nheston_vals = np.zeros((N_SAMPLES, T + 1))\nbs_vals = np.zeros((N_SAMPLES, T + 1))\ndynamic_vals = np.zeros((N_SAMPLES, T + 1))\nlocal_vals = np.zeros((N_SAMPLES, T + 1))\n\n\n\n\nbs_vals[:,0] = Heston_MC_price\ndynamic_vals[:,0] = Heston_MC_price\nlocal_vals[:,0] = Heston_MC_price\nheston_vals[:,0] = Heston_MC_price\n\n# heston_deltas = np.zeros((N_SAMPLES, T))\nbs_deltas = np.zeros((N_SAMPLES, T))\ndynamic_deltas = np.zeros((N_SAMPLES, T ))\nlocal_deltas = np.zeros((N_SAMPLES, T ))\n\n\n\n# heston_deltas[:, 0] = calc_Heston_delta_by_FD(kappa = 1, theta = 0.04, sigma = 2, rho = -0.7, r = 0, \n# s0 = S0, v0 = 0.04, K= K_strike, tau = (T - 0) * dt)\ndynamic_deltas[:, 0] = dynamic_delta[0][0]\nlocal_deltas[:,0] = dynamic_delta[0][0] + rf * a * (Hts[0][0] - Heston_MC_price) / S0\n\nattainable_rets = {t:np.array(list(attainable_nodes[t])) for t in range(T + 1)}\n\nfor i in range(N_SAMPLES):\n for t in range(1, T):\n current_node = attainable_rets[t][np.abs(attainable_rets[t] - (np.log(Sts[i,t] / S0) * scale_factor)).argmin()]\n dynamic_deltas[i, t] = dynamic_delta[t][current_node]\n # adjustment require for the locally-optimal hedging value\n local_vals[i, t] = (rf * local_vals[i, t - 1] + \n local_deltas[i, t - 1] * (Sts[i, t] - rf * Sts[i, t - 1]))\n local_deltas[i, t] = (dynamic_delta[t][current_node] + \n rf * a * (Hts[t][current_node] - local_vals[i, t]) / Sts[i, t])\n local_vals[i, T] = (rf * local_vals[i, T - 1] + \n local_deltas[i, T - 1] * (Sts[i, T] - rf * Sts[i, T - 1]))\n \n \nfor i in range(N_SAMPLES):\n bs_deltas[i, :] = delta(Sts[i, :-1], K_strike, r, imp_vol, (T - tis[i,:-1]) * dt, CALL) # BS deltas\n\nfor t in range(1, T + 1):\n bs_vals[:, t] = rf * bs_vals[:, t - 1] + bs_deltas[:, t - 1] * (Sts[:, t] - rf * Sts[: , t- 1])\n dynamic_vals[:, t] = rf * dynamic_vals[:, t - 1] + dynamic_deltas[:, t - 1] * (Sts[:, t] - rf * Sts[: , t- 1])\n\n# calculate terminal value = hedging portfolio value - (ST - K)+\nbs_terminal_error = bs_vals[:, T] - np.maximum(Sts[:, T] - K_strike, 0) \nlocal_terminal_error = local_vals[:, T] - np.maximum(Sts[:, T] - K_strike, 0) \ndynamic_terminal_error = dynamic_vals[:, T] - np.maximum(Sts[:, T] - K_strike, 0) ",
"CPU times: user 54.1 s, sys: 30.8 ms, total: 54.1 s\nWall time: 54.2 s\n"
]
],
[
[
"Heston Model Hedge using Finite Differences",
"_____no_output_____"
]
],
[
[
"%%time\nfor t in range(1, T):\n heston_deltas[:,t] = vec_fd(kappa = alpha, theta = b, sigma = sigma, rho = rho, r = r, \n s0 = Sts[:, t], v0 = v_0, K= K_strike, tau = (T - t) * dt)",
"CPU times: user 1h 7min 52s, sys: 10.2 s, total: 1h 8min 2s\nWall time: 1h 8min 16s\n"
],
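[
"# Added illustrative sketch (not from the repo): 'finite differences' here means bump-and-\n# revalue -- reprice the option at bumped spots and take a central difference. heston_price\n# below is a placeholder for any Heston call pricer (an assumption, not a function defined\n# in this repo); vec_fd above is assumed to implement the same idea in vectorised form.\ndef delta_central_difference(heston_price, s0, h=0.01, **price_kwargs):\n    up = heston_price(s0=s0 + h, **price_kwargs)    # price after bumping spot up\n    down = heston_price(s0=s0 - h, **price_kwargs)  # price after bumping spot down\n    return (up - down) / (2 * h)",
"_____no_output_____"
],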
[
"heston_vals[:,0] = Heston_MC_price\n\nfor t in range(1, T + 1):\n heston_vals[:, t] = rf * heston_vals[:, t - 1] + heston_deltas[:, t - 1] * (Sts[:, t] - rf * Sts[: , t- 1])\n\nheston_terminal_error = heston_vals[:, T] - np.maximum(Sts[:, T] - K_strike, 0) ",
"_____no_output_____"
],
[
"diagnosis(heston_terminal_error)",
"_____no_output_____"
]
],
[
[
"# Deep Hedging",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.layers import Input, Dense, LSTM, GRU, BatchNormalization, Dropout\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras import initializers\n\ndef deep_hedger(T, n_feats):\n \"\"\"\n Feed-Forward Neural Network\n \"\"\"\n input_layer = Input(shape = (None, n_feats))\n # batch_norm = BatchNormalization()(input_layer)\n layer_1 = GRU(32, activation='tanh', \n kernel_initializer=initializers.RandomNormal(0,0.1),\n bias_initializer=initializers.RandomNormal(0,0.1),\n return_sequences=True\n )(input_layer)\n # layer_2 = Dense(16, activation='relu',\n # kernel_initializer=initializers.RandomNormal())(batch_norm1)\n # batch_norm2 = BatchNormalization()(layer_2)\n # layer_3 = Dense(8, activation='relu', \n # kernel_initializer = initializers.RandomNormal())(batch_norm2)\n # batch_norm3 = BatchNormalization()(layer_3)\n output_layer = Dense(1, activation='linear',\n kernel_initializer=initializers.RandomNormal(),\n bias_initializer=initializers.RandomNormal(0,0.1)\n )(layer_1)\n model = Model(input_layer, output_layer)\n return model\n\n",
"_____no_output_____"
],
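[
"# Added illustrative sketch (not part of the original notebook): the opening markdown\n# describes a per-time-step feed-forward network (3 layers, N0 = 2d, N1 = N2 = d + 15, ReLU,\n# batch normalization), whereas deep_hedger above uses a GRU over the whole path. A minimal\n# Keras version of that described spec, for d hedging instruments, might look like this --\n# purely illustrative, not the repo's implementation.\nfrom tensorflow.keras.layers import Input, Dense, BatchNormalization\nfrom tensorflow.keras import Model\n\ndef deep_hedger_ffn(d=1):\n    inp = Input(shape=(2 * d,))                # N0 = 2d input features\n    x = BatchNormalization()(inp)\n    x = Dense(d + 15, activation='relu')(x)    # N1 = d + 15\n    x = Dense(d + 15, activation='relu')(x)    # N2 = d + 15\n    out = Dense(d, activation='linear')(x)     # one hedge ratio per instrument\n    return Model(inp, out)",
"_____no_output_____"
],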
[
"from sklearn.model_selection import train_test_split\nX_true = np.log(Sts[:,:-1] / K_strike)\nX_true = X_true.reshape((-1, T, 1))\ny_true = Sts.reshape((-1, T + 1, 1))\n\n# generate additional Heston paths, but NOT from the original observed paths\nN_TRAIN_PATHS = 10 ** 5\ntrain_paths, Vts2 = generate_Heston_paths(n_samples = N_SAMPLES, S0 = 100, rho = -0.7, \n r = 0, alpha = alpha, b = b, \n sigma = sigma, dt = dt, \n v_0 = v_0, T = T, seed = 42)\n\nX = np.log(train_paths / K_strike)\n\nn_feats = 1\nX2 = X[:, :-1].reshape((-1, T, n_feats))\ny2 = train_paths.reshape((-1, T + 1, n_feats))\n\nX_train, X_val, y_train, y_val = train_test_split(X2, y2, test_size=0.2, random_state = 42)",
"_____no_output_____"
],
[
"def MSHE_Loss(init_price, strike, T):\n def lossFunction(y_true,y_pred):\n # for simplcity ignore rf for now, although this could be incorporated\n price_changes = tf.experimental.numpy.diff(y_true, n=1, axis=1)\n val = tf.reduce_sum(tf.math.multiply(price_changes, y_pred),axis=1)\n # print(tf.shape(price_changes)) \n # print(tf.shape(y_true)Z)\n option_val = tf.math.maximum(y_true[:,-1] - strike, 0)\n\n # val = (tf.math.reduce_sum(deltas * price_changes,axis=1))\n return tf.math.reduce_mean(tf.math.square(-option_val + val + init_price))\n return lossFunction\n\ndef cvarLoss(init_price, strike, T, batch_size, proportion = 0.01):\n num = int(batch_size * proportion)\n def lossFunction(y_true,y_pred):\n price_changes = tf.experimental.numpy.diff(y_true, n=1, axis=1)\n val = tf.reduce_sum(tf.math.multiply(price_changes, y_pred),axis=1) \n option_val = tf.math.maximum(y_true[:,-1,:] - strike, 0)\n # top k worse - net value\n error = tf.reshape(-(-option_val + val + init_price), [-1])\n CVaR, idx = tf.math.top_k(error, tf.constant(num, dtype=tf.int32))\n # return tf.math.reduce_mean(CVaR)\n return tf.math.reduce_mean(CVaR)\n return lossFunction",
"_____no_output_____"
],
[
"import os\nimport datetime\nimport tensorflow as tf\ntf.random.set_seed(2021)\n\nlr = 0.005\nBATCH_SIZE = 256\nEPOCHS = 50\n\n\nlogdir = os.path.join(\"logs\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\ntensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)\nearly_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\nmodel = deep_hedger(T, n_feats)\nmodel.summary()\nprint(\"Check Model\", model.predict(np.zeros((1, T, 1))).reshape(-1))\n\nmshe_loss = MSHE_Loss(init_price=Heston_MC_price, strike=K_strike, T = T)\nmodel.compile(optimizer=Adam(learning_rate=lr), loss=mshe_loss)\nmodel.fit(X_train, y_train, epochs=EPOCHS, verbose=1, \n batch_size=BATCH_SIZE, callbacks=[tensorboard_callback,early_stopping_callback],\n validation_data=(X_val, y_val), shuffle=False)\n\n# cvar_loss = cvarLoss(init_price = Heston_MC_price, strike = K_strike, T = T, batch_size = BATCH_SIZE, proportion = 0.01)\n\n# cvar_model = deep_hedger(T, n_feats)\n# cvar_model.compile(optimizer=Adam(learning_rate=lr), loss=cvar_loss)\n# cvar_model.fit(X_train, y_train, epochs=EPOCHS, verbose=1, \n# batch_size=BATCH_SIZE, callbacks=[tensorboard_callback,early_stopping_callback],\n# validation_data=(X_val, y_val), shuffle=False)\n",
"Model: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, None, 1)] 0 \n_________________________________________________________________\ngru (GRU) (None, None, 32) 3360 \n_________________________________________________________________\ndense (Dense) (None, None, 1) 33 \n=================================================================\nTotal params: 3,393\nTrainable params: 3,393\nNon-trainable params: 0\n_________________________________________________________________\nCheck Model [-0.02293535 -0.00704249 0.00108805 0.00526644 0.00739373 0.00844738\n 0.00894241 0.00915322 0.00922545 0.00923493 0.00921978 0.00919808\n 0.00917754 0.00916084 0.00914831 0.00913939 0.00913328 0.00912921\n 0.00912657 0.0091249 0.00912386 0.00912324 0.00912286 0.00912265\n 0.00912253 0.00912246 0.00912243 0.00912242 0.00912242 0.00912242]\nEpoch 1/50\n313/313 [==============================] - 11s 27ms/step - loss: 10.0872 - val_loss: 6.7091\nEpoch 2/50\n313/313 [==============================] - 8s 25ms/step - loss: 6.6817 - val_loss: 6.4719\nEpoch 3/50\n313/313 [==============================] - 8s 25ms/step - loss: 6.4837 - val_loss: 6.5099\nEpoch 4/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.3781 - val_loss: 6.3398\nEpoch 5/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.2925 - val_loss: 6.2229\nEpoch 6/50\n313/313 [==============================] - 8s 25ms/step - loss: 6.2400 - val_loss: 6.1576\nEpoch 7/50\n313/313 [==============================] - 7s 24ms/step - loss: 6.2063 - val_loss: 6.1300\nEpoch 8/50\n313/313 [==============================] - 8s 25ms/step - loss: 6.1817 - val_loss: 6.1119\nEpoch 9/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.1605 - val_loss: 6.0912\nEpoch 10/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.1397 - val_loss: 6.0707\nEpoch 11/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.1205 - val_loss: 6.0514\nEpoch 12/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.1048 - val_loss: 6.0359\nEpoch 13/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.0926 - val_loss: 6.0238\nEpoch 14/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.0818 - val_loss: 6.0123\nEpoch 15/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.0701 - val_loss: 6.0046\nEpoch 16/50\n313/313 [==============================] - 8s 25ms/step - loss: 6.0556 - val_loss: 5.9989\nEpoch 17/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.0385 - val_loss: 5.9983\nEpoch 18/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.0222 - val_loss: 6.0001\nEpoch 19/50\n313/313 [==============================] - 8s 24ms/step - loss: 6.0079 - val_loss: 6.0081\nEpoch 20/50\n313/313 [==============================] - 8s 25ms/step - loss: 5.9947 - val_loss: 6.0097\nEpoch 21/50\n313/313 [==============================] - 8s 25ms/step - loss: 5.9727 - val_loss: 6.0333\nEpoch 22/50\n313/313 [==============================] - 8s 25ms/step - loss: 5.9699 - val_loss: 6.0763\nEpoch 23/50\n313/313 [==============================] - 8s 25ms/step - loss: 5.9410 - val_loss: 6.0830\nEpoch 24/50\n313/313 [==============================] - 8s 25ms/step - loss: 5.9693 - val_loss: 6.0938\nEpoch 25/50\n313/313 [==============================] - 8s 25ms/step - 
loss: 5.9651 - val_loss: 6.0884\nEpoch 26/50\n313/313 [==============================] - 8s 25ms/step - loss: 5.9345 - val_loss: 6.0815\nEpoch 27/50\n313/313 [==============================] - 8s 25ms/step - loss: 5.9884 - val_loss: 6.0277\n"
],
[
"deep_hedge_deltas = np.zeros((N_SAMPLES, T))\ndeep_cvar_hedge_deltas = np.zeros((N_SAMPLES, T))\n\nfor i in range(T):\n temp= model.predict(X_true[:,:(i + 1),:], batch_size=512)\n deep_hedge_deltas[:,i] = temp.reshape(-1, i + 1)[:,i]\n # temp2 = cvar_model.predict(X_true[:,:(i + 1),:], batch_size=512)\n # deep_cvar_hedge_deltas[:, i] = temp2.reshape(-1, i + 1)[:,i]\n\n# calculate portfolio value\ndeep_vals = np.zeros((N_SAMPLES, T + 1))\ndeep_vals[:,0] = Heston_MC_price\n\n# deep_cvar_vals = np.zeros((N_SAMPLES, T + 1))\n# deep_cvar_vals[:,0] = Heston_MC_price\n\n\nfor t in range(1, T + 1):\n deep_vals[:, t] = rf * deep_vals[:, t - 1] + deep_hedge_deltas[:, t - 1] * (Sts[:, t] - rf * Sts[:, t - 1]) \n # deep_cvar_vals[:, t] = rf * deep_cvar_vals[:, t - 1] + deep_cvar_hedge_deltas[:, t - 1] * (Sts[:, t] - rf * Sts[:, t - 1]) \n \n\ndeep_terminal_error = deep_vals[:, T] - np.maximum(Sts[:, T] - K_strike, 0)\n# deep_cvar_terminal_error = deep_cvar_vals[:, T] - np.maximum(Sts[:, T] - K_strike, 0)\n \n ",
"_____no_output_____"
]
],
[
[
"# Plot Results",
"_____no_output_____"
]
],
[
[
"hedging_errors = [bs_terminal_error, local_terminal_error, dynamic_terminal_error,\n deep_terminal_error] #, deep_cvar_terminal_error]\n\nhedging_errors2 = [diagnosis(error) for error in hedging_errors]\nresults = pd.concat(hedging_errors2)\n\ntitles = [\"BS-delta\", \"Local Hedge\", \"Dynamic Hedge\", ] + [\"Deep-Hedger\"]#, \"Deep-CVaR-Hedger\"]\nresults.index = titles\ndisplay(results)\n\n\nplot_titles = [title +\" error\" for title in titles]\nfig, ax = plt.subplots(ncols= len(titles), figsize=(len(titles) * 6, 5))\nfor i in range(len(titles)):\n sns.histplot(hedging_errors[i], ax=ax[i]).set_title(plot_titles[i]) ",
"_____no_output_____"
],
[
"print(results.to_latex())",
"\\begin{tabular}{lrrrrr}\n\\toprule\n{} & MSHE & CVar 1\\% & CVaR 5\\% & CVar 10\\% & CVaR 50\\% \\\\\n\\midrule\nBS-delta & 3.143079 & 9.524499 & 5.632385 & 4.089477 & 1.121566 \\\\\nLocal Hedge & 2.950864 & 8.677565 & 5.200123 & 3.862064 & 1.149328 \\\\\nDynamic Hedge & 2.956798 & 8.592913 & 5.178991 & 3.866077 & 1.154398 \\\\\nDeep-Hedger & 2.524503 & 6.627313 & 4.422683 & 3.542721 & 1.151801 \\\\\n\\bottomrule\n\\end{tabular}\n\n"
]
],
[
[
"Test to make sure loss functions are correctly specified",
"_____no_output_____"
]
],
[
[
"mshe_loss(y_true, bs_deltas.reshape(-1, T, 1))",
"_____no_output_____"
],
[
"cvar_loss_test = cvarLoss(init_price = Heston_MC_price, strike = K_strike, T = T, batch_size = N_SAMPLES, proportion = 0.01)\ncvar_loss_test(y_true, deep_cvar_hedge_deltas.reshape(-1, T, 1))",
"_____no_output_____"
],
[
" def characteristic_func(kappa, theta, sigma, rho, r, xi, s0, v0, tau):\n ixi = 1j * xi\n d = np.sqrt((kappa - ixi * rho * sigma)**2\n + sigma**2 * (ixi + xi**2))\n g = (kappa - ixi * rho * sigma - d) / (kappa - ixi * rho * sigma + d)\n ee = cmath.exp(-d * tau)\n C = ixi * r * tau + kappa * theta / sigma**2 * (\n (kappa - ixi * rho * sigma - d) * tau - 2. * cmath.log((1 - g * ee) / (1 - g))\n )\n D = (kappa - ixi * rho * sigma - d) / sigma**2 * (\n (1 - ee) / (1 - g * ee)\n )\n return cmath.exp(C + D*v0 + ixi * cmath.log(s0))\n\n def integ_func(kappa, theta, sigma, rho, r, xi, s0, v0, K, tau, num):\n ixi = 1j * xi\n if num == 1:\n return (characteristic_func(kappa, theta, sigma, rho, r, xi - 1j, s0, v0, tau) / (ixi * characteristic_func(kappa, theta, sigma, rho, r, -1j, s0, v0, tau)) * cmath.exp(-ixi * cmath.log(K))).real\n else:\n return (characteristic_func(kappa, theta, sigma, rho, r, xi, s0, v0, tau) / (ixi) * cmath.exp(-ixi * cmath.log(K))).real\n\n def call_price(kappa, theta, sigma, rho, r, s0, v0, K, tau):\n \n \"Simplified form, with only one integration. \"\n h = lambda xi: s0 * integ_func(kappa, theta, sigma, rho, r, xi, s0, v0, K, tau, 1) - K * np.exp(-r * tau) * integ_func(kappa, theta, sigma, rho, r, xi, s0, v0, K, tau, 2)\n return 0.5 * (s0 - K * np.exp(-r * tau)) + 1/np.pi * scipy.integrate.quad(h, 1e-4, 500.)[0]\n\n def calc_Heston_delta_by_FD(kappa, theta, sigma, rho, r, s0, v0, K, tau):\n return (call_price(kappa, theta, sigma, rho, r, s0 + 0.0001, v0, K, tau) - \n call_price(kappa, theta, sigma, rho, r, s0, v0, K, tau)) / 0.0001\n\n\n def calc_Heston_vega_by_FD(s0, v0, k, tau, pricer):\n \" This is the sensitivity of price w.r.t variance, not to vega \"\n dv = v0 * 0.001\n p_plus = pricer.call_price(s0, v0 + dv, k, tau)\n p_minus = pricer.call_price(s0, v0 - dv, k, tau)\n return (p_plus - p_minus) / (2 * dv)",
"_____no_output_____"
],
[
"# inspect NN behaviour\nmoneyness_grid = np.linspace(-0.1, 0.1, 10).reshape(-1, 1, 1)\ninp = tf.Variable(moneyness_grid, dtype=tf.float32)\nwith tf.GradientTape() as tape:\n preds = model(inp)\ngrads = tape.gradient(preds, inp)\n\n\ngrads = grads.numpy().reshape(-1)\n\nplt.plot(moneyness_grid.reshape(-1), grads / (K_strike * np.exp(moneyness_grid.reshape(-1))))",
"_____no_output_____"
],
[
"%%time\ncalc_Heston_delta_by_FD(kappa = 1, theta = 0.04, sigma = 2, rho = -0.7, r = 0, \n s0 = Sts[0, 0], v0 = 0.04, K= K_strike, tau = (T - 0) * dt)",
"CPU times: user 18.8 ms, sys: 1.01 ms, total: 19.8 ms\nWall time: 22.4 ms\n"
],
[
"h = 0.0001\n(call_price(kappa = 1, theta = 0.04, sigma = 2, rho = -0.7, r = 0, \n s0 = Sts[0, 0] + h, v0 = 0.04, K= K_strike, tau = (T - 0) * dt) - call_price(kappa = 1, theta = 0.04, sigma = 2, rho = -0.7, r = 0, \n s0 = Sts[0, 0] - h, v0 = 0.04, K= K_strike, tau = (T - 0) * dt)) / (2 * h)",
"_____no_output_____"
],
[
"%%time\nvec_fd = np.vectorize(calc_Heston_delta_by_FD)\nprint(vec_fd(kappa = 1, theta = 0.04, sigma = 2, rho = -0.7, r = 0, \n s0 = Sts[:10, :1], v0 = 0.04, K= K_strike, tau = (T - 0) * dt))",
"[[0.70869062]\n [0.70869062]\n [0.70869062]\n [0.70869062]\n [0.70869062]\n [0.70869062]\n [0.70869062]\n [0.70869062]\n [0.70869062]\n [0.70869062]]\nCPU times: user 241 ms, sys: 791 µs, total: 242 ms\nWall time: 243 ms\n"
],
[
"with open(\"heston_deltas.npy\", \"wb\") as f:\n np.save(f, heston_deltas)",
"_____no_output_____"
]
],
[
[
"# References\n\n\n\n+ Weiguan Wang\n+ Mathematical Modeling and Computation in Finance\n+ Glasserman \n+ Broadie Kaya\n\n## Code References\n\n+ https://github.com/weiguanwang/Hedging_Neural_Networks/blob/master/library/heston.py\n+https://github.com/LechGrzelak/Computational-Finance-Course/blob/main/Lecture%2010-%20Monte%20Carlo%20Simulation%20of%20the%20Heston%20Model/Materials/HestonModelDiscretization.py",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8cea766398147ad551d1621741dece07aea71f | 22,862 | ipynb | Jupyter Notebook | dog_class/distinguish_dog.ipynb | xingchen20133/AI | c8b98fa98870eb874c655c5aedd8c2c5fe75d21d | [
"MIT"
] | null | null | null | dog_class/distinguish_dog.ipynb | xingchen20133/AI | c8b98fa98870eb874c655c5aedd8c2c5fe75d21d | [
"MIT"
] | null | null | null | dog_class/distinguish_dog.ipynb | xingchen20133/AI | c8b98fa98870eb874c655c5aedd8c2c5fe75d21d | [
"MIT"
] | null | null | null | 31.66482 | 257 | 0.502843 | [
[
[
"\"\"\"\n为不同类dog图片名称添加前缀 哈士奇:0_ 吉娃娃:1_ 松狮犬:2_\n\"\"\"\nimport os\n\n\n#为不同类dog添加前缀 哈士奇:0 吉娃娃:1 松狮犬:2\ndef ranameJPG(filepath, kind):\n images = os.listdir(filepath)\n# print(images)\n for name in images:\n os.rename(filepath+name, filepath+str(kind)+'_'+(name.split(\"_\")[-1]).split('.')[0]+'.jpg')\n# print(name)\n\nif __name__ == '__main__':\n ranameJPG(\"/Users/s/code/python/keras/dog_img/h_dog/\",0)\n ranameJPG(\"/Users/s/code/python/keras/dog_img/j_dog/\",1)\n ranameJPG(\"/Users/s/code/python/keras/dog_img/s_dog/\",2)\n ",
"_____no_output_____"
],
[
"'''\nkeras 中类似的vgg的神经网络要求输入图片为100 * 100,处理图片为\n'''\nfrom PIL import Image\n\ndef converting(orgimg, outdir, width=100, height=100):\n img = Image.open(orgimg)\n try:\n new_img = img.resize((width, height), Image.BILINEAR)\n new_img.save(os.path.join(outdir,orgimg.split('/')[-1]))\n except Exception as e:\n print(e)\n\n'''\n图片的每一个分类放在一个文件加里,再由一个文件夹放所有的分类文件加\norg_parent_dir:图片处理前的总文件夹\nout_parent_dir:图片处理后的总文件夹\n'''\ndef convertjpg(org_parent_dir,out_parent_dir):\n for f in os.listdir(org_parent_dir):\n if \".\" in f:\n continue\n org_dir = org_parent_dir+f+'/'\n out_dir = out_parent_dir+f+'/'\n for img in os.listdir(org_dir):\n print(img)\n converting(org_dir+img, out_dir)\n \n #img = Image.open(\"/Users/s/code/python/keras/dog_img/s_dog/2_songshi99.jpg\") \n \nif __name__ == \"__main__\":\n convertjpg(\"/Users/s/code/python/keras/dog_img/\",\"/Users/s/code/python/keras/dog_img_org/\")\n ",
"2_songshi141.jpg\n2_songshi10.jpg\n2_songshi11.jpg\n2_songshi39.jpg\n2_songshi140.jpg\n2_songshi12.jpg\n2_songshi147.jpg\n2_songshi16.jpg\n2_songshi146.jpg\n2_songshi144.jpg\n2_songshi15.jpg\n2_songshi14.jpg\n2_songshi145.jpg\n2_songshi98.jpg\n2_songshi136.jpg\n2_songshi122.jpg\n2_songshi67.jpg\n2_songshi73.jpg\n2_songshi72.jpg\n2_songshi66.jpg\n2_songshi123.jpg\n2_songshi137.jpg\n2_songshi99.jpg\n2_songshi109.jpg\n2_songshi135.jpg\n2_songshi58.jpg\n2_songshi64.jpg\n2_songshi65.jpg\n2_songshi71.jpg\n2_songshi59.jpg\n2_songshi134.jpg\n2_songshi108.jpg\n2_songshi124.jpg\n2_songshi118.jpg\n2_songshi61.jpg\n2_songshi49.jpg\n2_songshi74.jpg\n2_songshi131.jpg\n2_songshi133.jpg\n2_songshi127.jpg\n2_songshi62.jpg\n2_songshi77.jpg\n2_songshi63.jpg\n2_songshi88.jpg\n2_songshi126.jpg\n2_songshi132.jpg\n2_songshi85.jpg\n2_songshi117.jpg\n2_songshi103.jpg\n2_songshi46.jpg\n2_songshi52.jpg\n2_songshi53.jpg\n2_songshi47.jpg\n2_songshi102.jpg\n2_songshi90.jpg\n2_songshi128.jpg\n2_songshi92.jpg\n2_songshi100.jpg\n2_songshi86.jpg\n2_songshi79.jpg\n2_songshi51.jpg\n2_songshi44.jpg\n2_songshi50.jpg\n2_songshi78.jpg\n2_songshi101.jpg\n2_songshi105.jpg\n2_songshi97.jpg\n2_songshi111.jpg\n2_songshi139.jpg\n2_songshi54.jpg\n2_songshi40.jpg\n2_songshi68.jpg\n2_songshi69.jpg\n2_songshi138.jpg\n2_songshi110.jpg\n2_songshi104.jpg\ncannot write mode P as JPEG\n2_songshi112.jpg\n2_songshi80.jpg\n2_songshi106.jpg\n2_songshi43.jpg\n2_songshi57.jpg\ncannot write mode P as JPEG\n2_songshi56.jpg\n2_songshi42.jpg\n2_songshi95.jpg\n2_songshi107.jpg\n2_songshi113.jpg\n2_songshi81.jpg\n2_songshi148.jpg\n2_songshi19.jpg\n2_songshi31.jpg\n2_songshi3.jpg\n2_songshi37.jpg\n2_songshi5.jpg\n2_songshi4.jpg\n2_songshi36.jpg\n2_songshi34.jpg\n2_songshi6.jpg\n2_songshi7.jpg\n2_songshi35.jpg\n0_hashiqi9.jpg\n0_hashiqi144.jpg\n0_hashiqi43.jpg\n0_hashiqi57.jpg\n0_hashiqi95.jpg\n0_hashiqi81.jpg\n0_hashiqi56.jpg\n0_hashiqi42.jpg\n0_hashiqi145.jpg\n0_hashiqi8.jpg\n0_hashiqi147.jpg\n0_hashiqi40.jpg\n0_hashiqi97.jpg\n0_hashiqi96.jpg\n0_hashiqi55.jpg\n0_hashiqi146.jpg\n0_hashiqi142.jpg\n0_hashiqi79.jpg\n0_hashiqi51.jpg\n0_hashiqi45.jpg\n0_hashiqi92.jpg\n0_hashiqi86.jpg\n0_hashiqi93.jpg\n0_hashiqi78.jpg\n0_hashiqi143.jpg\n0_hashiqi141.jpg\n0_hashiqi52.jpg\n0_hashiqi91.jpg\n0_hashiqi90.jpg\n0_hashiqi47.jpg\n0_hashiqi133.jpg\n0_hashiqi127.jpg\n0_hashiqi20.jpg\n0_hashiqi34.jpg\n0_hashiqi35.jpg\n0_hashiqi21.jpg\n0_hashiqi126.jpg\n0_hashiqi132.jpg\n0_hashiqi118.jpg\n0_hashiqi130.jpg\n0_hashiqi37.jpg\n0_hashiqi131.jpg\n0_hashiqi125.jpg\n0_hashiqi119.jpg\n0_hashiqi121.jpg\n0_hashiqi135.jpg\n0_hashiqi32.jpg\n0_hashiqi27.jpg\n0_hashiqi33.jpg\n0_hashiqi108.jpg\n0_hashiqi134.jpg\n0_hashiqi120.jpg\n0_hashiqi122.jpg\n0_hashiqi19.jpg\n0_hashiqi25.jpg\n0_hashiqi31.jpg\n0_hashiqi30.jpg\n0_hashiqi24.jpg\n0_hashiqi18.jpg\n0_hashiqi123.jpg\n0_hashiqi137.jpg\n0_hashiqi112.jpg\n0_hashiqi106.jpg\n0_hashiqi15.jpg\n0_hashiqi29.jpg\n0_hashiqi28.jpg\n0_hashiqi14.jpg\n0_hashiqi107.jpg\n0_hashiqi139.jpg\n0_hashiqi105.jpg\n0_hashiqi111.jpg\n0_hashiqi17.jpg\n0_hashiqi110.jpg\n0_hashiqi104.jpg\n0_hashiqi100.jpg\n0_hashiqi114.jpg\n0_hashiqi128.jpg\n0_hashiqi13.jpg\n0_hashiqi12.jpg\n0_hashiqi129.jpg\n0_hashiqi101.jpg\n0_hashiqi117.jpg\n0_hashiqi10.jpg\n0_hashiqi39.jpg\n0_hashiqi102.jpg\n0_hashiqi0.jpg\n0_hashiqi76.jpg\n0_hashiqi89.jpg\n0_hashiqi88.jpg\n0_hashiqi77.jpg\n0_hashiqi1.jpg\n0_hashiqi3.jpg\n0_hashiqi75.jpg\n0_hashiqi61.jpg\n0_hashiqi49.jpg\n0_hashiqi60.jpg\n0_hashiqi2.jpg\n0_hashiqi6.jpg\n0_hashiqi58.jpg\n0_hashiqi64.jpg\n0_hashiqi65.jpg\n0_hashiqi71.jpg\n0_hashiqi59.jp
g\n0_hashiqi148.jpg\n0_hashiqi5.jpg\n0_hashiqi73.jpg\n0_hashiqi98.jpg\n0_hashiqi99.jpg\n0_hashiqi4.jpg\n0_hashiqi149.jpg\n1_jiwawa127.jpg\n1_jiwawa56.jpg\n1_jiwawa95.jpg\n1_jiwawa81.jpg\n1_jiwawa80.jpg\n1_jiwawa43.jpg\n1_jiwawa126.jpg\n1_jiwawa130.jpg\n1_jiwawa124.jpg\n1_jiwawa118.jpg\n1_jiwawa69.jpg\n1_jiwawa41.jpg\n1_jiwawa55.jpg\n1_jiwawa82.jpg\n1_jiwawa96.jpg\n1_jiwawa97.jpg\n1_jiwawa83.jpg\n1_jiwawa54.jpg\n1_jiwawa40.jpg\n1_jiwawa119.jpg\n1_jiwawa125.jpg\n1_jiwawa109.jpg\n1_jiwawa135.jpg\n1_jiwawa121.jpg\n1_jiwawa44.jpg\n1_jiwawa50.jpg\n1_jiwawa92.jpg\n1_jiwawa86.jpg\n1_jiwawa79.jpg\n1_jiwawa51.jpg\n1_jiwawa45.jpg\n1_jiwawa120.jpg\n1_jiwawa134.jpg\n1_jiwawa108.jpg\n1_jiwawa53.jpg\n1_jiwawa90.jpg\n1_jiwawa85.jpg\n1_jiwawa91.jpg\n1_jiwawa52.jpg\n1_jiwawa2.jpg\n1_jiwawa35.jpg\n1_jiwawa21.jpg\n1_jiwawa20.jpg\n1_jiwawa34.jpg\n1_jiwawa3.jpg\n1_jiwawa1.jpg\n1_jiwawa147.jpg\n1_jiwawa22.jpg\n1_jiwawa23.jpg\n1_jiwawa0.jpg\n1_jiwawa4.jpg\n1_jiwawa33.jpg\n1_jiwawa32.jpg\n1_jiwawa26.jpg\n1_jiwawa5.jpg\n1_jiwawa7.jpg\n1_jiwawa30.jpg\n1_jiwawa24.jpg\n1_jiwawa18.jpg\n1_jiwawa19.jpg\n1_jiwawa25.jpg\n1_jiwawa31.jpg\n1_jiwawa140.jpg\n1_jiwawa14.jpg\n1_jiwawa15.jpg\n1_jiwawa29.jpg\n1_jiwawa8.jpg\n1_jiwawa17.jpg\n1_jiwawa9.jpg\n1_jiwawa12.jpg\n1_jiwawa13.jpg\n1_jiwawa11.jpg\n1_jiwawa39.jpg\n1_jiwawa38.jpg\n1_jiwawa10.jpg\n1_jiwawa106.jpg\n1_jiwawa112.jpg\n1_jiwawa77.jpg\n1_jiwawa63.jpg\n1_jiwawa89.jpg\n1_jiwawa62.jpg\n1_jiwawa76.jpg\n1_jiwawa113.jpg\n1_jiwawa107.jpg\n1_jiwawa139.jpg\n1_jiwawa74.jpg\n1_jiwawa75.jpg\n1_jiwawa61.jpg\n1_jiwawa49.jpg\n1_jiwawa138.jpg\n1_jiwawa110.jpg\n1_jiwawa114.jpg\n1_jiwawa100.jpg\n1_jiwawa71.jpg\n1_jiwawa59.jpg\n1_jiwawa58.jpg\n1_jiwawa70.jpg\n1_jiwawa64.jpg\n1_jiwawa115.jpg\n1_jiwawa129.jpg\n1_jiwawa103.jpg\n1_jiwawa117.jpg\n1_jiwawa72.jpg\n1_jiwawa98.jpg\n1_jiwawa67.jpg\n1_jiwawa73.jpg\n1_jiwawa116.jpg\n"
],
[
"#待测贴图片处理\ndef convertpre(org_dir, out_dir, width=100, height=100):\n print(os.listdir(org_dir))\n for f in os.listdir(org_dir):\n# if \".\" in f:\n# continue\n print(org_dir+f)\n \n try:\n img = Image.open(org_dir+f)\n print(os.path.join(out_dir,f))\n new_img = img.resize((width, height), Image.BILINEAR)\n new_img.save(os.path.join(out_dir,f))\n except Exception as e:\n print(e)\nconvertpre(\"predict_img/org_img/\",\"predict_img/dealed_img/\")",
"['j.jpg', 'h.jpg', 's.jpg']\npredict_img/org_img/j.jpg\npredict_img/dealed_img/j.jpg\npredict_img/org_img/h.jpg\npredict_img/dealed_img/h.jpg\npredict_img/org_img/s.jpg\npredict_img/dealed_img/s.jpg\n"
],
[
"\"\"\"\nVGG 深度学习网络\n\"\"\"\nimport os\nfrom PIL import Image\nimport numpy as np\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.optimizers import SGD, RMSprop, Adam\nfrom keras.layers import Conv2D, MaxPooling2D\n\n#--------------------------------------------------------------------------------------------\n# 将训练集图片转换成数组\nima1 = os.listdir('./train_img/')\ndef read_image(filename):\n img = Image.open(filename).convert('RGB')\n return np.array(img)\n\nx_train = []\n\nfor i in ima1:\n if i == r\".DS_Store\":\n continue\n x_train.append(read_image('./train_img/'+i))\n\nx_train = np.array(x_train)\n\n# 根据文件名提取标签\ny_train = []\nfor filename in ima1:\n if filename == r\".DS_Store\":\n continue\n y_train.append(int(filename.split('_')[0]))\n\ny_train = np.array(y_train)\n# -----------------------------------------------------------------------------------------\n# 将测试集图片转化成数组\nima2 = os.listdir('./test_img/')\n# def read_image2(filename):\n# img = Image.open('./test_img/'+filename).convert('RGB')\n# return np.array(img)\n\nx_test = []\n\nfor i in ima2:\n if i == r\".DS_Store\":\n continue\n x_test.append(read_image(\"./test_img/\"+i))\n\nx_test = np.array(x_test)\n\n\n\n\n# 根据文件名提取标签\ny_test = []\nfor filename in ima2:\n if filename == r\".DS_Store\":\n continue\n y_test.append(int(filename.split('_')[0]))\n\ny_test = np.array(y_test)\n#-------------------------------------------------------------------------------------\n# 将标签转换格式\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\n\n# 将特征点从0~255转换成0~1提高特征提取精度\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\n# 搭建卷积神经网络\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0))\n\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(3, activation='softmax'))\n\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, batch_size=40, epochs=32)\nmodel.save_weights('./dog_weights.h5', overwrite=True)\n\nscore = model.evaluate(x_test, y_test, batch_size=40)\nprint(score)",
"/Users/s/anaconda2/lib/python2.7/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n"
],
[
"#测试图片属于种类\nimg_dir = os.listdir('/Users/s/code/python/keras/predict_img/dealed_img/')\n# def read_image2(filename):\n# img = Image.open('./test_img/'+filename).convert('RGB')\n# return np.array(img)\nprint(os.listdir(\"./predict_img/dealed_img/\"))\n\nprint(img_dir)\n\nx_predice = []\n\nfor i in img_dir:\n if i == r\".DS_Store\":\n continue\n x_predice.append(read_image(\"/Users/s/code/python/keras/predict_img/dealed_img/\"+i))\n\nx_predice = np.array(x_predice)\nx_predice = x_predice.astype('float32')\nx_predice /= 255\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(3, activation='softmax'))\n\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n\n\nmodel.load_weights('dog_weights.h5')\nclasses = model.predict_classes(x_predice)\ntarget = ['哈士奇', '吉娃娃', '松狮犬']\nfor index in classes:\n print(target[index])\n",
"['j.jpg', 'h.jpg', 's.jpg']\n['j.jpg', 'h.jpg', 's.jpg']\n吉娃娃\n哈士奇\n松狮犬\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
ec8cf2c2f935f002d4f1096882141523ad246868 | 4,497 | ipynb | Jupyter Notebook | example/Photometry_IMAGE2_product.ipynb | mtakahiro/niriss_ghost | ed01736b75c7019ed0fcbab5835cba6878132aba | [
"BSD-3-Clause"
] | null | null | null | example/Photometry_IMAGE2_product.ipynb | mtakahiro/niriss_ghost | ed01736b75c7019ed0fcbab5835cba6878132aba | [
"BSD-3-Clause"
] | 1 | 2021-12-15T21:22:45.000Z | 2021-12-17T22:01:41.000Z | example/Photometry_IMAGE2_product.ipynb | mtakahiro/niriss_ghost | ed01736b75c7019ed0fcbab5835cba6878132aba | [
"BSD-3-Clause"
] | null | null | null | 30.181208 | 137 | 0.579053 | [
[
[
"## This is an example notebook to retrieve photometric catalog from IMAGE2 products by runnig photutils on _cal.fits files.",
"_____no_output_____"
]
],
[
[
"import glob\nimport os\nimport argparse\nfrom astropy.io import fits\n#try:\nfrom configobj import ConfigObj\n#except ModuleNotFoundError:\n#from jwst.extern.configobj.configobj import ConfigObj\n\nfrom jwst.pipeline import collect_pipeline_cfgs\nfrom jwst.pipeline import Detector1Pipeline\nfrom jwst.pipeline import Image2Pipeline\nfrom jwst.pipeline import Image3Pipeline\nfrom jwst.pipeline import Spec2Pipeline\nfrom jwst.pipeline import Spec3Pipeline\n\nimport yaml",
"_____no_output_____"
],
[
"import jwst\njwst.__version__",
"_____no_output_____"
],
[
"# Define some fancy function, if you want;\nTERM = os.get_terminal_size()\nSYM = \"~\"\ndef printnice(level):\n \"\"\"\n Print the current pipeline level and make it nice :)\n Args:\n level (str): The thing you want to print nicely.\n \"\"\"\n\n print(\"\\n\", SYM*TERM.columns, \"\\n\", SYM, level.center(TERM.columns-2), SYM, \"\\n\", SYM*TERM.columns, \"\\n\", sep=\"\")",
"_____no_output_____"
],
[
"### Make a directory for reduced products;\nDIR_RED = './reduced/'\nif not os.path.exists(DIR_RED):\n os.mkdir(DIR_RED)\n",
"_____no_output_____"
],
[
"import astropy.wcs as wcs\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom photutils import Background2D, MedianBackground, detect_sources, deblend_sources, source_properties\nfrom astropy.stats import gaussian_fwhm_to_sigma\nfrom astropy.convolution import Gaussian2DKernel\n\nimages_cal = glob.glob('%s*_nis_cal.fits'%(DIR_RED))\n\nfor image_cal in images_cal[:]:\n\n hdu = fits.open(image_cal)\n data = hdu[1].data\n imwcs = wcs.WCS(hdu[1].header, hdu)\n err = hdu[2].data\n\n # Measure background and set detection threshold\n bkg_estimator = MedianBackground()\n bkg = Background2D(data, (50, 50), filter_size=(3, 3), bkg_estimator=bkg_estimator)\n #threshold = bkg.background + (15. * bkg.background_rms)\n threshold = bkg.background + (10. * bkg.background_rms)\n\n # Before detection, smooth image with Gaussian FWHM = n pixels\n sigma = 1.5 * gaussian_fwhm_to_sigma \n\n kernel = Gaussian2DKernel(sigma)#, x_size=5, y_size=5)\n kernel.normalize()\n\n # Detect and deblend\n segm_detect = detect_sources(data, threshold, npixels=20)#, filter_kernel=kernel)\n\n # Save segmentation map of detected objects\n segm_hdu = fits.PrimaryHDU(segm_detect.data.astype(np.uint32), header=imwcs.to_header())\n segm_hdu.writeto(image_cal.replace('_cal.fits','_cal_seg.fits'), overwrite=True)\n \n # Save cat;\n cat = source_properties(data-bkg.background, segm_detect, wcs=imwcs, background=bkg.background, error=err)\n tbl = cat.to_table()#columns=columns)\n\n tbl.write(image_cal.replace('_cal.fits', '_cat_man.ecsv'), format='ascii.ecsv',overwrite=True)\n\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
ec8cff148b68cccc699579a8fe831cf3f4722541 | 75,956 | ipynb | Jupyter Notebook | hw/hw6-1.ipynb | Bertha-ding/20MA573-yuning-ding | 21b8fb9596b4d72eff972643602bb8e55f26453c | [
"MIT"
] | null | null | null | hw/hw6-1.ipynb | Bertha-ding/20MA573-yuning-ding | 21b8fb9596b4d72eff972643602bb8e55f26453c | [
"MIT"
] | null | null | null | hw/hw6-1.ipynb | Bertha-ding/20MA573-yuning-ding | 21b8fb9596b4d72eff972643602bb8e55f26453c | [
"MIT"
] | null | null | null | 434.034286 | 70,504 | 0.913726 | [
[
[
"<a href=\"https://colab.research.google.com/github/Bertha-ding/20MA573-yuning-ding/blob/master/hw/hw6-1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport math \ndef Bpath(T,N):\n sum_list = [0] \n h = T/N \n wi = 0\n for i in range(N):\n Z = np.random.normal(0,1)\n wi += np.sqrt(h) * Z\n sum_list.append(wi)\n return sum_list\ndef plot(T,N):\n x0 = [x*T/N for x in range(N+1)]\n y0 = Bpath(T,N)\n return plt.plot(x0,y0)\nfor t in range(300,311):\n plot(t,int(t/0.01)); \na = list(range(int(math.e**2),311))\nkl1 = [math.sqrt(2*x*math.log(math.log(x))) for x in a] \nkl2 = [-x for x in kl1]\nplt.plot(a,kl1,label = 'loglog_1');\nplt.plot(a,kl2,label = 'loglog_2');\nplt.legend();",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.optimize as so\nimport scipy.stats as ss\n\nclass VanillaOption:\n def __init__(\n self,\n otype = 1, # 1: 'call'\n # -1: 'put'\n strike = 110.,\n maturity = 1.,\n market_price = 10.):\n self.otype = otype\n self.strike = strike\n self.maturity = maturity\n self.market_price = market_price\nclass Gbm:\n def __init__(self, init_state = 100.,\n drift_ratio = .0475,\n vol_ratio = .2,\n nstep = 5,\n N = 1000):\n self.init_state = init_state\n self.drift_ratio = drift_ratio\n self.vol_ratio = vol_ratio\n self.nstep = nstep\n self.N = N # The number of trials\n\n def arasian(self,Vanillaoption):\n s0 = self.init_state\n o = self.vol_ratio\n r = self.drift_ratio\n n = self.nstep\n N = self.N \n otype = Vanillaoption.otype\n k = Vanillaoption.strike\n T = Vanillaoption.maturity\n sum1 = 0\n for i in range(N):\n t = np.linspace(0, T, n+1) \n h = T/n\n W = np.zeros(n+1)\n for j in range(n):\n W[j+1] = W[j] + np.sqrt(h) * np.random.normal(0,1) \n S = s0 * np.exp((r - o**2/2.) * t + o * W) \n average = np.mean(S)\n sum1 += np.exp(-r*T)*np.max([average-k, 0])\n return sum1/N\ngbm1 = Gbm()\noption1 = VanillaOption()\ngbm1.arasian(option1)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
ec8d0c8a9bf74e0a52a5f8f31b670359311a392b | 4,564 | ipynb | Jupyter Notebook | notebook/hamiltonian/01-custom_eigen.ipynb | highrizer/HOQSTTutorials.jl | fb3fb43b7edd7c24fe505e47fa9a3dfd4af1b5eb | [
"MIT"
] | 9 | 2020-11-11T09:02:23.000Z | 2021-11-10T20:31:00.000Z | notebook/hamiltonian/01-custom_eigen.ipynb | highrizer/HOQSTTutorials.jl | fb3fb43b7edd7c24fe505e47fa9a3dfd4af1b5eb | [
"MIT"
] | 9 | 2020-11-05T22:37:40.000Z | 2021-11-10T20:55:39.000Z | notebook/hamiltonian/01-custom_eigen.ipynb | highrizer/HOQSTTutorials.jl | fb3fb43b7edd7c24fe505e47fa9a3dfd4af1b5eb | [
"MIT"
] | 3 | 2020-12-05T20:20:09.000Z | 2022-02-07T06:37:54.000Z | 45.64 | 594 | 0.599912 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ec8d1e227b5b18e33cbeb5a88f3a6e507ec5bc24 | 23,769 | ipynb | Jupyter Notebook | importation.ipynb | Michel-Nassalang/python | 8c3b40d63c9930ac32120140db04fff6ec6c46bf | [
"Apache-2.0"
] | null | null | null | importation.ipynb | Michel-Nassalang/python | 8c3b40d63c9930ac32120140db04fff6ec6c46bf | [
"Apache-2.0"
] | null | null | null | importation.ipynb | Michel-Nassalang/python | 8c3b40d63c9930ac32120140db04fff6ec6c46bf | [
"Apache-2.0"
] | null | null | null | 22.615604 | 142 | 0.423535 | [
[
[
"import math",
"_____no_output_____"
],
[
"from listing import liste",
"_____no_output_____"
],
[
"import random",
"_____no_output_____"
],
[
"import statistics",
"_____no_output_____"
],
[
"import os",
"_____no_output_____"
],
[
"import glob",
"_____no_output_____"
],
[
"math.sinh(30)",
"_____no_output_____"
],
[
"statistics.Fraction(0.9)",
"_____no_output_____"
],
[
"liste1 =['Mike', 'Edgar', 'Mandou', 'Marie Pierre']",
"_____no_output_____"
],
[
"random.choice(liste1)",
"_____no_output_____"
],
[
"random.getstate()",
"_____no_output_____"
],
[
"statistics.mean(liste)",
"_____no_output_____"
],
[
"statistics.variance(liste)",
"_____no_output_____"
],
[
"random.randint(5,15)",
"_____no_output_____"
],
[
"random.randrange(10, 15)",
"_____no_output_____"
],
[
"random.sample(range(10, 100), 15)",
"_____no_output_____"
],
[
"random.shuffle(liste1)\nliste1",
"_____no_output_____"
],
[
"os.getcwd()",
"_____no_output_____"
],
[
"glob.glob('*')",
"_____no_output_____"
],
[
"with open('tableau.txt', 'w') as T :\n tb = {i:i**2 for i in range(10)}\n T.write(str(tb))",
"_____no_output_____"
],
[
"with open('tableau.txt', 'w') as T :\n for i in range(10):\n T.write(f'\"{i}\":{i**2}'+' \\n ')",
"_____no_output_____"
],
[
"with open('tableau.txt', 'r') as T:\n r=T.read()\n g = r.split()",
"_____no_output_____"
],
[
"r, g",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8d5040866d053928f6b591a0976a7e9863ec69 | 28,035 | ipynb | Jupyter Notebook | Mini-Project-1 - JSON Data Wrangling/sliderule_dsi_json_exercise.ipynb | emenriquez/Springboard-Coursework | 7ac89a5b8bf7855bcd5cefaa02367134cb81ce8a | [
"Apache-2.0"
] | null | null | null | Mini-Project-1 - JSON Data Wrangling/sliderule_dsi_json_exercise.ipynb | emenriquez/Springboard-Coursework | 7ac89a5b8bf7855bcd5cefaa02367134cb81ce8a | [
"Apache-2.0"
] | null | null | null | Mini-Project-1 - JSON Data Wrangling/sliderule_dsi_json_exercise.ipynb | emenriquez/Springboard-Coursework | 7ac89a5b8bf7855bcd5cefaa02367134cb81ce8a | [
"Apache-2.0"
] | 1 | 2019-04-22T14:57:02.000Z | 2019-04-22T14:57:02.000Z | 43.736349 | 3,315 | 0.506795 | [
[
[
"# JSON examples and exercise\n****\n+ get familiar with packages for dealing with JSON\n+ study examples with JSON strings and files \n+ work on exercise to be completed and submitted \n****\n+ reference: http://pandas.pydata.org/pandas-docs/stable/io.html#io-json-reader\n+ data source: http://jsonstudio.com/resources/\n****",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"## imports for Python, Pandas",
"_____no_output_____"
]
],
[
[
"import json\nfrom pandas.io.json import json_normalize",
"_____no_output_____"
]
],
[
[
"## JSON example, with string\n\n+ demonstrates creation of normalized dataframes (tables) from nested json string\n+ source: http://pandas.pydata.org/pandas-docs/stable/io.html#normalization",
"_____no_output_____"
]
],
[
[
"# define json string\ndata = [{'state': 'Florida', \n 'shortname': 'FL',\n 'info': {'governor': 'Rick Scott'},\n 'counties': [{'name': 'Dade', 'population': 12345},\n {'name': 'Broward', 'population': 40000},\n {'name': 'Palm Beach', 'population': 60000}]},\n {'state': 'Ohio',\n 'shortname': 'OH',\n 'info': {'governor': 'John Kasich'},\n 'counties': [{'name': 'Summit', 'population': 1234},\n {'name': 'Cuyahoga', 'population': 1337}]}]",
"_____no_output_____"
],
[
"# use normalization to create tables from nested element\njson_normalize(data, 'counties')",
"_____no_output_____"
],
[
"# further populate tables created from nested element\njson_normalize(data, 'counties', ['state', 'shortname', ['info', 'governor']])",
"_____no_output_____"
]
],
[
[
"****\n## JSON example, with file\n\n+ demonstrates reading in a json file as a string and as a table\n+ uses small sample file containing data about projects funded by the World Bank \n+ data source: http://jsonstudio.com/resources/",
"_____no_output_____"
]
],
[
[
"# load json as string\njson.load((open('data/world_bank_projects_less.json')))",
"_____no_output_____"
],
[
"# load as Pandas dataframe\nsample_json_df = pd.read_json('data/world_bank_projects_less.json')\nsample_json_df",
"_____no_output_____"
]
],
[
[
"****\n## JSON exercise\n\nUsing data in file 'data/world_bank_projects.json' and the techniques demonstrated above,\n1. Find the 10 countries with most projects\n2. Find the top 10 major project themes (using column 'mjtheme_namecode')\n3. In 2. above you will notice that some entries have only the code and the name is missing. Create a dataframe with the missing names filled in.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
ec8d5e08de3767bcf6dd406df69a4573a46c5cdb | 27,990 | ipynb | Jupyter Notebook | Gradient_Descent_(Multivariate).ipynb | rahul23aug/PY-machine_learning | 02d644c64290d78b37c9024cde2c332b8232636b | [
"MIT"
] | null | null | null | Gradient_Descent_(Multivariate).ipynb | rahul23aug/PY-machine_learning | 02d644c64290d78b37c9024cde2c332b8232636b | [
"MIT"
] | null | null | null | Gradient_Descent_(Multivariate).ipynb | rahul23aug/PY-machine_learning | 02d644c64290d78b37c9024cde2c332b8232636b | [
"MIT"
] | null | null | null | 59.300847 | 12,910 | 0.669918 | [
[
[
"Multivariate Linear Regression without scaling\n ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"LOAD THE DATASET",
"_____no_output_____"
]
],
[
[
"dataset=pd.read_csv('./50_Startups.csv')\n",
"_____no_output_____"
]
],
[
[
"DATA WRANGLING :\n\nNo na's in the data but since we have categorical data \"State\" we will encode it to dummy variables, since whe have three labels viz California, New York, Florida\nwe need 3-1=2 dummy variables\nsuch that D0=California and D1=New York",
"_____no_output_____"
]
],
[
[
"dataset['D0']=dataset['State'].replace({\"California\":1,\"Florida\":0,\"New York\":0})\ndataset['D1']=dataset['State'].replace({\"California\":0,\"Florida\":0,\"New York\":1})\ndataset['ones']= np.ones(len(dataset)).reshape(len(dataset),1)",
"_____no_output_____"
]
],
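[
[
"# Hedged aside (not from the original notebook): pandas can build this kind of dummy\n# encoding in one call. `dataset` and the 'State' column come from the cells above;\n# everything else in this cell is illustrative only.\ndummies_sketch = pd.get_dummies(dataset['State'], prefix='State', drop_first=True)\n# drop_first=True keeps 3-1=2 columns to avoid the dummy-variable trap; note that the\n# dropped reference level may differ from the manual D0/D1 choice above.\nprint(dummies_sketch.head())",
"_____no_output_____"
]
],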
[
[
"View the changes",
"_____no_output_____"
]
],
[
[
"print(dataset)",
" R&D Spend Administration Marketing Spend ... D0 D1 ones\n0 165349.20 136897.80 471784.10 ... 0 1 1.0\n1 162597.70 151377.59 443898.53 ... 1 0 1.0\n2 153441.51 101145.55 407934.54 ... 0 0 1.0\n3 144372.41 118671.85 383199.62 ... 0 1 1.0\n4 142107.34 91391.77 366168.42 ... 0 0 1.0\n5 131876.90 99814.71 362861.36 ... 0 1 1.0\n6 134615.46 147198.87 127716.82 ... 1 0 1.0\n7 130298.13 145530.06 323876.68 ... 0 0 1.0\n8 120542.52 148718.95 311613.29 ... 0 1 1.0\n9 123334.88 108679.17 304981.62 ... 1 0 1.0\n10 101913.08 110594.11 229160.95 ... 0 0 1.0\n11 100671.96 91790.61 249744.55 ... 1 0 1.0\n12 93863.75 127320.38 249839.44 ... 0 0 1.0\n13 91992.39 135495.07 252664.93 ... 1 0 1.0\n14 119943.24 156547.42 256512.92 ... 0 0 1.0\n15 114523.61 122616.84 261776.23 ... 0 1 1.0\n16 78013.11 121597.55 264346.06 ... 1 0 1.0\n17 94657.16 145077.58 282574.31 ... 0 1 1.0\n18 91749.16 114175.79 294919.57 ... 0 0 1.0\n19 86419.70 153514.11 0.00 ... 0 1 1.0\n20 76253.86 113867.30 298664.47 ... 1 0 1.0\n21 78389.47 153773.43 299737.29 ... 0 1 1.0\n22 73994.56 122782.75 303319.26 ... 0 0 1.0\n23 67532.53 105751.03 304768.73 ... 0 0 1.0\n24 77044.01 99281.34 140574.81 ... 0 1 1.0\n25 64664.71 139553.16 137962.62 ... 1 0 1.0\n26 75328.87 144135.98 134050.07 ... 0 0 1.0\n27 72107.60 127864.55 353183.81 ... 0 1 1.0\n28 66051.52 182645.56 118148.20 ... 0 0 1.0\n29 65605.48 153032.06 107138.38 ... 0 1 1.0\n30 61994.48 115641.28 91131.24 ... 0 0 1.0\n31 61136.38 152701.92 88218.23 ... 0 1 1.0\n32 63408.86 129219.61 46085.25 ... 1 0 1.0\n33 55493.95 103057.49 214634.81 ... 0 0 1.0\n34 46426.07 157693.92 210797.67 ... 1 0 1.0\n35 46014.02 85047.44 205517.64 ... 0 1 1.0\n36 28663.76 127056.21 201126.82 ... 0 0 1.0\n37 44069.95 51283.14 197029.42 ... 1 0 1.0\n38 20229.59 65947.93 185265.10 ... 0 1 1.0\n39 38558.51 82982.09 174999.30 ... 1 0 1.0\n40 28754.33 118546.05 172795.67 ... 1 0 1.0\n41 27892.92 84710.77 164470.71 ... 0 0 1.0\n42 23640.93 96189.63 148001.11 ... 1 0 1.0\n43 15505.73 127382.30 35534.17 ... 0 1 1.0\n44 22177.74 154806.14 28334.72 ... 1 0 1.0\n45 1000.23 124153.04 1903.93 ... 0 1 1.0\n46 1315.46 115816.21 297114.46 ... 0 0 1.0\n47 0.00 135426.92 0.00 ... 1 0 1.0\n48 542.05 51743.15 0.00 ... 0 1 1.0\n49 0.00 116983.80 45173.06 ... 1 0 1.0\n\n[50 rows x 8 columns]\n"
]
],
[
[
"Create a new dataframe to work with.\nthe old one can be used for reference if needed",
"_____no_output_____"
]
],
[
[
"df=dataset[['ones','R&D Spend', 'Administration', 'Marketing Spend','D0', 'D1', 'Profit']]\ny_features= df[df.columns[-1]]\nx_features=df[df.columns[:-1]]\n#pass a list of the column names to be extracted from thr original dataframe and the order in which they must appear",
"_____no_output_____"
]
],
[
[
"SHOW THE RESULTS",
"_____no_output_____"
]
],
[
[
"print(df)",
"_____no_output_____"
],
[
"print(y_features)",
"_____no_output_____"
],
[
"print(x_features)",
"_____no_output_____"
]
],
[
[
"Let's see how the dataframe column values fluctuate",
"_____no_output_____"
]
],
[
[
"print(\"Max Features:\\n \")\nprint(df.max(axis=0))\nprint(\"\\n\\nMin Features: \\n\")\nprint(df.min(axis=0))",
"Max Features:\n \nones 1.00\nR&D Spend 165349.20\nAdministration 182645.56\nMarketing Spend 471784.10\nD0 1.00\nD1 1.00\nProfit 192261.83\ndtype: float64\n\n\nMin Features: \n\nones 1.00\nR&D Spend 0.00\nAdministration 51283.14\nMarketing Spend 0.00\nD0 0.00\nD1 0.00\nProfit 14681.40\ndtype: float64\n"
]
],
[
[
"multi-variate linear model is : \n\ny= Theta0 + Theta1*x1 + Theta2*x2 + Theta3*x3 +.....\n\nin vector notation:\nTheta(transpose)*xi (from 1 to n) + Theta0\n\nGradient Descent is given by:\nmin(theta) fot J(Theta0,Theta1,..)\nwhere j= ((yp=y)**2/2m)\n\nNote: Theta values must be updated simultaneously and then plugged in",
"_____no_output_____"
],
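[
"For reference, here is the same cost and update written out (added for clarity; the symbols follow the text above, with alpha the learning rate and m the number of samples):\n\n$$J(\\theta) = \\frac{1}{2m}\\sum_{i=1}^{m}\\big(y_p^{(i)} - y^{(i)}\\big)^2, \\qquad \\theta_j \\leftarrow \\theta_j - \\frac{\\alpha}{m}\\sum_{i=1}^{m}\\big(y_p^{(i)} - y^{(i)}\\big)\\,x_j^{(i)}$$",
"_____no_output_____"
],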
[
"Note: The model requires you to set the learning rate \nit is an iterative process (if the rate is too high you will see inf. or nan as MSE: valued if its is too low you will see some value but the graph will be a straight line. A right learning rate & iteration combo produces a decreasing exponent)",
"_____no_output_____"
]
],
[
[
"def fit(x_features,y_responses,alpha=0.000000000001,iter=100):\n theta=np.ones(x_features.shape[1])\n rl=[]\n for i in range(iter):\n yp=(x_features*theta).sum(axis=1)\n rl.append([(yp-y_responses).sum()])\n for column in range(theta.shape[0]):\n difference=(x_features[x_features.columns[column]]*(yp-y_responses)).sum()\n theta[column]= theta[column] - (alpha*difference)/y_responses.shape[0]\n MSE=(((yp-y_responses)**2)/y_responses.shape[0]).sum()\n print(\"MSE: \",MSE)\n return (rl , iter, theta)\ny=fit(x_features,y_responses)",
"MSE: 268737835.4457357\n"
],
[
"import matplotlib.pyplot as plt\nx=np.arange(y[1])\nplt.plot(x,np.array(y[0])**2)\nplt.xlabel(\"ITERATIONS\")\nplt.ylabel(\"RESIDUALS\")\nplt.title(\"Gradient Descent\")\nplt.show()\nprint(\"Theta values are: \", end=\" \")\nprint(y[2])",
"_____no_output_____"
],
[
"# PREDICT USING THE MODEL\ndef predict(x,th):\n return (th.T*x).sum()\nx=[ 1.0,162597.70,151377.59,393898.53,1,1]\nprint(\"%.5f\"%predict(x,y[2]))",
"192747.62948\n"
]
],
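[
[
"# Hedged sketch (not part of the original notebook): a tiny sweep over candidate\n# learning rates, reusing the fit() function and the x_features / y_responses\n# defined above. Too high a rate gives inf/nan MSE; too low a rate gives a flat curve.\nfor alpha_try in [1e-13, 1e-12, 1e-11]:\n    print('alpha =', alpha_try)\n    _ = fit(x_features, y_responses, alpha=alpha_try, iter=50)",
"_____no_output_____"
]
],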
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
ec8d5e76ba73f00213e2812657d3625d18892b8b | 11,433 | ipynb | Jupyter Notebook | ipynb/wltests/sched-evaluation-full.ipynb | MOVZX/ARM-Lisa | ff5cdc0990aeebf5650e27b4b3318c6d6bbb9226 | [
"Apache-2.0"
] | 1 | 2020-04-29T05:46:27.000Z | 2020-04-29T05:46:27.000Z | ipynb/wltests/sched-evaluation-full.ipynb | MOVZX/ARM-Lisa | ff5cdc0990aeebf5650e27b4b3318c6d6bbb9226 | [
"Apache-2.0"
] | null | null | null | ipynb/wltests/sched-evaluation-full.ipynb | MOVZX/ARM-Lisa | ff5cdc0990aeebf5650e27b4b3318c6d6bbb9226 | [
"Apache-2.0"
] | null | null | null | 24.854348 | 129 | 0.544564 | [
[
[
"# WLTests Results\nAnalyses and visualises results generated by a wltest",
"_____no_output_____"
]
],
[
[
"from conf import LisaLogging\nLisaLogging.setup()",
"_____no_output_____"
],
[
"import logging\nfrom IPython.display import display\n\nfrom wa_results_collector import WaResultsCollector\nimport pandas as pd\n\n%pylab inline",
"_____no_output_____"
]
],
[
[
"## Results analysis and metrics collection",
"_____no_output_____"
]
],
[
[
"collector = WaResultsCollector(\n \n # WLTests results folder:\n base_dir='../../results/wltests/', # Base path of your results folders\n #wa_dirs='(substring_to_match)', # Parse only folder matching this regexp\n \n # Results to collect:\n parse_traces=False, # Enable trace parsing only to get more metrics\n # NOTE: results generation will take more times\n \n # Kernel tree used for the tests\n kernel_repo_path='/path/to/your/linux/sources/tree'\n)",
"_____no_output_____"
]
],
[
[
"## Collected metrics",
"_____no_output_____"
]
],
[
[
"df = collector.results_df\nlogging.info(\"Metrics available for plots and analysis:\")\nfor metric in df.metric.unique().tolist():\n logging.info(\" %s\", metric)",
"_____no_output_____"
]
],
[
[
"# Jankbench",
"_____no_output_____"
],
[
"## Total Frame Duration",
"_____no_output_____"
]
],
[
[
"for test in collector.tests(workload='jankbench'):\n logging.info(\"Results for: %s\", test)\n collector.report(workload='jankbench', metric='frame_total_duration',\n test=\"^{}$\".format(test), sort_on='99%', ascending=True)",
"_____no_output_____"
]
],
[
[
"## Energy",
"_____no_output_____"
]
],
[
[
"for test in collector.tests(workload='jankbench'):\n logging.info(\"Results for: %s\", test)\n collector.report(workload='jankbench', metric='device_total_energy',\n test=\"^{}$\".format(test), sort_on='mean', ascending=True)",
"_____no_output_____"
]
],
[
[
"## Frames Duration CDF",
"_____no_output_____"
]
],
[
[
"for test in collector.tests(workload='jankbench'):\n logging.info(\"Results for: %s\", test)\n collector.plot_cdf(workload='jankbench', metric='frame_total_duration',\n test=\"^{}$\".format(test), threshold=16)",
"_____no_output_____"
]
],
[
[
"# Exoplayer",
"_____no_output_____"
],
[
"## Dropper Frames",
"_____no_output_____"
]
],
[
[
"for test in collector.tests(workload='exoplayer'):\n logging.info(\"Results for: %s\", test)\n collector.report(workload='exoplayer', metric='exoplayer_dropped_frames',\n test=test, sort_on='99%', ascending=True)",
"_____no_output_____"
]
],
[
[
"## Energy",
"_____no_output_____"
]
],
[
[
"for test in collector.tests(workload='exoplayer'):\n logging.info(\"Results for: %s\", test)\n collector.report(workload='exoplayer', metric='device_total_energy',\n test=test, sort_on='mean', ascending=True)",
"_____no_output_____"
]
],
[
[
"# Homescreen",
"_____no_output_____"
]
],
[
[
"for test in collector.tests(workload='homescreen'):\n logging.info(\"Results for: %s\", test)\n collector.report(workload='homescreen', metric='device_total_energy',\n test=test, sort_on='mean', ascending=True)",
"_____no_output_____"
]
],
[
[
"# Geekbench",
"_____no_output_____"
],
[
"## Overall scores",
"_____no_output_____"
]
],
[
[
"for metric in [ 'Single-Core_score', 'Multi-Core_score']:\n collector.report(workload='geekbench', metric=metric,\n sort_on='99%', ascending=False)",
"_____no_output_____"
]
],
[
[
"## Detailed scores",
"_____no_output_____"
]
],
[
[
"# Get Geekbench scores\ndf = collector.results_df\ngb_scores_db = df[df.test == 'geekbench']\n\n# Group scores\ngrouped_df = gb_scores_db.groupby(['test', 'tag', 'kernel', 'metric'])\n\n# Get stats for grouped scores\nstats_df = pd.DataFrame(grouped_df.describe(percentiles=[.95, .99]))\nstats_df = stats_df.reset_index().rename(columns={'level_4': 'stats'})",
"_____no_output_____"
]
],
[
[
"### Single Core Scores",
"_____no_output_____"
]
],
[
[
"single_score_df = stats_df[stats_df.metric.str.match('Single.*')][['metric', 'kernel', 'stats', 'value']]\nsingle_score_df['metric'] = single_score_df.metric.apply(lambda s : s.replace('Single-Core_', '').replace('_score', ''))\nsingle_score_df = single_score_df.set_index(['metric', 'kernel', 'stats']).unstack()\nlogging.info(\"Detailed SINGLE core scores:\")\nsingle_score_df",
"_____no_output_____"
]
],
[
[
"### Multi Core Scores",
"_____no_output_____"
]
],
[
[
"multi_score_df = stats_df[stats_df.metric.str.match('Multi.*')][['metric', 'kernel', 'stats', 'value']]\nmulti_score_df['metric'] = multi_score_df.metric.apply(lambda s : s.replace('Multi-Core_', '').replace('_score', ''))\nmulti_score_df = multi_score_df.set_index(['metric', 'kernel', 'stats']).unstack()\nlogging.info(\"Detailed SINGLE core scores:\")\nmulti_score_df",
"_____no_output_____"
]
],
[
[
"# PCMark Scores",
"_____no_output_____"
],
[
"## Overall Scores",
"_____no_output_____"
]
],
[
[
"pm_df = df[df.workload == 'pcmark']\npm_scores = [m for m in pm_df.metric.unique().tolist() if m.startswith('pcmark_')]\nfor metric in pm_scores:\n collector.report(workload='pcmark', metric=metric,\n sort_on='99%', ascending=False)",
"_____no_output_____"
]
],
[
[
"## Detailed Scores",
"_____no_output_____"
]
],
[
[
"# Get Geekbench scores\ndf = collector.results_df\npm_scores_db = df[df.workload == 'pcmark']\n\n# Group scores\ngrouped_df = pm_scores_db.groupby(['test', 'tag', 'kernel', 'metric'])\n\n# Get stats for grouped scores\nstats_df = pd.DataFrame(grouped_df.describe(percentiles=[.95, .99]))\nstats_df = stats_df.reset_index().rename(columns={'level_4': 'stats'})",
"_____no_output_____"
],
[
"pm_score_df = stats_df[stats_df.metric.str.match('pcmark_.*')][['metric', 'kernel', 'stats', 'value']]\npm_score_df['metric'] = pm_score_df.metric.apply(lambda s : s.replace('pcmark_', ''))\npm_score_df = pm_score_df.set_index(['metric', 'kernel', 'stats']).unstack()\nlogging.info(\"Detailed scores:\")\npm_score_df",
"_____no_output_____"
]
],
[
[
"# Generic comparison plots\n`plot_comparisons` can be used to automatically discover metrics that changed between different kernel versions or tags. ",
"_____no_output_____"
]
],
[
[
"logging.info(\"Here is the list of kernels available:\")\nlogging.info(\" %s\", ', '.join(df['kernel'].unique().tolist() ))",
"_____no_output_____"
],
[
"# Select the baseline kernels for comparisions:\n# by deafult we use the first available:\nkernel_baseline = df['kernel'].iloc[0]\n# Or defined here below one of the above reported kernels as baseline for comparisions\n# kernel_baseline = \"PutHereYourKernelName\"\n\nlogging.info(\"Comparing against baseline kernel: %s\", kernel_baseline)\ncollector.plot_comparisons(base_id=kernel_baseline, by='kernel')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ec8d61a7e4d8c46b27bfe3ab26646e4f28352eaf | 2,857 | ipynb | Jupyter Notebook | app.ipynb | caaso-quant/etfs-monitor-public | a39c5001e9c42d7424de3af755aedde6019e7059 | [
"MIT"
] | 2 | 2021-08-05T18:35:44.000Z | 2021-08-19T13:30:13.000Z | app.ipynb | caaso-quant/etfs-monitor-public | a39c5001e9c42d7424de3af755aedde6019e7059 | [
"MIT"
] | null | null | null | app.ipynb | caaso-quant/etfs-monitor-public | a39c5001e9c42d7424de3af755aedde6019e7059 | [
"MIT"
] | null | null | null | 25.972727 | 120 | 0.583829 | [
[
[
"# NDT - ETFs Monitor\n## <font color='#2e86e0'>Numbers Doing Trade - Hackaton Bloomberg 2021</font>\n- - -\n\n## Funcionalidades\n### Seleção de ETFs:\n* Visualização de todos os ETFs listados na bolsa brasileira.\n* Opção de coloração por correlação com drivers macroeconômicos.\n* Opção de coloração por retorno de 1 ano e 1 semana.\n* Agrupamento e filtragem por características intrínsecas dos ETFs.\n\n### Análise e comparação de ETFs:\n* Possibilidade de selecionar múltiplos ETFs.\n* Comparar retornos de ETFs selecionados e respectivos benchmarks.\n* Exibição de gráfico de fronteira eficiente de Markowitz.\n* Comparar volatilidade dos ETFs selecionados.\n* Comparar volume de negociações.\n* Analisar tracking error em relação aos respectivos benchmarks.\n* Visualização de gráfico de área ponderada pela correlação com drivers macroeconômicos.\n* Visualização de holdings dos ETFs e pesos relativos.\n* Listagem de informações adicionais sobre os ETFs selecionados.\n\n- - -",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\nfrom app import AppView\nimport ipywidgets as widgets\n",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"\napp = AppView()\napp.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
ec8d662da174bde8e2a48c98ee6b4fc5dc4ff1f9 | 174,329 | ipynb | Jupyter Notebook | 315.ipynb | arnoldasjan/225_turing | 1f976c9ea99cdd23737e74119f762b92931f1f6d | [
"MIT"
] | null | null | null | 315.ipynb | arnoldasjan/225_turing | 1f976c9ea99cdd23737e74119f762b92931f1f6d | [
"MIT"
] | null | null | null | 315.ipynb | arnoldasjan/225_turing | 1f976c9ea99cdd23737e74119f762b92931f1f6d | [
"MIT"
] | null | null | null | 48.532572 | 32,794 | 0.415576 | [
[
[
"# Module 3: Machine Learning\n\n## Sprint 1: Basic Machine Learning\n\n## House prices regression competition",
"_____no_output_____"
],
[
"## Background",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"Participating in Kaggle competitions is an efficient way to learn some aspects of Machine Learning. You can read solutions made public by the others, participate in the discussions to talk about solution ideas and test them by submitting them for evaluation.\n\nThe metric used for evaluation can vary from competition to competition, but the idea remains the same - build a model that is as accurate as possible on the testing set. In industry, there are other factors to consider when building machine learning models - inference time, solution complexity, maintainability and so on. However, even though you only learn a subset of the required skills while participating in Kaggle competitions, it is quite a fun way to learn by doing it, so let's participate in one of the tutorial competitions now!",
"_____no_output_____"
],
[
"## The competition\n\nIn the previous notebooks, we learned about some very important ML topics:\n\n- Optimizing an objective\n- Training, validation, testing sets\n- Feature engineering\n- Logistic regression model\n- Linear regression model\n- Random forest model\n- Interpreting the model by looking at the important features\n\nIn this notebook we will practice all the above concepts by participating in another Kaggle competition:\n\n- https://www.kaggle.com/c/house-prices-advanced-regression-techniques/overview\n\nRead the overview and look at the evaluation section (https://www.kaggle.com/c/house-prices-advanced-regression-techniques/overview/evaluation). The target to predict is a continuous variable and hence we will build regression models.\n\nGive extra attention to the evaluation metric - the RMSE is computed between the logarithm of the predicted value and the logarithm of the observed sales price, not the raw prices.\n\nNow look at the data section - inspect the columns that are available. Download the data and start with exploratory data analysis, similar to the one you used in the previous notebooks. Select some features, maybe do some feature engineering and build linear regression and random forest models using sklearn.\n\nFor help, you can look at some of the notebooks by other competitors. However, try to write code by yourself, as even though you will always be able to consult external resources while working as a professional, the main thing right now is to learn by first trying it yourself. These two notebooks could be useful:\n\n- https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python for exploratory data analysis\n- https://www.kaggle.com/marcelopesse/house-prices-machine-learning-with-sklearn using random forests for regression",
"_____no_output_____"
],
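[
"As a worked form of the metric described above (added for clarity; $\\hat{y}_i$ is the predicted price and $y_i$ the observed sale price of house $i$):\n\n$$\\mathrm{RMSLE} = \\sqrt{\\frac{1}{n}\\sum_{i=1}^{n}\\big(\\log \\hat{y}_i - \\log y_i\\big)^2}$$\n\nEquivalently, you can train and validate on the logarithm of SalePrice and report the plain RMSE of those log targets.",
"_____no_output_____"
],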
[
"---",
"_____no_output_____"
],
[
"## Concepts to explore\n\n- Linear regression https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html\n- Random forest regressor https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html\n- Root mean squared error https://en.wikipedia.org/wiki/Root-mean-square_deviation",
"_____no_output_____"
],
[
"## Requirements\n\n- Train, validate (using a dedicated validation set) and submit a linear regression model\n- Train, validate (using a dedicated validation set) and submit a random forest regressor model\n- Achieve a score better than 0.14 on the public leaderboard",
"_____no_output_____"
],
[
"## Evaluation criteria\n\n- Public leaderboard score\n- How simple is the model\n- Code quality",
"_____no_output_____"
],
[
"\n## Sample correction questions\n\nDuring a correction, you may get asked questions that test your understanding of covered topics.\n\n- What factors determine whether you should use machine learning for a given problem?\n- What factors determine what kind of machine learning model should be used for a given problem?\n- What are some possible ways to make sure that your model will perform well on previously unseen samples?\n- What factors make a machine learning model useful?",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set_theme()\nfrom pandas.api.types import CategoricalDtype\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import cross_val_score",
"_____no_output_____"
],
[
"train_data = pd.read_csv('https://raw.githubusercontent.com/TuringCollegeSubmissions/ajanus-DS.3.1/master/train.csv?token=ALINPS3ZE2BT4LIY3P7CWADAV2IOW', index_col=\"Id\")\ntest_data = pd.read_csv('https://raw.githubusercontent.com/TuringCollegeSubmissions/ajanus-DS.3.1/master/test.csv?token=ALINPS5SAPIQ2ZAMYXGQWULAV2IRC', index_col=\"Id\")",
"_____no_output_____"
],
[
"train_data.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1460 entries, 1 to 1460\nData columns (total 80 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MSSubClass 1460 non-null int64 \n 1 MSZoning 1460 non-null object \n 2 LotFrontage 1201 non-null float64\n 3 LotArea 1460 non-null int64 \n 4 Street 1460 non-null object \n 5 Alley 91 non-null object \n 6 LotShape 1460 non-null object \n 7 LandContour 1460 non-null object \n 8 Utilities 1460 non-null object \n 9 LotConfig 1460 non-null object \n 10 LandSlope 1460 non-null object \n 11 Neighborhood 1460 non-null object \n 12 Condition1 1460 non-null object \n 13 Condition2 1460 non-null object \n 14 BldgType 1460 non-null object \n 15 HouseStyle 1460 non-null object \n 16 OverallQual 1460 non-null int64 \n 17 OverallCond 1460 non-null int64 \n 18 YearBuilt 1460 non-null int64 \n 19 YearRemodAdd 1460 non-null int64 \n 20 RoofStyle 1460 non-null object \n 21 RoofMatl 1460 non-null object \n 22 Exterior1st 1460 non-null object \n 23 Exterior2nd 1460 non-null object \n 24 MasVnrType 1452 non-null object \n 25 MasVnrArea 1452 non-null float64\n 26 ExterQual 1460 non-null object \n 27 ExterCond 1460 non-null object \n 28 Foundation 1460 non-null object \n 29 BsmtQual 1423 non-null object \n 30 BsmtCond 1423 non-null object \n 31 BsmtExposure 1422 non-null object \n 32 BsmtFinType1 1423 non-null object \n 33 BsmtFinSF1 1460 non-null int64 \n 34 BsmtFinType2 1422 non-null object \n 35 BsmtFinSF2 1460 non-null int64 \n 36 BsmtUnfSF 1460 non-null int64 \n 37 TotalBsmtSF 1460 non-null int64 \n 38 Heating 1460 non-null object \n 39 HeatingQC 1460 non-null object \n 40 CentralAir 1460 non-null object \n 41 Electrical 1459 non-null object \n 42 1stFlrSF 1460 non-null int64 \n 43 2ndFlrSF 1460 non-null int64 \n 44 LowQualFinSF 1460 non-null int64 \n 45 GrLivArea 1460 non-null int64 \n 46 BsmtFullBath 1460 non-null int64 \n 47 BsmtHalfBath 1460 non-null int64 \n 48 FullBath 1460 non-null int64 \n 49 HalfBath 1460 non-null int64 \n 50 BedroomAbvGr 1460 non-null int64 \n 51 KitchenAbvGr 1460 non-null int64 \n 52 KitchenQual 1460 non-null object \n 53 TotRmsAbvGrd 1460 non-null int64 \n 54 Functional 1460 non-null object \n 55 Fireplaces 1460 non-null int64 \n 56 FireplaceQu 770 non-null object \n 57 GarageType 1379 non-null object \n 58 GarageYrBlt 1379 non-null float64\n 59 GarageFinish 1379 non-null object \n 60 GarageCars 1460 non-null int64 \n 61 GarageArea 1460 non-null int64 \n 62 GarageQual 1379 non-null object \n 63 GarageCond 1379 non-null object \n 64 PavedDrive 1460 non-null object \n 65 WoodDeckSF 1460 non-null int64 \n 66 OpenPorchSF 1460 non-null int64 \n 67 EnclosedPorch 1460 non-null int64 \n 68 3SsnPorch 1460 non-null int64 \n 69 ScreenPorch 1460 non-null int64 \n 70 PoolArea 1460 non-null int64 \n 71 PoolQC 7 non-null object \n 72 Fence 281 non-null object \n 73 MiscFeature 54 non-null object \n 74 MiscVal 1460 non-null int64 \n 75 MoSold 1460 non-null int64 \n 76 YrSold 1460 non-null int64 \n 77 SaleType 1460 non-null object \n 78 SaleCondition 1460 non-null object \n 79 SalePrice 1460 non-null int64 \ndtypes: float64(3), int64(34), object(43)\nmemory usage: 923.9+ KB\n"
],
[
"test_data.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1459 entries, 1461 to 2919\nData columns (total 79 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MSSubClass 1459 non-null int64 \n 1 MSZoning 1455 non-null object \n 2 LotFrontage 1232 non-null float64\n 3 LotArea 1459 non-null int64 \n 4 Street 1459 non-null object \n 5 Alley 107 non-null object \n 6 LotShape 1459 non-null object \n 7 LandContour 1459 non-null object \n 8 Utilities 1457 non-null object \n 9 LotConfig 1459 non-null object \n 10 LandSlope 1459 non-null object \n 11 Neighborhood 1459 non-null object \n 12 Condition1 1459 non-null object \n 13 Condition2 1459 non-null object \n 14 BldgType 1459 non-null object \n 15 HouseStyle 1459 non-null object \n 16 OverallQual 1459 non-null int64 \n 17 OverallCond 1459 non-null int64 \n 18 YearBuilt 1459 non-null int64 \n 19 YearRemodAdd 1459 non-null int64 \n 20 RoofStyle 1459 non-null object \n 21 RoofMatl 1459 non-null object \n 22 Exterior1st 1458 non-null object \n 23 Exterior2nd 1458 non-null object \n 24 MasVnrType 1443 non-null object \n 25 MasVnrArea 1444 non-null float64\n 26 ExterQual 1459 non-null object \n 27 ExterCond 1459 non-null object \n 28 Foundation 1459 non-null object \n 29 BsmtQual 1415 non-null object \n 30 BsmtCond 1414 non-null object \n 31 BsmtExposure 1415 non-null object \n 32 BsmtFinType1 1417 non-null object \n 33 BsmtFinSF1 1458 non-null float64\n 34 BsmtFinType2 1417 non-null object \n 35 BsmtFinSF2 1458 non-null float64\n 36 BsmtUnfSF 1458 non-null float64\n 37 TotalBsmtSF 1458 non-null float64\n 38 Heating 1459 non-null object \n 39 HeatingQC 1459 non-null object \n 40 CentralAir 1459 non-null object \n 41 Electrical 1459 non-null object \n 42 1stFlrSF 1459 non-null int64 \n 43 2ndFlrSF 1459 non-null int64 \n 44 LowQualFinSF 1459 non-null int64 \n 45 GrLivArea 1459 non-null int64 \n 46 BsmtFullBath 1457 non-null float64\n 47 BsmtHalfBath 1457 non-null float64\n 48 FullBath 1459 non-null int64 \n 49 HalfBath 1459 non-null int64 \n 50 BedroomAbvGr 1459 non-null int64 \n 51 KitchenAbvGr 1459 non-null int64 \n 52 KitchenQual 1458 non-null object \n 53 TotRmsAbvGrd 1459 non-null int64 \n 54 Functional 1457 non-null object \n 55 Fireplaces 1459 non-null int64 \n 56 FireplaceQu 729 non-null object \n 57 GarageType 1383 non-null object \n 58 GarageYrBlt 1381 non-null float64\n 59 GarageFinish 1381 non-null object \n 60 GarageCars 1458 non-null float64\n 61 GarageArea 1458 non-null float64\n 62 GarageQual 1381 non-null object \n 63 GarageCond 1381 non-null object \n 64 PavedDrive 1459 non-null object \n 65 WoodDeckSF 1459 non-null int64 \n 66 OpenPorchSF 1459 non-null int64 \n 67 EnclosedPorch 1459 non-null int64 \n 68 3SsnPorch 1459 non-null int64 \n 69 ScreenPorch 1459 non-null int64 \n 70 PoolArea 1459 non-null int64 \n 71 PoolQC 3 non-null object \n 72 Fence 290 non-null object \n 73 MiscFeature 51 non-null object \n 74 MiscVal 1459 non-null int64 \n 75 MoSold 1459 non-null int64 \n 76 YrSold 1459 non-null int64 \n 77 SaleType 1458 non-null object \n 78 SaleCondition 1459 non-null object \ndtypes: float64(11), int64(25), object(43)\nmemory usage: 911.9+ KB\n"
],
[
"train_data.describe().T",
"_____no_output_____"
],
[
"test_data.describe().T",
"_____no_output_____"
]
],
[
[
"* We see that we have a lot of missing values. We will need to deal with those. \n* Also, variabale GarageYrBlt has not only missing values, but also some errors since max GarageYrBlt in test data is 2207, so, since YearBuilt variable does not have missing data we could GarageYrBlt equal to the same as the House build year (YearBuilt). \n* What's more, it is interesting to see, that none of the datasets have OverallCondition equal to 10, even though it says so in the data_description file, the highest value for this variable is 9.",
"_____no_output_____"
]
],
[
[
"test_data['GarageYrBlt']",
"_____no_output_____"
],
[
"test_data['GarageYrBlt'] = test_data['GarageYrBlt'].where(test_data['GarageYrBlt'] <= 2010, test_data['YearBuilt'])",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(10, 8))\nfig = sns.histplot(train_data['SalePrice'], kde=True);\nfig.set_title(\"SalePrice Distribution Plot\", fontsize=16);",
"_____no_output_____"
]
],
[
[
"As we can see from the chart, house prices distribution is right-skewed.",
"_____no_output_____"
],
[
"Now let's prepare our data for the baseline model creation. We will start off, by checking after categorical variables and from data_description file looking which ones should be ordinal and which ones not. For example, MSSubClass is of type int, even though it is actually a categorical variable.",
"_____no_output_____"
]
],
[
[
"nominal_features = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LandContour', 'LotConfig',\n 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle',\n 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation',\n 'Heating', 'CentralAir', 'Electrical', 'GarageType', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition']\n\nten_levels = list(range(1,10))\nfive_levels = ['Po', 'Fa', 'TA', 'Gd', 'Ex']\n\n\nordinal_features = {\n 'LotShape': ['Reg', 'IR1', 'IR2', 'IR3'],\n 'Utilities': ['ELO', 'NoSeWa', 'NoSewr', 'AllPub'],\n 'LandSlope': ['Gtl', 'Mod', 'Sev'],\n 'OverallQual': ten_levels,\n 'OverallCond': ten_levels,\n 'ExterQual': five_levels,\n 'ExterCond': five_levels,\n 'BsmtQual': five_levels,\n 'BsmtCond': five_levels,\n 'BsmtExposure': ['No', 'Mn', 'Av', 'Gd'],\n 'BsmtFinType1': ['Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'],\n 'BsmtFinType2': ['Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'],\n 'HeatingQC': five_levels,\n 'KitchenQual': five_levels,\n 'Functional': ['Sal', 'Sev', 'Maj2', 'Maj1', 'Mod', 'Min2', 'Min1', 'Typ'],\n 'FireplaceQu': five_levels,\n 'GarageFinish': ['Unf', 'RFn', 'Fin'],\n 'GarageQual': five_levels,\n 'GarageCond': five_levels,\n 'PavedDrive': ['N', 'P', 'Y'],\n 'PoolQC': five_levels,\n}\n\nordinal_features = {key: ['None'] + value for key, value in\n ordinal_features.items()}\n\ndef encode(df: pd.DataFrame):\n \"Correctly encodes the categorical variables inside dataframe\"\n \n for feature in nominal_features:\n df[feature] = df[feature].astype(\"category\")\n if 'None' not in df[feature].cat.categories:\n df[feature].cat.add_categories('None', inplace=True)\n for feature, levels in ordinal_features.items():\n df[feature] = df[feature].astype(CategoricalDtype(levels, ordered=True))\n return df",
"_____no_output_____"
],
[
"def fill_missing_values(df: pd.DataFrame) -> pd.DataFrame:\n \"Fills in missing values in a dataframe base on column type\"\n\n for feature in df.select_dtypes('number'):\n df[feature] = df[feature].fillna(0)\n for feature in df.select_dtypes('category'):\n df[feature] = df[feature].fillna('None')\n return df",
"_____no_output_____"
],
[
"def process_initial_data(initial_tain_df: pd.DataFrame, initial_test_df: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):\n df = pd.concat([initial_tain_df, initial_test_df])\n df = encode(df)\n df = fill_missing_values(df)\n df_train = df.loc[initial_tain_df.index, :]\n df_test = df.loc[initial_test_df.index, :]\n return df_train, df_test ",
"_____no_output_____"
],
[
"train_df, test_df = process_initial_data(train_data, test_data)",
"_____no_output_____"
],
[
"def get_score(X: pd.DataFrame, y: pd.Series, model = LinearRegression()):\n for colname in X.select_dtypes([\"category\"]):\n X[colname] = X[colname].cat.codes\n log_y = np.log(y)\n score = cross_val_score(\n model, X, log_y, cv=5, scoring=\"neg_mean_squared_error\",\n )\n score = -1 * score.mean()\n score = np.sqrt(score)\n return score",
"_____no_output_____"
],
[
"X = train_df.copy()\ny = X.pop('SalePrice')",
"_____no_output_____"
],
[
"baseline_lreg_score = get_score(X, y)\nprint('Our Baseline Model Score using Linear Regression model is: ', \"{:.4f}\".format(baseline_lreg_score))",
"Our Baseline Model Score using Linear Regression model is: 0.1523\n"
],
[
"baseline_rforest_score = get_score(X, y, model=RandomForestRegressor(random_state=0))\nprint('Our Baseline Model Score using Random Forest Regressor is: ', \"{:.4f}\".format(baseline_rforest_score))",
"Our Baseline Model Score using Random Forest Regressor is: 0.1397\n"
],
[
"X",
"_____no_output_____"
],
[
"def create_submission_file(test_df: pd.DataFrame, model=RandomForestRegressor(random_state=0)):\n for colname in test_df.select_dtypes([\"category\"]):\n test_df[colname] = test_df[colname].cat.codes\n preds = pd.Series(np.exp(model.predict(test_df)))\n submission_df = pd.DataFrame({'Id': test_df.index, 'SalePrice': preds})\n submission_df.to_csv('submission.csv', index=False)",
"_____no_output_____"
]
],
[
[
"Create the submission file",
"_____no_output_____"
]
],
[
[
"# reg = RandomForestRegressor(random_state=0)\n# reg.fit(X,np.log(y))\n# test_df.pop('SalePrice')\n# create_submission_file(test_df.copy(), model=reg)",
"_____no_output_____"
],
[
"test_df.select_dtypes([\"category\"])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ec8d6bc6f037b14c8b5a0cd1c5be9832a351ea62 | 132,272 | ipynb | Jupyter Notebook | examples/tungsten_extxyz/pair_potential_demo.ipynb | sxie22/uf3 | dbd0fafe0c36dde4ce9d0a772f6dc1f50267dd63 | [
"Apache-2.0"
] | 17 | 2021-10-01T14:52:16.000Z | 2022-02-11T13:10:03.000Z | examples/tungsten_extxyz/pair_potential_demo.ipynb | sxie22/uf3 | dbd0fafe0c36dde4ce9d0a772f6dc1f50267dd63 | [
"Apache-2.0"
] | 3 | 2021-10-06T13:39:28.000Z | 2021-12-03T16:39:03.000Z | examples/tungsten_extxyz/pair_potential_demo.ipynb | sxie22/uf3 | dbd0fafe0c36dde4ce9d0a772f6dc1f50267dd63 | [
"Apache-2.0"
] | 4 | 2022-01-27T08:49:00.000Z | 2022-03-22T11:55:35.000Z | 88.892473 | 27,960 | 0.808334 | [
[
[
"import os\nfrom concurrent.futures import ProcessPoolExecutor",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"from uf3.data import io\nfrom uf3.data import geometry\nfrom uf3.data import composition\nfrom uf3.representation import bspline\nfrom uf3.representation import distances\nfrom uf3.representation import process\nfrom uf3.regression import least_squares\nfrom uf3.forcefield import calculator\nfrom uf3.forcefield import lammps\nfrom uf3.util import parallel\nfrom uf3.util import plotting",
"_____no_output_____"
]
],
[
[
"# $\\text{UF}_{2}$ Demo: Elemental tungsten",
"_____no_output_____"
],
[
"**Data split**\n- Training set: 1939 configurations (stratified 20% of the dataset)\n\n- Holdout: 7754 configurations (remaining 80%)",
"_____no_output_____"
],
[
"**Inputs**\n- ```w-14.xyz``` (30 mb)\n- ```training_idx.txt``` (10 kb, included for reproducibility purposes)",
"_____no_output_____"
],
[
"**Outputs**\n- ```model_pair.json``` (1 kb)\n- ```W_pair.table``` (32 kb)",
"_____no_output_____"
]
],
[
[
"%%html\n<style>\n table {margin-left: 0 !important;}\n</style>",
"_____no_output_____"
]
],
[
[
" Step | Estimated Time \n:-----------------|:--------------\nPreprocessing | 10 seconds\nPair distribution | 10 seconds\nFeaturization | 5 core-minutes (parallelizable)\nTraining | 1 second\nPrediction | 1 second\nPlotting | 9 seconds",
"_____no_output_____"
],
[
"# User Parameters\n\n```element_list (list)```: list of element symbols\n\n```degree (int)```: truncation of many-body expansion. A value of 2 yields a pair potential.",
"_____no_output_____"
]
],
[
[
"element_list = ['W']\ndegree = 2",
"_____no_output_____"
]
],
[
[
"Initialize the ```ChemicalSystem``` and inspect interactions.\n\nElements involved in each interactions are sorted by electronegativity.",
"_____no_output_____"
]
],
[
[
"chemical_system = composition.ChemicalSystem(element_list=element_list,\n degree=degree)\nprint(\"Pairs:\", chemical_system.interactions_map[2])",
"Pairs: [('W', 'W')]\n"
]
],
[
[
"```r_min_map (dict)```: map of minimum pair distance per interaction (angstroms). \n If unspecified, defaults to 1.0 for all interactions.\n \n```r_max_map (dict)```: map of maximum pair distance per interaction (angstroms). \n If unspecified, defaults to 6.0 angstroms for all interactions, which probably encompasses at least 2nd-nearest neighbors.\n \n```resolution_map (dict)```: map of resolution (number of knot intervals) per interaction. \n For the cubic basis, the number of basis functions equals three more than the number of knot intervals.\n This is, in turn, negated by ```trailing_trim```.\n If unspecified, defaults to 20 for all two-body interactions and 5 for three-body interactions.\n \n```trailing_trim (int)```: number of trailing basis functions to trim, defaults to 3.\n - ```= 0```: hard cutoff at ```r_max```\n - ```= 1```: function goes to zero at ```r_max```\n - ```= 2```: first derivative goes to zero at ```r_max```\n - ```= 3```: second derivative goes to zero at ```r_max```",
"_____no_output_____"
]
],
[
[
"r_min_map = {('W', 'W'): 1.5,\n }\nr_max_map = {('W', 'W'): 5.5,\n }\nresolution_map = {('W', 'W'): 25,\n }\ntrailing_trim = 3",
"_____no_output_____"
]
],
[
[
"# Demo parameters\n```n_cores```: number of workers to use in parallel for feature generation\n\n```data_filename```: filename of reference data including geometries, energies, forces, ...\n\n```training_1939```: list of integers corresponding to entries to use for training.",
"_____no_output_____"
]
],
[
[
"n_cores = 4",
"_____no_output_____"
],
[
"example_directory = os.getcwd()\ndata_filename = os.path.join(example_directory, \"w-14.xyz\")\nwith open(os.path.join(example_directory, \"training_idx.txt\"), \"r\") as f:\n training_1939 = [int(idx) for idx in f.read().splitlines()]",
"_____no_output_____"
]
],
[
[
"# Initialize basis",
"_____no_output_____"
]
],
[
[
"bspline_config = bspline.BSplineBasis(chemical_system,\n r_min_map=r_min_map,\n r_max_map=r_max_map,\n resolution_map=resolution_map,\n trailing_trim=trailing_trim)",
"_____no_output_____"
]
],
[
[
"```bspline_config.get_interaction_partitions()``` yields the number of coefficients for each n-body interaction (one-body terms, two-body terms, three-body terms, ...) as well as the starting index in the coefficient vector for each interaction.",
"_____no_output_____"
]
],
[
[
"bspline_config.get_interaction_partitions()[0]",
"_____no_output_____"
],
[
"bspline_config.get_interaction_partitions()[1]",
"_____no_output_____"
]
],
[
[
"# Load data",
"_____no_output_____"
]
],
[
[
"data_coordinator = io.DataCoordinator()\ndata_coordinator.dataframe_from_trajectory(data_filename,\n prefix='dft')\ndf_data = data_coordinator.consolidate()\nprint(\"Number of energies:\", len(df_data))\nprint(\"Number of forces:\", int(np.sum(df_data[\"size\"]) * 3))",
"Number of energies: 9693\nNumber of forces: 475578\n"
],
[
"df_data.head()",
"_____no_output_____"
]
],
[
[
"Useful step that serves as a sanity check for selected cutoffs and resolution.",
"_____no_output_____"
],
[
"# Examine pair distance distribution",
"_____no_output_____"
]
],
[
[
"atoms_key = data_coordinator.atoms_key\nhistogram_slice = np.random.choice(np.arange(len(df_data)),\n min(1000, len(df_data)),\n replace=False)\ndf_slice = df_data[atoms_key].iloc[histogram_slice]\nhistograms = distances.summarize_distances(df_slice,\n chemical_system,\n min_peak_width=0.2)",
"_____no_output_____"
],
[
"bar_width = histograms[1][1] - histograms[1][0]\npairs = chemical_system.interactions_map[2]\nfig, ax = plt.subplots(1, len(pairs), figsize=(len(pairs)*3, 3), dpi=100)\nif not isinstance(ax, (list, np.ndarray)):\n ax = [ax]\nfor i, pair in enumerate(pairs):\n ax[i].bar(histograms[1][:-1],\n histograms[0][pair],\n width=bar_width)\n ax[i].set_title(\" - \".join(pair))\n ax[i].plot([0, 10], [1, 1], linestyle='--', color='k')\n ax[i].set_xlim(0, 10)\n ax[i].set_ylim(0, 4)\n ax[i].set_xlabel(\"Pair distance (angstroms)\")\n ax[i].set_ylabel(\"Normalized Frequency\")\nfig.tight_layout()",
"_____no_output_____"
]
],
[
[
"# Compute energy and force features",
"_____no_output_____"
]
],
[
[
"representation = process.BasisFeaturizer(chemical_system,\n bspline_config)",
"_____no_output_____"
],
[
"client = ProcessPoolExecutor(max_workers=n_cores)",
"_____no_output_____"
],
[
"n_batches = n_cores * 16 # added granularity for more progress bar updates\ndf_features = representation.evaluate_parallel(df_data,\n client,\n energy_key=data_coordinator.energy_key,\n n_jobs=n_batches)",
"_____no_output_____"
],
[
"df_features.head()",
"_____no_output_____"
]
],
[
[
"# Fit model",
"_____no_output_____"
]
],
[
[
"regularizer = bspline_config.get_regularization_matrix(ridge_1b=1e-6,\n curvature_2b=1e-8)\n\nmodel = least_squares.WeightedLinearModel(bspline_config,\n regularizer=regularizer)",
"_____no_output_____"
]
],
[
[
"Train with 20% of the dataset (1939 samples)",
"_____no_output_____"
]
],
[
[
"training_keys = df_data.index[training_1939]\ndf_slice = df_features.loc[training_keys]\nn_elements = len(chemical_system.element_list)\nx_e, y_e, x_f, y_f = least_squares.dataframe_to_tuples(df_slice,\n n_elements=n_elements,\n energy_key=\"energy\")",
"_____no_output_____"
]
],
[
[
"# Fit with energies and force",
"_____no_output_____"
]
],
[
[
"model.fit(x_e, y_e, x_f, y_f, weight=0.5)\nsolutions = least_squares.arrange_coefficients(model.coefficients, \n bspline_config)\ncoefficients = solutions[(\"W\", \"W\")]\nknot_sequence = bspline_config.knots_map[(\"W\", \"W\")]\nfig, ax = plotting.visualize_splines(coefficients, knot_sequence)",
"_____no_output_____"
]
],
[
[
"# Prediction",
"_____no_output_____"
]
],
[
[
"# predict with remaining 80% of dataset\nholdout_keys = df_data.index.difference(training_keys)",
"_____no_output_____"
],
[
"df_holdout = df_features.loc[holdout_keys]\nx_e, y_e, x_f, y_f = least_squares.dataframe_to_tuples(df_holdout,\n n_elements=n_elements,\n energy_key=\"energy\")",
"_____no_output_____"
],
[
"p_e = model.predict(x_e)",
"_____no_output_____"
],
[
"p_f = model.predict(x_f)",
"_____no_output_____"
],
[
"plotting.density_scatter(y_e, p_e)\nplt.tight_layout()",
"_____no_output_____"
],
[
"plotting.density_scatter(y_f, p_f)\nplt.tight_layout()",
"_____no_output_____"
],
[
"solutions = least_squares.arrange_coefficients(model.coefficients, \n bspline_config)",
"_____no_output_____"
],
[
"for interaction, values in solutions.items():\n print(interaction, \":\", values)",
"W : -7.3598176211137485\n('W', 'W') : [ 6.05292109e+00 5.76172773e+00 5.06058534e+00 3.69113116e+00\n 1.65205853e+00 9.23859149e-01 2.81258201e-01 -5.36413556e-03\n -1.65669972e-01 -2.39954884e-01 -2.77954263e-01 -2.80998799e-01\n -2.49653066e-01 -1.77409590e-01 -1.00099852e-01 -6.82599769e-02\n -2.00034440e-02 -3.28071234e-02 -3.43219435e-02 -3.43618259e-02\n -2.88400639e-02 -2.12175439e-02 -6.90983502e-04 -9.25139031e-03\n -8.08694792e-03 0.00000000e+00 0.00000000e+00 0.00000000e+00]\n"
]
],
[
[
"# Export potential and tabulated potential",
"_____no_output_____"
]
],
[
[
"model.save(\"model_pair.json\")",
"_____no_output_____"
],
[
"table_list = []\npair_list = chemical_system.interactions_map[2]\nfor pair in pair_list:\n text = lammps.export_tabulated_potential(representation.knots_map[pair],\n solutions[pair],\n pair,\n grid=1000,\n filename=None)\n table_list.append(text)\ncombined_text = \"\\n\\n\\n\".join(table_list)\ntable_name = os.path.join(example_directory, \"table_test\", \"W.table\")\nwith open(table_name, \"w\") as f:\n f.write(combined_text)",
"_____no_output_____"
]
],
[
[
"# Compute energy, forces, stress with UFCalculator",
"_____no_output_____"
]
],
[
[
"calc = calculator.UFCalculator(bspline_config, model)\n\ngeom = df_data.iloc[3000]['geometry'].copy() # 12-atom cell\ngeom.set_calculator(calc)\nprint(\"Energy:\", geom.get_potential_energy())\nprint(\"Stresses (numerical):\", geom.get_stress())\nprint(\"Forces:\\n\", geom.get_forces())\nprint(\"Max force:\", np.max(np.abs(geom.get_forces())))",
"Energy: [-129.90850437]\nStresses (numerical): [-0.07157686 -0.05615776 -0.03585433 0.00072185 -0.01699741 -0.03629823]\nForces:\n [[ 0.36706937 -7.28026397 0.58103694]\n [ 1.10934562 0.51625161 -0.62697603]\n [-1.15703167 -1.59575223 -0.37736722]\n [-1.83079162 5.43204632 -0.44976142]\n [ 0.57471404 2.30500838 1.01534022]\n [ 1.86237115 -2.25965517 -0.7613154 ]\n [ 2.60550322 1.55873367 -0.90440522]\n [-1.66154524 -0.61688415 1.17963061]\n [-2.14010085 -0.27336131 0.10013569]\n [ 0.09055564 5.6471755 0.11915614]\n [ 1.97460374 -3.39727663 -0.23972051]\n [-1.79469339 -0.03602202 0.3642462 ]]\nMax force: 7.280263968421663\n"
]
],
[
[
"# Compare with UFLammps calculator",
"_____no_output_____"
]
],
[
[
"calc = lammps.UFLammps(lmpcmds=[\"pair_style table spline 1000\",\n \"pair_coeff * * table_test/W.table UF_W-W 5.5\"],\n keep_alive=True)\ngeom = df_data.iloc[3000]['geometry'].copy() # 12-atom cell\ngeom.set_calculator(calc)\nprint(\"Energy:\", geom.get_potential_energy())\nprint(\"Stresses (numerical):\", geom.get_stress())\nprint(\"Forces:\\n\", geom.get_forces())\nprint(\"Max force:\", np.max(np.abs(geom.get_forces())))\ndel calc",
"Energy: -41.59071629828812\nStresses (numerical): [-0.07157678 -0.05615798 -0.03585438 0.0007219 -0.01699733 -0.0362982 ]\nForces:\n [[-0.36707085 7.28027149 -0.58103072]\n [-1.10934726 -0.51625373 0.62697386]\n [ 1.15703129 1.59575361 0.37736717]\n [ 1.830793 -5.4320507 0.44975813]\n [-0.57471276 -2.30501251 -1.01534688]\n [-1.86236869 2.25965632 0.76131392]\n [-2.60550385 -1.55873187 0.90440782]\n [ 1.66154317 0.61688455 -1.17962671]\n [ 2.14010237 0.27336506 -0.10013918]\n [-0.0905544 -5.64717482 -0.11915727]\n [-1.97460007 3.39727246 0.23972502]\n [ 1.79468806 0.03602015 -0.36424515]]\nMax force: 7.280271486255797\n"
]
],
[
[
"# Relax with UFLammps calculator",
"_____no_output_____"
]
],
[
[
"calc = lammps.UFLammps(lmpcmds=[\"pair_style table spline 1000\",\n \"pair_coeff * * table_test/W.table UF_W-W 5.5\"],\n keep_alive=True)\ncalc.relax(geom)\nprint('Relaxation Steps:', calc.results['nsteps'])\ngeom.calc = calc\nprint(\"Energy:\", geom.get_potential_energy())\nprint(\"Stresses (numerical):\", geom.get_stress())\nlammps_forces = geom.get_forces()\nprint(\"Forces:\\n\", lammps_forces)\nprint(\"Max force:\", np.max(lammps_forces))\ndel calc",
"Relaxation Steps: 93\nEnergy: -44.93448967579233\nStresses (numerical): [-0.02876663 -0.00625577 0.03505017 0.00253287 -0.01604888 -0.03666763]\nForces:\n [[-6.13670874e-04 -5.59840158e-04 -1.20723586e-04]\n [ 2.10592823e-03 -2.35684886e-04 -1.86147363e-03]\n [ 2.02430711e-03 9.13033147e-04 -4.07057512e-05]\n [-2.62411174e-03 2.23662760e-04 9.79704229e-04]\n [-1.35599262e-03 7.35322985e-05 8.57665172e-04]\n [ 1.69965025e-03 -2.32461582e-04 -1.11735158e-03]\n [ 2.73562177e-03 -4.59016302e-04 -1.12265584e-03]\n [-2.76851036e-03 7.63081639e-04 1.39608060e-03]\n [-6.43108653e-04 4.58048485e-05 8.62789598e-04]\n [ 7.53828171e-04 -8.23463358e-04 3.07237588e-04]\n [-2.56944163e-04 -2.46499874e-04 -3.66001987e-04]\n [-1.05699711e-03 5.37851469e-04 2.25435193e-04]]\nMax force: 0.0027356217678128417\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8d8bbda73cc1e59068492567f2436972832827 | 51,590 | ipynb | Jupyter Notebook | 4.3_pandas_selecting.ipynb | butayama/Learning-Python-for-Data-Science | 43f5e8e0b6f767f46687a4eb33d7530925dd9727 | [
"MIT"
] | null | null | null | 4.3_pandas_selecting.ipynb | butayama/Learning-Python-for-Data-Science | 43f5e8e0b6f767f46687a4eb33d7530925dd9727 | [
"MIT"
] | null | null | null | 4.3_pandas_selecting.ipynb | butayama/Learning-Python-for-Data-Science | 43f5e8e0b6f767f46687a4eb33d7530925dd9727 | [
"MIT"
] | null | null | null | 25.426318 | 65 | 0.319926 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"import seaborn as sns",
"_____no_output_____"
],
[
"df=sns.load_dataset('tips')\ndf.head()",
"_____no_output_____"
],
[
"df['tip'].head()",
"_____no_output_____"
],
[
"df[['tip']].head()",
"_____no_output_____"
],
[
"df[['tip', 'sex']].head()",
"_____no_output_____"
],
[
"df[2:4]",
"_____no_output_____"
]
],
[
[
"# .loc",
"_____no_output_____"
]
],
[
[
"df.loc[3]",
"_____no_output_____"
],
[
"df.loc[[3]]",
"_____no_output_____"
],
[
"df.loc[[3, 7]]",
"_____no_output_____"
],
[
"df.loc[3:5]",
"_____no_output_____"
],
[
"df.loc[:, 'tip'].head()",
"_____no_output_____"
],
[
"df.loc[:, ['tip']].head()",
"_____no_output_____"
],
[
"df.loc[:, ['tip', 'smoker']].head()",
"_____no_output_____"
],
[
"df2 = df.set_index('sex', inplace=False)\ndf2.head()",
"_____no_output_____"
],
[
"df2.loc['Female'].head()",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.loc[3, 'time']",
"_____no_output_____"
],
[
"df.loc[3, ['time', 'size']]",
"_____no_output_____"
],
[
"df.loc[3:5, ['time', 'size']]",
"_____no_output_____"
],
[
"df.loc[[3], ['time', 'size']]",
"_____no_output_____"
]
],
[
[
"# .iloc",
"_____no_output_____"
]
],
[
[
"df.head()",
"_____no_output_____"
],
[
"df.iloc[3]",
"_____no_output_____"
],
[
"df.iloc[[3]]",
"_____no_output_____"
],
[
"df.iloc[1:3]",
"_____no_output_____"
],
[
"df.iloc[:, 4].head()",
"_____no_output_____"
],
[
"df.iloc[:, [4]].head()",
"_____no_output_____"
],
[
"df.iloc[1:3, [4]].head()",
"_____no_output_____"
],
[
"df.iloc[1:3, [4, 0]].head()",
"_____no_output_____"
],
[
"df2.head()",
"_____no_output_____"
],
[
"df2.iloc[1:3]",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8d90e0a56ebb24dea0a5e9bebb79be465a582e | 207,390 | ipynb | Jupyter Notebook | memefly-de/notebooks/dumping_base_memes_to_s3.ipynb | Nburkhal/memefly-ds | 359853ba79f2ce8e8145fc04d2a33ab58a7faf00 | [
"MIT"
] | 6 | 2019-11-06T21:56:26.000Z | 2020-01-17T06:26:47.000Z | memefly-de/notebooks/dumping_base_memes_to_s3.ipynb | Nburkhal/memefly-ds | 359853ba79f2ce8e8145fc04d2a33ab58a7faf00 | [
"MIT"
] | 12 | 2020-03-24T18:29:09.000Z | 2021-09-08T01:34:54.000Z | memefly-de/notebooks/dumping_base_memes_to_s3.ipynb | BloomTech-Labs/memefly-ds | 82c7b556627b992b6fcbc5f4b8ccfafb171d0aac | [
"MIT"
] | 3 | 2019-11-06T21:54:25.000Z | 2020-03-06T04:17:31.000Z | 75.855889 | 163 | 0.665982 | [
[
[
"import boto3\nimport json\nfrom decouple import config\nimport os\nimport requests\nimport logging\nimport time",
"_____no_output_____"
]
],
[
[
"## Load `base_memes.json` file",
"_____no_output_____"
]
],
[
[
"DATA_DIR = '../data'\nBASE_MEME_IMGFLIP_LINKS = 'base_memes_from_imgflip.json'\n\nwith open(os.path.join(DATA_DIR, BASE_MEME_IMGFLIP_LINKS), 'r') as robj:\n base_memes = json.load(robj)\nbase_memes",
"_____no_output_____"
]
],
[
[
"## Dump to S3 `memefly-datalake` `raw-images` folder",
"_____no_output_____"
]
],
[
[
"BASE_MEMES_S3_LINKS = 'base_memes_s3_links.json'\nBUCKET_NAME = 'memefly'\nS3_IMAGE_FOLDER = 'base-meme-images' \nREGION = 'us-east-2'\nS3_BASE_URL = f'https://{BUCKET_NAME}.s3.{REGION}.amazonaws.com'\nprint(f'''\nBUCKET_NAME = {BUCKET_NAME}\nS3_IMAGE_FOLDER = {S3_IMAGE_FOLDER}\nREGION = {REGION }\nS3_BASE_URL = {S3_BASE_URL}\n''')",
"\nBUCKET_NAME = memefly\nS3_IMAGE_FOLDER = base-meme-images\nREGION = us-east-2\nS3_BASE_URL = https://memefly.s3.us-east-2.amazonaws.com\n\n"
],
[
"def dump_imgflip_basememes_to_s3():\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Cafari/537.36'} \n\n s3 = boto3.client(\n 's3',\n aws_access_key_id=config('AWS_ACCESS_KEY'),\n aws_secret_access_key=config('AWS_SECRET_ACCESS_KEY')\n )\n \n for k, v in base_memes.items():\n print(f'Requesting base meme pic: {v}')\n pic = requests.get(v, headers=headers)\n pic_filename = v.split('/')[-1]\n pic_filename = pic_filename.lower()\n folder_path = '/'.join([S3_IMAGE_FOLDER, pic_filename])\n print(f'Saving to S3 folder path: {folder_path}')\n s3.put_object(Body = pic.content, Bucket = BUCKET_NAME, Key = folder_path)\n\ndump_imgflip_basememes_to_s3()",
"Requesting base meme pic: https://imgflip.com/s/meme/Woman-Yelling-At-Cat.jpg\nSaving to S3 folder path: base-meme-images/woman-yelling-at-cat.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Distracted-Boyfriend.jpg\nSaving to S3 folder path: base-meme-images/distracted-boyfriend.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Drake-Hotline-Bling.jpg\nSaving to S3 folder path: base-meme-images/drake-hotline-bling.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Two-Buttons.jpg\nSaving to S3 folder path: base-meme-images/two-buttons.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Mocking-Spongebob.jpg\nSaving to S3 folder path: base-meme-images/mocking-spongebob.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Change-My-Mind.jpg\nSaving to S3 folder path: base-meme-images/change-my-mind.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Left-Exit-12-Off-Ramp.jpg\nSaving to S3 folder path: base-meme-images/left-exit-12-off-ramp.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Expanding-Brain.jpg\nSaving to S3 folder path: base-meme-images/expanding-brain.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Batman-Slapping-Robin.jpg\nSaving to S3 folder path: base-meme-images/batman-slapping-robin.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Blank-Nut-Button.jpg\nSaving to S3 folder path: base-meme-images/blank-nut-button.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Surprised-Pikachu.jpg\nSaving to S3 folder path: base-meme-images/surprised-pikachu.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Spongebob-Ight-Imma-Head-Out.jpg\nSaving to S3 folder path: base-meme-images/spongebob-ight-imma-head-out.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Running-Away-Balloon.jpg\nSaving to S3 folder path: base-meme-images/running-away-balloon.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Boardroom-Meeting-Suggestion.jpg\nSaving to S3 folder path: base-meme-images/boardroom-meeting-suggestion.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Waiting-Skeleton.jpg\nSaving to S3 folder path: base-meme-images/waiting-skeleton.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Roll-Safe-Think-About-It.jpg\nSaving to S3 folder path: base-meme-images/roll-safe-think-about-it.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Tuxedo-Winnie-The-Pooh.jpg\nSaving to S3 folder path: base-meme-images/tuxedo-winnie-the-pooh.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/American-Chopper-Argument.jpg\nSaving to S3 folder path: base-meme-images/american-chopper-argument.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Is-This-A-Pigeon.jpg\nSaving to S3 folder path: base-meme-images/is-this-a-pigeon.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Inhaling-Seagull.jpg\nSaving to S3 folder path: base-meme-images/inhaling-seagull.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/X-X-Everywhere.jpg\nSaving to S3 folder path: base-meme-images/x-x-everywhere.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Disaster-Girl.jpg\nSaving to S3 folder path: base-meme-images/disaster-girl.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/One-Does-Not-Simply.jpg\nSaving to S3 folder path: base-meme-images/one-does-not-simply.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/The-Scroll-Of-Truth.jpg\nSaving to S3 folder path: base-meme-images/the-scroll-of-truth.jpg\nRequesting base meme pic: 
https://imgflip.com/s/meme/Marked-Safe-From.jpg\nSaving to S3 folder path: base-meme-images/marked-safe-from.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Ancient-Aliens.jpg\nSaving to S3 folder path: base-meme-images/ancient-aliens.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Hide-the-Pain-Harold.jpg\nSaving to S3 folder path: base-meme-images/hide-the-pain-harold.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Yall-Got-Any-More-Of-That.jpg\nSaving to S3 folder path: base-meme-images/yall-got-any-more-of-that.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Unsettled-Tom.jpg\nSaving to S3 folder path: base-meme-images/unsettled-tom.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Trump-Bill-Signing.jpg\nSaving to S3 folder path: base-meme-images/trump-bill-signing.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/The-Rock-Driving.jpg\nSaving to S3 folder path: base-meme-images/the-rock-driving.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Oprah-You-Get-A.jpg\nSaving to S3 folder path: base-meme-images/oprah-you-get-a.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Futurama-Fry.jpg\nSaving to S3 folder path: base-meme-images/futurama-fry.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Who-Killed-Hannibal.jpg\nSaving to S3 folder path: base-meme-images/who-killed-hannibal.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/The-Most-Interesting-Man-In-The-World.jpg\nSaving to S3 folder path: base-meme-images/the-most-interesting-man-in-the-world.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Third-World-Skeptical-Kid.jpg\nSaving to S3 folder path: base-meme-images/third-world-skeptical-kid.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Finding-Neverland.jpg\nSaving to S3 folder path: base-meme-images/finding-neverland.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Hard-To-Swallow-Pills.jpg\nSaving to S3 folder path: base-meme-images/hard-to-swallow-pills.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Evil-Kermit.jpg\nSaving to S3 folder path: base-meme-images/evil-kermit.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/First-World-Problems.jpg\nSaving to S3 folder path: base-meme-images/first-world-problems.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/That-Would-Be-Great.jpg\nSaving to S3 folder path: base-meme-images/that-would-be-great.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Dont-You-Squidward.jpg\nSaving to S3 folder path: base-meme-images/dont-you-squidward.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Black-Girl-Wat.jpg\nSaving to S3 folder path: base-meme-images/black-girl-wat.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Grandma-Finds-The-Internet.jpg\nSaving to S3 folder path: base-meme-images/grandma-finds-the-internet.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Me-And-The-Boys.jpg\nSaving to S3 folder path: base-meme-images/me-and-the-boys.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Brace-Yourselves-X-is-Coming.jpg\nSaving to S3 folder path: base-meme-images/brace-yourselves-x-is-coming.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Who-Would-Win.jpg\nSaving to S3 folder path: base-meme-images/who-would-win.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Leonardo-Dicaprio-Cheers.jpg\nSaving to S3 folder path: base-meme-images/leonardo-dicaprio-cheers.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Y-U-No.jpg\nSaving to 
S3 folder path: base-meme-images/y-u-no.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/But-Thats-None-Of-My-Business.jpg\nSaving to S3 folder path: base-meme-images/but-thats-none-of-my-business.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Star-Wars-Yoda.jpg\nSaving to S3 folder path: base-meme-images/star-wars-yoda.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Laughing-Men-In-Suits.jpg\nSaving to S3 folder path: base-meme-images/laughing-men-in-suits.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Third-World-Success-Kid.jpg\nSaving to S3 folder path: base-meme-images/third-world-success-kid.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Bad-Luck-Brian.jpg\nSaving to S3 folder path: base-meme-images/bad-luck-brian.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Doge.jpg\nSaving to S3 folder path: base-meme-images/doge.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Creepy-Condescending-Wonka.jpg\nSaving to S3 folder path: base-meme-images/creepy-condescending-wonka.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Face-You-Make-Robert-Downey-Jr.jpg\nSaving to S3 folder path: base-meme-images/face-you-make-robert-downey-jr.jpg\nRequesting base meme pic: https://imgflip.com/s/meme/Evil-Toddler.jpg\nSaving to S3 folder path: base-meme-images/evil-toddler.jpg\n"
],
[
"def save_base_memes_s3_links():\n s3 = boto3.client(\n 's3',\n aws_access_key_id=config('AWS_ACCESS_KEY'),\n aws_secret_access_key=config('AWS_SECRET_ACCESS_KEY')\n )\n objs = s3.list_objects(Bucket=BUCKET_NAME)\n print(f'[INFO] Fetching S3 objects from {BUCKET_NAME}')\n data = {}\n for obj in objs['Contents']:\n if obj['Key'].endswith('.jpg'):\n s3_file_path = obj['Key']\n s3_link = S3_BASE_URL + '/' + s3_file_path\n s3_pic_name = s3_file_path.split('/')[1]\n data[s3_pic_name] = s3_link\n full_path = os.path.join(DATA_DIR, BASE_MEMES_S3_LINKS)\n print(f'[INFO] Saving file at {full_path}')\n with open(full_path, \"w\") as wobj:\n json.dump(data, wobj, indent=4) \n\nsave_base_memes_s3_links()",
"[INFO] Fetching S3 objects from memefly\n[INFO] Saving file at ../data/base_memes_s3_links.json\n"
],
[
"def download_base_memes_from_s3_bucket(pic_dir_path):\n \n if not os.path.exists(pic_dir_path):\n os.makedirs(pic_dir_path, exist_ok=True)\n \n bucket_name = 'memefly-datalake'\n s3_folder = 'base-meme-images' \n \n s3 = boto3.client(\n 's3',\n aws_access_key_id=config('AWS_ACCESS_KEY'),\n aws_secret_access_key=config('AWS_SECRET_ACCESS_KEY')\n )\n \n objs = s3.list_objects(Bucket=bucket_name)\n for obj in objs['Contents']:\n if obj['Key'].endswith('.jpg'):\n print(obj['Key'])\n s3_file_path = obj['Key']\n s3_folder = s3_file_path.split('/')[0]\n s3_pic_name = s3_file_path.split('/')[1]\n pic_download_loc = os.path.join(pic_dir_path, s3_pic_name)\n s3.download_file(bucket_name, s3_file_path, pic_download_loc)\n print(f'Downloaded to: {pic_download_loc}')\n\n\n#download_base_memes_from_s3_bucket('../data/images')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
ec8d92fd666060074dcf0838c25c0abcc2f79901 | 161,666 | ipynb | Jupyter Notebook | introduction_to_data_science/lesson_2/lecture/Describe & fill NA.ipynb | robinl3680/udacity-course | 308daf62479f9bf6f4256eb19313631f1bb4c5da | [
"MIT"
] | 68 | 2016-07-28T07:24:57.000Z | 2021-10-09T19:28:48.000Z | introduction_to_data_science/lesson_2/lecture/Describe & fill NA.ipynb | robinl3680/udacity-course | 308daf62479f9bf6f4256eb19313631f1bb4c5da | [
"MIT"
] | null | null | null | introduction_to_data_science/lesson_2/lecture/Describe & fill NA.ipynb | robinl3680/udacity-course | 308daf62479f9bf6f4256eb19313631f1bb4c5da | [
"MIT"
] | 105 | 2016-10-19T03:56:33.000Z | 2022-03-15T02:12:08.000Z | 43.110933 | 166 | 0.277158 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ec8d9bd6cead1e72bb07f7cc6601e24eae3a725e | 131,717 | ipynb | Jupyter Notebook | Espectro_de_Respuesta_Pisco_2007.ipynb | Kurt9806/Seismic-analysis | b4dc2cdb1cef833986b6c07614c19ecb3941d366 | [
"MIT"
] | null | null | null | Espectro_de_Respuesta_Pisco_2007.ipynb | Kurt9806/Seismic-analysis | b4dc2cdb1cef833986b6c07614c19ecb3941d366 | [
"MIT"
] | null | null | null | Espectro_de_Respuesta_Pisco_2007.ipynb | Kurt9806/Seismic-analysis | b4dc2cdb1cef833986b6c07614c19ecb3941d366 | [
"MIT"
] | null | null | null | 486.04059 | 104,351 | 0.930662 | [
[
[
"<a href=\"https://colab.research.google.com/github/Kurt9806/Seismic-analysis/blob/main/Espectro_de_Respuesta_Pisco_2007.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Subir los datos del acelerograma",
"_____no_output_____"
]
],
[
[
"import pandas as pd\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"df = pd.read_csv('Acelerograma.csv')\r\ndf['EW'] = df['EW']/100\r\ndf['NS'] = df['NS']/100\r\ndf['UD'] = df['UD']/100",
"_____no_output_____"
]
],
[
[
"# Metodo de Aproximacion de Carga",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"# Metodo numerico - Metodo de Aproximacion de Fuerzas\r\ndef Aprox_Fuerzas(k,xi,m,wn,E,dt,T):\r\n e = math.e**(-xi*wn*dt)\r\n wd = wn*math.sqrt(1-xi**2)\r\n\r\n A = e*(xi/math.sqrt(1-xi**2)*math.sin(wd*dt)+math.cos(wd*dt))\r\n B = e*(1/wd*math.sin(wd*dt))\r\n C = 1/k*(2*xi/(wn*dt)+e*(((1-2*xi**2)/(wd*dt)-xi/(math.sqrt(1-xi**2)))*math.sin(wd*dt)-(1+2*xi/(wn*dt))*math.cos(wd*dt)))\r\n D = 1/k*(1-2*xi/(wn*dt)+e*((2*xi**2-1)/(wd*dt)*math.sin(wd*dt)+2*xi/(wn*dt)*math.cos(wd*dt)))\r\n A_ = -e*(wn/math.sqrt(1-xi**2)*math.sin(wd*dt))\r\n B_ = e*(math.cos(wd*dt)-xi/math.sqrt(1-xi**2)*math.sin(wd*dt))\r\n C_ = 1/k*(-1/dt+e*((wn/math.sqrt(1-xi**2)+xi/(dt*math.sqrt(1-xi**2)))*math.sin(wd*dt)+1/dt*math.cos(wd*dt)))\r\n D_ = 1/(k*dt)*(1-e*(xi/math.sqrt(1-xi**2)*math.sin(wd*dt)+math.cos(wd*dt)))\r\n\r\n # Definiendo la fuerza\r\n u = np.zeros(len(T))\r\n u_ = np.zeros(len(T))\r\n\r\n for i in range(len(T)-1):\r\n u[i+1] = A*u[i] + B*u_[i] + C*(m*9.81*E[i]) + D*(m*9.81*E[i+1])\r\n u_[i+1] = A_*u[i] + B_*u_[i] + C_*(m*9.81*E[i]) + D_*(m*9.81*E[i+1])\r\n\r\n return u, u_",
"_____no_output_____"
]
],
[
[
"# Calculo del espectro",
"_____no_output_____"
]
],
[
[
"# Condiciones iniciales\r\nm=1\r\ndt = df['T'][1] - df['T'][0]\r\nT = np.arange(0.01,3,dt)\r\nomega_n = 2*math.pi/T\r\nK = m*omega_n*omega_n\r\nXI = [0.001,0.01,0.1,0.2,0.5,0.8]\r\n\r\nD = np.zeros((len(T),len(XI)))\r\nfor i in range(len(XI)):\r\n for j in range(len(T)):\r\n wn = omega_n[j]\r\n k = K[j]\r\n [u, v] = Aprox_Fuerzas(k,xi,m,wn,df['NS'],dt,T)\r\n D[j,i] = max(abs(u))",
"_____no_output_____"
],
[
"Df = pd.DataFrame.from_dict({'0.001':D[:,0],'0.01':D[:,1],'0.1':D[:,2],'0.2':D[:,3],'0.8':D[:,-1]})\r\nprint(Df)",
" 0.001 0.01 0.1 0.2 0.8\n0 0.000004 0.000004 0.000004 0.000004 0.000004\n1 0.000018 0.000018 0.000018 0.000018 0.000018\n2 0.000040 0.000040 0.000040 0.000040 0.000040\n3 0.000072 0.000072 0.000072 0.000072 0.000072\n4 0.000110 0.000110 0.000110 0.000110 0.000110\n.. ... ... ... ... ...\n294 0.029137 0.029137 0.029137 0.029137 0.029137\n295 0.029252 0.029252 0.029252 0.029252 0.029252\n296 0.029367 0.029367 0.029367 0.029367 0.029367\n297 0.029479 0.029479 0.029479 0.029479 0.029479\n298 0.029591 0.029591 0.029591 0.029591 0.029591\n\n[299 rows x 5 columns]\n"
],
[
"# Calcular la pseudo velocidad y la pseudo aceleracion\r\nV = pd.DataFrame.from_dict({'0':np.zeros(len(T))})\r\nA = pd.DataFrame.from_dict({'0':np.zeros(len(T))})\r\n\r\nfor i,xi in enumerate(XI):\r\n V[xi] = omega_n*D[:,i]\r\n A[xi] = omega_n*omega_n*D[:,i]",
"_____no_output_____"
],
[
"# Plot figures\r\nplt.title('Espectros de respuesta del sismo de Pisco 2007')\r\nplt.subplot(3,1,1)\r\nplt.plot(T,Df['0.001'])\r\nplt.plot(T,Df['0.01'])\r\nplt.plot(T,Df['0.2'])\r\nplt.plot(T,Df['0.8'])\r\nplt.subplot(3,1,2)\r\nplt.plot(T,V[0.01])\r\nplt.plot(T,V[0.1])\r\nplt.plot(T,V[0.8])\r\nplt.subplot(3,1,3)\r\nplt.plot(T,A[0.01])\r\nplt.plot(T,A[0.1])\r\nplt.plot(T,A[0.8])",
"_____no_output_____"
],
[
"A.to_excel('Pseudoaceleracion Pisco.xlsx')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
ec8d9bff8eb740a613410926dc49806d504f978b | 4,291 | ipynb | Jupyter Notebook | python_practical_2/python_practical_2.ipynb | baiwenjia/clinical_data_management | b931952bd7f14a749f3ad153b559bad7c410d190 | [
"Apache-2.0"
] | 2 | 2018-09-30T17:14:32.000Z | 2018-10-12T13:02:18.000Z | python_practical_2/python_practical_2.ipynb | baiwenjia/clinical_data_management | b931952bd7f14a749f3ad153b559bad7c410d190 | [
"Apache-2.0"
] | null | null | null | python_practical_2/python_practical_2.ipynb | baiwenjia/clinical_data_management | b931952bd7f14a749f3ad153b559bad7c410d190 | [
"Apache-2.0"
] | 1 | 2018-10-25T17:49:20.000Z | 2018-10-25T17:49:20.000Z | 39.009091 | 387 | 0.65649 | [
[
[
"## Python practical: Data privacy\n\nIn this practical, you will be given two datasets, a clinical dataset and a query dataset. The datasets were adapted from \\[1\\] but with the age information modified. Also, the original dataset contains 100,000 subjects. Here, we reduce the number of subjects so that you do not need to wait for a long time while running the practical.\n\n\\[1\\] [Diabetes 130-US hospitals for years 1999-2008 Data Set](https://archive.ics.uci.edu/ml/datasets/Diabetes+130-US+hospitals+for+years+1999-2008). For more detail, please refer to Table 1 in this paper: Beata Strack et al. Impact of HbA1c Measurement on Hospital Readmission Rates: Analysis of 70,000 Clinical Database Patient Records, BioMed Research International, 2014.\n\nThe task is to find the matching subject ID in the clinical dataset for each of the query data. This will help you understand the importance of data privacy and sensitive information.\n\nYou will mainly use pandas and numpy libraries in this practical.\n\nWe have prepared the model answer at the answer directory. Maybe you are not yet familiar with pandas and numpy. In that case, you may read the answer for Questions 1 and 2. Then you can easily solve Questions 3 and 4 by yourself. But maybe you would also like to do some googling, programming and challenge yourself. Why not?",
"_____no_output_____"
]
],
[
[
"# Import libaries\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"## Question 1: Load data\n\nThe filenames for the two datasets are data.csv and query.csv. The first column of data.csv is the subject ID. For query.csv, we do not have the information about subject ID.\n\nPlease display the first few rows of data using the head() function in pandas. How many subjects are there in the data set and the query set?",
"_____no_output_____"
],
[
"## Question 2: Matching by age\n\nUsing only age information, for each row in query.csv, how many matching subjects can you find in data.csv?",
"_____no_output_____"
],
[
"## Question 2: Matching by age range\n\nWe have prepared another two datasets for you. The filenames are data2.csv and query2.csv. In the new spreadsheets, the age information is replaced by age range, such as 40 to 50 years old.\n\nPlease load the two datasets and re-do the matching. How many matching subjects can you find in data2.csv using only age range information? Do you get more matches or less?",
"_____no_output_____"
],
[
"## Question 3: Matching by age range, race and gender\n\nStill use data2.csv and query2.csv. This time, using three columns of information (age range, race and gender). How many matching subjects can you find in data2.csv now? Could you comment on why for some query subjects, we find a small number of matches please (i.e. we almost find the subject)?",
"_____no_output_____"
],
[
"## Question 4: Matching using all information we have\n\nUsing all the columns of information we have, how many matching subjects can you find in data2.csv? What are the subject IDs? Are these correct? You can compare to the answer at answer/query_answer.csv.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
ec8d9e9f5dccece0df89cb015ba851c15c62697e | 6,275 | ipynb | Jupyter Notebook | notebooks/.ipynb_checkpoints/CenterOfMassAndMomentOfInertia-checkpoint (rnwatanabe-Inspiron-5457's conflicted copy 2018-03-18).ipynb | raissabthibes/bmc | 840800fb94ea3bf188847d0771ca7197dfec68e3 | [
"MIT"
] | null | null | null | notebooks/.ipynb_checkpoints/CenterOfMassAndMomentOfInertia-checkpoint (rnwatanabe-Inspiron-5457's conflicted copy 2018-03-18).ipynb | raissabthibes/bmc | 840800fb94ea3bf188847d0771ca7197dfec68e3 | [
"MIT"
] | null | null | null | notebooks/.ipynb_checkpoints/CenterOfMassAndMomentOfInertia-checkpoint (rnwatanabe-Inspiron-5457's conflicted copy 2018-03-18).ipynb | raissabthibes/bmc | 840800fb94ea3bf188847d0771ca7197dfec68e3 | [
"MIT"
] | null | null | null | 42.114094 | 524 | 0.536414 | [
[
[
"# Center of Mass and Moment of Inertia\nRenato Naville Watanabe",
"_____no_output_____"
],
[
"### Center of mass\n\nThe center of mass of a set of particles is formally defined as the point where the sum of the vectors linking this point to each particle, weighted by its mass, is zero. The sum of the vectors linking the center of mass to each particle is:\n\n\\begin{equation}\n \\sum\\limits_{i = 1}^nm_i\\left({\\bf\\vec{r_i}} - {\\bf\\vec{r}_{cm}}\\right) = \\sum\\limits_{i = 1}^nm_i{\\bf\\vec{r_i}} - \\sum\\limits_{i = 1}^nm_i{\\bf\\vec{r}_{cm}} = \\sum\\limits_{i = 1}^nm_i{\\bf\\vec{r_i}} - {\\bf\\vec{r}_{cm}}\\sum\\limits_{i = 1}^nm_i\n\\end{equation}\nwhere $n$ is the number of particles.\n\nNow, we equal this sum to zero and isolate ${\\bf\\vec{r_{cm}}}$:\n\n\\begin{equation}\n \\sum\\limits_{i = 1}^nm_i{\\bf\\vec{r_i}} - {\\bf\\vec{r}_{cm}}\\sum\\limits_{i = 1}^nm_i = 0 \\longrightarrow {\\bf\\vec{r}_{cm}} = \\frac{\\sum\\limits_{i = 1}^nm_i{\\bf\\vec{r_i}}}{\\sum\\limits_{i = 1}^nm_i} = \\frac{\\sum\\limits_{i = 1}^nm_i{\\bf\\vec{r_i}}}{m_T}\n\\end{equation}\n\nwhere $m_T$ is the total mass of the particles.\n\nFor a rigid body, the center of mass is defined as the point where the integral of the vectors linking this point to each differential part of mass, weighted by this differential mass, is zero. This integral is:\n\n\\begin{equation}\n \\int\\limits_{B} {\\bf{\\vec{r}_B}} dm = \\int\\limits_{B} {\\bf{\\vec{r}}} - {\\bf{\\vec{r}_{cm}}} dm = \\int\\limits_{B} {\\bf{\\vec{r}}}\\,dm - \\int\\limits_{B}{\\bf{\\vec{r}_{cm}}} dm = \\int\\limits_{B} {\\bf{\\vec{r}}}\\,dm - {\\bf{\\vec{r}_{cm}}}\\int\\limits_{B}\\, dm\n\\end{equation}\n\nNow we equal this integral to zero and isolate ${\\bf\\vec{r_{cm}}}$:\n\n\\begin{equation}\n \\int\\limits_{B} {\\bf{\\vec{r}}}\\,dm - {\\bf{\\vec{r}_{cm}}}\\int\\limits_{B}\\, dm = 0 \\longrightarrow {\\bf{\\vec{r}_{cm}}} = \\frac{ \\int\\limits_{B} {\\bf{\\vec{r}}}\\,dm}{\\int\\limits_{B}\\, dm} = \\frac{ \\int\\limits_{B} {\\bf{\\vec{r}}}\\,dm}{m_B}\n\\end{equation}\nwhere $m_B$ is the mass of the body.",
"_____no_output_____"
],
[
"### Center of gravity\n\nCenter of gravity of a body is the point where the moment caused by the gravitational force in the whole body relative to this point is zero.\n\n\\begin{equation}\n \\vec{\\bf{M_0}} = \\int\\limits_{B} \\vec{\\bf{r_B}}\\, \\times\\,\\vec{\\bf{g}}\\,dm \n\\end{equation}\n\nIf the acceleration of gravity being applied to the whole body is the same (for all practical purposes, in Biomechanics we can consider it the same in the whole body), the gravity vector can go out of the integral: \n\n\\begin{equation}\n \\vec{\\bf{M_0}}= \\int\\limits_{B}\\vec{\\bf{r_B}}\\,dm\\, \\times\\,\\vec{\\bf{g}} = \\int\\limits_{B}(\\vec{\\bf{r}} - \\vec{\\bf{r_{cm}}})\\,dm\\, \\times\\,\\vec{\\bf{g}} = \\left(\\int\\limits_{B}\\vec{\\bf{r}}\\,dm -\\int\\limits_{B}\\vec{\\bf{r_{G}}}\\,dm\\,\\right) \\times\\,\\vec{\\bf{g}}\n\\end{equation}\n\n\nNow, we equal this moment to zero and isolate $\\vec{\\bf{r_{G}}}$:\n\n\\begin{equation}\n \\left(\\int\\limits_{B}\\vec{\\bf{r}}\\,dm -\\int\\limits_{B}\\vec{\\bf{r_{G}}}\\,dm\\right) \\times\\,\\vec{\\bf{g}} = 0 \\longrightarrow \\int\\limits_{B}\\vec{\\bf{r}}\\,dm -\\int\\limits_{B}\\vec{\\bf{r_{G}}}\\,dm = 0 \\longrightarrow \\int\\limits_{B}\\vec{\\bf{r}}\\,dm -\\vec{\\bf{r_{G}}}\\int\\limits_{B}\\,dm = 0\\,\\,\\,\\,\\,\\, \\longrightarrow \\vec{\\bf{r_{G}}} = \\frac{ \\int\\limits_{B}\\vec{\\bf{r}}\\,dm}{\\int\\limits_{B}\\,dm} = \\frac{ \\int\\limits_{B}\\vec{\\bf{r}}\\,dm}{m_B} \n\\end{equation}\nwhere $m_B$ is the mass of the body.\n\nNote that in this case, that the gravity acceleration is constant, the center of gravity $\\vec{\\bf{r_{G}}}$ is equal to the center of mass $\\vec{\\bf{r_{cm}}}$.",
"_____no_output_____"
],
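[
"*A quick numerical sketch (under the uniform-gravity assumption, with made-up data used only for illustration):* the snippet below computes the center of mass of a few particles and then the total moment of their weights about that point; the result is numerically zero, consistent with the equivalence between the center of gravity and the center of mass noted above.\n\n```python\nimport numpy as np\n\ng = np.array([0.0, -9.81, 0.0])    # uniform gravitational acceleration [m/s^2]\nm = np.array([2.0, 1.0, 3.0])      # made-up masses [kg]\nr = np.array([[0.0, 0.0, 0.0],\n              [1.0, 0.0, 0.0],\n              [0.5, 1.0, 0.0]])    # made-up positions [m]\n\nr_cm = (m[:, np.newaxis] * r).sum(axis=0) / m.sum()\n\n# total moment of the weights m_i*g about the center of mass\nM_cm = np.cross(r - r_cm, m[:, np.newaxis] * g).sum(axis=0)\nprint(M_cm)  # approximately [0, 0, 0]\n```",
"_____no_output_____"
],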
[
"### Moment of inertia\n\nThe moment of inertia is a measure of how the mass of the body distributes relative to a given axis passing to a given point O. If we define the versor $\\hat{\\bf{n_a}}$ as a versor in the direction of the axis, then the moment of inertia relative to the point O in the direction of the versor $\\hat{\\bf{n_a}}$ is defined as:\n \n\\begin{equation}\n J_a = \\int\\limits_B (\\vec{\\bf{r_{/O}}} \\times \\hat{\\bf{n_a}}).(\\vec{\\bf{r_{/O}}} \\times \\hat{\\bf{n_a}})\\,dm \n \\label{eq:inmomgen}\n\\end{equation}\n\nFor planar movements, we normally compute the moment of inertia relative to the z-axis. So, the Eq.~\\eqref{eq:inmomgen} simplifies to:\n\n\\begin{equation}\n \n\\end{equation}\n### Parallel axis theorem\n\n",
"_____no_output_____"
],
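[
"*A quick numerical sketch:* for a set of particles the integral above becomes a sum, $J_z = \\sum_i m_i\\left(x_i^2 + y_i^2\\right)$. The snippet below evaluates it about the origin O with made-up masses and positions, for illustration only.\n\n```python\nimport numpy as np\n\nm = np.array([2.0, 1.0, 3.0])      # made-up masses [kg]\nxy = np.array([[0.0, 0.0],\n               [1.0, 0.0],\n               [0.5, 1.0]])        # made-up planar positions relative to O [m]\n\n# J_z = sum_i m_i * (x_i**2 + y_i**2)\nJ_z = np.sum(m * (xy[:, 0]**2 + xy[:, 1]**2))\nprint(J_z)  # 2*0 + 1*1 + 3*1.25 = 4.75 kg m^2\n```",
"_____no_output_____"
],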
[
"### Matrix of Inertia",
"_____no_output_____"
],
[
"### Problems\n3.2.5, 3.2.7, ",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
ec8da8884964a6cc64266015466c00628f79b22c | 49,833 | ipynb | Jupyter Notebook | appyters/Enrichment_Analysis_Visualizer/Enrichment-Appyter.ipynb | serena-zhang/appyter-catalog | f3323f2a533d3c290a41bebadf653c3694ce03f0 | [
"Apache-2.0"
] | null | null | null | appyters/Enrichment_Analysis_Visualizer/Enrichment-Appyter.ipynb | serena-zhang/appyter-catalog | f3323f2a533d3c290a41bebadf653c3694ce03f0 | [
"Apache-2.0"
] | null | null | null | appyters/Enrichment_Analysis_Visualizer/Enrichment-Appyter.ipynb | serena-zhang/appyter-catalog | f3323f2a533d3c290a41bebadf653c3694ce03f0 | [
"Apache-2.0"
] | null | null | null | 43.867077 | 3,856 | 0.577469 | [
[
[
"#%%appyter init\nfrom appyter import magic\nmagic.init(lambda _=globals: _())",
"_____no_output_____"
]
],
[
[
"# Enrichment Analysis Visualizer\n\nThis appyter creates a variety of visualizations for enrichment analysis data for one selected Enrichr library. The scatter plot and hexagonal canvas may not be available for every library.\n\nHere are the visualizations created: \n \n* **Scatter Plot** \n\nThe scatterplot is organized so that simliar gene sets are clustered together. Enriched gene sets will appear blue instead of gray; the darker the blue the smaller the p-value. The name of the gene set and the associated p-value are displayed when a point is hovered over.\n\nPlots can be downloaded as an svg using the save function on the toolbar next to the plot.\n\n* **Bar Chart**\n\nThe bar chart contains the top 10 enriched terms and their corresponding p-values for the chosen library. Colored bars correspond to terms with significant p-values (<0.05). An asterisk (*) next to a p-value indicates the term also has a significant adjusted p-value (<0.05).\n\n* **Hexagonal Canvas**\n\nThe hexagonal canvas is made up of hexagons, each of which represents one gene set from the library selected. The hexagons are colored based on the Jaccard similarity index between the inputted gene list and the gene set the hexagon represents (the brighter, the more similar). The hexagons representing the most similar gene sets are grouped together. The name of the gene set and the associated similarity index are displayed when a hexagon is hovered over.\n\n* **Manhattan Plot**\n\nA manhattan plot is created displaying all the gene sets from the select library and their p-value. The x-axis of the plot is made up of gene sets from the library. The y-axis of the plot has the -log(p value) for each gene set. The name of the gene set and the associated p-value are displayed when a point is hovered over. You can also zoom, pan, and save the plot as an svg using the toolbar on the right.\n\nAdditionally, the names and p-values of significant terms in the library are provided in a table output below the plot which can be downloaded. \n\nThere is a link at the bottom to the full analysis results on the Enrichr website.",
"_____no_output_____"
]
],
[
[
"# Scatter Plot Imports\nfrom maayanlab_bioinformatics.enrichment import enrich_crisp\nimport matplotlib as mpl\nimport matplotlib.colors as colors\nimport base64\n\n# Bar Chart Imports\nimport pandas as pd \nimport numpy as np\nimport json\nimport requests\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport time\nfrom matplotlib.ticker import MaxNLocator\nfrom IPython.display import display, FileLink, Markdown, HTML\n\n# Hexagonal Canvas Imports\nimport json\nimport math\nimport uuid\nimport urllib\nfrom textwrap import dedent\nfrom string import Template\nfrom operator import itemgetter\n\n# Manhattan Plot Imports\nimport matplotlib.patches as mpatches\nimport matplotlib.cm as cm\n\n# Bokeh\nfrom bokeh.io import output_notebook\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import HoverTool, CustomJS, ColumnDataSource, Span\nfrom bokeh.layouts import layout, row, column, gridplot\nfrom bokeh.palettes import all_palettes\noutput_notebook()",
"_____no_output_____"
],
[
"%%appyter hide_code\n\n{% do SectionField(name='section1', title = '1. Submit Your Gene List', subtitle = 'Upload a text file containing your gene list -OR- copy and paste your gene list into the text box below (One gene per row). You can also try the default gene list provided.', img = 'enrichr-icon.png')%}\n{% do SectionField(name='section2', title = '2. Choose Enrichr Library', subtitle = 'Select one Enrichr library.', img = 'enrichr-icon.png')%}",
"_____no_output_____"
],
[
"%%appyter code_eval\n\ngene_list_filename = {{ FileField(name='gene_list_filename', label='Gene List File', default='', description='Upload your gene list as a text file (One gene per row).',section = 'section1') }}\n\ngene_list_input = {{ TextField(name='gene_list_input', label='Gene List', default='NSUN3\\nPOLRMT\\nNLRX1\\nSFXN5\\nZC3H12C\\nSLC25A39\\nARSG\\nDEFB29\\nNDUFB6\\nZFAND1\\nTMEM77\\n5730403B10RIK\\nRP23-195K8.6\\nTLCD1\\nPSMC6\\nSLC30A6\\nLOC100047292\\nLRRC40\\nORC5L\\nMPP7\\nUNC119B\\nPRKACA\\nTCN2\\nPSMC3IP\\nPCMTD2\\nACAA1A\\nLRRC1\\n2810432D09RIK\\nSEPHS2\\nSAC3D1\\nTMLHE\\nLOC623451\\nTSR2\\nPLEKHA7\\nGYS2\\nARHGEF12\\nHIBCH\\nLYRM2\\nZBTB44\\nENTPD5\\nRAB11FIP2\\nLIPT1\\nINTU\\nANXA13\\nKLF12\\nSAT2\\nGAL3ST2\\nVAMP8\\nFKBPL\\nAQP11\\nTRAP1\\nPMPCB\\nTM7SF3\\nRBM39\\nBRI3\\nKDR\\nZFP748\\nNAP1L1\\nDHRS1\\nLRRC56\\nWDR20A\\nSTXBP2\\nKLF1\\nUFC1\\nCCDC16\\n9230114K14RIK\\nRWDD3\\n2610528K11RIK\\nACO1\\nCABLES1\\nLOC100047214\\nYARS2\\nLYPLA1\\nKALRN\\nGYK\\nZFP787\\nZFP655\\nRABEPK\\nZFP650\\n4732466D17RIK\\nEXOSC4\\nWDR42A\\nGPHN\\n2610528J11RIK\\n1110003E01RIK\\nMDH1\\n1200014M14RIK\\nAW209491\\nMUT\\n1700123L14RIK\\n2610036D13RIK\\nCOX15\\nTMEM30A\\nNSMCE4A\\nTM2D2\\nRHBDD3\\nATXN2\\nNFS1\\n3110001I20RIK\\nBC038156\\nLOC100047782\\n2410012H22RIK\\nRILP\\nA230062G08RIK\\nPTTG1IP\\nRAB1\\nAFAP1L1\\nLYRM5\\n2310026E23RIK\\nC330002I19RIK\\nZFYVE20\\nPOLI\\nTOMM70A\\nSLC7A6OS\\nMAT2B\\n4932438A13RIK\\nLRRC8A\\nSMO\\nNUPL2\\nTRPC2\\nARSK\\nD630023B12RIK\\nMTFR1\\n5730414N17RIK\\nSCP2\\nZRSR1\\nNOL7\\nC330018D20RIK\\nIFT122\\nLOC100046168\\nD730039F16RIK\\nSCYL1\\n1700023B02RIK\\n1700034H14RIK\\nFBXO8\\nPAIP1\\nTMEM186\\nATPAF1\\nLOC100046254\\nLOC100047604\\nCOQ10A\\nFN3K\\nSIPA1L1\\nSLC25A16\\nSLC25A40\\nRPS6KA5\\nTRIM37\\nLRRC61\\nABHD3\\nGBE1\\nPARP16\\nHSD3B2\\nESM1\\nDNAJC18\\nDOLPP1\\nLASS2\\nWDR34\\nRFESD\\nCACNB4\\n2310042D19RIK\\nSRR\\nBPNT1\\n6530415H11RIK\\nCLCC1\\nTFB1M\\n4632404H12RIK\\nD4BWG0951E\\nMED14\\nADHFE1\\nTHTPA\\nCAT\\nELL3\\nAKR7A5\\nMTMR14\\nTIMM44\\nSF1\\nIPP\\nIAH1\\nTRIM23\\nWDR89\\nGSTZ1\\nCRADD\\n2510006D16RIK\\nFBXL6\\nLOC100044400\\nZFP106\\nCD55\\n0610013E23RIK\\nAFMID\\nTMEM86A\\nALDH6A1\\nDALRD3\\nSMYD4\\nNME7\\nFARS2\\nTASP1\\nCLDN10\\nA930005H10RIK\\nSLC9A6\\nADK\\nRBKS\\n2210016F16RIK\\nVWCE\\n4732435N03RIK\\nZFP11\\nVLDLR\\n9630013D21RIK\\n4933407N01RIK\\nFAHD1\\nMIPOL1\\n1810019D21RIK\\n1810049H13RIK\\nTFAM\\nPAICS\\n1110032A03RIK\\nLOC100044139\\nDNAJC19\\nBC016495\\nA930041I02RIK\\nRQCD1\\nUSP34\\nZCCHC3\\nH2AFJ\\nPHF7\\n4921508D12RIK\\nKMO\\nPRPF18\\nMCAT\\nTXNDC4\\n4921530L18RIK\\nVPS13B\\nSCRN3\\nTOR1A\\nAI316807\\nACBD4\\nFAH\\nAPOOL\\nCOL4A4\\nLRRC19\\nGNMT\\nNR3C1\\nSIP1\\nASCC1\\nFECH\\nABHD14A\\nARHGAP18\\n2700046G09RIK\\nYME1L1\\nGK5\\nGLO1\\nSBK1\\nCISD1\\n2210011C24RIK\\nNXT2\\nNOTUM\\nANKRD42\\nUBE2E1\\nNDUFV1\\nSLC33A1\\nCEP68\\nRPS6KB1\\nHYI\\nALDH1A3\\nMYNN\\n3110048L19RIK\\nRDH14\\nPROZ\\nGORASP1\\nLOC674449\\nZFP775\\n5430437P03RIK\\nNPY\\nADH5\\nSYBL1\\n4930432O21RIK\\nNAT9\\nLOC100048387\\nMETTL8\\nENY2\\n2410018G20RIK\\nPGM2\\nFGFR4\\nMOBKL2B\\nATAD3A\\n4932432K03RIK\\nDHTKD1\\nUBOX5\\nA530050D06RIK\\nZDHHC5\\nMGAT1\\nNUDT6\\nTPMT\\nWBSCR18\\nLOC100041586\\nCDK5RAP1\\n4833426J09RIK\\nMYO6\\nCPT1A\\nGADD45GIP1\\nTMBIM4\\n2010309E21RIK\\nASB9\\n2610019F03RIK\\n7530414M10RIK\\nATP6V1B2\\n2310068J16RIK\\nDDT\\nKLHDC4\\nHPN\\nLIFR\\nOVOL1\\nNUDT12\\nCDAN1\\nFBXO9\\nFBXL3\\nHOXA7\\nALDH8A1\\n3110057O12RIK\\nABHD11\\nPSMB1\\nENSMUSG00000074286\\nCHPT1\\nOXSM\\n2310009A05RIK\\n1700001L05RIK\\nZFP148\\n39509\\nMRPL9\\nTME
M80\\n9030420J04RIK\\nNAGLU\\nPLSCR2\\nAGBL3\\nPEX1\\nCNO\\nNEO1\\nASF1A\\nTNFSF5IP1\\nPKIG\\nAI931714\\nD130020L05RIK\\nCNTD1\\nCLEC2H\\nZKSCAN1\\n1810044D09RIK\\nMETTL7A\\nSIAE\\nFBXO3\\nFZD5\\nTMEM166\\nTMED4\\nGPR155\\nRNF167\\nSPTLC1\\nRIOK2\\nTGDS\\nPMS1\\nPITPNC1\\nPCSK7\\n4933403G14RIK\\nEI24\\nCREBL2\\nTLN1\\nMRPL35\\n2700038C09RIK\\nUBIE\\nOSGEPL1\\n2410166I05RIK\\nWDR24\\nAP4S1\\nLRRC44\\nB3BP\\nITFG1\\nDMXL1\\nC1D', description='Paste your gene list (One gene per row).', section = 'section1') }}\n\nenrichr_library = '{{ ChoiceField(name='enrichr_library', description='Select one Enrichr library for which to create visualizations.', label='Enrichr Library', default='WikiPathways_2019_Human', section = 'section2',choices=[\n 'ARCHS4_TFs_Coexp',\n 'ChEA_2016',\n 'ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X',\n 'ENCODE_Histone_Modifications_2015',\n 'ENCODE_TF_ChIP-seq_2015',\n 'Epigenomics_Roadmap_HM_ChIP-seq',\n 'Enrichr_Submissions_TF-Gene_Coocurrence',\n 'Genome_Browser_PWMs',\n 'lncHUB_lncRNA_Co-Expression',\n 'miRTarBase_2017',\n 'TargetScan_microRNA_2017',\n 'TF-LOF_Expression_from_GEO',\n 'TF_Perturbations_Followed_by_Expression',\n 'Transcription_Factor_PPIs',\n 'TRANSFAC_and_JASPAR_PWMs',\n 'TRRUST_Transcription_Factors_2019',\n 'ARCHS4_Kinases_Coexp',\n 'BioCarta_2016',\n 'BioPlanet_2019',\n 'BioPlex_2017',\n 'CORUM',\n 'Elsevier_Pathway_Collection',\n 'HMS_LINCS_KinomeScan',\n 'HumanCyc_2016',\n 'huMAP',\n 'KEA_2015',\n 'KEGG_2019_Human',\n 'KEGG_2019_Mouse',\n 'Kinase_Perturbations_from_GEO_down',\n 'Kinase_Perturbations_from_GEO_up',\n 'L1000_Kinase_and_GPCR_Perturbations_down',\n 'L1000_Kinase_and_GPCR_Perturbations_up',\n 'NCI-Nature_2016',\n 'NURSA_Human_Endogenous_Complexome',\n 'Panther_2016',\n 'Phosphatase_Substrates_from_DEPOD',\n 'PPI_Hub_Proteins',\n 'Reactome_2016',\n 'SILAC_Phosphoproteomics',\n 'SubCell_BarCode',\n 'Virus-Host_PPI_P-HIPSTer_2020',\n 'WikiPathways_2019_Human',\n 'WikiPathways_2019_Mouse',\n 'GO_Biological_Process_2018',\n 'GO_Cellular_Component_2018',\n 'GO_Molecular_Function_2018',\n 'Human_Phenotype_Ontology',\n 'Jensen_COMPARTMENTS',\n 'Jensen_DISEASES',\n 'Jensen_TISSUES',\n 'MGI_Mammalian_Phenotype_Level_4_2019', \n 'Achilles_fitness_decrease',\n 'Achilles_fitness_increase',\n 'ARCHS4_IDG_Coexp',\n 'ClinVar_2019',\n 'dbGaP',\n 'DepMap_WG_CRISPR_Screens_Broad_CellLines_2019',\n 'DepMap_WG_CRISPR_Screens_Sanger_CellLines_2019',\n 'DisGeNET',\n 'DrugMatrix',\n 'DSigDB',\n 'GeneSigDB',\n 'GWAS_Catalog_2019',\n 'LINCS_L1000_Chem_Pert_down',\n 'LINCS_L1000_Chem_Pert_up',\n 'LINCS_L1000_Ligand_Perturbations_down',\n 'LINCS_L1000_Ligand_Perturbations_up',\n 'MSigDB_Computational',\n 'MSigDB_Oncogenic_Signatures',\n 'Old_CMAP_down',\n 'Old_CMAP_up',\n 'OMIM_Disease',\n 'OMIM_Expanded',\n 'PheWeb_2019',\n 'Rare_Diseases_AutoRIF_ARCHS4_Predictions',\n 'Rare_Diseases_AutoRIF_Gene_Lists',\n 'Rare_Diseases_GeneRIF_ARCHS4_Predictions',\n 'Rare_Diseases_GeneRIF_Gene_Lists',\n 'UK_Biobank_GWAS_v1',\n 'Virus_Perturbations_from_GEO_down',\n 'Virus_Perturbations_from_GEO_up',\n 'VirusMINT', \n 'Allen_Brain_Atlas_down',\n 'Allen_Brain_Atlas_up',\n 'ARCHS4_Cell-lines',\n 'ARCHS4_Tissues',\n 'Cancer_Cell_Line_Encyclopedia',\n 'CCLE_Proteomics_2020',\n 'ESCAPE',\n 'GTEx_Tissue_Sample_Gene_Expression_Profiles_down',\n 'GTEx_Tissue_Sample_Gene_Expression_Profiles_up',\n 'Human_Gene_Atlas',\n 'Mouse_Gene_Atlas',\n 'NCI-60_Cancer_Cell_Lines',\n 'ProteomicsDB_2020',\n 'Tissue_Protein_Expression_from_Human_Proteome_Map', \n 'Chromosome_Location_hg19',\n 
'Data_Acquisition_Method_Most_Popular_Genes',\n 'Enrichr_Libraries_Most_Popular_Genes',\n 'Genes_Associated_with_NIH_Grants',\n 'HMDB_Metabolites',\n 'HomoloGene',\n 'InterPro_Domains_2019',\n 'NIH_Funded_PIs_2017_AutoRIF_ARCHS4_Predictions',\n 'NIH_Funded_PIs_2017_GeneRIF_ARCHS4_Predictions',\n 'NIH_Funded_PIs_2017_Human_AutoRIF',\n 'NIH_Funded_PIs_2017_Human_GeneRIF',\n 'Pfam_Domains_2019',\n 'Pfam_InterPro_Domains',\n 'Table_Mining_of_CRISPR_Studies', \n 'BioCarta_2013',\n 'BioCarta_2015',\n 'ChEA_2013',\n 'ChEA_2015',\n 'Chromosome_Location',\n 'Disease_Signatures_from_GEO_down_2014',\n 'Disease_Signatures_from_GEO_up_2014',\n 'Drug_Perturbations_from_GEO_2014',\n 'ENCODE_Histone_Modifications_2013',\n 'ENCODE_TF_ChIP-seq_2014',\n 'GO_Biological_Process_2013',\n 'GO_Biological_Process_2015',\n 'GO_Biological_Process_2017',\n 'GO_Biological_Process_2017b',\n 'GO_Cellular_Component_2013',\n 'GO_Cellular_Component_2015',\n 'GO_Cellular_Component_2017',\n 'GO_Cellular_Component_2017b',\n 'GO_Molecular_Function_2013',\n 'GO_Molecular_Function_2015',\n 'GO_Molecular_Function_2017',\n 'GO_Molecular_Function_2017b',\n 'HumanCyc_2015',\n 'KEA_2013',\n 'KEGG_2013',\n 'KEGG_2015',\n 'KEGG_2016',\n 'MGI_Mammalian_Phenotype_2013',\n 'MGI_Mammalian_Phenotype_2017',\n 'MGI_Mammalian_Phenotype_Level_3',\n 'MGI_Mammalian_Phenotype_Level_4',\n 'NCI-Nature_2015',\n 'Panther_2015',\n 'Reactome_2013',\n 'Reactome_2015',\n 'TargetScan_microRNA',\n 'Tissue_Protein_Expression_from_ProteomicsDB',\n 'WikiPathways_2013',\n 'WikiPathways_2015',\n 'WikiPathways_2016', \n 'Aging_Perturbations_from_GEO_down',\n 'Aging_Perturbations_from_GEO_up',\n 'Disease_Perturbations_from_GEO_down',\n 'Disease_Perturbations_from_GEO_up',\n 'Drug_Perturbations_from_GEO_down',\n 'Drug_Perturbations_from_GEO_up',\n 'Gene_Perturbations_from_GEO_down',\n 'Gene_Perturbations_from_GEO_up',\n 'Ligand_Perturbations_from_GEO_down',\n 'Ligand_Perturbations_from_GEO_up',\n 'MCF7_Perturbations_from_GEO_down',\n 'MCF7_Perturbations_from_GEO_up',\n 'Microbe_Perturbations_from_GEO_down',\n 'Microbe_Perturbations_from_GEO_up',\n 'RNA-Seq_Disease_Gene_and_Drug_Signatures_from_GEO',\n 'SysMyo_Muscle_Gene_Sets']) }}'",
"_____no_output_____"
]
],
[
[
"# Set Other Parameters\nFor simplicity, the only inputs for this appyter are a gene list and one library. Other parameters are set to default values in the cell below. You can download the notebook, change these parameters, and rerun it if you wish.",
"_____no_output_____"
]
],
[
[
"# Scatter Plot Parameters\nsignificance_value = 0.05\n\n# Bar Chart Parameters\nfigure_file_format = ['png', 'svg']\noutput_file_name = 'Enrichr_results'\ncolor = 'lightskyblue'\nfinal_output_file_names = [str(output_file_name+'.'+file_type) for file_type in figure_file_format]\n\n# Hexagonal Canvas Parameters\ncanvas_color = 'Blue'\nnum_hex_colored = 10\n\n# Manhattan Plot Parameters\nmanhattan_colors = ['#003f5c', '#7a5195', '#ef5675', '#ffa600']",
"_____no_output_____"
]
],
[
[
"### Import gene list",
"_____no_output_____"
]
],
[
[
"# Import gene list as file or from text box file\n# Will choose file upload over textbox if a file is given \nif gene_list_filename != '':\n open_gene_list_file = open(gene_list_filename,'r')\n lines = open_gene_list_file.readlines()\n genes = [x.strip() for x in lines]\n open_gene_list_file.close()\nelse:\n genes = gene_list_input.split('\\n')\n genes = [x.strip() for x in genes]",
"_____no_output_____"
]
],
[
[
"# Scatter Plot Functions",
"_____no_output_____"
]
],
[
[
"def download_library(library_name):\n # download pre-processed library data\n try:\n df = pd.read_csv('https://raw.githubusercontent.com/MaayanLab/Enrichr-Viz-Appyter/master/Enrichr-Processed-Library-Storage/Scatterplot/Libraries/' + library_name + '.csv')\n except:\n return -1, -1, -1\n\n name = df['Name'].tolist()\n gene_list = df['Genes'].tolist()\n library_data = [list(a) for a in zip(name, gene_list)]\n return genes, library_data, df",
"_____no_output_____"
],
[
"# enrichment analysis\ndef get_library_iter(library_data):\n for member in library_data:\n term = member[0]\n gene_set = member[1].split(' ')\n yield term, gene_set\n\ndef get_enrichment_results(genes, library_data):\n return sorted(enrich_crisp(genes, get_library_iter(library_data), 20000, True), key=lambda r: r[1].pvalue)\n\ndef get_pvalue(row, unzipped_results, all_results):\n if row['Name'] in list(unzipped_results[0]):\n index = list(unzipped_results[0]).index(row['Name'])\n return all_results[index][1].pvalue\n else:\n return 1\n",
"_____no_output_____"
],
[
"# call enrichment results and return a plot and dataframe for Scatter Plot\ndef get_plot(library_name):\n genes, library_data, df = download_library(library_name)\n\n # library not supported\n if genes == -1:\n return -1 ,-1\n\n all_results = get_enrichment_results(genes, library_data)\n unzipped_results = list(zip(*all_results))\n\n if len(all_results) == 0:\n print(\"There are no enriched terms with your inputted gene set and the \", library_name, \" library.\")\n my_colors = ['#808080'] * len(df.index)\n\n source = ColumnDataSource(\n data=dict(\n x = df['x'],\n y = df['y'],\n gene_set = df['Name'],\n colors = my_colors,\n sizes = [6] * len(df.index)\n )\n )\n\n hover_emb = HoverTool(names=[\"df\"], tooltips=\"\"\"\n <div style=\"margin: 10\">\n <div style=\"margin: 0 auto; width:200px;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Gene Set:</span>\n <span style=\"font-size: 12px\">@gene_set</span>\n </div>\n </div>\n \"\"\")\n else:\n # add p value to the dataframe\n df['p value'] = df.apply (lambda row: get_pvalue(row, unzipped_results, all_results), axis=1)\n\n # normalize p values for color scaling\n cmap = mpl.cm.get_cmap('Blues_r')\n norm = colors.Normalize(vmin = df['p value'].min(), vmax=significance_value*2)\n\n my_colors = []\n my_sizes = []\n for index, row in df.iterrows():\n if row['p value'] < significance_value:\n my_colors += [mpl.colors.to_hex(cmap(norm(row['p value'])))]\n my_sizes += [12]\n else:\n my_colors += ['#808080']\n my_sizes += [6]\n\n source = ColumnDataSource(\n data=dict(\n x = df['x'],\n y = df['y'],\n gene_set = df['Name'],\n p_value = df['p value'],\n colors = my_colors,\n sizes = my_sizes\n )\n )\n\n hover_emb = HoverTool(names=[\"df\"], tooltips=\"\"\"\n <div style=\"margin: 10\">\n <div style=\"margin: 0 auto; width:200px;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Gene Set:</span>\n <span style=\"font-size: 12px\">@gene_set</span>\n <span style=\"font-size: 12px; font-weight: bold;\">p-value:</span>\n <span style=\"font-size: 12px\">@p_value</span>\n </div>\n </div>\n \"\"\")\n\n tools_emb = [hover_emb, 'pan', 'wheel_zoom', 'reset', 'save']\n\n plot_emb = figure(plot_width=700, plot_height=800, tools=tools_emb)\n\n # hide axis labels and grid lines\n plot_emb.xaxis.major_tick_line_color = None\n plot_emb.xaxis.minor_tick_line_color = None\n plot_emb.yaxis.major_tick_line_color = None\n plot_emb.yaxis.minor_tick_line_color = None\n plot_emb.xaxis.major_label_text_font_size = '0pt'\n plot_emb.yaxis.major_label_text_font_size = '0pt' \n\n plot_emb.circle('x', 'y', size = 'sizes', alpha = 0.7, line_alpha = 0, \n line_width = 0.01, source = source, fill_color = 'colors', name = \"df\")\n\n plot_emb.output_backend = \"svg\"\n \n return plot_emb, df",
"_____no_output_____"
]
],
[
[
"### Enrichr API Function for Manhattan Plot and Bar Chart",
"_____no_output_____"
]
],
[
[
"# Takes a gene list and Enrichr libraries as input\ndef Enrichr_API(enrichr_gene_list, all_libraries):\n\n all_terms = []\n all_pvalues =[] \n all_adjusted_pvalues = []\n\n for library_name in all_libraries : \n ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/addList'\n genes_str = '\\n'.join(enrichr_gene_list)\n description = 'Example gene list'\n payload = {\n 'list': (None, genes_str),\n 'description': (None, description)\n }\n\n response = requests.post(ENRICHR_URL, files=payload)\n if not response.ok:\n raise Exception('Error analyzing gene list')\n\n data = json.loads(response.text)\n time.sleep(0.5)\n ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/enrich'\n query_string = '?userListId=%s&backgroundType=%s'\n user_list_id = data['userListId']\n short_id = data[\"shortId\"]\n gene_set_library = library_name\n response = requests.get(\n ENRICHR_URL + query_string % (user_list_id, gene_set_library)\n )\n if not response.ok:\n raise Exception('Error fetching enrichment results')\n\n data = json.loads(response.text)\n\n short_results_df = pd.DataFrame(data[library_name][0:10])\n all_terms.append(list(short_results_df[1]))\n all_pvalues.append(list(short_results_df[2]))\n all_adjusted_pvalues.append(list(short_results_df[6]))\n \n results_df = pd.DataFrame(data[library_name])\n # adds library name to the data frame so the libraries can be distinguished\n results_df['library'] = library_name.replace('_', '')\n\n return([results_df, short_results_df, all_terms, all_pvalues, all_adjusted_pvalues, str(short_id)])",
"_____no_output_____"
]
],
[
[
"# Bar Chart Functions",
"_____no_output_____"
]
],
[
[
"# Function plots results in bar chart\n# Takes all terms, all p-values, all adjusted p-values, plot title, Enrichr libraries, and specified figure format\ndef enrichr_figure(all_terms, all_pvalues, all_adjusted_pvalues, plot_names, all_libraries, fig_format, bar_color): \n \n # rows and columns depend on number of Enrichr libraries submitted \n rows = []\n cols = []\n \n # Bar colors\n if bar_color != 'lightgrey':\n bar_color_not_sig = 'lightgrey'\n edgecolor=None\n linewidth=0\n else:\n bar_color_not_sig = 'white'\n edgecolor='black'\n linewidth=1\n \n\n plt.figure(figsize=(12, 12))\n rows = [0]\n cols = [0]\n i = 0 \n bar_colors = [bar_color if (x < 0.05) else bar_color_not_sig for x in all_pvalues[i]]\n fig = sns.barplot(x=np.log10(all_pvalues[i])*-1, y=all_terms[i], palette=bar_colors, edgecolor=edgecolor, linewidth=linewidth)\n fig.axes.get_yaxis().set_visible(False)\n fig.set_title(all_libraries[i].replace('_', ' '), fontsize=26)\n fig.set_xlabel('-Log10(p-value)', fontsize=25)\n fig.xaxis.set_major_locator(MaxNLocator(integer=True))\n fig.tick_params(axis='x', which='major', labelsize=20)\n if max(np.log10(all_pvalues[i])*-1)<1:\n fig.xaxis.set_ticks(np.arange(0, max(np.log10(all_pvalues[i])*-1), 0.1))\n for ii,annot in enumerate(all_terms[i]):\n if all_adjusted_pvalues[i][ii] < 0.05:\n annot = ' *'.join([annot, str(str(np.format_float_scientific(all_pvalues[i][ii], precision=2)))]) \n else:\n annot = ' '.join([annot, str(str(np.format_float_scientific(all_pvalues[i][ii], precision=2)))])\n\n title_start= max(fig.axes.get_xlim())/200\n fig.text(title_start, ii, annot, ha='left', wrap = True, fontsize = 26)\n fig.patch.set_edgecolor('black') \n fig.patch.set_linewidth('2')\n \n # Save results \n for plot_name in plot_names:\n plt.savefig(plot_name, bbox_inches = 'tight')\n \n # Show plot \n plt.show() ",
"_____no_output_____"
]
],
[
[
"# Hexagonal Canvas Functions",
"_____no_output_____"
]
],
[
[
"def library_processing():\n # downloads library data for the hexagonal canvas\n # library data is pre-annealed so the canvas will have the most similar gene sets closest together\n\n raw_library_data = []\n library_data = []\n\n try:\n library_name = enrichr_library\n with urllib.request.urlopen('https://raw.githubusercontent.com/MaayanLab/Enrichr-Viz-Appyter/master/Enrichr-Processed-Library-Storage/Annealing/Annealed-Libraries/' + library_name + '.txt') as f:\n for line in f.readlines():\n raw_library_data.append(line.decode('utf-8').split(\"\\t\\t\"))\n name = []\n gene_list = []\n except:\n return [], -1, -1\n\n for i in range(len(raw_library_data)):\n name += [raw_library_data[i][0]]\n raw_genes = raw_library_data[i][1].split('\\t')\n gene_list += [raw_genes[:-1]]\n\n library_data = [list(a) for a in zip(name, gene_list)]\n\n # raw_library_data: a 2D list where the first element is the name and the second element is a list of genes associated with that name\n\n jaccard_indices = []\n indices = []\n\n for gene_set in library_data:\n intersection = [value for value in gene_set[1] if value in genes]\n index = len(intersection)/(len(gene_set[1]) + len(genes))\n jaccard_indices += [[gene_set[0], index]]\n indices += [round(index, 5)]\n\n # determine the dimensions of the canvas\n x_dimension = math.ceil(math.sqrt(len(indices)))\n y_dimension = math.ceil(math.sqrt(len(indices)))\n\n # zip name, gene_list, indices, and blank list for neighbor score then add dummy entries to the zipped list\n num_hex = x_dimension*y_dimension\n anneal_list = list(zip(name, gene_list, indices))\n\n return anneal_list, x_dimension, y_dimension\n\ndef unzip_list(anneal_list):\n unzipped_list = zip(*anneal_list)\n return list(unzipped_list)",
"_____no_output_____"
],
[
"# define a list of colors for the hexagonal canvas\ndef get_color(anneal_list, cut_off_value, x_dimension, y_dimension):\n\n # Deal with cut_off_value (only color the most significant 10/20 hexagons)\n if cut_off_value == 2.0:\n sort_list = sorted(anneal_list, key=itemgetter(2), reverse=True)\n cut_off_value = sort_list[int(num_hex_colored)-1][2]\n\n r_value = 0\n g_value = 0\n b_value = 0\n\n if canvas_color == 'Red':\n r_value = 0.0\n g_value = 0.8\n b_value = 0.8\n if canvas_color == 'Yellow':\n r_value = 0.0\n g_value = 0.3\n b_value = 1.0\n if canvas_color == 'Purple':\n r_value = 0.5\n g_value = 1.0\n b_value = 0.0\n if canvas_color == 'Pink':\n r_value = 0.0\n g_value = 1.0\n b_value = 0.2\n if canvas_color == 'Orange':\n r_value = 0.0\n g_value = 0.45\n b_value = 1.0\n if canvas_color == 'Green':\n r_value = 1.0\n g_value = 0.0\n b_value = 1.0\n if canvas_color == 'Blue':\n r_value = 1.0\n g_value = 0.9\n b_value = 0.0\n\n color_list = []\n\n unzipped_anneal_list = unzip_list(anneal_list)\n\n max_index = max(unzipped_anneal_list[2])\n\n if max_index != 0:\n scaled_list = [i/max_index for i in unzipped_anneal_list[2]]\n else:\n scaled_list = unzipped_anneal_list[2]\n\n for i in range(x_dimension*y_dimension):\n if i < len(unzipped_anneal_list[2]) and float(unzipped_anneal_list[2][i]) >= cut_off_value:\n color_list += [mpl.colors.to_hex((1-scaled_list[i]*r_value, \n 1-scaled_list[i]*g_value, 1-scaled_list[i]*b_value))]\n elif i < len(unzipped_anneal_list[2]):\n color_list += [mpl.colors.to_hex((1-scaled_list[i], \n 1-scaled_list[i], 1-scaled_list[i]))]\n else:\n color_list += [\"#FFFFFF\"]\n return color_list, max_index, cut_off_value",
"_____no_output_____"
],
[
"def init_chart():\n chart_id = 'mychart-' + str(uuid.uuid4())\n display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>'))\n display(HTML(Template(dedent('''\n <script>\n require.config({\n paths: {\n 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min',\n 'd3-hexbin': 'https://d3js.org/d3-hexbin.v0.2.min',\n },\n shim: {\n 'd3-hexbin': ['d3']\n }\n })\n\n // If we configure mychart via url, we can eliminate this define here\n define($chart_id, ['d3', 'd3-hexbin'], function(d3, d3_hexbin) {\n return function (figure_id, numA, numB, colorList, libraryList, indices) {\n var margin = {top: 50, right: 20, bottom: 20, left: 50},\n width = 850 - margin.left - margin.right,\n height = 350 - margin.top - margin.bottom;\n\n // append the svg object to the body of the page\n var svG = d3.select('#' + figure_id)\n .attr(\"width\", width + margin.left + margin.right)\n .attr(\"height\", height + margin.top + margin.bottom)\n .append(\"g\")\n .attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top + \")\");\n \n //The number of columns and rows of the heatmap\n var MapColumns = numA,\n MapRows = numB;\n\n //The maximum radius the hexagons can have to still fit the screen\n var hexRadius = d3.min([width/((MapColumns + 0.5) * Math.sqrt(3)), height/((MapRows + 1/3) * 1.5)]);\n\n //Calculate the center position of each hexagon\n var points = [];\n for (var i = 0; i < MapRows; i++) {\n for (var j = 0; j < MapColumns; j++) {\n var x = hexRadius * j * Math.sqrt(3)\n //Offset each uneven row by half of a \"hex-width\" to the right\n if(i%2 === 1) x += (hexRadius * Math.sqrt(3))/2\n var y = hexRadius * i * 1.5\n points.push([x,y])\n }\n }\n\n //Set the hexagon radius\n var hexbin = d3_hexbin.hexbin().radius(hexRadius);\n\n svG.append(\"g\")\n .selectAll(\".hexagon\")\n .data(hexbin(points))\n .enter().append(\"path\")\n .attr(\"class\", \"hexagon\")\n .attr(\"d\", function (d) {\n return \"M\" + d.x + \",\" + d.y + hexbin.hexagon();\n })\n .attr(\"stroke\", \"black\")\n .attr(\"stroke-width\", \"1px\")\n .style(\"fill\", function (d,i) { return colorList[i]; })\n .on(\"mouseover\", mover)\n .on(\"mouseout\", mout)\n .append(\"svg:title\")\n .text(function(d,i) { return libraryList[i].concat(\" \").concat(indices[i]); });\n\n // Mouseover function\n function mover(d) {\n d3.select(this)\n .transition().duration(10) \n .style(\"fill-opacity\", 0.3)\n };\n\n // Mouseout function\n function mout(d) { \n d3.select(this)\n .transition().duration(10)\n .style(\"fill-opacity\", 1)\n };\n\n }\n\n })\n </script>\n ''')).substitute({ 'chart_id': repr(chart_id) })))\n return chart_id\n\ndef Canvas(numA, numB, colorList, libraryList, indices):\n chart_id = init_chart()\n display(HTML(Template(dedent('''\n <svg id=$figure_id></svg>\n <script>\n require([$chart_id], function(mychart) {\n mychart($figure_id, $numA, $numB, $colorList, $libraryList, $indices)\n })\n </script>\n ''')).substitute({\n 'chart_id': repr(chart_id),\n 'figure_id': repr('fig-' + str(uuid.uuid4())),\n 'numA': repr(numA),\n 'numB': repr(numB),\n 'colorList': repr(colorList),\n 'libraryList': repr(libraryList),\n 'indices': repr(indices)\n })))",
"_____no_output_____"
]
],
[
[
"# Manhattan Plot Functions",
"_____no_output_____"
]
],
[
[
"# Processes Enrichr data for Manhattan plots\n\ndef get_data(genes):\n # Process Enrichr data\n sorted_data = pd.DataFrame({\"Gene Set\": [], \"-log(p value)\": [], \"Library\": []})\n\n # get enrichr results from the library selected\n results_df = Enrichr_API(genes, [enrichr_library])[0]\n\n all_terms = []\n all_pvalues = []\n library_names = []\n\n all_terms.append(list(results_df[1]))\n all_pvalues.append(list(results_df[2]))\n library_names.append(list(results_df['library']))\n\n x=np.log10(all_pvalues[0])*-1\n sorted_terms = list(zip(all_terms[0], x, library_names[0]))\n sorted_terms = sorted(sorted_terms, key = itemgetter(0))\n unzipped_sorted_list = list(zip(*sorted_terms))\n\n data = pd.DataFrame({\"Gene Set\": unzipped_sorted_list[0], \"-log(p value)\": unzipped_sorted_list[1], \"Library\": unzipped_sorted_list[2]})\n\n sorted_data = pd.concat([sorted_data, data])\n\n # group data by library\n groups = sorted_data.groupby(\"Library\")\n return sorted_data, groups",
"_____no_output_____"
],
[
"# Create Manhattan Plots\ndef manhattan(sorted_data, groups):\n # split data frame into smaller data frames by library\n list_of_df = []\n for library_name in [enrichr_library]:\n library_name = library_name.replace('_', '')\n df_new = sorted_data[sorted_data['Library'] == library_name]\n list_of_df += [df_new]\n\n list_of_xaxis_values = []\n for df in list_of_df: \n list_of_xaxis_values += df[\"Gene Set\"].values.tolist()\n\n # define the output figure and the features we want\n p = figure(x_range = list_of_xaxis_values, plot_height=300, plot_width=750, tools='pan, box_zoom, hover, reset, save')\n\n # loop over all libraries\n r = []\n categories = []\n color_index = 0\n for df in list_of_df:\n if color_index >= len(manhattan_colors):\n color_index = 0 \n\n # calculate actual p value from -log(p value)\n actual_pvalues = []\n for log_value in df[\"-log(p value)\"].values.tolist():\n actual_pvalues += [\"{:.5e}\".format(10**(-1*log_value))]\n\n # define ColumnDataSource with our data for this library\n source = ColumnDataSource(data=dict(\n x = df[\"Gene Set\"].values.tolist(),\n y = df[\"-log(p value)\"].values.tolist(),\n pvalue = actual_pvalues,\n ))\n \n # plot data from this library\n r += [p.circle(x = 'x', y = 'y', size=5, fill_color=manhattan_colors[color_index], line_color = manhattan_colors[color_index], line_width=1, source = source)]\n color_index += 1\n\n p.background_fill_color = 'white'\n p.xaxis.major_tick_line_color = None \n p.xaxis.major_label_text_font_size = '0pt'\n p.y_range.start = 0\n p.yaxis.axis_label = '-log(p value)'\n\n p.hover.tooltips = [\n (\"Gene Set\", \"@x\"),\n (\"p value\", \"@pvalue\"),\n ]\n p.output_backend = \"svg\"\n \n # returns the plot\n return p",
"_____no_output_____"
]
],
[
[
"# Plot Outputs\n## Scatter Plot",
"_____no_output_____"
]
],
[
[
"# Display Scatter Plot\nplot, df = get_plot(enrichr_library)\nif plot == -1:\n display(Markdown(\"The Scatter Plot visualization is not available for the selected library at this time.\"))\nelse:\n show(plot)\n display(Markdown('You may have to zoom in to see the details in densely-populated portions of the plots.'))\n display(Markdown('The larger blue points represent significantly enriched terms (the darker the blue, the more significant).'))\n display(Markdown('The gray points are not significant.'))\n display(Markdown('Hovering over points will display the associated gene set name and the p-value.'))\n display(Markdown('Plots can be downloaded as an svg using the save function on the toolbar next to the plot.'))\n display(Markdown('[Access the complete Scatter Plot Visualization Appyter here to create and compare up to 9 scatter plots at once](https://appyters.maayanlab.cloud/#Enrichr_Scatterplot_Appyter)'))",
"_____no_output_____"
]
],
[
[
"## Bar Chart",
"_____no_output_____"
]
],
[
[
"# Display Bar Chart\nresults = Enrichr_API(genes, [enrichr_library])\nenrichr_figure(results[2], results[3], results[4], final_output_file_names, [enrichr_library], figure_file_format, color)\n# Download Bar Chart\nfor i, file in enumerate(final_output_file_names):\n display(FileLink(file, result_html_prefix=str('Download ' + figure_file_format[i] + ': ')))\ndisplay(Markdown('The bar chart contains the top 10 enriched terms and their corresponding p-values for the chosen library. Colored bars correspond to terms with significant p-values (<0.05). An asterisk (*) next to a p-value indicates the term also has a significant adjusted p-value (<0.05).'))\ndisplay(Markdown('[Access the complete Bar Chart Appyter here to create customized bar charts for multiple libraries at once.](https://appyters.maayanlab.cloud/Enrichr_compressed_bar_chart_figure/)'))",
"_____no_output_____"
]
],
[
[
"## Hexagonal Canvas",
"_____no_output_____"
]
],
[
[
"# Display Hexagonal Canvas\nanneal_list, x_dimension, y_dimension = library_processing()\nif x_dimension < 0:\n display(Markdown(\"The Hexagonal Canvas visualization is not available for the selected library at this time.\"))\nelse:\n color_list, scaling_factor, cut_off_value = get_color(anneal_list, 2.0, x_dimension, y_dimension)\n unzipped_anneal_list = unzip_list(anneal_list)\n Canvas(x_dimension, y_dimension, color_list, list(unzipped_anneal_list[0]), list(unzipped_anneal_list[2]))\n print(\"The color scaling factor for this canvas is: \" + str(scaling_factor))\n print(\"Gene sets with a similarity index less than \" + str(cut_off_value) + \" are not colored\")\n display(Markdown('Each of the hexagons on the canvas represent one gene set from the library selected. The hexagons are colored based on the Jaccard similarity index between the inputted gene list and the gene set the hexagon represents (the brighter, the more similar). The name of the gene set and the associated similarity index are displayed when a hexagon is hovered over.'))\n display(Markdown('[Access the complete Hexagonal Canvas Appyter here to create customized canvases for 1 or 2 libraries at once.](https://appyters.maayanlab.cloud/#Enrichr_Canvas_Appyter)'))",
"_____no_output_____"
]
],
[
[
"## Manhattan Plot",
"_____no_output_____"
]
],
[
[
"# Display Manhattan Plot\nsorted_data, groups = get_data(genes)\nshow(manhattan(sorted_data, groups))\ndisplay(Markdown('The x-axis of the plot is made up of gene sets from the library. The y-axis of the plot has the -log(p value) for each gene set. The name of the gene set and the associated p-value are displayed when a point is hovered over. You can also zoom, pan, and save the plot as an svg using the toolbar on the right.'))\ndisplay(Markdown('[Access the complete Manhattan Plot Appyter here to create customized static and dynamic Manhattan plots to compare multiple libraries at once.](https://appyters.maayanlab.cloud/#Enrichr_Manhattan_Plot)'))",
"_____no_output_____"
]
],
[
[
"## Table of significant p-values",
"_____no_output_____"
]
],
[
[
"# Output a table of significant p-values\ndef create_download_link(df, title = \"Download CSV file of this table\", filename = \"data.csv\"): \n csv = df.to_csv(index = False)\n b64 = base64.b64encode(csv.encode())\n payload = b64.decode()\n html = '<a download=\"{filename}\" href=\"data:text/csv;base64,{payload}\" target=\"_blank\">{title}</a>'\n html = html.format(payload=payload, title=title, filename=filename)\n return HTML(html)\n\nif plot != -1 and 'p value' in df.columns:\n sorted_df = df.sort_values(by = ['p value'])\n filtered_df = sorted_df[sorted_df['p value'] <= significance_value]\n if len(filtered_df) != 0:\n display(HTML(f\"<strong>Table of significant p-values for {enrichr_library.replace('_', ' ')}</strong>\"))\n display(HTML(filtered_df[['Name', 'p value']].to_html(index = False)))\n display(create_download_link(filtered_df[['Name', 'p value']]))",
"_____no_output_____"
]
],
[
[
"## Link to Enrichr",
"_____no_output_____"
]
],
[
[
"# Get complete enrichment analysis results from Enrichr \nurl = 'https://amp.pharm.mssm.edu/Enrichr/enrich?dataset=' + results[5]\ndisplay(HTML(f'<span style=\"font-size:1.5em;\"><a href = \"https://amp.pharm.mssm.edu/Enrichr/enrich?dataset={results[5]}\"> Access your complete Enrichment results on Enrichr here. </a></span>'))",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8db57d2a5a299da91d6fbdd0a3eea9b988450e | 338,084 | ipynb | Jupyter Notebook | Model backlog/Evaluation/244-Tweet-Evaluation-5Fold-roBERTa HF OneCycle LR.ipynb | dimitreOliveira/Tweet-Sentiment-Extraction | 0a775abe9a92c4bc2db957519c523be7655df8d8 | [
"MIT"
] | 11 | 2020-06-17T07:30:20.000Z | 2022-03-25T16:56:01.000Z | Model backlog/Evaluation/244-Tweet-Evaluation-5Fold-roBERTa HF OneCycle LR.ipynb | dimitreOliveira/Tweet-Sentiment-Extraction | 0a775abe9a92c4bc2db957519c523be7655df8d8 | [
"MIT"
] | null | null | null | Model backlog/Evaluation/244-Tweet-Evaluation-5Fold-roBERTa HF OneCycle LR.ipynb | dimitreOliveira/Tweet-Sentiment-Extraction | 0a775abe9a92c4bc2db957519c523be7655df8d8 | [
"MIT"
] | null | null | null | 338,084 | 338,084 | 0.783116 | [
[
[
"!pip --quiet install transformers\n!pip --quiet install tokenizers",
"\u001b[K |████████████████████████████████| 675kB 3.4MB/s \n\u001b[K |████████████████████████████████| 3.8MB 16.3MB/s \n\u001b[K |████████████████████████████████| 890kB 48.3MB/s \n\u001b[K |████████████████████████████████| 1.1MB 56.7MB/s \n\u001b[?25h Building wheel for sacremoses (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
],
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n"
],
[
"!cp -r '/content/drive/My Drive/Colab Notebooks/Tweet Sentiment Extraction/Scripts/.' .",
"_____no_output_____"
],
[
"COLAB_BASE_PATH = '/content/drive/My Drive/Colab Notebooks/Tweet Sentiment Extraction/'\nMODEL_BASE_PATH = COLAB_BASE_PATH + 'Models/Files/244-roBERTa_base/'",
"_____no_output_____"
]
],
[
[
"## Dependencies",
"_____no_output_____"
]
],
[
[
"import json, glob, warnings\nfrom tweet_utility_scripts import *\nfrom tweet_utility_preprocess_roberta_scripts_aux import *\nfrom transformers import TFRobertaModel, RobertaConfig\nfrom tokenizers import ByteLevelBPETokenizer\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Model\n\nSEED = 0\nseed_everything(SEED)\nwarnings.filterwarnings(\"ignore\")\npd.set_option('max_colwidth', 120)",
"/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n"
]
],
[
[
"# Load data",
"_____no_output_____"
]
],
[
[
"# Unzip files\n!tar -xf '/content/drive/My Drive/Colab Notebooks/Tweet Sentiment Extraction/Data/complete_64_clean/fold_1.tar.gz'\n!tar -xf '/content/drive/My Drive/Colab Notebooks/Tweet Sentiment Extraction/Data/complete_64_clean/fold_2.tar.gz'\n!tar -xf '/content/drive/My Drive/Colab Notebooks/Tweet Sentiment Extraction/Data/complete_64_clean/fold_3.tar.gz'\n!tar -xf '/content/drive/My Drive/Colab Notebooks/Tweet Sentiment Extraction/Data/complete_64_clean/fold_4.tar.gz'\n!tar -xf '/content/drive/My Drive/Colab Notebooks/Tweet Sentiment Extraction/Data/complete_64_clean/fold_5.tar.gz'",
"_____no_output_____"
],
[
"database_base_path = COLAB_BASE_PATH + 'Data/complete_64_clean/'\nk_fold = pd.read_csv(database_base_path + '5-fold.csv')\ndisplay(k_fold.head())",
"_____no_output_____"
]
],
[
[
"# Model parameters",
"_____no_output_____"
]
],
[
[
"vocab_path = COLAB_BASE_PATH + 'qa-transformers/roberta/roberta-base-vocab.json'\nmerges_path = COLAB_BASE_PATH + 'qa-transformers/roberta/roberta-base-merges.txt'\nbase_path = COLAB_BASE_PATH + 'qa-transformers/roberta/'\n\nwith open(MODEL_BASE_PATH + 'config.json') as json_file:\n config = json.load(json_file)\n\nconfig",
"_____no_output_____"
]
],
[
[
"# Tokenizer",
"_____no_output_____"
]
],
[
[
"tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path, \n lowercase=True, add_prefix_space=True)",
"_____no_output_____"
]
],
[
[
"# Model",
"_____no_output_____"
]
],
[
[
"module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)\n\ndef model_fn(MAX_LEN):\n input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')\n attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')\n \n base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name=\"base_model\")\n last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})\n \n logits = layers.Dense(2, name=\"qa_outputs\", use_bias=False)(last_hidden_state)\n \n start_logits, end_logits = tf.split(logits, 2, axis=-1)\n start_logits = tf.squeeze(start_logits, axis=-1)\n end_logits = tf.squeeze(end_logits, axis=-1)\n \n model = Model(inputs=[input_ids, attention_mask], outputs=[start_logits, end_logits])\n \n return model",
"_____no_output_____"
]
],
[
[
"# Make predictions",
"_____no_output_____"
]
],
[
[
"for n_fold in range(config['N_FOLDS']):\n n_fold +=1\n\n # Load data \n base_data_path = 'fold_%d/' % (n_fold)\n x_train = np.load(base_data_path + 'x_train.npy')\n y_train = np.load(base_data_path + 'y_train.npy')\n x_valid = np.load(base_data_path + 'x_valid.npy')\n y_valid = np.load(base_data_path + 'y_valid.npy')\n\n # Load model\n model_path = 'model_fold_%d.h5' % (n_fold)\n model = model_fn(config['MAX_LEN'])\n \n # Make predictions\n model.load_weights(MODEL_BASE_PATH + model_path)\n predict_eval_df(k_fold, model, x_train, x_valid, get_test_dataset, decode, n_fold, tokenizer, config, config['question_size'])",
"_____no_output_____"
]
],
[
[
"# Model evaluation",
"_____no_output_____"
]
],
[
[
"#@title\ndisplay(evaluate_model_kfold(k_fold, config['N_FOLDS']).style.applymap(color_map))",
"_____no_output_____"
]
],
[
[
"# Visualize predictions",
"_____no_output_____"
]
],
[
[
"#@title\nk_fold['jaccard_mean'] = (k_fold['jaccard_fold_1'] + k_fold['jaccard_fold_2'] + \n k_fold['jaccard_fold_3'] + k_fold['jaccard_fold_4'] + \n k_fold['jaccard_fold_4']) / 5\n\ndisplay(k_fold[['text', 'selected_text', 'sentiment', 'text_tokenCnt', \n 'selected_text_tokenCnt', 'jaccard', 'jaccard_mean']].head(15))",
"_____no_output_____"
]
],
[
[
"## Post-processing evaluation",
"_____no_output_____"
]
],
[
[
"#@title\nk_fold_post = k_fold.copy()\nk_fold_post.loc[k_fold_post['sentiment'] == 'neutral', 'selected_text'] = k_fold_post[\"text\"]\nprint('\\nImpute neutral')\ndisplay(evaluate_model_kfold(k_fold_post, config['N_FOLDS']).head(1).style.applymap(color_map))\n\nk_fold_post = k_fold.copy()\nk_fold_post.loc[k_fold_post['text_wordCnt'] <= 3, 'selected_text'] = k_fold_post[\"text\"]\nprint('\\nImpute <= 3')\ndisplay(evaluate_model_kfold(k_fold_post, config['N_FOLDS']).head(1).style.applymap(color_map))\n\nk_fold_post = k_fold.copy()\nk_fold_post['selected_text'] = k_fold_post['selected_text'].apply(lambda x: x.replace('!!!!', '!') if len(x.split())==1 else x)\nk_fold_post['selected_text'] = k_fold_post['selected_text'].apply(lambda x: x.replace('..', '.') if len(x.split())==1 else x)\nk_fold_post['selected_text'] = k_fold_post['selected_text'].apply(lambda x: x.replace('...', '.') if len(x.split())==1 else x)\nprint('\\nImpute noise')\ndisplay(evaluate_model_kfold(k_fold_post, config['N_FOLDS']).head(1).style.applymap(color_map))\n\nk_fold_post = k_fold.copy()\nk_fold_post.loc[k_fold_post['sentiment'] == 'neutral', 'selected_text'] = k_fold_post[\"text\"]\nk_fold_post.loc[k_fold_post['text_wordCnt'] <= 3, 'selected_text'] = k_fold_post[\"text\"]\nprint('\\nImpute neutral and <= 3')\ndisplay(evaluate_model_kfold(k_fold_post, config['N_FOLDS']).head(1).style.applymap(color_map))\n\nk_fold_post = k_fold.copy()\nk_fold_post.loc[k_fold_post['sentiment'] == 'neutral', 'selected_text'] = k_fold_post[\"text\"]\nk_fold_post.loc[k_fold_post['text_wordCnt'] <= 3, 'selected_text'] = k_fold_post[\"text\"]\nk_fold_post['selected_text'] = k_fold_post['selected_text'].apply(lambda x: x.replace('!!!!', '!') if len(x.split())==1 else x)\nk_fold_post['selected_text'] = k_fold_post['selected_text'].apply(lambda x: x.replace('..', '.') if len(x.split())==1 else x)\nk_fold_post['selected_text'] = k_fold_post['selected_text'].apply(lambda x: x.replace('...', '.') if len(x.split())==1 else x)\nprint('\\nImpute neutral and <= 3 and mpute noise')\ndisplay(evaluate_model_kfold(k_fold_post, config['N_FOLDS']).head(1).style.applymap(color_map))",
"\nImpute neutral\n"
]
],
[
[
"# Error analysis\n\n## 10 worst predictions",
"_____no_output_____"
]
],
[
[
"#@title\nk_fold['jaccard_mean'] = (k_fold['jaccard_fold_1'] + k_fold['jaccard_fold_2'] + \n k_fold['jaccard_fold_3'] + k_fold['jaccard_fold_4'] + \n k_fold['jaccard_fold_4']) / 5\n\ndisplay(k_fold[['text', 'selected_text', 'sentiment', 'jaccard', 'jaccard_mean', \n 'prediction_fold_1', 'prediction_fold_2', 'prediction_fold_3', \n 'prediction_fold_4', 'prediction_fold_5']].sort_values(by=['jaccard_mean']).head(10))",
"_____no_output_____"
]
],
[
[
"# Sentiment",
"_____no_output_____"
]
],
[
[
"#@title\nprint('\\n sentiment == neutral')\ndisplay(k_fold[k_fold['sentiment'] == 'neutral'][['text', 'selected_text', \n 'jaccard_mean', 'prediction_fold_1', \n 'prediction_fold_2', 'prediction_fold_3', \n 'prediction_fold_4', 'prediction_fold_5']].sort_values(by=['jaccard_mean']).head(10))\n\nprint('\\n sentiment == positive')\ndisplay(k_fold[k_fold['sentiment'] == 'positive'][['text', 'selected_text', \n 'jaccard_mean', 'prediction_fold_1', \n 'prediction_fold_2', 'prediction_fold_3', \n 'prediction_fold_4', 'prediction_fold_5']].sort_values(by=['jaccard_mean']).head(10))\n\nprint('\\n sentiment == negative')\ndisplay(k_fold[k_fold['sentiment'] == 'negative'][['text', 'selected_text', \n 'jaccard_mean', 'prediction_fold_1', \n 'prediction_fold_2', 'prediction_fold_3', \n 'prediction_fold_4', 'prediction_fold_5']].sort_values(by=['jaccard_mean']).head(10))",
"\n sentiment == neutral\n"
]
],
[
[
"# text_tokenCnt",
"_____no_output_____"
]
],
[
[
"#@title\nprint('\\n text_tokenCnt <= 3')\ndisplay(k_fold[k_fold['text_tokenCnt'] <= 3][['text', 'selected_text', 'sentiment', \n 'jaccard_mean', 'prediction_fold_1', \n 'prediction_fold_2', 'prediction_fold_3', \n 'prediction_fold_4', 'prediction_fold_5']].sort_values(by=['jaccard_mean']).head(5))\n\nprint('\\n text_tokenCnt >= 45')\ndisplay(k_fold[k_fold['text_tokenCnt'] >= 45][['text', 'selected_text', 'sentiment', \n 'jaccard_mean', 'prediction_fold_1', \n 'prediction_fold_2', 'prediction_fold_3', \n 'prediction_fold_4', 'prediction_fold_5']].sort_values(by=['jaccard_mean']).head(5))",
"\n text_tokenCnt <= 3\n"
]
],
[
[
"# selected_text_tokenCnt",
"_____no_output_____"
]
],
[
[
"#@title\nprint('\\n selected_text_tokenCnt <= 3')\ndisplay(k_fold[k_fold['selected_text_tokenCnt'] <= 3][['text', 'selected_text', 'sentiment', \n 'jaccard_mean', 'prediction_fold_1', \n 'prediction_fold_2', 'prediction_fold_3', \n 'prediction_fold_4', 'prediction_fold_5']].sort_values(by=['jaccard_mean']).head(5))\nprint('\\n selected_text_tokenCnt >= 45')\ndisplay(k_fold[k_fold['selected_text_tokenCnt'] >= 45][['text', 'selected_text', 'sentiment', \n 'jaccard_mean', 'prediction_fold_1', \n 'prediction_fold_2', 'prediction_fold_3', \n 'prediction_fold_4', 'prediction_fold_5']].sort_values(by=['jaccard_mean']).head(5))",
"\n selected_text_tokenCnt <= 3\n"
]
],
[
[
"# Jaccard histogram",
"_____no_output_____"
]
],
[
[
"#@title\nfig, ax = plt.subplots(1, 1, figsize=(20, 5))\nsns.distplot(k_fold['jaccard_mean'], ax=ax).set_title(f\"Overall [{len(k_fold)}]\")\nsns.despine()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## By sentiment",
"_____no_output_____"
]
],
[
[
"#@title\nfig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(20, 15), sharex=False)\nsns.distplot(k_fold[k_fold['sentiment'] == 'neutral']['jaccard_mean'], ax=ax1).set_title(f\"Neutral [{len(k_fold[k_fold['sentiment'] == 'neutral'])}]\")\nsns.distplot(k_fold[k_fold['sentiment'] == 'positive']['jaccard_mean'], ax=ax2).set_title(f\"Positive [{len(k_fold[k_fold['sentiment'] == 'positive'])}]\")\nsns.distplot(k_fold[k_fold['sentiment'] == 'negative']['jaccard_mean'], ax=ax3).set_title(f\"Negative [{len(k_fold[k_fold['sentiment'] == 'negative'])}]\")\nsns.despine()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## By text token count",
"_____no_output_____"
]
],
[
[
"#@title\nfig, (ax1, ax2) = plt.subplots(2, 1, figsize=(20, 10), sharex=False)\nsns.distplot(k_fold[k_fold['text_tokenCnt'] <= 3]['jaccard_mean'], ax=ax1).set_title(f\"text_tokenCnt <= 3 [{len(k_fold[k_fold['text_tokenCnt'] <= 3])}]\")\nsns.distplot(k_fold[k_fold['text_tokenCnt'] >= 45]['jaccard_mean'], ax=ax2).set_title(f\"text_tokenCnt >= 45 [{len(k_fold[k_fold['text_tokenCnt'] >= 45])}]\")\nsns.despine()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## By selected_text token count",
"_____no_output_____"
]
],
[
[
"#@title\nfig, (ax1, ax2) = plt.subplots(2, 1, figsize=(20, 10), sharex=False)\nsns.distplot(k_fold[k_fold['selected_text_tokenCnt'] <= 3]['jaccard_mean'], ax=ax1).set_title(f\"selected_text_tokenCnt <= 3 [{len(k_fold[k_fold['selected_text_tokenCnt'] <= 3])}]\")\nsns.distplot(k_fold[k_fold['selected_text_tokenCnt'] >= 45]['jaccard_mean'], ax=ax2).set_title(f\"selected_text_tokenCnt >= 45 [{len(k_fold[k_fold['selected_text_tokenCnt'] >= 45])}]\")\nsns.despine()\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8dcf53218e2a01ed1c35de1f37dd97e747947e | 43,491 | ipynb | Jupyter Notebook | HeroesOfPymoli/HeroesOfPymoli.ipynb | sandykolu/pandas-challenge | 964316b3ce2fefed786b7b616032deb761ed7837 | [
"MIT"
] | null | null | null | HeroesOfPymoli/HeroesOfPymoli.ipynb | sandykolu/pandas-challenge | 964316b3ce2fefed786b7b616032deb761ed7837 | [
"MIT"
] | null | null | null | HeroesOfPymoli/HeroesOfPymoli.ipynb | sandykolu/pandas-challenge | 964316b3ce2fefed786b7b616032deb761ed7837 | [
"MIT"
] | null | null | null | 35.215385 | 163 | 0.401278 | [
[
[
"# Dependencies and Setup\nimport pandas as pd\n\n# File to Load (Remember to Change These)\nfile_to_load = \"Resources/purchase_data.csv\"\n\n# Read Purchasing File and store into Pandas data frame\npurchase_data = pd.read_csv(file_to_load)",
"_____no_output_____"
]
],
[
[
"# Player Count\n Display the total number of players",
"_____no_output_____"
]
],
[
[
"unique_players = len(purchase_data['SN'].unique())\nplayer_demo = purchase_data.loc[:, [\"Gender\", \"SN\", \"Age\"]]\nplayer_demo = player_demo.drop_duplicates()\nunique_players_df = pd.DataFrame({\"Total Players\" : [unique_players]})\nunique_players_df\n",
"_____no_output_____"
]
],
[
[
"Purchasing Analysis (Total)\nRun basic calculations to obtain number of unique items, average price, etc.\nCreate a summary data frame to hold the results\nOptional: give the displayed data cleaner formatting\nDisplay the summary data frame",
"_____no_output_____"
]
],
[
[
"purchase_data.head()",
"_____no_output_____"
],
[
"unique_items = len(purchase_data[\"Item Name\"].unique())\naverage_price = round(purchase_data[\"Price\"].mean(),2)\nnumber_of_purchases = purchase_data[\"Price\"].count()\ntotal_revenue = purchase_data[\"Price\"].sum()\n\n# Purchase summary DataFrame\npurchase_df = pd.DataFrame({\"Number of Unique Items\" : [unique_items],\n \"Average Price\" : [average_price],\n \"Number of Purchases\" : [number_of_purchases],\n \"Total Revenue\" : [total_revenue]\n })\n\npurchase_df[\"Average Price\"] = purchase_df[\"Average Price\"].map(\"${:,.2f}\".format)\npurchase_df[\"Total Revenue\"] = purchase_df[\"Total Revenue\"].map(\"${:,.2f}\".format)\npurchase_df",
"_____no_output_____"
]
],
[
[
"Gender Demographics\nPercentage and Count of Male Players\nPercentage and Count of Female Players\nPercentage and Count of Other / Non-Disclosed",
"_____no_output_____"
]
],
[
[
"gender_demo_total = player_demo[\"Gender\"].value_counts()\ngender_demo_percentage = gender_demo_total/unique_players * 100\n\ngender_demo_df = pd.DataFrame({\"Total Count\": gender_demo_total,\n \"Percentage of Players\":gender_demo_percentage\n })\n\ngender_demo_df[\"Percentage of Players\"] = gender_demo_df[\"Percentage of Players\"].map(\"{:,.2f}%\".format)\ngender_demo_df",
"_____no_output_____"
]
],
[
[
"Purchasing Analysis (Gender)¶\nRun basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender\nCreate a summary data frame to hold the results\nOptional: give the displayed data cleaner formatting\nDisplay the summary data frame",
"_____no_output_____"
]
],
[
[
"\navg_purchase_price = purchase_data.groupby([\"Gender\"]).mean()[\"Price\"]\npurchase_total = purchase_data.groupby([\"Gender\"]).sum()[\"Price\"]\npurchase_count = purchase_data.groupby([\"Gender\"]).count()[\"Purchase ID\"]\n\n# Avg Total Purchase per Person\navg_total_purchase_per_person = purchase_total / gender_demo_df[\"Total Count\"]\n# avg_total_purchase_per_person\n\nsummary_gender_data = pd.DataFrame({\"Purchase Count\" : purchase_count,\n \"Average Purchase Price\" : avg_purchase_price,\n \"Total Purchase Value\" : purchase_total,\n \"Avg Total Purchase per Person\": avg_total_purchase_per_person\n })\nsummary_gender_data[\"Average Purchase Price\"] = summary_gender_data[\"Average Purchase Price\"].map(\"${:,.2f}\".format)\nsummary_gender_data[\"Total Purchase Value\"] = summary_gender_data[\"Total Purchase Value\"].map(\"${:,.2f}\".format)\nsummary_gender_data[\"Avg Total Purchase per Person\"] = summary_gender_data[\"Avg Total Purchase per Person\"].map(\"${:,.2f}\".format)\nsummary_gender_data",
"_____no_output_____"
]
],
[
[
"Age Demographics\nEstablish bins for ages\nCategorize the existing players using the age bins. Hint: use pd.cut()\nCalculate the numbers and percentages by age group\nCreate a summary data frame to hold the results\nOptional: round the percentage column to two decimal points\nDisplay Age Demographics Table",
"_____no_output_____"
]
],
[
[
"# Establish bins for ages\nbins = [0, 9.90, 14.90, 19.90, 24.90, 29.90, 34.90, 39.90, 999]\n\n# Create the names for the five bins\ngroup_names = [\"<10\", \"10-14\", \"15-19\", \"20-24\", \"25-29\", \"30-34\", \"35-39\", \"40+\"]\n\n# Categorize the existing players using the age bins.\nplayer_demo[\"Age Range\"] = pd.cut(player_demo[\"Age\"], bins, labels=group_names)\nplayer_demo\n\n# Calculate the numbers and percentages by age group\nage_demo_total = player_demo[\"Age Range\"].value_counts()\nage_demo_total\nage_demo_percent = age_demo_total / unique_players * 100\nage_demo_percent\n\n# Create a summary data frame to hold the results \nage_demo = pd.DataFrame({\"Total Count\": age_demo_total,\n \"Percentage of Players\": age_demo_percent\n })\n# round the percentage column to two decimal points\nage_demo[\"Percentage of Players\"] = age_demo[\"Percentage of Players\"].map(\"{:,.2f}%\".format)\nage_demo = age_demo.sort_index()\nage_demo\n\n",
"_____no_output_____"
]
],
[
[
"Purchasing Analysis (Age)¶\nBin the purchase_data frame by age\nRun basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below\nCreate a summary data frame to hold the results\nOptional: give the displayed data cleaner formatting\nDisplay the summary data frame",
"_____no_output_____"
]
],
[
[
"# Bin the purchase_data frame by age\npurchase_data[\"Age Range\"] = pd.cut(purchase_data[\"Age\"], bins, labels=group_names)\n\n# Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below\navg_purchase_price_by_age = purchase_data.groupby([\"Age Range\"]).mean()[\"Price\"]\npurchase_total_by_age = purchase_data.groupby([\"Age Range\"]).sum()[\"Price\"]\npurchase_count_by_age = purchase_data.groupby([\"Age Range\"]).count()[\"Purchase ID\"]\n\n# Avg Total Purchase per Person by age\navg_total_purchase_per_person_by_age = purchase_total_by_age / age_demo[\"Total Count\"]\navg_total_purchase_per_person_by_age\n\n\n# Create a summary data frame to hold the results\nsummary_age_data = pd.DataFrame({\"Purchase Count\" : purchase_count_by_age,\n \"Average Purchase Price\" : avg_purchase_price_by_age,\n \"Total Purchase Value\" : purchase_total_by_age,\n \"Avg Total Purchase per Person\": avg_total_purchase_per_person_by_age\n })\n#clean formatting\nsummary_age_data[\"Average Purchase Price\"] = summary_age_data[\"Average Purchase Price\"].map(\"${:,.2f}\".format)\nsummary_age_data[\"Total Purchase Value\"] = summary_age_data[\"Total Purchase Value\"].map(\"${:,.2f}\".format)\nsummary_age_data[\"Avg Total Purchase per Person\"] = summary_age_data[\"Avg Total Purchase per Person\"].map(\"${:,.2f}\".format)\nsummary_age_data",
"_____no_output_____"
]
],
[
[
"Top Spenders¶\nRun basic calculations to obtain the results in the table below\nCreate a summary data frame to hold the results\nSort the total purchase value column in descending order\nOptional: give the displayed data cleaner formatting\nDisplay a preview of the summary data frame",
"_____no_output_____"
]
],
[
[
"# Top Spenders¶\n\n# Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total\navg_purchase_price_by_name = purchase_data.groupby([\"SN\"]).mean()[\"Price\"]\npurchase_total_by_name = purchase_data.groupby([\"SN\"]).sum()[\"Price\"]\npurchase_count_by_name = purchase_data.groupby([\"SN\"]).count()[\"Purchase ID\"]\n\n# Create a summary data frame to hold the results\nspender_data_summary = pd.DataFrame({\"Purchase Count\" : purchase_count_by_name,\n \"Average Purchase Price\" : avg_purchase_price_by_name,\n \"Total Purchase Value\" : purchase_total_by_name \n }) \n\n# Sort the total purchase value column in descending order\nspender_data_summary_sorted = spender_data_summary.sort_values(\"Total Purchase Value\", ascending = False)\nspender_data_summary_sorted\n\n# Clean formatting\nspender_data_summary_sorted[\"Average Purchase Price\"] = spender_data_summary_sorted[\"Average Purchase Price\"].map(\"${:,.2f}\".format)\nspender_data_summary_sorted[\"Total Purchase Value\"] = spender_data_summary_sorted[\"Total Purchase Value\"].map(\"${:,.2f}\".format)\n\n# Display a preview of the summary data frame - top 5 rows\nspender_data_summary_sorted.head()",
"_____no_output_____"
]
],
[
[
"Most Popular Items¶\nRetrieve the Item ID, Item Name, and Item Price columns\nGroup by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value\nCreate a summary data frame to hold the results\nSort the purchase count column in descending order\nOptional: give the displayed data cleaner formatting\nDisplay a preview of the summary data frame",
"_____no_output_____"
]
],
[
[
"# Most Popular Items\n\n# Retrieve the Item ID, Item Name, and Item Price columns\npurchase_data[[\"Item ID\", \"Item Name\", \"Price\"]]\n\n# Group by Item ID and Item Name. \npurchase_data_grouped = purchase_data.groupby([\"Item ID\", \"Item Name\"])\n# purchase_data_grouped\n\n# Perform calculations to obtain purchase count, average item price, and total purchase value\nitem_purchase_count = purchase_data.groupby([\"Item ID\", \"Item Name\"]).count()[\"Purchase ID\"]\n# item_purchase_count\n\nitem_average_price = purchase_data.groupby([\"Item ID\", \"Item Name\"]).mean()[\"Price\"]\n# item_average_price\n\nitem_total_purchase_value = purchase_data.groupby([\"Item ID\", \"Item Name\"]).sum()[\"Price\"]\n# item_total_purchase_value\n\n# Create a summary data frame to hold the results\n\npopular_items_summary = pd.DataFrame({\"Purchase Count\" : item_purchase_count,\n \"Item Price\" : item_average_price,\n \"Total Purchase Value\" : item_total_purchase_value\n })\npopular_items_summary\n\n# Sort the purchase count column in descending order\npopular_items_summary_sorted_by_count = popular_items_summary.sort_values(\"Purchase Count\", ascending = False)\n\n# Clean formatting\npopular_items_summary_sorted_by_count[\"Item Price\"] = popular_items_summary_sorted_by_count[\"Item Price\"].map(\"${:,.2f}\".format)\npopular_items_summary_sorted_by_count[\"Total Purchase Value\"] = popular_items_summary_sorted_by_count[\"Total Purchase Value\"].map(\"${:,.2f}\".format)\n\n\n\n# Display a preview of the summary data frame\npopular_items_summary_sorted_by_count.head()\n",
"_____no_output_____"
]
],
[
[
"Most Profitable Items\nSort the above table by total purchase value in descending order\nOptional: give the displayed data cleaner formatting\nDisplay a preview of the data frame",
"_____no_output_____"
]
],
[
[
"# Most Profitable Items\n# Sort the above table by total purchase value in descending order\nitems_sorted_by_purchase_value = popular_items_summary.sort_values(\"Total Purchase Value\", ascending = False)\nitems_sorted_by_purchase_value\n\n# cleanup formatting\nitems_sorted_by_purchase_value[\"Item Price\"] = items_sorted_by_purchase_value[\"Item Price\"].map(\"${:,.2f}\".format)\nitems_sorted_by_purchase_value[\"Total Purchase Value\"] = items_sorted_by_purchase_value[\"Total Purchase Value\"].map(\"${:,.2f}\".format)\n\n# Display a preview of the data frame\nitems_sorted_by_purchase_value.head()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8e0110319a1b4eb9695bbb954ade3b5fd00d42 | 106,481 | ipynb | Jupyter Notebook | module-4-select-important-features/LS_DS_24_Cross_Validation_AND_Feature_Selection.ipynb | BrittonWinterrose/DS-Unit-2-Sprint-4-Model-Validation | 76355ce749bc80fd85f132fa0152f169a081cb41 | [
"MIT"
] | null | null | null | module-4-select-important-features/LS_DS_24_Cross_Validation_AND_Feature_Selection.ipynb | BrittonWinterrose/DS-Unit-2-Sprint-4-Model-Validation | 76355ce749bc80fd85f132fa0152f169a081cb41 | [
"MIT"
] | null | null | null | module-4-select-important-features/LS_DS_24_Cross_Validation_AND_Feature_Selection.ipynb | BrittonWinterrose/DS-Unit-2-Sprint-4-Model-Validation | 76355ce749bc80fd85f132fa0152f169a081cb41 | [
"MIT"
] | null | null | null | 46.6 | 29,356 | 0.603563 | [
[
[
"_Lambda School Data Science - Model Validation_\n\n## Example solution to the Cross-Validation assignment — plus Feature Selection!\n\nSee also Sebastian Raschka's example, [Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb).",
"_____no_output_____"
]
],
[
[
"#!pip install tsfresh\n#!pip install seaborn --upgrade\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"### Here in we pull in all our feature creation functions (from Jake Vanderplas and a few from our cohort)",
"_____no_output_____"
]
],
[
[
"# We'll modify a project from Python Data Science Handbook by Jake VanderPlas\n# https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html#Example:-Predicting-Bicycle-Traffic\n \n# Predicting Bicycle Traffic\n\n# As an example, let's take a look at whether we can predict the number of \n# bicycle trips across Seattle's Fremont Bridge based on weather, season, \n# and other factors.\n\n# We will join the bike data with another dataset, and try to determine the \n# extent to which weather and seasonal factors—temperature, precipitation, \n# and daylight hours—affect the volume of bicycle traffic through this corridor. \n# Fortunately, the NOAA makes available their daily weather station data \n# (I used station ID USW00024233) and we can easily use Pandas to join \n# the two data sources.\n\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_selection import f_regression, SelectKBest\nfrom sklearn.linear_model import Ridge\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.dummy import DummyRegressor\nfrom datetime import datetime\nfrom datetime import date\n\n\ndef load(): \n fremont_bridge = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'\n \n bicycle_weather = 'https://raw.githubusercontent.com/jakevdp/PythonDataScienceHandbook/master/notebooks/data/BicycleWeather.csv'\n\n counts = pd.read_csv(fremont_bridge, index_col='Date', parse_dates=True, \n infer_datetime_format=True)\n\n weather = pd.read_csv(bicycle_weather, index_col='DATE', parse_dates=True, \n infer_datetime_format=True)\n\n daily = counts.resample('d').sum()\n daily['Total'] = daily.sum(axis=1)\n daily = daily[['Total']] # remove other columns\n\n weather_columns = ['PRCP', 'SNOW', 'SNWD', 'TMAX', 'TMIN', 'AWND']\n daily = daily.join(weather[weather_columns], how='inner')\n \n # Make a feature for yesterday's total\n daily['Total_yesterday'] = daily.Total.shift(1)\n daily = daily.drop(index=daily.index[0])\n \n \n daily.insert(0, 'ID', range(0, len(daily)))\n\n return daily\n\n \ndef ordinal(daily):\n daily = daily.copy()\n # Add a column of the dates\n daily['date_'] = daily.index\n \n # Date stuff https://docs.python.org/3/library/datetime.html#datetime.date.fromisoformat\n # Add some day context\n daily.insert(0, 'weekday_', daily.date_.apply(lambda x: x.weekday()))\n daily.insert(0, 'dayOfMonth_', daily.date_.apply(lambda x: x.day))\n daily.insert(0, 'month_', daily.date_.apply(lambda x: x.month))\n daily.insert(0, 'year_', daily.date_.apply(lambda x: x.year)) \n \n # Drop date column\n daily = daily.drop(columns='date_')\n \n return daily\n \n \ndef split(daily):\n # Hold out an \"out-of-time\" test set, from the last 100 days of data\n \n train = daily[:-100]\n test = daily[-100:]\n \n X_train = train.drop(columns='Total')\n y_train = train.Total\n\n X_test = test.drop(columns='Total')\n y_test = test.Total\n \n return X_train, X_test, y_train, y_test\n\n \n \ndef one_hot_encoded(X):\n X = X.copy()\n\n # patterns of use generally vary from day to day; \n # let's add binary columns that indicate the day of the week:\n days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n for i, day in enumerate(days):\n X[day] = (X.index.dayofweek == i).astype(float)\n\n\n # we might expect riders to behave differently on holidays; \n # let's add an indicator of this as well:\n from pandas.tseries.holiday import 
USFederalHolidayCalendar\n cal = USFederalHolidayCalendar()\n holidays = cal.holidays('2012', '2016')\n X = X.join(pd.Series(1, index=holidays, name='holiday'))\n X['holiday'].fillna(0, inplace=True)\n \n months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n for i, month in enumerate(months):\n X[month] = (X.index.month == i+1).astype(float)\n \n return X\n\n \n\ndef jake_wrangle(X): \n X = X.copy()\n\n # We also might suspect that the hours of daylight would affect \n # how many people ride; let's use the standard astronomical calculation \n # to add this information:\n def hours_of_daylight(date, axis=23.44, latitude=47.61):\n \"\"\"Compute the hours of daylight for the given date\"\"\"\n days = (date - pd.datetime(2000, 12, 21)).days\n m = (1. - np.tan(np.radians(latitude))\n * np.tan(np.radians(axis) * np.cos(days * 2 * np.pi / 365.25)))\n return 24. * np.degrees(np.arccos(1 - np.clip(m, 0, 2))) / 180.\n\n X['daylight_hrs'] = list(map(hours_of_daylight, X.index))\n\n\n # temperatures are in 1/10 deg C; convert to C\n X['TMIN'] /= 10\n X['TMAX'] /= 10\n\n # We can also calcuate the average temperature.\n X['Temp (C)'] = 0.5 * (X['TMIN'] + X['TMAX'])\n\n\n # precip is in 1/10 mm; convert to inches\n X['PRCP'] /= 254\n\n # In addition to the inches of precipitation, let's add a flag that \n # indicates whether a day is dry (has zero precipitation):\n X['dry day'] = (X['PRCP'] == 0).astype(int)\n\n\n # Let's add a counter that increases from day 1, and measures how many \n # years have passed. This will let us measure any observed annual increase \n # or decrease in daily crossings:\n X['annual'] = (X.index - X.index[0]).days / 365.\n\n return X\n\n \n\ndef wrangle(X):\n # From Daniel H (DS1 KotH)\n X = X.copy()\n X = X.replace(-9999, 0)\n X = jake_wrangle(X)\n \n X['PRCP_yest'] = X.PRCP.shift(1).fillna(X.PRCP.mean())\n X['Windchill'] = (((X['Temp (C)'] * (9/5) + 32) * .6215) + 34.74) - (35.75 * (X['AWND']** .16)) + (.4275 * (X['Temp (C)'])) * (X['AWND'] ** .16)\n X['Rl_Cold'] = (((X['Temp (C)'] * (9/5) + 32) - X['Windchill']) -32) * (5/9)\n X['TMIN_ln'] = X['TMIN'] **2\n \n return X\n\n ",
"_____no_output_____"
],
[
"# Download and join data into a dataframe\ndata = load()",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"import seaborn as sns\nsns.lineplot(data.ID, data.Total)",
"_____no_output_____"
]
],
[
[
"### This is an example of a pipeline + parameter grid. ",
"_____no_output_____"
],
[
"Remember: If your dataset is massive, be sure to take a random subset sample to try fitting with the training data to start getting scores. ",
"_____no_output_____"
],
[
"### Benchmark / Baseline using Dummy Regressor in Pipe",
"_____no_output_____"
]
],
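[
[
"*Aside, a hypothetical sketch:* the note above recommends fitting on a random subset when the dataset is massive. One minimal way to do that with `pandas.DataFrame.sample` is sketched below; it assumes the `X_train` / `y_train` produced by `split()` in this notebook, and the 10% fraction and seed are arbitrary choices.",
"_____no_output_____"
],
[
"# Hypothetical sketch of the 'fit on a random subset first' tip -- not used elsewhere in this notebook.\n# Draw 10% of the training rows (and the matching labels) before running an expensive grid search.\nsample_index = X_train.sample(frac=0.1, random_state=42).index\nX_small = X_train.loc[sample_index]\ny_small = y_train.loc[sample_index]\n# gs.fit(X_small, y_small) # same pipeline / grid search, just on fewer rows",
"_____no_output_____"
]
],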
[
[
"%%time\n# Split data into train and test\nX_train, X_test, y_train, y_test = split(data)\n\n# Define an estimator and param_grid\n# WHEN DEFINING YOU CAN GIVE IT A NAME OTHERWISE IT WILL USE THE PIPELINE NAME AUTOGEN NAME (name of the function but lowercase)\npipe = make_pipeline(\n RobustScaler(), \n DummyRegressor(strategy='mean'))\n\npipe.fit(X_train, y_train)\n\n### Get the scores with the MAE Function\n## Predict with X_test features and Compare predictions to y_test labels\ny_pred = pipe.predict(X_train)\ndummy_train_score = mean_absolute_error(y_train, y_pred)\nprint('Dummy Regression Train Score:', dummy_train_score)\n\n\n## Predict with X_test features and Compare predictions to y_test labels\ny_pred = pipe.predict(X_test)\ndummy_test_score = mean_absolute_error(y_test, y_pred)\nprint('Dummy Regression Test Score:', dummy_test_score)\n",
"Dummy Regression Train Score: 980.8981106765484\nDummy Regression Test Score: 1341.2051609553478\nCPU times: user 14.9 ms, sys: 13 ms, total: 27.9 ms\nWall time: 14 ms\n"
],
[
"%%time\n\n# Define an estimator and param_grid\n# WHEN DEFINING YOU CAN GIVE IT A NAME OTHERWISE IT WILL USE THE PIPELINE NAME AUTOGEN NAME (name of the function but lowercase)\npipe = make_pipeline(\n RobustScaler(), \n SelectKBest(f_regression), \n Ridge())\n\nparam_grid = {\n 'selectkbest__k': range(1, len(X_train.columns)+1), \n 'ridge__alpha': [0.1, 1.0, 10.]\n}\n\n# Fit on the train set, with grid search cross-validation\ngs = GridSearchCV(pipe, param_grid=param_grid, cv=3, \n scoring='neg_mean_absolute_error', \n verbose=1)\n\ngs.fit(X_train, y_train)\nvalidation_score = gs.best_score_\nprint()\nprint('Cross-Validation Score:', -validation_score)\nprint()\nprint('Best estimator:', gs.best_estimator_)\nprint()\n\n\n### Get the score with the GridSearch's score method\n# It combines the predict and score calculation.\nRidgeBaseline_train_score = gs.score(X_train, y_train)\nprint('RidgeBaseline Train Score:', -RidgeBaseline_train_score)\n\nRidgeBaseline_test_score = gs.score(X_test, y_test)\nprint('RidgeBaseline Test Score:', -RidgeBaseline_test_score)",
"Fitting 3 folds for each of 24 candidates, totalling 72 fits\n"
]
],
[
[
"### Apply data `ordinal`",
"_____no_output_____"
]
],
[
[
"%%time\nordinal_data = ordinal(data)\n\n# Split data into train and test\nX_train, X_test, y_train, y_test = split(ordinal_data)\n\n# Define an estimator and param_grid\n# WHEN DEFINING YOU CAN GIVE IT A NAME OTHERWISE IT WILL USE THE PIPELINE NAME AUTOGEN NAME (name of the function but lowercase)\npipe = make_pipeline(\n RobustScaler(), \n SelectKBest(f_regression), \n Ridge())\n\nparam_grid = {\n 'selectkbest__k': range(1, len(X_train.columns)+1), \n 'ridge__alpha': [0.1, 1.0, 10.]\n}\n\n# Fit on the train set, with grid search cross-validation\ngs = GridSearchCV(pipe, param_grid=param_grid, cv=3, \n scoring='neg_mean_absolute_error', \n verbose=1)\n\ngs.fit(X_train, y_train)\nvalidation_score = gs.best_score_\nprint()\nprint('Cross-Validation Score:', -validation_score)\nprint()\nprint('Best estimator:', gs.best_estimator_)\nprint()\n\n\n### Get the score with the GridSearch's score method\n# It combines the predict and score calculation.\nord_train_score = gs.score(X_train, y_train)\nprint('Ordinal Train Score:', -ord_train_score)\n\nord_test_score = gs.score(X_test, y_test)\nprint('Ordinal Test Score:', -ord_test_score)",
"[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n"
]
],
[
[
"### Apply `one_hot_encoded` (classmates function)",
"_____no_output_____"
]
],
[
[
"#%%time\none_hot_data = one_hot_encoded(data)\n\n# Split data into train and test\nX_train, X_test, y_train, y_test = split(one_hot_data)\n\n# Define an estimator and param_grid\n# WHEN DEFINING YOU CAN GIVE IT A NAME OTHERWISE IT WILL USE THE PIPELINE NAME AUTOGEN NAME (name of the function but lowercase)\npipe = make_pipeline(\n RobustScaler(), \n SelectKBest(f_regression), \n Ridge())\n\nparam_grid = {\n 'selectkbest__k': range(1, len(X_train.columns)+1), \n 'ridge__alpha': [0.1, 1.0, 10.]\n}\n\n# Fit on the train set, with grid search cross-validation\ngs = GridSearchCV(pipe, param_grid=param_grid, cv=3, \n scoring='neg_mean_absolute_error', \n verbose=1)\n\ngs.fit(X_train, y_train)\nvalidation_score = gs.best_score_\nprint()\nprint('Cross-Validation Score:', -validation_score)\nprint()\nprint('Best estimator:', gs.best_estimator_)\nprint()\n\n\n### Get the score with the GridSearch's score method\n# It combines the predict and score calculation.\noh_train_score = gs.score(X_train, y_train)\nprint('Train Score:', -oh_train_score)\n\noh_test_score = gs.score(X_test, y_test)\nprint('Test Score:', -oh_test_score)",
"[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n"
],
[
"print('Ordinal Train Score:', -ord_train_score)\nprint('Ordinal Test Score:', -ord_test_score)\nprint('\\n')\nprint('One-Hot Train Score:', -oh_train_score)\nprint('One-Hot Test Score:', -oh_test_score)",
"Ordinal Train Score: 385.55035508465824\nOrdinal Test Score: 448.71439473552834\n\n\nOne-Hot Train Score: 290.9695287750331\nOne-Hot Test Score: 625.7986098051067\n"
]
],
[
[
"### Just feature Wrangling",
"_____no_output_____"
]
],
[
[
"%%time\n# Split data into train and test\nX_train, X_test, y_train, y_test = split(data)\n\n# Do the same wrangling to X_train and X_test\nX_train = wrangle(X_train)\nX_test = wrangle(X_test)\n\n# Define an estimator and param_grid\n# WHEN DEFINING YOU CAN GIVE IT A NAME OTHERWISE IT WILL USE THE PIPELINE NAME AUTOGEN NAME (name of the function but lowercase)\npipe = make_pipeline(\n RobustScaler(), \n SelectKBest(f_regression), \n Ridge())\n\nparam_grid = {\n 'selectkbest__k': range(1, len(X_train.columns)+1), \n 'ridge__alpha': [0.1, 1.0, 10.]\n}\n\n# Fit on the train set, with grid search cross-validation\ngs = GridSearchCV(pipe, param_grid=param_grid, cv=3, \n scoring='neg_mean_absolute_error', \n verbose=1)\n\ngs.fit(X_train, y_train)\nvalidation_score = gs.best_score_\nprint()\nprint('Cross-Validation Score:', -validation_score)\nprint()\nprint('Best estimator:', gs.best_estimator_)\nprint()\n\n\n### Get the score with the GridSearch's score method\n# It combines the predict and score calculation.\nRRtrain_score = gs.score(X_train, y_train)\nprint('Just Wrangling Train Score:', -RRtrain_score)\n\nRRtest_score = gs.score(X_test, y_test)\nprint('Just Wrangling Test Score:', -RRtest_score)",
"[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n"
]
],
[
[
"### Comparing 3 seperate runs. ",
"_____no_output_____"
]
],
[
[
"# Comparing 1 set of variables for each\n# This is how I did it at first.\n\nprint('Dummy Regression Train Score:', dummy_train_score)\nprint('Dummy Regression Test Score:', dummy_test_score)\nprint('\\n')\nprint('RidgeBaseline Train Score:', -RidgeBaseline_train_score)\nprint('RidgeBaseline Test Score:', -RidgeBaseline_test_score)\nprint('\\n')\n\n\nprint('Just Ordinal Train Score:', -ord_train_score)\nprint('Just Ordinal Test Score:', -ord_test_score)\nprint('\\n')\n\nprint('Just One-Hot Train Score:', -oh_train_score)\nprint('Just One-Hot Test Score:', -oh_test_score)\nprint('\\n')\n\nprint('Just Wrangling Train Score:', -RRtrain_score)\nprint('Just Wrangling Test Score:', -RRtest_score)\n",
"_____no_output_____"
],
[
"# then I wrapped a function and made it better. \ndef compare(data, name):\n X_train, X_test, y_train, y_test = split(data)\n\n pipe = make_pipeline(\n RobustScaler(), \n SelectKBest(f_regression), \n Ridge())\n\n param_grid = {\n 'selectkbest__k': range(1, len(X_train.columns)+1), \n 'ridge__alpha': [0.1, 1.0, 10.]\n }\n\n # Fit on the train set, with grid search cross-validation\n gs = GridSearchCV(pipe, param_grid=param_grid, cv=3, \n scoring='neg_mean_absolute_error', \n verbose=0)\n gs.fit(X_train, y_train)\n validation_score = gs.best_score_\n print(f'\\n{name} Cross-Validation Score: {-validation_score}')\n print(f'{name} Train Score: {-gs.score(X_train, y_train)}')\n print(f'{name} Test Score: {-gs.score(X_test, y_test)}\\n')",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"# Here I'm going to compare my dummy regression baseline to all of my other\n# feature engineered \n# Comparing 1 set of variables for each\nprint(\"Some Comparisons\")\n\nprint('Dummy Regression Train Score:', dummy_train_score)\nprint('Dummy Regression Test Score:', dummy_test_score)\nprint('\\n')\n\n###\nnames = [\"Ridge Baseline\",'Just Ordinal','Just One Hot','Ord + One Hot','Wrangled + Ordinal','Wrangled + OH','All Combined']\nZ_data = data\ncompare(Z_data, name)\n\n### \nname = 'Just Ordinal'\nA_data = ordinal(data)\ncompare(A_data, name)\n\n### \nname = 'Just One Hot'\nB_data = one_hot_encoded(data)\ncompare(B_data, name)\n\n### \nname = 'Ord + One Hot'\nC_data = one_hot_encoded(A_data)\ncompare(C_data, name)\n\n### \nname = 'Wrangled + Ordinal'\nD_data = wrangle(A_data)\ncompare(D_data, name)\n\n### \nname = 'Wrangled + OH'\nE_data = wrangle(B_data)\ncompare(E_data, name)\n\n### \nname = 'All Combined'\nF_data = wrangle(C_data)\ncompare(F_data, name)\n",
"Some Comparisons\nDummy Regression Train Score: 980.8981106765484\nDummy Regression Test Score: 1341.2051609553478\n\n\n\nRidge Baseline Cross-Validation Score: 604.6453429336518\nRidge Baseline Train Score: 601.7510401226398\nRidge Baseline Test Score: 773.107268536548\n\n\nJust Ordinal Cross-Validation Score: 389.2116166662251\nJust Ordinal Train Score: 385.55035508465824\nJust Ordinal Test Score: 448.71439473552834\n\n\nJust One Hot Cross-Validation Score: 317.0056086250159\nJust One Hot Train Score: 290.9695287750331\nJust One Hot Test Score: 625.7986098051067\n\n\nOrd + One Hot Cross-Validation Score: 316.7241668646224\nOrd + One Hot Train Score: 290.95264043258334\nOrd + One Hot Test Score: 635.7680843548015\n\n\nWrangled + Ordinal Cross-Validation Score: 375.38130158393705\nWrangled + Ordinal Train Score: 362.64459025651087\nWrangled + Ordinal Test Score: 414.397277738424\n\n\nWrangled + OH Cross-Validation Score: 297.1960150364704\nWrangled + OH Train Score: 272.7878390752409\nWrangled + OH Test Score: 335.93397588625544\n\n\nAll Combined Cross-Validation Score: 297.1413242158009\nAll Combined Train Score: 272.8330918063642\nAll Combined Test Score: 333.8853138194594\n\n"
]
],
[
[
"### Combine all 3 feature engineering sets. ",
"_____no_output_____"
]
],
[
[
"%%time\nordinal_data = ordinal(data)\nord_oh_data = one_hot_encoded(ordinal_data)\nwrangled_oh_ord_data = wrangle(ord_oh_data)\n\n# Split data into train and test\nX_train, X_test, y_train, y_test = split(wrangled_oh_ord_data)\n\n# Define an estimator and param_grid\n# WHEN DEFINING YOU CAN GIVE IT A NAME OTHERWISE IT WILL USE THE PIPELINE NAME AUTOGEN NAME (name of the function but lowercase)\npipe = make_pipeline(\n RobustScaler(), \n SelectKBest(f_regression), \n Ridge())\n\nparam_grid = {\n 'selectkbest__k': range(1, len(X_train.columns)+1), \n 'ridge__alpha': [0.1, 1.0, 10.]\n}\n\n# Fit on the train set, with grid search cross-validation\ngs = GridSearchCV(pipe, param_grid=param_grid, cv=3, \n scoring='neg_mean_absolute_error', \n verbose=1)\n\ngs.fit(X_train, y_train)\nvalidation_score = gs.best_score_\nprint()\nprint('Cross-Validation Score:', -validation_score)\nprint()\nprint('Best estimator:', gs.best_estimator_)\nprint()\n\n\n### Get the score with the GridSearch's score method\n# It combines the predict and score calculation.\nfe_train_score = gs.score(X_train, y_train)\nprint('Feature Engineered Train Score:', -fe_train_score)\n\nfe_test_score = gs.score(X_test, y_test)\nprint('Feature Engineered Test Score:', -fe_test_score)",
"Fitting 3 folds for each of 120 candidates, totalling 360 fits\n"
],
[
"%%time\nordinal_data = ordinal(data)\nord_oh_data = one_hot_encoded(ordinal_data)\nwrangled_oh_ord_data = wrangle(ord_oh_data)\n\n# Split data into train and test\nX_train, X_test, y_train, y_test = split(wrangled_oh_ord_data)\n\n# Define an estimator and param_grid\n# WHEN DEFINING YOU CAN GIVE IT A NAME OTHERWISE IT WILL USE THE PIPELINE NAME AUTOGEN NAME (name of the function but lowercase)\npipe = make_pipeline(\n RobustScaler(), \n SelectKBest(f_regression), \n Ridge())\n\nparam_grid = {\n 'selectkbest__k': range(1, len(X_train.columns)+1), \n 'ridge__alpha': [0.1, 1.0, 10.]\n}\n\n# Fit on the train set, with grid search cross-validation\ngs = GridSearchCV(pipe, param_grid=param_grid, cv=3, \n scoring='neg_mean_absolute_error', \n verbose=1)\n\ngs.fit(X_train, y_train)\nvalidation_score = gs.best_score_\nprint()\nprint('Cross-Validation Score:', -validation_score)\nprint()\nprint('Best estimator:', gs.best_estimator_)\nprint()\n\n\n### Get the score with the GridSearch's score method\n# It combines the predict and score calculation.\nfe_train_score = gs.score(X_train, y_train)\nprint('Feature Engineered Train Score:', -fe_train_score)\n\nfe_test_score = gs.score(X_test, y_test)\nprint('Feature Engineered Test Score:', -fe_test_score)",
"Fitting 3 folds for each of 120 candidates, totalling 360 fits\n"
]
],
[
[
"### Notes on Pipeline / GridScore",
"_____no_output_____"
],
[
"#### Then we can get the final test score (using MAE function)",
"_____no_output_____"
],
[
"Using gs.predict calls all the grid score fit/transforms and applies them to our X_test data in this case. \n\nThen we can calculate our score useing those test predictions. ",
"_____no_output_____"
]
],
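[
[
"*Aside, a small sketch of the point above:* with the default `refit=True`, `GridSearchCV` keeps the best pipeline refit on the whole training set, and `gs.predict` simply delegates to that refit pipeline, so the two calls below should agree. It reuses `gs`, `X_test` and `np` from earlier cells.",
"_____no_output_____"
],
[
"# Sketch: gs.predict applies the refit best pipeline (scaler -> SelectKBest -> Ridge) to new data.\npred_via_gs = gs.predict(X_test)\npred_via_best = gs.best_estimator_.predict(X_test)\nprint(np.allclose(pred_via_gs, pred_via_best)) # expected: True",
"_____no_output_____"
]
],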
[
[
"# Predict with X_test features\ny_pred = gs.predict(X_test)\n\n# Compare predictions to y_test labels\ntest_score = mean_absolute_error(y_test, y_pred)\nprint('Test Score:', test_score)",
"Test Score: 333.8853138194594\n"
]
],
[
[
"#### Or use the GridSearch's `score` method to get the final test score. ",
"_____no_output_____"
],
[
"This method just generates the test predictions and scores them, returning the (negative) test score to you. So you apply the negative symbol to flip the sign from negative to positive. ",
"_____no_output_____"
]
],
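[
[
"*Aside, on why the minus sign is needed:* scikit-learn scorers follow a greater-is-better convention, so error metrics used for model selection carry a flipped sign; `'neg_mean_absolute_error'` behaves roughly like the hand-built scorer sketched below (illustrative only, reusing `gs`, `X_test`, `y_test` from earlier cells).",
"_____no_output_____"
],
[
"from sklearn.metrics import make_scorer, mean_absolute_error\n\n# A scorer close to what scoring='neg_mean_absolute_error' selects:\nneg_mae = make_scorer(mean_absolute_error, greater_is_better=False)\n\n# Called on a fitted estimator it returns minus the MAE, which is why the\n# notebook negates gs.score(...) before printing a positive error value.\nprint(neg_mae(gs.best_estimator_, X_test, y_test))",
"_____no_output_____"
]
],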
[
[
"train_score = gs.score(X_train, y_train)\nprint('Train Score:', -train_score)\n\n# Or use the grid search's score method, \n# which combines these steps\ntest_score = gs.score(X_test, y_test)\n\nprint('Test Score:', -test_score)",
"Train Score: 272.8330918063642\nTest Score: 333.8853138194594\n"
],
[
"'''def vary_scale(typ): \n if typ=='robust': return RobustScaler\n elif typ=='standard': return StandardScaler\n else: Fail\n\npipe = Pipeline(steps=[ ... , ('scale', FunctionTransformer(vary_scale)), ... ])\n\nparam_grid = { \n ... , \n FunctionTransformer'vary_scale__choice': ['robust', 'standard'],\n ... }\n\nsearch = GridSearchCV(pipe, param_grid, ...)''';",
"_____no_output_____"
]
],
[
[
"### Now we can explore and see what features we actually ended up selecting. ",
"_____no_output_____"
]
],
[
[
"# Which features were selected?\n# 'selectkbest' is the autogenerated name of the SelectKBest() function in the pipeline\nselector = gs.best_estimator_.named_steps['selectkbest']\nall_names = X_train.columns\n\n# get_support returns a mask of the columns in True / False\nselected_mask = selector.get_support()\n# Passing the boolean list as the column names creates a \nselected_names = all_names[selected_mask]\nunselected_names = all_names[~selected_mask]\n\nprint('Features selected:')\nfor name in selected_names:\n print(name)\n\nprint()\nprint('Features not selected:')\nfor name in unselected_names:\n print(name)",
"Features selected:\nyear_\nweekday_\nID\nPRCP\nTMAX\nTMIN\nAWND\nTotal_yesterday\nMon\nTue\nWed\nThu\nSat\nSun\nholiday\nJan\nFeb\nMar\nMay\nJun\nJul\nAug\nNov\nDec\ndaylight_hrs\nTemp (C)\ndry day\nannual\nPRCP_yest\nWindchill\nRl_Cold\nTMIN_ln\n\nFeatures not selected:\nmonth_\ndayOfMonth_\nSNOW\nSNWD\nFri\nApr\nSep\nOct\n"
],
[
"from sklearn.preprocessing import PolynomialFeatures\n\n#%%time\nordinal_data = ordinal(data)\nord_oh_data = one_hot_encoded(ordinal_data)\nwrangled_oh_ord_data = wrangle(ord_oh_data)\n\n# Split data into train and test\nX_train, X_test, y_train, y_test = split(wrangled_oh_ord_data)\n\n# Define an estimator and param_grid\n# WHEN DEFINING YOU CAN GIVE IT A NAME OTHERWISE IT WILL USE THE PIPELINE NAME AUTOGEN NAME (name of the function but lowercase)\npipe = make_pipeline(\n RobustScaler(), \n PolynomialFeatures(degree=2),\n SelectKBest(f_regression), \n Ridge())\n\nparam_grid = {\n 'selectkbest__k': range(1, len(X_train.columns)+1), \n 'ridge__alpha': [0.1, 1.0, 10.]\n}\n\n# Fit on the train set, with grid search cross-validation\ngs = GridSearchCV(pipe, param_grid=param_grid, cv=3, \n scoring='neg_mean_absolute_error', \n verbose=1)\n\ngs.fit(X_train, y_train)\nvalidation_score = gs.best_score_\nprint()\nprint('Cross-Validation Score:', -validation_score)\nprint()\nprint('Best estimator:', gs.best_estimator_)\nprint()\n\n\n### Get the score with the GridSearch's score method\n# It combines the predict and score calculation.\nfe_train_score = gs.score(X_train, y_train)\nprint('Feature Engineered Train Score:', -fe_train_score)\n\nfe_test_score = gs.score(X_test, y_test)\nprint('Feature Engineered Test Score:', -fe_test_score)",
"Fitting 3 folds for each of 120 candidates, totalling 360 fits\n"
],
[
"print('Feature Engineered Train Score:', -fe_train_score)\nprint('Feature Engineered Test Score:', -fe_test_score)",
"Feature Engineered Train Score: 276.24247184972216\nFeature Engineered Test Score: 395.80984993785677\n"
]
],
[
[
"#### An unecessary attempt with Tsfresh (Relevant Time Feature Augmentor)",
"_____no_output_____"
]
],
[
[
"'''# Find all the features, even the irrelevant ones.\n%%time\nfrom tsfresh.transformers import RelevantFeatureAugmenter\nfrom tsfresh import extract_features\n\nextracted_features = extract_features(wrangled_oh_ord_data, column_id=\"order\", column_sort=\"ID\")\n\nfrom tsfresh.utilities.dataframe_functions import impute\n\nfeatures = impute(extracted_features)''';",
"_____no_output_____"
],
[
"X_train.shape",
"_____no_output_____"
]
],
[
[
"## BONUS: Recursive Feature Elimination!\n\nhttps://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_selection import RFECV\n\nX_train_scaled = RobustScaler().fit_transform(X_train)\nrfe = RFECV(Ridge(alpha=1.0), scoring='neg_mean_absolute_error', cv=3)\nX_train_subset = rfe.fit_transform(X_train_scaled, y_train)\n\nall_names = X_train.columns\nselected_mask = rfe.support_\nselected_names = all_names[selected_mask]\nunselected_names = all_names[~selected_mask]\n\nprint('Features selected:')\nfor name in selected_names:\n print(name)\n\nprint()\nprint('Features not selected:')\nfor name in unselected_names:\n print(name)",
"Features selected:\ndayOfMonth_\nweekday_\nPRCP\nTMAX\nTMIN\nAWND\nTotal_yesterday\nMon\nTue\nWed\nThu\nFri\nSat\nSun\nholiday\nFeb\nMar\nApr\nMay\nJun\nOct\nDec\ndaylight_hrs\nTemp (C)\ndry day\nWindchill\nRl_Cold\nTMIN_ln\n\nFeatures not selected:\nyear_\nmonth_\nID\nSNOW\nSNWD\nJan\nJul\nAug\nSep\nNov\nannual\nPRCP_yest\n"
],
[
"X_train_subset = pd.DataFrame(X_train_subset, columns=selected_names)",
"_____no_output_____"
],
[
"X_test_subset = rfe.transform(X_test)\nX_test_subset = pd.DataFrame(X_test_subset, columns=selected_names)",
"_____no_output_____"
],
[
"print(X_train.shape, X_train_subset.shape, X_test.shape, X_test_subset.shape)",
"(963, 40) (963, 28) (100, 40) (100, 28)\n"
]
],
[
[
"# RFE again, but with polynomial features and interaction terms!",
"_____no_output_____"
],
[
"### Apply a Polynomial Transformation",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import PolynomialFeatures\n\npoly = PolynomialFeatures(degree=2)\nX_train_polynomial = poly.fit_transform(X_train)\n\nprint(X_train.shape, X_train_polynomial.shape)",
"(963, 40) (963, 861)\n"
]
],
[
[
"### Apply a Scaler & RFECV",
"_____no_output_____"
]
],
[
[
"%%time\nfrom sklearn.feature_selection import RFECV\n\nscaler = RobustScaler()\nX_train_scaled = scaler.fit_transform(X_train_polynomial)\n\nrfe = RFECV(Ridge(alpha=1.0), scoring='neg_mean_absolute_error', \n step=10, cv=3, verbose=0)\n\nX_train_subset = rfe.fit_transform(X_train_scaled, y_train)",
"CPU times: user 13.7 s, sys: 10.6 s, total: 24.3 s\nWall time: 12.3 s\n"
]
],
[
[
"### Look at all of those new features!\n",
"_____no_output_____"
]
],
[
[
"all_names = poly.get_feature_names(X_train.columns)\nselected_mask = rfe.support_\nselected_names = [name for name, selected in zip(all_names, selected_mask) if selected]\n\nprint(f'{rfe.n_features_} Features selected')\n'''for name in selected_names:\n print(name)''';",
"171 Features selected:\n"
]
],
[
[
"### Now we can experiment with mulitple ridge settings for selecting features. ",
"_____no_output_____"
]
],
[
[
"# Define an estimator and param_grid\n\nridge = Ridge()\n\nparam_grid = {\n 'alpha': [0.1, 0.75, 1.0, 2.1, 2.2, 2.3, 2.4, 2.5, 3.0, 4.0, 10.]\n}\n\n# Fit on the train set, with grid search cross-validation\ngs = GridSearchCV(ridge, param_grid=param_grid, cv=3, \n scoring='neg_mean_absolute_error', \n verbose=1)\n\ngs.fit(X_train_subset, y_train)\nvalidation_score = gs.best_score_\nprint()\nprint('Cross-Validation Score:', -validation_score)\nprint()\nprint('Best estimator:', gs.best_estimator_)\nprint()",
"Fitting 3 folds for each of 11 candidates, totalling 33 fits\n"
],
[
"# Do the same transformations to X_test (Applying the same transformations to each set so we can check the test scores)\nX_test_polynomial = poly.transform(X_test)\nX_test_scaled = scaler.transform(X_test_polynomial)\nX_test_subset = rfe.transform(X_test_scaled)\n\n# Use the grid search's score method with X_test_subset\ntrain_score = gs.score(X_train_subset, y_train)\nprint('Train Score:', -train_score)\ntest_score = gs.score(X_test_subset, y_test)\nprint('Test Score:', -test_score)",
"Train Score: 197.61173458116826\nTest Score: 356.485481175808\n"
]
],
[
[
"### Exploring other regression\n",
"_____no_output_____"
]
],
[
[
"'''# Authors: Jan Hendrik Metzen <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom __future__ import division, print_function\n\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels \\\n import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared\n\nprint(__doc__)\n\n\ndef load_mauna_loa_atmospheric_co2():\n ml_data = fetch_openml(data_id=41187)\n months = []\n ppmv_sums = []\n counts = []\n\n y = ml_data.data[:, 0]\n m = ml_data.data[:, 1]\n month_float = y + (m - 1) / 12\n ppmvs = ml_data.target\n\n for month, ppmv in zip(month_float, ppmvs):\n if not months or month != months[-1]:\n months.append(month)\n ppmv_sums.append(ppmv)\n counts.append(1)\n else:\n # aggregate monthly sum to produce average\n ppmv_sums[-1] += ppmv\n counts[-1] += 1\n\n months = np.asarray(months).reshape(-1, 1)\n avg_ppmvs = np.asarray(ppmv_sums) / counts\n return months, avg_ppmvs\n\n\nX, y = load_mauna_loa_atmospheric_co2()\n\nprint(load_mauna_loa_atmospheric_co2())\n\n# Kernel with parameters given in GPML book\nk1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend\nk2 = 2.4**2 * RBF(length_scale=90.0) \\\n * ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component\n# medium term irregularity\nk3 = 0.66**2 \\\n * RationalQuadratic(length_scale=1.2, alpha=0.78)\nk4 = 0.18**2 * RBF(length_scale=0.134) \\\n + WhiteKernel(noise_level=0.19**2) # noise terms\nkernel_gpml = k1 + k2 + k3 + k4\n\ngp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,\n optimizer=None, normalize_y=True)\ngp.fit(X, y)\n\nprint(\"GPML kernel: %s\" % gp.kernel_)\nprint(\"Log-marginal-likelihood: %.3f\"\n % gp.log_marginal_likelihood(gp.kernel_.theta))\n\n# Kernel with optimized parameters\nk1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend\nk2 = 2.0**2 * RBF(length_scale=100.0) \\\n * ExpSineSquared(length_scale=1.0, periodicity=1.0,\n periodicity_bounds=\"fixed\") # seasonal component\n# medium term irregularities\nk3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)\nk4 = 0.1**2 * RBF(length_scale=0.1) \\\n + WhiteKernel(noise_level=0.1**2,\n noise_level_bounds=(1e-3, np.inf)) # noise terms\nkernel = k1 + k2 + k3 + k4\n\ngp = GaussianProcessRegressor(kernel=kernel, alpha=0,\n normalize_y=True)\ngp.fit(X, y)\n\nprint(\"\\nLearned kernel: %s\" % gp.kernel_)\nprint(\"Log-marginal-likelihood: %.3f\"\n % gp.log_marginal_likelihood(gp.kernel_.theta))\n\nX_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]\ny_pred, y_std = gp.predict(X_, return_std=True)\n\n# Illustration\nplt.scatter(X, y, c='k')\nplt.plot(X_, y_pred)\nplt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,\n alpha=0.5, color='k')\nplt.xlim(X_.min(), X_.max())\nplt.xlabel(\"Year\")\nplt.ylabel(r\"CO$_2$ in ppm\")\nplt.title(r\"Atmospheric CO$_2$ concentration at Mauna Loa\")\nplt.tight_layout()\nplt.show()''';",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ec8e072c14c16354f3410a07ec658c64033d106e | 10,626 | ipynb | Jupyter Notebook | course2/week2/sklearn.cross_validation.ipynb | Irishkova/ML_course | 4b893125ad0077fd2e660c1d48c7b8acf21f88a6 | [
"MIT"
] | null | null | null | course2/week2/sklearn.cross_validation.ipynb | Irishkova/ML_course | 4b893125ad0077fd2e660c1d48c7b8acf21f88a6 | [
"MIT"
] | null | null | null | course2/week2/sklearn.cross_validation.ipynb | Irishkova/ML_course | 4b893125ad0077fd2e660c1d48c7b8acf21f88a6 | [
"MIT"
] | null | null | null | 23.1 | 153 | 0.481272 | [
[
[
"# Sklearn",
"_____no_output_____"
],
[
"## sklearn.model_selection",
"_____no_output_____"
],
[
"документация: http://scikit-learn.org/stable/modules/cross_validation.html",
"_____no_output_____"
]
],
[
[
"from sklearn import model_selection, datasets\n\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"### Разовое разбиение данных на обучение и тест с помощью train_test_split",
"_____no_output_____"
]
],
[
[
"iris = datasets.load_iris()",
"_____no_output_____"
]
],
[
[
"функция train_test_split позволяет построить разовое разбиение данных на обучение и тест. test_size доля тестовой выборки.",
"_____no_output_____"
]
],
[
[
"train_data, test_data, train_labels, test_labels = model_selection.train_test_split(iris.data, iris.target, \n test_size = 0.3)",
"_____no_output_____"
],
[
"#убедимся, что тестовая выборка действительно составляет 0.3 от всех данных\nfloat(len(test_labels))/len(iris.data)",
"_____no_output_____"
],
[
"print ('Размер обучающей выборки: {} объектов \\nРазмер тестовой выборки: {} объектов'.format(len(train_data),\n len(test_data)))",
"Размер обучающей выборки: 105 объектов \nРазмер тестовой выборки: 45 объектов\n"
],
[
"print ('Обучающая выборка:\\n', train_data[:5])\nprint ('\\n')\nprint ('Тестовая выборка:\\n', test_data[:5])",
"Обучающая выборка:\n [[6.8 2.8 4.8 1.4]\n [5.2 2.7 3.9 1.4]\n [7.9 3.8 6.4 2. ]\n [6.3 2.3 4.4 1.3]\n [6.3 3.3 4.7 1.6]]\n\n\nТестовая выборка:\n [[5.7 2.5 5. 2. ]\n [6.4 2.8 5.6 2.2]\n [5.5 3.5 1.3 0.2]\n [5.7 3. 4.2 1.2]\n [6.7 3.1 4.7 1.5]]\n"
],
[
"print ('Метки классов на обучающей выборке:\\n', train_labels)\nprint ('\\n')\nprint ('Метки классов на тестовой выборке:\\n', test_labels)",
"Метки классов на обучающей выборке:\n [1 1 2 1 1 0 2 1 0 1 1 0 0 2 1 2 1 0 1 0 2 1 0 0 2 0 1 1 0 1 2 0 2 0 0 1 2\n 2 1 0 0 1 1 1 1 2 2 2 1 0 2 0 2 0 1 1 0 1 0 0 0 1 1 2 0 1 0 2 2 1 2 0 1 1\n 1 0 0 0 2 2 0 2 2 0 2 1 0 1 0 1 0 1 2 2 2 2 1 2 2 0 2 2 0 2 1]\n\n\nМетки классов на тестовой выборке:\n [2 2 0 1 1 0 2 0 2 2 0 1 1 2 2 0 2 0 2 1 2 1 0 2 1 0 1 2 0 2 2 2 1 2 0 1 0\n 0 1 1 0 2 0 0 1]\n"
]
],
[
[
"### Стратегии проведения кросс-валидации",
"_____no_output_____"
]
],
[
[
"#сгенерируем короткое подобие датасета, где элементы совпадают с порядковым номером\nX = range(0,10)",
"_____no_output_____"
]
],
[
[
"#### KFold",
"_____no_output_____"
]
],
[
[
"kf = model_selection.KFold(n_splits = 5)\nfor train_indices, test_indices in kf.split(X):\n print (train_indices, test_indices)",
"[2 3 4 5 6 7 8 9] [0 1]\n[0 1 4 5 6 7 8 9] [2 3]\n[0 1 2 3 6 7 8 9] [4 5]\n[0 1 2 3 4 5 8 9] [6 7]\n[0 1 2 3 4 5 6 7] [8 9]\n"
]
],
[
[
"для того чтобы перемешать переменные нужно shuffle = True",
"_____no_output_____"
]
],
[
[
"kf = model_selection.KFold(n_splits = 2, shuffle = True)\nfor train_indices, test_indices in kf.split(X):\n print (train_indices, test_indices)",
"[4 6 7 8 9] [0 1 2 3 5]\n[0 1 2 3 5] [4 6 7 8 9]\n"
]
],
[
[
"если мы не хотим чтобы каждый раз у нас генерился новый набор, лучше указать индекс конктретного набора который нас итересует random_state = 1",
"_____no_output_____"
]
],
[
[
"kf = model_selection.KFold(n_splits = 2, shuffle = True, random_state = 1)\nfor train_indices, test_indices in kf.split(X):\n print (train_indices, test_indices)",
"[1 3 5 7 8] [0 2 4 6 9]\n[0 2 4 6 9] [1 3 5 7 8]\n"
]
],
[
[
"#### StratifiedKFold",
"_____no_output_____"
]
],
[
[
"#создаем набор меток классов\ny = np.array([0] * 5 + [1] * 5)\nprint( y)\n\nskf = model_selection.StratifiedKFold(n_splits = 2, shuffle = True, random_state = 0)\nfor train_indices, test_indices in skf.split(X, y):\n print (train_indices, test_indices)",
"[0 0 0 0 0 1 1 1 1 1]\n[3 4 8 9] [0 1 2 5 6 7]\n[0 1 2 5 6 7] [3 4 8 9]\n"
],
[
"target = np.array([0, 1] * 5)\nprint (target)\n\nskf = model_selection.StratifiedKFold(n_splits = 2,shuffle = True)\nfor train_indices, test_indices in skf.split(X, target):\n print (train_indices, test_indices)",
"[0 1 0 1 0 1 0 1 0 1]\n[1 4 5 8] [0 2 3 6 7 9]\n[0 2 3 6 7 9] [1 4 5 8]\n"
]
],
[
[
"#### ShuffleSplit",
"_____no_output_____"
]
],
[
[
"ss = model_selection.ShuffleSplit(n_splits = 10, test_size = 0.2)\n\nfor train_indices, test_indices in ss.split(X):\n print (train_indices, test_indices)",
"[7 1 5 0 8 9 3 6] [2 4]\n[5 6 9 3 7 1 2 0] [4 8]\n[0 4 9 6 1 7 3 5] [8 2]\n[7 9 8 2 0 1 3 6] [4 5]\n[4 3 2 1 7 6 9 8] [5 0]\n[4 0 1 9 8 3 2 6] [5 7]\n[0 5 8 9 6 3 1 2] [7 4]\n[7 0 5 3 8 4 6 9] [2 1]\n[8 4 6 9 1 5 3 2] [0 7]\n[4 8 6 3 1 5 9 7] [0 2]\n"
]
],
[
[
"#### StratifiedShuffleSplit",
"_____no_output_____"
]
],
[
[
"target = np.array([0] * 5 + [1] * 5)\nprint (target)\n\nsss = model_selection.StratifiedShuffleSplit(n_splits = 4, test_size = 0.2)\nfor train_indices, test_indices in sss.split(X, target):\n print (train_indices, test_indices)",
"[0 0 0 0 0 1 1 1 1 1]\n[3 9 8 7 1 4 6 0] [5 2]\n[4 5 6 2 8 0 9 3] [1 7]\n[7 2 6 5 3 0 9 4] [8 1]\n[5 7 2 6 8 3 1 0] [4 9]\n"
]
],
[
[
"#### Leave-One-Out",
"_____no_output_____"
]
],
[
[
"loo = model_selection.LeaveOneOut()\n\nfor train_indices, test_index in loo.split(X):\n print (train_indices, test_index)",
"[1 2 3 4 5 6 7 8 9] [0]\n[0 2 3 4 5 6 7 8 9] [1]\n[0 1 3 4 5 6 7 8 9] [2]\n[0 1 2 4 5 6 7 8 9] [3]\n[0 1 2 3 5 6 7 8 9] [4]\n[0 1 2 3 4 6 7 8 9] [5]\n[0 1 2 3 4 5 7 8 9] [6]\n[0 1 2 3 4 5 6 8 9] [7]\n[0 1 2 3 4 5 6 7 9] [8]\n[0 1 2 3 4 5 6 7 8] [9]\n"
]
],
[
[
"Больше стратегий проведения кросс-валидации доступно здесь: http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ec8e076f53b030af978ff34686a1c888fa64bb0c | 86,768 | ipynb | Jupyter Notebook | lessons/05_Step_4.ipynb | oddsun/CFDPython | a955baa3031e2adeff3227d144f5d3ee1eefe2b3 | [
"CC-BY-3.0"
] | 2,341 | 2015-01-04T22:20:47.000Z | 2022-03-30T08:51:12.000Z | lessons/05_Step_4.ipynb | oddsun/CFDPython | a955baa3031e2adeff3227d144f5d3ee1eefe2b3 | [
"CC-BY-3.0"
] | 59 | 2015-01-02T22:21:14.000Z | 2020-10-19T17:37:14.000Z | lessons/05_Step_4.ipynb | oddsun/CFDPython | a955baa3031e2adeff3227d144f5d3ee1eefe2b3 | [
"CC-BY-3.0"
] | 900 | 2015-01-12T03:08:03.000Z | 2022-03-29T01:34:07.000Z | 146.567568 | 38,304 | 0.845807 | [
[
[
"Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) Lorena A. Barba, Gilbert F. Forsyth 2017. Thanks to NSF for support via CAREER award #1149784.",
"_____no_output_____"
]
],
[
[
"[@LorenaABarba](https://twitter.com/LorenaABarba)",
"_____no_output_____"
],
[
"12 steps to Navier–Stokes\n=====\n***",
"_____no_output_____"
],
[
"We continue our journey to solve the Navier–Stokes equation with Step 4. But don't continue unless you have completed the previous steps! In fact, this next step will be a combination of the two previous ones. The wonders of *code reuse*!",
"_____no_output_____"
],
[
"Step 4: Burgers' Equation\n----\n***",
"_____no_output_____"
],
[
"You can read about Burgers' Equation on its [wikipedia page](http://en.wikipedia.org/wiki/Burgers'_equation).\n\nBurgers' equation in one spatial dimension looks like this:\n\n$$\\frac{\\partial u}{\\partial t} + u \\frac{\\partial u}{\\partial x} = \\nu \\frac{\\partial ^2u}{\\partial x^2}$$\n\nAs you can see, it is a combination of non-linear convection and diffusion. It is surprising how much you learn from this neat little equation! \n\nWe can discretize it using the methods we've already detailed in Steps [1](./01_Step_1.ipynb) to [3](./04_Step_3.ipynb). Using forward difference for time, backward difference for space and our 2nd-order method for the second derivatives yields:\n\n$$\\frac{u_i^{n+1}-u_i^n}{\\Delta t} + u_i^n \\frac{u_i^n - u_{i-1}^n}{\\Delta x} = \\nu \\frac{u_{i+1}^n - 2u_i^n + u_{i-1}^n}{\\Delta x^2}$$\n\nAs before, once we have an initial condition, the only unknown is $u_i^{n+1}$. We will step in time as follows:\n\n$$u_i^{n+1} = u_i^n - u_i^n \\frac{\\Delta t}{\\Delta x} (u_i^n - u_{i-1}^n) + \\nu \\frac{\\Delta t}{\\Delta x^2}(u_{i+1}^n - 2u_i^n + u_{i-1}^n)$$",
"_____no_output_____"
],
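[
"*Aside, a toy sketch of the update formula above:* the time-marching expression maps directly onto array indexing. The values below are arbitrary stand-ins just so the snippet runs on its own; the real grid, initial condition and periodic boundaries are set up later in this notebook, where the update is written with an explicit loop.",
"_____no_output_____"
],
[
"import numpy\n\n# Toy stand-ins (separate names, so nothing later in the notebook is affected).\ndx_toy, dt_toy, nu_toy = 1.0, 0.1, 0.3\nun_toy = numpy.linspace(1.0, 2.0, 5) # solution at time level n on a 5-point grid\nu_toy = un_toy.copy()\n\n# One time step of the update formula for the interior points i = 1 .. nx-2:\n# un_toy[:-2] plays the role of u_{i-1}, un_toy[2:] the role of u_{i+1}.\nu_toy[1:-1] = (un_toy[1:-1] - un_toy[1:-1] * dt_toy / dx_toy * (un_toy[1:-1] - un_toy[:-2]) + nu_toy * dt_toy / dx_toy**2 * (un_toy[2:] - 2 * un_toy[1:-1] + un_toy[:-2]))\nprint(u_toy)",
"_____no_output_____"
],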
[
"### Initial and Boundary Conditions\n\nTo examine some interesting properties of Burgers' equation, it is helpful to use different initial and boundary conditions than we've been using for previous steps. \n\nOur initial condition for this problem is going to be:\n\n\\begin{eqnarray}\nu &=& -\\frac{2 \\nu}{\\phi} \\frac{\\partial \\phi}{\\partial x} + 4 \\\\\\\n\\phi &=& \\exp \\bigg(\\frac{-x^2}{4 \\nu} \\bigg) + \\exp \\bigg(\\frac{-(x-2 \\pi)^2}{4 \\nu} \\bigg)\n\\end{eqnarray}\n\nThis has an analytical solution, given by:\n\n\\begin{eqnarray}\nu &=& -\\frac{2 \\nu}{\\phi} \\frac{\\partial \\phi}{\\partial x} + 4 \\\\\\\n\\phi &=& \\exp \\bigg(\\frac{-(x-4t)^2}{4 \\nu (t+1)} \\bigg) + \\exp \\bigg(\\frac{-(x-4t -2 \\pi)^2}{4 \\nu(t+1)} \\bigg)\n\\end{eqnarray}\n\nOur boundary condition will be:\n\n$$u(0) = u(2\\pi)$$\n\nThis is called a *periodic* boundary condition. Pay attention! This will cause you a bit of headache if you don't tread carefully.",
"_____no_output_____"
],
[
"### Saving Time with SymPy\n\n\nThe initial condition we're using for Burgers' Equation can be a bit of a pain to evaluate by hand. The derivative $\\frac{\\partial \\phi}{\\partial x}$ isn't too terribly difficult, but it would be easy to drop a sign or forget a factor of $x$ somewhere, so we're going to use SymPy to help us out. \n\n[SymPy](http://sympy.org/en/) is the symbolic math library for Python. It has a lot of the same symbolic math functionality as Mathematica with the added benefit that we can easily translate its results back into our Python calculations (it is also free and open source). \n\nStart by loading the SymPy library, together with our favorite library, NumPy.",
"_____no_output_____"
]
],
[
[
"import numpy\nimport sympy",
"_____no_output_____"
]
],
[
[
"We're also going to tell SymPy that we want all of its output to be rendered using $\\LaTeX$. This will make our Notebook beautiful!",
"_____no_output_____"
]
],
[
[
"from sympy import init_printing\ninit_printing(use_latex=True)",
"_____no_output_____"
]
],
[
[
"Start by setting up symbolic variables for the three variables in our initial condition and then type out the full equation for $\\phi$. We should get a nicely rendered version of our $\\phi$ equation.",
"_____no_output_____"
]
],
[
[
"x, nu, t = sympy.symbols('x nu t')\nphi = (sympy.exp(-(x - 4 * t)**2 / (4 * nu * (t + 1))) +\n sympy.exp(-(x - 4 * t - 2 * sympy.pi)**2 / (4 * nu * (t + 1))))\nphi",
"_____no_output_____"
]
],
[
[
"It's maybe a little small, but that looks right. Now to evaluate our partial derivative $\\frac{\\partial \\phi}{\\partial x}$ is a trivial task. ",
"_____no_output_____"
]
],
[
[
"phiprime = phi.diff(x)\nphiprime",
"_____no_output_____"
]
],
[
[
"If you want to see the unrendered version, just use the Python print command.",
"_____no_output_____"
]
],
[
[
"print(phiprime)",
"-(-8*t + 2*x)*exp(-(-4*t + x)**2/(4*nu*(t + 1)))/(4*nu*(t + 1)) - (-8*t + 2*x - 4*pi)*exp(-(-4*t + x - 2*pi)**2/(4*nu*(t + 1)))/(4*nu*(t + 1))\n"
]
],
[
[
"### Now what?\n\n\nNow that we have the Pythonic version of our derivative, we can finish writing out the full initial condition equation and then translate it into a usable Python expression. For this, we'll use the *lambdify* function, which takes a SymPy symbolic equation and turns it into a callable function. ",
"_____no_output_____"
]
],
[
[
"from sympy.utilities.lambdify import lambdify\n\nu = -2 * nu * (phiprime / phi) + 4\nprint(u)",
"-2*nu*(-(-8*t + 2*x)*exp(-(-4*t + x)**2/(4*nu*(t + 1)))/(4*nu*(t + 1)) - (-8*t + 2*x - 4*pi)*exp(-(-4*t + x - 2*pi)**2/(4*nu*(t + 1)))/(4*nu*(t + 1)))/(exp(-(-4*t + x - 2*pi)**2/(4*nu*(t + 1))) + exp(-(-4*t + x)**2/(4*nu*(t + 1)))) + 4\n"
]
],
[
[
"### Lambdify\n\nTo lambdify this expression into a useable function, we tell lambdify which variables to request and the function we want to plug them in to.",
"_____no_output_____"
]
],
[
[
"ufunc = lambdify((t, x, nu), u)\nprint(ufunc(1, 4, 3))",
"3.49170664206\n"
]
],
[
[
"### Back to Burgers' Equation\n\nNow that we have the initial conditions set up, we can proceed and finish setting up the problem. We can generate the plot of the initial condition using our lambdify-ed function.",
"_____no_output_____"
]
],
[
[
"from matplotlib import pyplot\n%matplotlib inline\n\n###variable declarations\nnx = 101\nnt = 100\ndx = 2 * numpy.pi / (nx - 1)\nnu = .07\ndt = dx * nu\n\nx = numpy.linspace(0, 2 * numpy.pi, nx)\nun = numpy.empty(nx)\nt = 0\n\nu = numpy.asarray([ufunc(t, x0, nu) for x0 in x])\nu",
"_____no_output_____"
],
[
"pyplot.figure(figsize=(11, 7), dpi=100)\npyplot.plot(x, u, marker='o', lw=2)\npyplot.xlim([0, 2 * numpy.pi])\npyplot.ylim([0, 10]);",
"_____no_output_____"
]
],
[
[
"This is definitely not the hat function we've been dealing with until now. We call it a \"saw-tooth function\". Let's proceed forward and see what happens. ",
"_____no_output_____"
],
[
"### Periodic Boundary Conditions\n\nOne of the big differences between Step 4 and the previous lessons is the use of *periodic* boundary conditions. If you experiment with Steps 1 and 2 and make the simulation run longer (by increasing `nt`) you will notice that the wave will keep moving to the right until it no longer even shows up in the plot. \n\nWith periodic boundary conditions, when a point gets to the right-hand side of the frame, it *wraps around* back to the front of the frame. \n\nRecall the discretization that we worked out at the beginning of this notebook:\n\n$$u_i^{n+1} = u_i^n - u_i^n \\frac{\\Delta t}{\\Delta x} (u_i^n - u_{i-1}^n) + \\nu \\frac{\\Delta t}{\\Delta x^2}(u_{i+1}^n - 2u_i^n + u_{i-1}^n)$$\n\nWhat does $u_{i+1}^n$ *mean* when $i$ is already at the end of the frame?\n\nThink about this for a minute before proceeding. \n\n",
"_____no_output_____"
]
],
[
[
"for n in range(nt):\n un = u.copy()\n for i in range(1, nx-1):\n u[i] = un[i] - un[i] * dt / dx *(un[i] - un[i-1]) + nu * dt / dx**2 *\\\n (un[i+1] - 2 * un[i] + un[i-1])\n u[0] = un[0] - un[0] * dt / dx * (un[0] - un[-2]) + nu * dt / dx**2 *\\\n (un[1] - 2 * un[0] + un[-2])\n u[-1] = u[0]\n \nu_analytical = numpy.asarray([ufunc(nt * dt, xi, nu) for xi in x])",
"_____no_output_____"
],
[
"pyplot.figure(figsize=(11, 7), dpi=100)\npyplot.plot(x,u, marker='o', lw=2, label='Computational')\npyplot.plot(x, u_analytical, label='Analytical')\npyplot.xlim([0, 2 * numpy.pi])\npyplot.ylim([0, 10])\npyplot.legend();",
"_____no_output_____"
]
],
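[
[
"Notice how the wrap-around is handled above: the update for `u[0]` uses `un[-2]` as its left neighbor, because the last point of the grid duplicates the first one, and `u[-1]` is then simply copied from `u[0]`. A compact alternative is to write the periodic neighbors with Python's modulo operator; here is a sketch of the same update, reusing the variables `nx`, `nt`, `dt`, `dx` and `nu` defined above:\n\n```python\nfor n in range(nt):\n    un = u.copy()\n    for i in range(nx - 1):  # points 0 .. nx-2 form the periodic domain\n        left = un[(i - 1) % (nx - 1)]\n        right = un[(i + 1) % (nx - 1)]\n        u[i] = (un[i] - un[i] * dt / dx * (un[i] - left) +\n                nu * dt / dx**2 * (right - 2 * un[i] + left))\n    u[-1] = u[0]  # the last point mirrors the first\n```\n\nBoth formulations advance the same periodic solution; the explicit boundary lines used above just avoid the extra modulo operations inside the loop.",
"_____no_output_____"
]
],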
[
[
"***\n\nWhat next?\n----\n\nThe subsequent steps, from 5 to 12, will be in two dimensions. But it is easy to extend the 1D finite-difference formulas to the partial derivatives in 2D or 3D. Just apply the definition — a partial derivative with respect to $x$ is the variation in the $x$ direction *while keeping $y$ constant*.\n\nBefore moving on to [Step 5](./07_Step_5.ipynb), make sure you have completed your own code for steps 1 through 4 and you have experimented with the parameters and thought about what is happening. Also, we recommend that you take a slight break to learn about [array operations with NumPy](./06_Array_Operations_with_NumPy.ipynb).",
"_____no_output_____"
]
],
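[
[
"As a small preview of the notation, write $u_{i,j}$ for the value of $u$ at the grid point $(x_i, y_j)$. Keeping $j$ (and therefore $y$) fixed, the forward differences read\n\n$$\\frac{\\partial u}{\\partial x}\\bigg|_{i,j} \\approx \\frac{u_{i+1,j}-u_{i,j}}{\\Delta x}, \\qquad \\frac{\\partial u}{\\partial y}\\bigg|_{i,j} \\approx \\frac{u_{i,j+1}-u_{i,j}}{\\Delta y}$$\n\nexactly as in one dimension, with the extra index simply carried along.",
"_____no_output_____"
]
],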
[
[
"from IPython.core.display import HTML\ndef css_styling():\n styles = open(\"../styles/custom.css\", \"r\").read()\n return HTML(styles)\ncss_styling()",
"_____no_output_____"
]
]
] | [
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"raw"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8e0b38d3526d4cb6e35ecf3153e391f0e03b27 | 51,703 | ipynb | Jupyter Notebook | examples/multiclass_comparison.ipynb | bernardoduarte/textvec | c95ed206cea5099d5b4da842f12c7a551159f3c2 | [
"MIT"
] | 195 | 2018-04-23T07:25:43.000Z | 2020-03-07T17:04:17.000Z | examples/multiclass_comparison.ipynb | bernardoduarte/textvec | c95ed206cea5099d5b4da842f12c7a551159f3c2 | [
"MIT"
] | 7 | 2019-10-28T13:39:03.000Z | 2021-10-14T04:27:46.000Z | examples/multiclass_comparison.ipynb | bernardoduarte/textvec | c95ed206cea5099d5b4da842f12c7a551159f3c2 | [
"MIT"
] | 16 | 2018-04-23T15:17:41.000Z | 2020-03-07T17:04:16.000Z | 165.185304 | 22,428 | 0.896795 | [
[
[
"import os\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.utils import shuffle\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.metrics.classification import accuracy_score, f1_score\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom textvec import vectorizers",
"_____no_output_____"
],
[
"import warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
]
],
[
[
"# Load data",
"_____no_output_____"
],
[
"## 20news",
"_____no_output_____"
]
],
[
[
"def get_multiclass_20news_data():\n newsgroups_train = fetch_20newsgroups(subset=\"train\")\n newsgroups_test = fetch_20newsgroups(subset=\"test\")\n train_data = pd.DataFrame({\"text\": newsgroups_train.data, \"y\": newsgroups_train.target})\n test_data = pd.DataFrame({\"text\": newsgroups_test.data, \"y\": newsgroups_test.target})\n return train_data, test_data",
"_____no_output_____"
],
[
"news_train, news_test = get_multiclass_20news_data()",
"_____no_output_____"
]
],
[
[
"## Airline sentiments",
"_____no_output_____"
]
],
[
[
"def get_airline_multiclass_sentiment_data(path):\n df = pd.read_csv(path, encoding = \"ISO-8859-1\")\n df.loc[df.airline_sentiment == \"negative\",\"airline_sentiment\"] = 0\n df.loc[df.airline_sentiment == \"positive\",\"airline_sentiment\"] = 1\n df.loc[df.airline_sentiment == \"neutral\",\"airline_sentiment\"] = 2\n X_train, X_test, y_train, y_test = train_test_split(\n df.text, df.airline_sentiment, stratify=df.airline_sentiment, test_size=0.2, random_state=0\n )\n train_data = pd.DataFrame({\"text\": X_train, \"y\": y_train})\n test_data = pd.DataFrame({\"text\": X_test, \"y\": y_test})\n return train_data, test_data",
"_____no_output_____"
],
[
"air_train, air_test = get_airline_multiclass_sentiment_data(\"data/Airline-Sentiment-2-w-AA.csv\")",
"_____no_output_____"
]
],
[
[
"# Grid search",
"_____no_output_____"
]
],
[
[
"transformers = [\n (\"tf\", TfidfTransformer(use_idf=False, sublinear_tf=True)),\n (\"tfidf\", TfidfTransformer(sublinear_tf=True)),\n (\"tficf\", vectorizers.TfIcfVectorizer(sublinear_tf=True))\n]",
"_____no_output_____"
],
[
"param_grid = {\n \"vectorizer__ngram_range\": [(1, 1), (1, 2), (1, 3)],\n \"vectorizer__max_features\": [2000, 5000, None],\n \"clf__C\": [0.01, 0.1, 1, 10],\n}",
"_____no_output_____"
],
[
"def grid_search(param_grid, train, test, n_jobs=-1):\n results = dict()\n \n for name, transformer in transformers:\n pipeline = Pipeline([\n (\"vectorizer\", CountVectorizer()),\n (\"transformer\", transformer),\n (\"clf\", LogisticRegression(solver=\"saga\", random_state=0))\n ])\n search = GridSearchCV(pipeline, param_grid, cv=5, n_jobs=n_jobs, scoring=\"accuracy\")\n search.fit(train.text, train.y)\n\n test_preds_proba = search.best_estimator_.predict_proba(test.text)\n test_preds = np.argmax(test_preds_proba, axis=1)\n np.testing.assert_allclose(test_preds, search.best_estimator_.predict(test.text))\n\n train_accuracy = search.best_score_\n test_accuracy = accuracy_score(test.y, test_preds)\n print(f\"{name:10} train accuracy: {train_accuracy:<15} test accuracy: {test_accuracy}\")\n\n local_results = {\n \"best_params\": search.best_params_,\n \"test_accuracy\": test_accuracy,\n \"preds_proba\": test_preds_proba[:, 1]\n }\n results[name] = local_results\n return results",
"_____no_output_____"
]
],
[
[
"# Correlation heatmap",
"_____no_output_____"
]
],
[
[
"def plot_correlation_heatmap(results, title):\n probs = []\n labels = []\n for name, _ in transformers:\n probs.append(results[name][\"preds_proba\"])\n accuracy = results[name][\"test_accuracy\"]\n labels.append(f\"{name}: {accuracy:.4f}\")\n probs = np.array(probs)\n corr = np.corrcoef(probs)\n\n fig, ax = plt.subplots(figsize=(10,10))\n ax = sns.heatmap(corr, \n cmap=\"viridis\", \n square=True, \n annot=corr, \n linewidth=0.5,\n xticklabels=labels,\n yticklabels=labels)\n ax.set_title(title)\n plt.show()",
"_____no_output_____"
],
[
"results = grid_search(param_grid, air_train, air_test, n_jobs=4)\nplot_correlation_heatmap(results, \"Airline sentiment 3 class Dataset\")",
"tf train accuracy: 0.8059255464480874 test accuracy: 0.8084016393442623\ntfidf train accuracy: 0.8046448087431693 test accuracy: 0.8118169398907104\ntficf train accuracy: 0.8080601092896175 test accuracy: 0.8125\n"
],
[
"results = grid_search(param_grid, news_train, news_test, n_jobs=3)\nplot_correlation_heatmap(results, \"20 classes news dataset\")",
"tf train accuracy: 0.8980908608803253 test accuracy: 0.820631970260223\ntfidf train accuracy: 0.9272582640975782 test accuracy: 0.8575411577270313\ntficf train accuracy: 0.9129397207000177 test accuracy: 0.8292618162506639\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ec8e14f83b0977b758d6be5698c2a0ff50afa84d | 792,468 | ipynb | Jupyter Notebook | notebooks/cat_dog.ipynb | ProjektMetafora/DSHNet | f97aeb73f2201737415062d6db7217760c594ada | [
"Apache-2.0"
] | null | null | null | notebooks/cat_dog.ipynb | ProjektMetafora/DSHNet | f97aeb73f2201737415062d6db7217760c594ada | [
"Apache-2.0"
] | null | null | null | notebooks/cat_dog.ipynb | ProjektMetafora/DSHNet | f97aeb73f2201737415062d6db7217760c594ada | [
"Apache-2.0"
] | null | null | null | 1,610.707317 | 777,204 | 0.957495 | [
[
[
"cd /opt/app",
"/opt/app\n"
],
[
"! ls /opt/data/cat_dog_coco/",
"annotations images\n"
],
[
"! ls /opt/data/cat_dog_coco/annotations",
"annotations.json\n"
],
[
"! ls /opt/data/cat_dog_coco/images",
"000000001319.jpg 000000136373.jpg 000000273885.jpg 000000476173.jpg\n000000018155.jpg 000000161617.jpg 000000283471.jpg 000000499656.jpg\n000000033111.jpg 000000168963.jpg 000000306395.jpg 000000516194.jpg\n000000053289.jpg 000000177529.jpg 000000342150.jpg 000000531983.jpg\n000000055607.jpg 000000179392.jpg 000000344702.jpg 000000532975.jpg\n000000086205.jpg 000000183757.jpg 000000349776.jpg 000000557543.jpg\n000000116503.jpg 000000209419.jpg 000000381605.jpg 000000562458.jpg\n000000117108.jpg 000000220053.jpg 000000396039.jpg 000000570995.jpg\n"
],
[
"%%file /opt/configs/faster_rcnn/cat_dog.py\n\n_base_ = [\n '../_base_/models/faster_rcnn_r50_fpn.py',\n '../_base_/datasets/coco_detection.py',\n '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\ncheckpoint_config = dict(interval=18)\nlog_config = dict(\n interval=3000,\n hooks=[\n dict(type='TextLoggerHook'),\n # dict(type='TensorboardLoggerHook')\n ])\nmodel = dict(\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_cfg=dict(type='BN', requires_grad=True),\n norm_eval=True,\n style='pytorch'),\n # neck=dict(type='DensityFPN'),\n rpn_head=dict(\n type='RPNHead',\n # adaptive=False,\n in_channels=256,\n feat_channels=256,\n anchor_generator=dict(\n type='AnchorGenerator',\n scales=[4],\n ratios=[0.5, 1.0, 2.0],\n strides=[4, 8, 16, 32, 64]),\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0]),\n # loss_density=dict(type='MSELoss', reduction='mean', loss_weight=0.01),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),\n\n roi_head=dict(\n type='TailRoIHead',\n labels_tail=[2, 4, 5, 6, 7, 8, 9],\n labels=[0, 1, 3],\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', out_size=7, sample_num=0),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head_tail=dict(\n type='TailBBoxHead',\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=2,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n bbox_head=dict(\n type='TailBBoxHead',\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=2,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='L1Loss', loss_weight=1.0))))\ntest_cfg = dict(\n rpn=dict(\n nms_across_levels=False,\n nms_pre=1000,\n nms_post=1000,\n max_num=1000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=dict(\n score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=500))\n# dataset settings\ndataset_type = 'CocoDataset'\nclasses = ('cat', 'dog')\ndata_root = '/opt/data/cat_dog_coco/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='MinIoURandomCrop',\n min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),\n min_crop_size=0.3),\n dict(type='Resize', img_scale=[(1000, 600)], keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=[(1000, 600)],\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', 
keys=['img']),\n\n ])\n]\ntrain_cfg = dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.7,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n match_low_quality=True,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=-1,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=1000,\n max_num=1000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=dict(\n assigner_tail=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n match_low_quality=False,\n ignore_iof_thr=-1),\n sampler_tail=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=True,\n labels=[2, 4, 5, 6, 7, 8, 9]),\n\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n match_low_quality=False,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True,\n labels=[0, 1, 3]),\n pos_weight=-1,\n debug=False))\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type=dataset_type,\n classes=classes,\n ann_file=data_root + 'annotations/annotations.json',\n img_prefix=data_root + 'images/',\n pipeline=train_pipeline),\n val=dict(\n type=dataset_type,\n classes=classes,\n ann_file=data_root + 'annotations/annotations.json',\n img_prefix=data_root + 'images/',\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n classes=classes,\n ann_file=data_root + 'annotations/annotations.json',\n img_prefix=data_root + 'images/',\n pipeline=test_pipeline))\nevaluation = dict(interval=1, metric='bbox')\noptimizer = dict(type='SGD', lr=0.002, momentum=0.95, weight_decay=5 * 1e-4)\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=1.0 / 3,\n step=[8, 12, 16])\ntotal_epochs = 18\nload_from = '/opt/notebooks/vd_faster_rcnn_r50_fpn_tail_epoch_18.pth'",
"Overwriting /opt/configs/faster_rcnn/cat_dog.py\n"
],
[
"! ls /opt/notebooks",
"cat_dog.ipynb test_image.jpg vd_faster_rcnn_r50_fpn_tail_epoch_18.pth\n"
],
[
"from pathlib import Path\nfrom PIL import Image\nimport requests",
"_____no_output_____"
],
[
"myfile = requests.get(\"https://stmedia.stimg.co/ows_157782178985657.jpg?fit=crop&crop=faces\", allow_redirects=True)\nPath(\"test_image.jpg\").open('wb').write(myfile.content)",
"_____no_output_____"
],
[
"from mmdet.apis import init_detector, inference_detector, show_result_pyplot\nimport mmcv",
"/opt/conda/lib/python3.7/site-packages/mmcv/utils/registry.py:64: UserWarning: The old API of register_module(module, force=False) is deprecated and will be removed, please use the new API register_module(name=None, force=False, module=None) instead.\n 'The old API of register_module(module, force=False) '\n"
],
[
"config_file = '/opt/configs/faster_rcnn/cat_dog.py'\ncheckpoint_file = '/opt/app/checkpoints/cat_dog/latest.pth'",
"_____no_output_____"
],
[
"model = init_detector(config_file, checkpoint_file, device='cuda:0')",
"/opt/conda/lib/python3.7/site-packages/mmcv/utils/misc.py:304: UserWarning: \"out_size\" is deprecated in `RoIAlign.__init__`, please use \"output_size\" instead\n f'\"{src_arg_name}\" is deprecated in '\n/opt/conda/lib/python3.7/site-packages/mmcv/utils/misc.py:304: UserWarning: \"sample_num\" is deprecated in `RoIAlign.__init__`, please use \"sampling_ratio\" instead\n f'\"{src_arg_name}\" is deprecated in '\n"
],
[
"img = '/opt/notebooks/test_image.jpg'\nresult = inference_detector(model, img)",
"/opt/conda/lib/python3.7/site-packages/mmcv/utils/misc.py:304: UserWarning: \"iou_thr\" is deprecated in `nms`, please use \"iou_threshold\" instead\n f'\"{src_arg_name}\" is deprecated in '\n"
],
[
"show_result_pyplot(model, img, result)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8e17ab4e388dedcd2dbb690abe05d2debddaa5 | 8,118 | ipynb | Jupyter Notebook | session-five/session_five_blank_template.ipynb | Precel2000/beginners-python | 6296e5d8bcd782caee267bd334abef14f2521fa9 | [
"MIT"
] | 4 | 2020-12-17T11:01:52.000Z | 2021-03-11T01:11:31.000Z | session-five/session_five_blank_template.ipynb | Precel2000/beginners-python | 6296e5d8bcd782caee267bd334abef14f2521fa9 | [
"MIT"
] | 1 | 2020-07-13T15:29:48.000Z | 2020-07-13T15:29:48.000Z | session-five/session_five_blank_template.ipynb | Precel2000/beginners-python | 6296e5d8bcd782caee267bd334abef14f2521fa9 | [
"MIT"
] | 7 | 2020-12-11T18:11:13.000Z | 2021-11-25T21:31:48.000Z | 18.619266 | 270 | 0.519463 | [
[
[
"<a href=\"https://colab.research.google.com/github/warwickdatascience/beginners-python/blob/master/session_five/session_five_blank_template.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"<center>Spotted a mistake? Report it <a href=\"https://github.com/warwickdatascience/beginners-python/issues/new\">here</a></center>",
"_____no_output_____"
],
[
"# Beginner's Python—Session Five Template",
"_____no_output_____"
],
[
"## Looping Through a Range",
"_____no_output_____"
],
[
"### Introduction\n",
"_____no_output_____"
],
[
"Use a for loop to print the numbers zero to four",
"_____no_output_____"
],
[
"Use a for loop to print the numbers two to five",
"_____no_output_____"
],
[
"### Standard Puzzles",
"_____no_output_____"
],
[
"Use a for loop to print the number -5 to 5, inclusive",
"_____no_output_____"
],
[
"Print the first ten square numbers",
"_____no_output_____"
],
[
"### Bonus Puzzles",
"_____no_output_____"
],
[
"Experiment with passing a third argument into `range(...)`. What does it do?",
"_____no_output_____"
],
[
"Use this technique to count down from ten to zero",
"_____no_output_____"
],
[
"## Looping Through a List",
"_____no_output_____"
],
[
"### Introduction",
"_____no_output_____"
],
[
"Create a shopping list and print out each item in a sentence",
"_____no_output_____"
],
[
"### Standard Puzzles",
"_____no_output_____"
],
[
"Calculate the product of the elements in a list (see the presentation for a similar example)",
"_____no_output_____"
],
[
"Use a for loop to count how many positive elements a numeric list contains",
"_____no_output_____"
],
[
"## Early-stopping",
"_____no_output_____"
],
[
"### Introduction",
"_____no_output_____"
],
[
"Print the elements of a list until a negative number is encountered, at which point you should stop",
"_____no_output_____"
],
[
"Print the elements of a list, skipping any negative values",
"_____no_output_____"
],
[
"### Standard Puzzles",
"_____no_output_____"
],
[
"Create a list as specified in the presentation",
"_____no_output_____"
],
[
"Loop through the list, printing only positive elements and halting when a negative entry is encountered",
"_____no_output_____"
],
[
"### Bonus Puzzles",
"_____no_output_____"
],
[
"Setup up a nested for loop and attempt to break when a condition is met in the inner loop",
"_____no_output_____"
],
[
"## Enumeration",
"_____no_output_____"
],
[
"### Introduction",
"_____no_output_____"
],
[
"Use enumeration to print out the elements of a list alongside their ordinal indices",
"_____no_output_____"
],
[
"### Standard Puzzles",
"_____no_output_____"
],
[
"Define the list shown in the presentation",
"_____no_output_____"
],
[
"Print whether each element is greater than, equal to, or less than its index",
"_____no_output_____"
],
[
"Use enumeration to print every other element of a list",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
ec8e27ed780a079dccf7686cbcbf15927acf447b | 41,316 | ipynb | Jupyter Notebook | dqn/exercise/Deep_Q_Network.ipynb | parksoy/Soyoung_Udacity_ND_DeepReinforcementLearning | 28c4cd3ce76094a0e682736fcdceeec3f21ba754 | [
"MIT"
] | null | null | null | dqn/exercise/Deep_Q_Network.ipynb | parksoy/Soyoung_Udacity_ND_DeepReinforcementLearning | 28c4cd3ce76094a0e682736fcdceeec3f21ba754 | [
"MIT"
] | null | null | null | dqn/exercise/Deep_Q_Network.ipynb | parksoy/Soyoung_Udacity_ND_DeepReinforcementLearning | 28c4cd3ce76094a0e682736fcdceeec3f21ba754 | [
"MIT"
] | null | null | null | 159.521236 | 32,416 | 0.882346 | [
[
[
"# Deep Q-Network (DQN)\n---\nIn this notebook, you will implement a DQN agent with OpenAI Gym's LunarLander-v2 environment.\n\n### 1. Import the Necessary Packages",
"_____no_output_____"
]
],
[
[
"%reset",
"_____no_output_____"
],
[
"import gym\nimport random\nimport torch\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"### 2. Instantiate the Environment and Agent\n\nInitialize the environment in the code cell below.",
"_____no_output_____"
]
],
[
[
"env = gym.make('LunarLander-v2')\nenv.seed(0)\nprint('State shape: ', env.observation_space.shape)\nprint('Number of actions: ', env.action_space.n)",
"State shape: (8,)\nNumber of actions: 4\n"
]
],
[
[
"Before running the next code cell, familiarize yourself with the code in **Step 2** and **Step 3** of this notebook, along with the code in `dqn_agent.py` and `model.py`. Once you have an understanding of how the different files work together, \n- Define a neural network architecture in `model.py` that maps states to action values. This file is mostly empty - it's up to you to define your own deep Q-network!\n- Finish the `learn` method in the `Agent` class in `dqn_agent.py`. The sampled batch of experience tuples is already provided for you; you need only use the local and target Q-networks to compute the loss, before taking a step towards minimizing the loss.\n\nOnce you have completed the code in `dqn_agent.py` and `model.py`, run the code cell below. (_If you end up needing to make multiple changes and get unexpected behavior, please restart the kernel and run the cells from the beginning of the notebook!_)\n\nYou can find the solution files, along with saved model weights for a trained agent, in the `solution/` folder. (_Note that there are many ways to solve this exercise, and the \"solution\" is just one way of approaching the problem, to yield a trained agent._)",
"_____no_output_____"
]
],
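[
[
"For reference, the core of one possible `learn` step is sketched below. It is just one of the many valid solutions, and it assumes the conventions of the provided template: the sampled experiences are unpacked as `states, actions, rewards, next_states, dones`, the agent holds `qnetwork_local`, `qnetwork_target` and an `optimizer`, and `torch.nn.functional` is imported as `F`.\n\n```python\n# TD target computed from the (periodically updated) target network\nQ_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\nQ_targets = rewards + gamma * Q_targets_next * (1 - dones)\n\n# current estimates from the local network for the actions actually taken\nQ_expected = self.qnetwork_local(states).gather(1, actions)\n\n# minimize the TD error and update the local network\nloss = F.mse_loss(Q_expected, Q_targets)\nself.optimizer.zero_grad()\nloss.backward()\nself.optimizer.step()\n```",
"_____no_output_____"
]
],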
[
[
"from dqn_agent import Agent\n\nagent = Agent(state_size=8, action_size=4, seed=0)\n\n# watch an untrained agent\nstate = env.reset()\nfor j in range(200):\n action = agent.act(state)\n env.render()\n state, reward, done, _ = env.step(action)\n if done:\n break \n \nenv.close()",
"_____no_output_____"
]
],
[
[
"### 3. Train the Agent with DQN\n\nRun the code cell below to train the agent from scratch. You are welcome to amend the supplied values of the parameters in the function, to try to see if you can get better performance!",
"_____no_output_____"
]
],
[
[
"def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n \"\"\"Deep Q-Learning.\n \n Params\n ======\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timesteps per episode\n eps_start (float): starting value of epsilon, for epsilon-greedy action selection\n eps_end (float): minimum value of epsilon\n eps_decay (float): multiplicative factor (per episode) for decreasing epsilon\n \"\"\"\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(1, n_episodes+1):\n state = env.reset()\n score = 0\n for t in range(max_t):\n action = agent.act(state, eps)\n next_state, reward, done, _ = env.step(action)\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=200.0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')\n break\n return scores\n\nscores = dqn()\n\n# plot the scores\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(len(scores)), scores)\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.show()",
"Episode 100\tAverage Score: -183.04\nEpisode 200\tAverage Score: -98.230\nEpisode 300\tAverage Score: -41.98\nEpisode 400\tAverage Score: -4.286\nEpisode 500\tAverage Score: 60.65\nEpisode 600\tAverage Score: 125.72\nEpisode 700\tAverage Score: 196.29\nEpisode 714\tAverage Score: 201.15\nEnvironment solved in 614 episodes!\tAverage Score: 201.15\n"
]
],
[
[
"### 4. Watch a Smart Agent!\n\nIn the next code cell, you will load the trained weights from file to watch a smart agent!",
"_____no_output_____"
]
],
[
[
"# load the weights from file\nagent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))\n\nfor i in range(5):\n state = env.reset()\n for j in range(200):\n action = agent.act(state)\n env.render()\n state, reward, done, _ = env.step(action)\n if done:\n break \n \nenv.close()",
"_____no_output_____"
]
],
[
[
"### 5. Explore\n\nIn this exercise, you have implemented a DQN agent and demonstrated how to use it to solve an OpenAI Gym environment. To continue your learning, you are encouraged to complete any (or all!) of the following tasks:\n- Amend the various hyperparameters and network architecture to see if you can get your agent to solve the environment faster. Once you build intuition for the hyperparameters that work well with this environment, try solving a different OpenAI Gym task with discrete actions!\n- You may like to implement some improvements such as prioritized experience replay, Double DQN, or Dueling DQN! \n- Write a blog post explaining the intuition behind the DQN algorithm and demonstrating how to use it to solve an RL environment of your choosing. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ec8e407a7617e8e422f64e35eec54a5b008be31f | 4,501 | ipynb | Jupyter Notebook | notebooks/road_following/live_demo_build_trt.ipynb | Ryan-ZL-Lin/jetbot-customization | 5ce8619e049c53c83d2197678023f254b7c5fa48 | [
"MIT"
] | 1 | 2021-04-29T15:18:58.000Z | 2021-04-29T15:18:58.000Z | notebooks/road_following/live_demo_build_trt.ipynb | Ryan-ZL-Lin/jetbot-customization | 5ce8619e049c53c83d2197678023f254b7c5fa48 | [
"MIT"
] | null | null | null | notebooks/road_following/live_demo_build_trt.ipynb | Ryan-ZL-Lin/jetbot-customization | 5ce8619e049c53c83d2197678023f254b7c5fa48 | [
"MIT"
] | null | null | null | 23.321244 | 338 | 0.574317 | [
[
[
"# Road Following - Build TensorRT model for live demo",
"_____no_output_____"
],
[
"In this notebook, we will optimize the model we trained using TensorRT.",
"_____no_output_____"
],
[
"## Load the trained model",
"_____no_output_____"
],
[
"We will assume that you have already downloaded ``best_steering_model_xy.pth`` to work station as instructed in \"train_model.ipynb\" notebook. Now, you should upload model file to JetBot in to this notebooks's directory. Once that's finished there should be a file named ``best_steering_model_xy.pth`` in this notebook's directory.",
"_____no_output_____"
],
[
"> Please make sure the file has uploaded fully before calling the next cell",
"_____no_output_____"
],
[
"Execute the code below to initialize the PyTorch model. This should look very familiar from the training notebook.",
"_____no_output_____"
]
],
[
[
"import torchvision\nimport torch\n\nmodel = torchvision.models.resnet18(pretrained=False)\nmodel.fc = torch.nn.Linear(512, 2)\nmodel = model.cuda().eval().half()",
"_____no_output_____"
]
],
[
[
"Next, load the trained weights from the ``best_steering_model_xy.pth`` file that you uploaded.",
"_____no_output_____"
]
],
[
[
"model.load_state_dict(torch.load('best_steering_model_xy.pth'))",
"_____no_output_____"
]
],
[
[
"Currently, the model weights are located on the CPU memory execute the code below to transfer to the GPU device.",
"_____no_output_____"
]
],
[
[
"device = torch.device('cuda')",
"_____no_output_____"
]
],
[
[
"## TensorRT",
"_____no_output_____"
],
[
"> If your setup does not have `torch2trt` installed, you need to first install `torch2trt` by executing the following in the console.\n```bash\ncd $HOME\ngit clone https://github.com/NVIDIA-AI-IOT/torch2trt\ncd torch2trt\nsudo python3 setup.py install\n```\n\nConvert and optimize the model using torch2trt for faster inference with TensorRT. Please see the [torch2trt](https://github.com/NVIDIA-AI-IOT/torch2trt) readme for more details.\n\n> This optimization process can take a couple minutes to complete.",
"_____no_output_____"
]
],
[
[
"from torch2trt import torch2trt\n\ndata = torch.zeros((1, 3, 224, 224)).cuda().half()\n\nmodel_trt = torch2trt(model, [data], fp16_mode=True)",
"_____no_output_____"
]
],
[
[
"Save the optimized model using the cell below",
"_____no_output_____"
]
],
[
[
"torch.save(model_trt.state_dict(), 'best_steering_model_xy_trt.pth')",
"_____no_output_____"
]
],
[
[
"## Next\nOpen live_demo_trt.ipynb to move JetBot with the TensorRT optimized model.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ec8e4c50cd57d8a1fd5030138176ff51e3f098b4 | 468,196 | ipynb | Jupyter Notebook | demo_cv.ipynb | thesfinox/avogadro-2021 | 91015ff3c9cd816ea2e206eb6bd9ffc71ac9dec4 | [
"MIT"
] | null | null | null | demo_cv.ipynb | thesfinox/avogadro-2021 | 91015ff3c9cd816ea2e206eb6bd9ffc71ac9dec4 | [
"MIT"
] | null | null | null | demo_cv.ipynb | thesfinox/avogadro-2021 | 91015ff3c9cd816ea2e206eb6bd9ffc71ac9dec4 | [
"MIT"
] | null | null | null | 424.860254 | 91,170 | 0.920104 | [
[
[
"# Artificial Intelligence for Handwriting Recognition\n\nSpeakers: Riccardo Finotello, Veronica Guidetti\n\nCode: [Riccardo Finotello](mailto:[email protected])\n\n[**XVII Avogadro Meeting**](https://www.ggi.infn.it/showevent.pl?id=407), 12/2021, Galileo Galilei Institute for Theoretical Physics, Florence, Italy\n\n# Synopsis\n\nIn this small demo, we introduce some technical instruments to deploy a simple AI for the recognition of handwritten digits.\nThe main idea is to showcase the use of convolutional neural networks and autoencoders in a simple realisation, and to show possible generalisations and applications.",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n",
"_____no_output_____"
]
],
[
[
"The chosen frameworks for the neural networks are [Tensorflow](https://www.tensorflow.org/) and its high-level API [Keras](https://keras.io/).",
"_____no_output_____"
]
],
[
[
"import sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras import Model, Sequential\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.layers import (Conv2D, Conv2DTranspose, MaxPool2D,\n UpSampling2D)\nfrom tensorflow.keras.losses import CategoricalCrossentropy, MeanSquaredError\nfrom tensorflow.keras.metrics import BinaryAccuracy, CategoricalAccuracy\nfrom tensorflow.keras.optimizers import Adam\n\nTRAIN_CNN = False # set to False to load directly the pretrained models\nTRAIN_AE = False # set to False to load directly the pretrained models\n\nnp.random.seed(0)\ntf.random.set_seed(0)\n\nsns.set_theme(palette='tab10')\n\n# set the memory growth of the GPU for training, if available\ngpus = tf.config.list_physical_devices('GPU')\nif gpus:\n\n try:\n\n for gpu in gpus:\n\n tf.config.experimental.set_memory_growth(gpu, True)\n\n except RuntimeError as e:\n\n sys.stderr.write(str(e))\n",
"_____no_output_____"
]
],
[
[
"## Data Preparation",
"_____no_output_____"
],
[
"In this demo we shall use the [MNIST database](http://yann.lecun.com/exdb/mnist/) of handwritten digits.\nThe dataset contains $6 \\times 10^4$ images as training set, and $10^4$ images as test set.\nThe size of the images is $28~\\text{px} \\times 28~\\text{px}$.\nEach realisation in the sets is accompanied by a ground truth label identifying the digits represented.\n\nImages are black and white: only one channel is present. Its intensity varies from $0$ (pure black) to $255$ (pure white).",
"_____no_output_____"
]
],
[
[
"(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n# normalise to the maximal intensity\nM = X_train.max()\nX_train = X_train / M\nX_test = X_test / M\n",
"_____no_output_____"
]
],
[
[
"In this example, we pad the images by adding two black pixels at the borders.\nThe procedure modifies the images into $32~\\text{px} \\times 32~\\text{px}$, as it is usually easier to work with powers of $2$.",
"_____no_output_____"
]
],
[
[
"# pad images to get 32 x 32 inputs (convenience)\nX_train = tf.pad(X_train, ((0, 0), (2, 2), (2, 2)))\nX_test = tf.pad(X_test, ((0, 0), (2, 2), (2, 2)))\n",
"_____no_output_____"
]
],
[
[
"As the objective is a **binary classification**, we *one-hot* encode the labels in the training set. That is, we transform the scalar value of each entry as follows:\n$$\ny_i \\in \\{ 0,\\, 1,\\, \\dots,\\, K \\}\n\\quad\n\\mapsto\n\\quad\nY_{i,k} =\n\\begin{cases}\n1 & \\quad \\text{if} \\quad y_i = k\n\\\\\n0 & \\quad \\text{otherwise}\n\\end{cases}\n$$",
"_____no_output_____"
]
],
[
[
"y_train = tf.one_hot(y_train, depth=y_train.max()+1)\n",
"_____no_output_____"
]
],
[
[
"We then show some images in the training set:",
"_____no_output_____"
]
],
[
[
"_, ax = plt.subplots(1, 5, figsize=(30, 5), sharey=True)\nidx = np.random.choice(X_train.shape[0], size=5)\n\nfor n, i in enumerate(idx):\n\n sns.heatmap(data=X_train[i],\n cmap='Greys_r',\n cbar=False,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax[n]\n )\n\n ax[n].set_title(f'class: {np.argmax(y_train[i]):d}', fontsize=32)\n\nplt.tight_layout()\n",
"_____no_output_____"
]
],
[
[
"## Classification Model",
"_____no_output_____"
],
[
"In what follows, we train a simple convolutional neural network to classify the digits in the dataset.\n\nThe fundamental principle of the convolutions is the scan of the image by a smaller *window*:\n\n<img src=\"https://miro.medium.com/max/2340/1*Fw-ehcNBR9byHtho-Rxbtw.gif\" width=\"640px\">\n\nImage by [Mayank Mishra](https://towardsdatascience.com/convolutional-neural-networks-explained-9cc5188c4939)\n\nBy repeating the same operation on multiple filters, we get a tensor structure with an additional *channel* direction:\n\n<img src=\"https://raw.githubusercontent.com/dvgodoy/dl-visuals/main/Convolutions/3channel_filters1.png\" width=\"640px\">\n\nImage by [dvgodoy](https://github.com/dvgodoy/dl-visuals) / [CC BY](https://creativecommons.org/licenses/by/4.0)\n\nIn this implementation we develop a *fully convolutional neural network* (that is, no fully connected layers are used), in order to avoid any dependence on the size of the input image.",
"_____no_output_____"
]
],
[
[
"K.clear_session()\n\ncnn = Sequential([Conv2D(filters=64,\n kernel_size=(3, 3),\n activation='relu',\n input_shape=(None, None, 1),\n name='cnn_input'\n ),\n MaxPool2D(pool_size=(3, 3), name='cnn_pool_1'),\n Conv2D(filters=128,\n kernel_size=(2, 2),\n activation='relu',\n name='cnn_hidden_1'\n ),\n MaxPool2D(pool_size=(2, 2), name='cnn_pool_2'),\n Conv2D(filters=64,\n kernel_size=(2, 2),\n activation='relu',\n name='cnn_hidden_2'\n ),\n MaxPool2D(pool_size=(2, 2), name='cnn_pool_3'),\n Conv2D(filters=10,\n kernel_size=(1, 1),\n activation='softmax',\n name='cnn_output'\n )\n ],\n name='fully_convolutional_nn'\n )\n\ncnn.summary()\n",
"Model: \"fully_convolutional_nn\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ncnn_input (Conv2D) (None, None, None, 64) 640 \n_________________________________________________________________\ncnn_pool_1 (MaxPooling2D) (None, None, None, 64) 0 \n_________________________________________________________________\ncnn_hidden_1 (Conv2D) (None, None, None, 128) 32896 \n_________________________________________________________________\ncnn_pool_2 (MaxPooling2D) (None, None, None, 128) 0 \n_________________________________________________________________\ncnn_hidden_2 (Conv2D) (None, None, None, 64) 32832 \n_________________________________________________________________\ncnn_pool_3 (MaxPooling2D) (None, None, None, 64) 0 \n_________________________________________________________________\ncnn_output (Conv2D) (None, None, None, 10) 650 \n=================================================================\nTotal params: 67,018\nTrainable params: 67,018\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
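[
[
"The spatial dimensions in the summary are `None` because the input shape is left free, but they are easy to track by hand: a valid convolution or pooling with kernel size $k$ and stride $s$ maps $n$ pixels to $\\lfloor (n - k) / s \\rfloor + 1$. For a $32 \\times 32$ input the chain is $32 \\to 30 \\to 10 \\to 9 \\to 4 \\to 3 \\to 1 \\to 1$, so the network ends in a $1 \\times 1$ map with $10$ channels (one *softmax* score per digit), while larger inputs simply produce a wider map of predictions.",
"_____no_output_____"
]
],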
[
[
"Finally, we train the network using the [Adam](https://arxiv.org/abs/1404.7359) optimiser of stochastic gradient descent.\nWe use $5 \\times 10^4$ images for effective training, and $10^4$ for evaluation (validation) of the model.",
"_____no_output_____"
]
],
[
[
"cnn.compile(optimizer=Adam(learning_rate=1.0e-3),\n loss=CategoricalCrossentropy(),\n metrics=[CategoricalAccuracy()]\n )\n\nif TRAIN_CNN:\n\n history = cnn.fit(x=X_train[:50000, :, :, None],\n y=y_train[:50000, None, None, :],\n batch_size=128,\n epochs=10,\n validation_data=(X_train[50000:, :, :, None],\n y_train[50000:, None, None, :]\n ),\n callbacks=[ModelCheckpoint('./fully_convolutional_nn.h5',\n save_best_only=True\n )\n ]\n )\n\ncnn.load_weights('./fully_convolutional_nn.h5')\n",
"Epoch 1/10\n391/391 [==============================] - 6s 10ms/step - loss: 0.4656 - categorical_accuracy: 0.8657 - val_loss: 0.1624 - val_categorical_accuracy: 0.9503\nEpoch 2/10\n391/391 [==============================] - 4s 9ms/step - loss: 0.1444 - categorical_accuracy: 0.9562 - val_loss: 0.1065 - val_categorical_accuracy: 0.9701\nEpoch 3/10\n391/391 [==============================] - 4s 9ms/step - loss: 0.1013 - categorical_accuracy: 0.9691 - val_loss: 0.0880 - val_categorical_accuracy: 0.9752\nEpoch 4/10\n391/391 [==============================] - 4s 9ms/step - loss: 0.0828 - categorical_accuracy: 0.9744 - val_loss: 0.0813 - val_categorical_accuracy: 0.9772\nEpoch 5/10\n391/391 [==============================] - 4s 9ms/step - loss: 0.0683 - categorical_accuracy: 0.9786 - val_loss: 0.0719 - val_categorical_accuracy: 0.9792\nEpoch 6/10\n391/391 [==============================] - 4s 9ms/step - loss: 0.0595 - categorical_accuracy: 0.9812 - val_loss: 0.0639 - val_categorical_accuracy: 0.9806\nEpoch 7/10\n391/391 [==============================] - 4s 9ms/step - loss: 0.0540 - categorical_accuracy: 0.9831 - val_loss: 0.0542 - val_categorical_accuracy: 0.9840\nEpoch 8/10\n391/391 [==============================] - 4s 9ms/step - loss: 0.0471 - categorical_accuracy: 0.9850 - val_loss: 0.0642 - val_categorical_accuracy: 0.9823\nEpoch 9/10\n391/391 [==============================] - 4s 9ms/step - loss: 0.0436 - categorical_accuracy: 0.9865 - val_loss: 0.0650 - val_categorical_accuracy: 0.9802\nEpoch 10/10\n391/391 [==============================] - 4s 9ms/step - loss: 0.0408 - categorical_accuracy: 0.9869 - val_loss: 0.0686 - val_categorical_accuracy: 0.9814\n"
]
],
[
[
"For visualisation purposes, we plot the loss function and the accuracy as functions of the training epochs.",
"_____no_output_____"
]
],
[
[
"if TRAIN_CNN:\n\n _, ax = plt.subplots(1, 2, figsize=(12, 5))\n\n sns.lineplot(x=range(1, 10+1),\n y=history.history['loss'],\n ls='-',\n color='tab:blue',\n label='training',\n ax=ax[0]\n )\n sns.lineplot(x=range(1, 10+1),\n y=history.history['val_loss'],\n ls='-',\n color='tab:orange',\n label='validation',\n ax=ax[0]\n )\n ax[0].set_xlabel('epochs', fontsize=18)\n ax[0].set_ylabel('loss', fontsize=18)\n\n sns.lineplot(x=range(1, 10+1),\n y=history.history['categorical_accuracy'],\n ls='-',\n color='tab:blue',\n label='training',\n ax=ax[1]\n )\n sns.lineplot(x=range(1, 10+1),\n y=history.history['val_categorical_accuracy'],\n ls='-',\n color='tab:orange',\n label='validation',\n ax=ax[1]\n )\n ax[1].set_xlabel('epochs', fontsize=18)\n ax[1].set_ylabel('accuracy', fontsize=18)\n\n plt.tight_layout()\n",
"_____no_output_____"
]
],
[
[
"Finally, we compute the predictions on the final $10^4$ images in the test set (these realisations were never seen by the algorithm).",
"_____no_output_____"
]
],
[
[
"y_pred = cnn.predict(X_test[:, :, :, None]).squeeze().argmax(axis=-1)\n\nprint(f'Prediction accuracy: {(y_pred == y_test).mean():.2%}')\n",
"Prediction accuracy: 98.40%\n"
],
[
"_, ax = plt.subplots(1, 5, figsize=(30, 5), sharey=True)\nidx = np.random.choice(X_test.shape[0], size=5)\n\nfor n, i in enumerate(idx):\n\n sns.heatmap(data=X_test[i],\n cmap='Greys_r',\n cbar=False,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax[n]\n )\n\n ax[n].set_title(f'target class: {y_test[i]:d}', fontsize=32)\n ax[n].set_xlabel(f'predicted class: {y_pred[i]:d}', fontsize=32)\n\nplt.tight_layout()\n",
"_____no_output_____"
]
],
[
[
"Given the peculiarity of the network, we can also provide the predictions for inputs of different sizes. For instance:",
"_____no_output_____"
]
],
[
[
"_, ax = plt.subplots(1, 5, figsize=(30, 5), sharey=True)\nidx = np.random.choice(X_test.shape[0], size=10)\nfor n in range(5):\n\n conc = np.concatenate([X_test[idx[n]], X_test[idx[9-n]]], axis=1)\n\n sns.heatmap(data=conc,\n cmap='Greys_r',\n cbar=False,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax[n]\n )\n\n pred = cnn.predict(conc[None, :, :, None]).squeeze().argmax(axis=1)\n ax[n].set_title(f'target class: {y_test[idx[n]]:d}{y_test[idx[9-n]]:d}',\n fontsize=32\n )\n ax[n].set_xlabel(f'predicted class: {pred[0]:d}{pred[-1]:d}',\n fontsize=32\n )\n\nplt.tight_layout()\n",
"_____no_output_____"
]
],
[
[
"## Autoencoder as Denoise Filter\n\nIn what follows, we introduce a source of defects inside the distribution of the input data. In general, this may prevent the network from performing at the same accuracy levels.",
"_____no_output_____"
]
],
[
[
"X_train_noise = X_train + 1.75 * np.random.uniform(size=X_train.shape)\nX_test_noise = X_test + 1.75 * np.random.uniform(size=X_test.shape)\ny_pred_noise = cnn.predict(X_test_noise[:, :, :, None]).squeeze()\ny_pred_noise = y_pred_noise.argmax(axis=-1)\n\nprint(f'Prediction accuracy: {(y_pred_noise == y_test).mean():.2%}')\n",
"Prediction accuracy: 26.19%\n"
]
],
[
[
"The reason for such a drop in accuracy may become clear once we visualise some realisations in the new \"noisy\" dataset.",
"_____no_output_____"
]
],
[
[
"_, ax = plt.subplots(1, 5, figsize=(30, 5), sharey=True)\nidx = np.random.choice(X_test_noise.shape[0], size=5)\n\nfor n, i in enumerate(idx):\n\n sns.heatmap(data=X_test_noise[i],\n cmap='Greys_r',\n cbar=False,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax[n]\n )\n\n ax[n].set_title(f'target class: {y_test[i]:d}', fontsize=32)\n ax[n].set_xlabel(f'predicted class: {y_pred_noise[i]:d}', fontsize=32)\n\nplt.tight_layout()\n",
"_____no_output_____"
]
],
[
[
"One way to tackle the problem is to use a particular *unsupervised* approach.\nWe focus on an **autoencoder** architecture, whose job is to extract only the relevant information contained in the noisy image, and to rebuild a clean pattern:\n\n<img src=\"https://lilianweng.github.io/lil-log/assets/images/autoencoder-architecture.png\" width=\"640px\">\n\nImage by [Lilian Weng](https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html)\n\nAgain, we use a fully convolutional model to avoid dependencies on the size of the input image.",
"_____no_output_____"
]
],
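[
[
"The same size bookkeeping applies here, with the decoder running it in reverse: a valid `Conv2DTranspose` with kernel size $k$ and unit stride maps $n$ pixels to $n + k - 1$, while `UpSampling2D` multiplies the size by its factor. In the model below, a $32 \\times 32$ input is compressed to a $3 \\times 3 \\times 64$ bottleneck ($32 \\to 30 \\to 10 \\to 9 \\to 4 \\to 3$) and then expanded back ($3 \\to 9 \\to 10 \\to 30 \\to 32 \\to 32$), so the reconstruction has the same shape as the input.",
"_____no_output_____"
]
],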
[
[
"K.clear_session()\n\nae = Sequential([Conv2D(filters=128,\n kernel_size=(3, 3),\n activation='tanh',\n input_shape=(None, None, 1),\n name='ae_input'\n ),\n MaxPool2D(pool_size=(3, 3), name='ae_pool_1'),\n Conv2D(filters=64,\n kernel_size=(2, 2),\n activation='tanh',\n name='ae_down_1'\n ),\n MaxPool2D(pool_size=(2, 2), name='ae_pool_2'),\n Conv2D(filters=64,\n kernel_size=(2, 2),\n activation='tanh',\n name='ae_down_2'\n ),\n UpSampling2D(size=(3, 3), name='ae_upsample_1'),\n Conv2DTranspose(filters=64,\n kernel_size=(2, 2),\n activation='relu',\n name='ae_up_1'\n ),\n UpSampling2D(size=(3, 3), name='ae_upsample_2'),\n Conv2DTranspose(filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='ae_up_2'\n ),\n Conv2D(filters=1,\n kernel_size=(1, 1),\n # activation='sigmoid',\n name='ae_output'\n ),\n ],\n name='autoencoder'\n )\n\nae.summary()\n",
"Model: \"autoencoder\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nae_input (Conv2D) (None, None, None, 128) 1280 \n_________________________________________________________________\nae_pool_1 (MaxPooling2D) (None, None, None, 128) 0 \n_________________________________________________________________\nae_down_1 (Conv2D) (None, None, None, 64) 32832 \n_________________________________________________________________\nae_pool_2 (MaxPooling2D) (None, None, None, 64) 0 \n_________________________________________________________________\nae_down_2 (Conv2D) (None, None, None, 64) 16448 \n_________________________________________________________________\nae_upsample_1 (UpSampling2D) (None, None, None, 64) 0 \n_________________________________________________________________\nae_up_1 (Conv2DTranspose) (None, None, None, 64) 16448 \n_________________________________________________________________\nae_upsample_2 (UpSampling2D) (None, None, None, 64) 0 \n_________________________________________________________________\nae_up_2 (Conv2DTranspose) (None, None, None, 128) 73856 \n_________________________________________________________________\nae_output (Conv2D) (None, None, None, 1) 129 \n=================================================================\nTotal params: 140,993\nTrainable params: 140,993\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"ae.compile(optimizer=Adam(learning_rate=1.0e-3),\n loss=MeanSquaredError(),\n metrics=[BinaryAccuracy()]\n )\n\nif TRAIN_AE:\n\n history = ae.fit(x=X_train_noise[:50000, :, :, None],\n y=X_train[:50000, :, :, None],\n batch_size=128,\n # epochs=5,\n epochs=50,\n validation_data=(X_train_noise[50000:, :, :, None],\n X_train[50000:, :, :, None]\n ),\n callbacks=[ModelCheckpoint('./autoencoder.h5',\n save_best_only=True\n )\n ]\n )\n\nae.load_weights('./autoencoder.h5')\n",
"Epoch 1/50\n391/391 [==============================] - 15s 37ms/step - loss: 0.0443 - binary_accuracy: 0.8442 - val_loss: 0.0378 - val_binary_accuracy: 0.8402\nEpoch 2/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0337 - binary_accuracy: 0.8457 - val_loss: 0.0311 - val_binary_accuracy: 0.8480\nEpoch 3/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0298 - binary_accuracy: 0.8471 - val_loss: 0.0295 - val_binary_accuracy: 0.8443\nEpoch 4/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0280 - binary_accuracy: 0.8477 - val_loss: 0.0272 - val_binary_accuracy: 0.8495\nEpoch 5/50\n391/391 [==============================] - 14s 35ms/step - loss: 0.0268 - binary_accuracy: 0.8481 - val_loss: 0.0265 - val_binary_accuracy: 0.8489\nEpoch 6/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0261 - binary_accuracy: 0.8484 - val_loss: 0.0261 - val_binary_accuracy: 0.8475\nEpoch 7/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0254 - binary_accuracy: 0.8486 - val_loss: 0.0252 - val_binary_accuracy: 0.8491\nEpoch 8/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0250 - binary_accuracy: 0.8488 - val_loss: 0.0252 - val_binary_accuracy: 0.8480\nEpoch 9/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0246 - binary_accuracy: 0.8489 - val_loss: 0.0252 - val_binary_accuracy: 0.8516\nEpoch 10/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0244 - binary_accuracy: 0.8490 - val_loss: 0.0244 - val_binary_accuracy: 0.8491\nEpoch 11/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0242 - binary_accuracy: 0.8491 - val_loss: 0.0242 - val_binary_accuracy: 0.8504\nEpoch 12/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0239 - binary_accuracy: 0.8491 - val_loss: 0.0249 - val_binary_accuracy: 0.8469\nEpoch 13/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0238 - binary_accuracy: 0.8492 - val_loss: 0.0238 - val_binary_accuracy: 0.8499\nEpoch 14/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0236 - binary_accuracy: 0.8492 - val_loss: 0.0239 - val_binary_accuracy: 0.8484\nEpoch 15/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0235 - binary_accuracy: 0.8493 - val_loss: 0.0236 - val_binary_accuracy: 0.8490\nEpoch 16/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0234 - binary_accuracy: 0.8493 - val_loss: 0.0237 - val_binary_accuracy: 0.8517\nEpoch 17/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0233 - binary_accuracy: 0.8494 - val_loss: 0.0236 - val_binary_accuracy: 0.8485\nEpoch 18/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0231 - binary_accuracy: 0.8494 - val_loss: 0.0235 - val_binary_accuracy: 0.8490\nEpoch 19/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0231 - binary_accuracy: 0.8494 - val_loss: 0.0232 - val_binary_accuracy: 0.8498\nEpoch 20/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0230 - binary_accuracy: 0.8494 - val_loss: 0.0231 - val_binary_accuracy: 0.8505\nEpoch 21/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0229 - binary_accuracy: 0.8495 - val_loss: 0.0231 - val_binary_accuracy: 0.8501\nEpoch 22/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0228 - binary_accuracy: 0.8495 - val_loss: 0.0232 - val_binary_accuracy: 0.8514\nEpoch 23/50\n391/391 
[==============================] - 13s 34ms/step - loss: 0.0228 - binary_accuracy: 0.8495 - val_loss: 0.0230 - val_binary_accuracy: 0.8504\nEpoch 24/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0227 - binary_accuracy: 0.8495 - val_loss: 0.0229 - val_binary_accuracy: 0.8500\nEpoch 25/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0227 - binary_accuracy: 0.8495 - val_loss: 0.0229 - val_binary_accuracy: 0.8506\nEpoch 26/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0226 - binary_accuracy: 0.8495 - val_loss: 0.0229 - val_binary_accuracy: 0.8499\nEpoch 27/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0226 - binary_accuracy: 0.8495 - val_loss: 0.0229 - val_binary_accuracy: 0.8510\nEpoch 28/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0225 - binary_accuracy: 0.8495 - val_loss: 0.0230 - val_binary_accuracy: 0.8517\nEpoch 29/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0225 - binary_accuracy: 0.8496 - val_loss: 0.0228 - val_binary_accuracy: 0.8507\nEpoch 30/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0224 - binary_accuracy: 0.8496 - val_loss: 0.0228 - val_binary_accuracy: 0.8502\nEpoch 31/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0224 - binary_accuracy: 0.8496 - val_loss: 0.0228 - val_binary_accuracy: 0.8502\nEpoch 32/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0224 - binary_accuracy: 0.8496 - val_loss: 0.0228 - val_binary_accuracy: 0.8500\nEpoch 33/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0223 - binary_accuracy: 0.8496 - val_loss: 0.0227 - val_binary_accuracy: 0.8508\nEpoch 34/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0223 - binary_accuracy: 0.8496 - val_loss: 0.0227 - val_binary_accuracy: 0.8506\nEpoch 35/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0222 - binary_accuracy: 0.8496 - val_loss: 0.0226 - val_binary_accuracy: 0.8511\nEpoch 36/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0223 - binary_accuracy: 0.8496 - val_loss: 0.0227 - val_binary_accuracy: 0.8496\nEpoch 37/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0222 - binary_accuracy: 0.8496 - val_loss: 0.0226 - val_binary_accuracy: 0.8500\nEpoch 38/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0222 - binary_accuracy: 0.8496 - val_loss: 0.0226 - val_binary_accuracy: 0.8505\nEpoch 39/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0222 - binary_accuracy: 0.8497 - val_loss: 0.0226 - val_binary_accuracy: 0.8506\nEpoch 40/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0221 - binary_accuracy: 0.8497 - val_loss: 0.0225 - val_binary_accuracy: 0.8510\nEpoch 41/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0221 - binary_accuracy: 0.8497 - val_loss: 0.0225 - val_binary_accuracy: 0.8503\nEpoch 42/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0221 - binary_accuracy: 0.8497 - val_loss: 0.0226 - val_binary_accuracy: 0.8509\nEpoch 43/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0221 - binary_accuracy: 0.8497 - val_loss: 0.0226 - val_binary_accuracy: 0.8501\nEpoch 44/50\n391/391 [==============================] - 14s 35ms/step - loss: 0.0221 - binary_accuracy: 0.8497 - val_loss: 0.0225 - val_binary_accuracy: 0.8512\nEpoch 45/50\n391/391 
[==============================] - 13s 34ms/step - loss: 0.0220 - binary_accuracy: 0.8497 - val_loss: 0.0225 - val_binary_accuracy: 0.8500\nEpoch 46/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0220 - binary_accuracy: 0.8497 - val_loss: 0.0225 - val_binary_accuracy: 0.8511\nEpoch 47/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0220 - binary_accuracy: 0.8497 - val_loss: 0.0227 - val_binary_accuracy: 0.8519\nEpoch 48/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0220 - binary_accuracy: 0.8497 - val_loss: 0.0225 - val_binary_accuracy: 0.8503\nEpoch 49/50\n391/391 [==============================] - 13s 35ms/step - loss: 0.0220 - binary_accuracy: 0.8497 - val_loss: 0.0224 - val_binary_accuracy: 0.8503\nEpoch 50/50\n391/391 [==============================] - 13s 34ms/step - loss: 0.0219 - binary_accuracy: 0.8497 - val_loss: 0.0224 - val_binary_accuracy: 0.8500\n"
]
],
[
[
"In this case training can become quite complicated as shown by the loss function and its metric.",
"_____no_output_____"
]
],
[
[
"if TRAIN_AE:\n\n _, ax = plt.subplots(1, 2, figsize=(12, 5))\n\n sns.lineplot(x=range(1, 50+1),\n y=history.history['loss'],\n ls='-',\n color='tab:blue',\n label='training',\n ax=ax[0]\n )\n sns.lineplot(x=range(1, 50+1),\n y=history.history['val_loss'],\n ls='-',\n color='tab:orange',\n label='validation',\n ax=ax[0]\n )\n ax[0].set_xlabel('epochs', fontsize=18)\n ax[0].set_ylabel('loss', fontsize=18)\n\n sns.lineplot(x=range(1, 50+1),\n y=history.history['binary_accuracy'],\n ls='-',\n color='tab:blue',\n label='training',\n ax=ax[1]\n )\n sns.lineplot(x=range(1, 50+1),\n y=history.history['val_binary_accuracy'],\n ls='-',\n color='tab:orange',\n label='validation',\n ax=ax[1]\n )\n ax[1].set_xlabel('epochs', fontsize=18)\n ax[1].set_ylabel('accuracy', fontsize=18)\n\n plt.tight_layout()\n",
"_____no_output_____"
]
],
[
[
"In this case, the predictions represent the ability of the network to reconstruct the original images. The final effect is a denoise filter, used to render back the unreadable handwritten digits. ",
"_____no_output_____"
]
],
[
[
"X_preds = ae.predict(X_test_noise[:, :, :, None]).squeeze()\n\n_, ax = plt.subplots(3, 5, figsize=(18, 15), sharey=True, sharex=True)\nidx = np.random.choice(X_preds.shape[0], size=5)\n\nfor n, i in enumerate(idx):\n\n sns.heatmap(data=X_test_noise[n],\n cmap='Greys_r',\n cbar=False,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax[0, n]\n )\n\n sns.heatmap(data=X_test[n],\n cmap='Greys_r',\n cbar=False,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax[1, n]\n )\n\n sns.heatmap(data=X_preds[n],\n cmap='Greys_r',\n cbar=False,\n xticklabels=False,\n yticklabels=False,\n square=True,\n ax=ax[2, n]\n )\n\nax[0, 0].set_ylabel('Noisy Data', fontsize=32)\nax[1, 0].set_ylabel('Original Data', fontsize=32)\nax[2, 0].set_ylabel('Denoised Data', fontsize=32)\n\nplt.tight_layout()\n",
"_____no_output_____"
]
],
[
[
"## Full AI for Handwritten Digits\n\nFinally, we use the previous results to build a fully functioning AI, possibly capable of beating human error on the recognition of handwritten digits.",
"_____no_output_____"
]
],
[
[
"K.clear_session()\n\ncv = Model(inputs=ae.input, outputs=cnn(ae.output), trainable=False, name='CV')\n",
"_____no_output_____"
],
[
"y_pred = cv.predict(X_test_noise[:, :, :, None]).squeeze().argmax(axis=-1)\n\nprint(f'Prediction accuracy: {(y_pred == y_test).mean():.2%}')\n",
"Prediction accuracy: 92.22%\n"
]
],
[
[
"As visible, the accuracy reached by the full model is lower than the \"clean\" model, but it is nonetheless quite high. Notice that in no way we are re-training the classification model on the new data, but we are still using the old weights and biases.",
"_____no_output_____"
]
],
[
[
"_, ax = plt.subplots(1, 5, figsize=(30, 5), sharey=True)\nidx = np.random.choice(X_test.shape[0], size=10)\nfor n in range(5):\n\n conc = np.concatenate(\n [X_test_noise[idx[n]], X_test_noise[idx[9-n]]], axis=1)\n\n sns.heatmap(data=conc, cmap='Greys_r', cbar=False,\n xticklabels=False, yticklabels=False,\n square=True, ax=ax[n]\n )\n\n pred = cv.predict(conc[None, :, :, None]).squeeze().argmax(axis=1)\n ax[n].set_title(\n f'target class: {y_test[idx[n]]:d}{y_test[idx[9-n]]:d}', fontsize=32)\n ax[n].set_xlabel(f'predicted class: {pred[0]:d}{pred[-1]:d}', fontsize=32)\n\nplt.tight_layout()\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8e5f62fd2c0a13be7458ae1cfbb4839f4e6e5b | 22,557 | ipynb | Jupyter Notebook | lesson_7.ipynb | sethmott/datasci400 | f1c418c59f505212874732928b1a124cde0b9a74 | [
"MIT"
] | null | null | null | lesson_7.ipynb | sethmott/datasci400 | f1c418c59f505212874732928b1a124cde0b9a74 | [
"MIT"
] | null | null | null | lesson_7.ipynb | sethmott/datasci400 | f1c418c59f505212874732928b1a124cde0b9a74 | [
"MIT"
] | null | null | null | 35.691456 | 788 | 0.628807 | [
[
[
"# Clustering",
"_____no_output_____"
],
[
"In this notebook, we learn about one of the most common un-supervised learning methods: clustering. There isn't a single algorithm for clustering, but the most common one is called **k-means clustering** where $k$ refers to the number of clusters we wish to have. Note that $k$ isn't really something we can learn from the data. It's something we must specify ahead of time, and while there are some guidelines we can use to choose a reasonable value for $k$, ultimately it's a subjective choice. In fact, with un-supervised learning in general, there is a lot of subjectivity involved, making it hard to interpret results.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\ncensus = pd.read_csv('data/adult_train.csv', sep = \",\", header = 0)\ncensus = census.drop(columns = ['fnlwgt', 'age'])\ncensus.head()",
"_____no_output_____"
]
],
[
[
"To make some of the results we generate a little easier to read, we will clean the data in the following way:\n1. We relpace the any hyphen with underscore in the column names.\n1. In the `income` column, we replace `<=` with `lt_` and `>` with `gt_`",
"_____no_output_____"
]
],
[
[
"census.columns = census.columns.str.replace(\"-\", \"_\")\n\ncensus[\"income\"] = census[\"income\"].replace(\"<=\", \"lt_\", regex = True)\ncensus[\"income\"] = census[\"income\"].replace(\">\", \"gt_\", regex = True)\n\ncensus[\"income\"].value_counts()",
"_____no_output_____"
]
],
[
[
"### Exercise\n\nLet's get a list of all the categorical columns in the data:",
"_____no_output_____"
]
],
[
[
"cat_vars = census.select_dtypes('object').columns.to_list()",
"_____no_output_____"
]
],
[
[
"For each of the categorical columns in `cat_vars` do the following:\n\n- Find all the rows that hypens and replace it with underscore.\n- Convert all the strings to lower case.",
"_____no_output_____"
],
[
"Show the top 5 rows of the data to make sure your transformations worked.",
"_____no_output_____"
]
],
[
[
"census[cat_vars].head()",
"_____no_output_____"
]
],
[
[
"### End of exercise",
"_____no_output_____"
],
[
"The k-means clustering algorithm tries to find which rows of the data are similar to each other, where similarity is based having attributes (columns) that are close to each other. To determine closeness, we use **Euclidean distance**. Let's say for the sake of example we have only two columns: `education_num` and `capital_gain`. Let's grab two rows of the data (can be any two rows):",
"_____no_output_____"
]
],
[
[
"which_cols = ['education_num', 'capital_gain']\n\ntwo_rows = census.loc[[0, 197], which_cols]\ntwo_rows",
"_____no_output_____"
]
],
[
[
"The $E_0$ and $C_0$ refer to `education_num` and `capital_gain` at row with index 0, and $E_{197}$ and $C_{197}$ refer to `education_num` and `capital_gain` at row with index 197, the the Euclidean distance between the two rows is given by the following equation: \n\n$$D(0, 197) = \\sqrt{(E_0-E_{197})^2 + (C_0-C_{197})^2}$$\n\nTo calculate this distance, it's probably easiest to convert our `DataFrame` to a `numpy` array first:",
"_____no_output_____"
]
],
[
[
"two_rows = two_rows.values # using values turns DataFrame into numpy array\ntwo_rows",
"_____no_output_____"
]
],
[
[
"### Exercise\n\nUse `numpy` to calculate the Euclidean distance between the two rows:",
"_____no_output_____"
],
[
"Notice that the Euclidian distance is dominated by the difference in the rows in the `captial_gain` column. This is because this column has a much bigger **scale** than `education_num`. So what can we do to make sure both columns can influence the distance equally? We can **normalize** the columns.\n\nUse `numpy` to normalize the columns of the data. We will use **Z-normalization**, which is the following transformation: \n\n$$x_{\\text{norm}} = \\dfrac{x - \\text{mean}(x)}{\\text{std}(x)}$$\n\nWhere the mean and standard deviation are calculated on the **whole data**, not just the two rows above. Find the mean and standard deviations of `education_num` and `capital_gain` for the whole `census` data and use them to normalize the `two_rows`.\n\nHINT: You can calculate the mean using this: `census[which_cols].values.mean(axis = 0)`. By default `axis = 0` but we specify it just to be sure. You can get the standard deviation similarly, using the `std` method.",
"_____no_output_____"
],
[
"Calculate the Euclidean distance of the normalized values of `two_rows`.",
"_____no_output_____"
],
[
"### End of exercise",
"_____no_output_____"
],
[
"If we had more columns we simply add a squared difference for each to the formula, and the `numpy` code to compute the distance stays the same. \n\nLet's now normalize all our numeric columns in one go using `pandas`. First we get a list of the numeric columns:",
"_____no_output_____"
]
],
[
[
"num_cols = census.select_dtypes(['integer', 'float']).columns\nprint(num_cols)",
"_____no_output_____"
]
],
[
[
"Now we use the `apply` method to apply a function to all the numeric columns at once, using `axis = 0` to say that the function applies across rows. We can either create a function ahead of time and pass it to apply, or in our case since the function is quite simple, we simply create it on the fly using the **lambda notation**. The important thing about our function is that it must be a function of a single array (which represents a column in our data), and return an array of the same size. A Z-normalization function matches this description.",
"_____no_output_____"
]
],
[
[
"census_rescaled = census[num_cols]\ncensus_rescaled = census_rescaled.apply(lambda x: (x - x.mean()) / x.std(), axis = 0)\ncensus_rescaled.head()",
"_____no_output_____"
]
],
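[
[
"As a quick aside, the lambda above could equally be replaced by a named function defined ahead of time. The sketch below is only illustrative (the helper name `z_normalize` is made up and not part of the lesson's variables); it assumes `census` and `num_cols` are defined as in the cells above.\n\n```python\ndef z_normalize(col):\n    # col is one numeric column (a pandas Series)\n    return (col - col.mean()) / col.std()\n\n# same result as the lambda version above\ncensus_rescaled_alt = census[num_cols].apply(z_normalize, axis = 0)\ncensus_rescaled_alt.head()\n```",
"_____no_output_____"
]
],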
[
[
"We can create a scatter plot of the `education_num` and `capital_gain`, which we used to illustrate our previous example.",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\n\nsns.scatterplot(x = 'education_num', y = 'capital_gain', data = census_rescaled);",
"_____no_output_____"
]
],
[
[
"At first glance, it's seems hard to say if there are more than 2 clusters from looking at the scatter plot above. So should we pick $k=2$ or $k>2$. The answer is that there is no easy answer. $k=2$ might be a safe choice, but has little practical value (so you're telling me that the world is made up of very rich people and everyone else, thanks! I'm so glad I hired a data scientist...). With $k>2$ we can start making more complex differentiation, but it's hard to know where to draw the line and what sets different groups apart. And here we only have two columns and the luxury of looking at scatter plots, but as the number of features goes this becomes a harder and harder problem. Well this is the curse of un-supervised learning!\n\nSo for now let's start with $k=2$ and use k-means to cluster the data. We use the `KMeans` function which is the Python's machine learning library `sklearn`. A pattern common to almost all of the ML algorithms in `sklearn` is the following:\n\n1. We initialize the algorithm and specify any arguments if need be. In this case, the number of clusters.\n1. We call `fit` and pass it the data. This is when learning happens.\n1. We call `transform` and get predictions back. For k-means the predictions are the cluster labels. Any row will be assigned one of $k$ labels, depending on which cluster they belong to.\n\nThe above three steps are marked in the code below.",
"_____no_output_____"
]
],
[
[
"from sklearn.cluster import KMeans\nn_clusters = 2 # the number of clusters\nwhich_cols = ['education_num', 'capital_gain']\n\nX = census_rescaled#[which_cols]\nkmeans = KMeans(n_clusters = n_clusters, random_state = 0) # step 1: initialize\nkmeans.fit(X) # step 2, learn the clusters\ncensus_rescaled['cluster'] = kmeans.predict(X) # step 3, assign a cluster to each row\ncensus_rescaled.head()",
"_____no_output_____"
]
],
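[
[
"As an aside on choosing $k$: a common heuristic (not used in this lesson) is the **elbow method**, where we fit k-means for a range of values of $k$ and plot the within-cluster sum of squares that `sklearn` exposes as `inertia_`, looking for the point where the curve bends. A minimal sketch, assuming the normalized numeric columns `census_rescaled[num_cols]` from the cells above, might look like this:\n\n```python\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n\ninertias = []\nk_values = range(1, 10)\nfor k in k_values:\n    km = KMeans(n_clusters = k, random_state = 0)\n    km.fit(census_rescaled[num_cols])\n    inertias.append(km.inertia_)  # within-cluster sum of squares\n\nplt.plot(list(k_values), inertias, marker = 'o')\nplt.xlabel('k')\nplt.ylabel('inertia')\nplt.title('Elbow plot');\n```",
"_____no_output_____"
]
],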
[
[
"There is one thing to note about k-means. We said in the beginning that k-means is an un-supervised learning algorithm. This means that the data is not labeled ahead of time with clusters that we need to then learn. This means that there is no learning happening, and when we use k-means to assign each row to a cluster, we have no way to **evaluate** the label assignments and determine if we did a good job. This is why we call it **un-supervised**. However, we can still do something that we usually do with **supervised** learning algorithms: we can predict for any new row of data by assigning a cluster to it. How? We simply assign the new row by normalizing it and assigning it whichever cluster **centroid** it is closest (using Euclidean distance to measure closeness). \n\nHere's how we can find out what the cluster centroids are, but keep in mind that these are centroids based on the **normalized** data, so we have to **un-normalize** it by running the reverse transformation if we want them to be on the same scaled as the original data.",
"_____no_output_____"
]
],
[
[
"kmeans.cluster_centers_",
"_____no_output_____"
]
],
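[
[
"The centroids above live in the **normalized** space. Below is a small sketch of the reverse transformation mentioned earlier (multiply by the standard deviation and add back the mean, both computed on the original `census[num_cols]` data), and of how a new row would be assigned to a cluster. It is only a sketch; it assumes `kmeans` was fit on the four normalized numeric columns, as in the cells above.\n\n```python\ncol_means = census[num_cols].mean()\ncol_stds = census[num_cols].std()\n\n# reverse of the Z-normalization: centroids back on the original scale\ncentroids_original = kmeans.cluster_centers_ * col_stds.values + col_means.values\nprint(centroids_original)\n\n# a new row must be normalized the same way before calling predict\nnew_row = census[num_cols].iloc[[0]]  # pretend this is a brand new person\nnew_row_normalized = (new_row - col_means) / col_stds\nprint(kmeans.predict(new_row_normalized.values))\n```",
"_____no_output_____"
]
],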
[
[
"Let's check how the distribution of cluster assignments:",
"_____no_output_____"
]
],
[
[
"census_rescaled['cluster'].value_counts()",
"_____no_output_____"
]
],
[
[
"It is very important to note that while the cluster assignments are integers, they have no numeric value, meaning that the numbers are just labels. Cluster 1 is not necessarily closer to cluster 2 than it is to cluster 3. In fact, reruning `KMeans` doesn't guarantee that we will retain the same order. So to be safe, we will convert the `cluster` to `category` type.",
"_____no_output_____"
]
],
[
[
"census_rescaled['cluster'] = census_rescaled['cluster'].astype('category')",
"_____no_output_____"
]
],
[
[
"We can redraw our earlier scatter plot and color-code the points by the cluster they belong to.",
"_____no_output_____"
]
],
[
[
"sns.scatterplot(x = 'education_num', y = 'capital_gain', hue = 'cluster', \n data = census_rescaled);",
"_____no_output_____"
]
],
[
[
"### Exercise\n\n- Based on the above scatter plot, which of the two features do you think is more important in determining which cluster a person belongs to?\n- Return to where we called `KMeans` and change the number of clusters now to $k=3$. Report your findings now: What differentiates cluster 1 form 2, 1 from 3, and 2 from 3?\n- Try this again with $k=4$ and report your findings. You can see that as $k$ increases, we have more and more comparisons to make.\n- Let's keep $k=4$, but this time instead of passing all four numeric columns to `KMeans`. Do you notice any changes to the scatter plot above?\n\n### End of exercise",
"_____no_output_____"
],
[
"Of course since we added all four numeric columns to `KMeans`, then we have to look at scatter plots of all possible combinations of those four columns: there are $4 \\choose 2$ (we read that as **4 choose 2**) which is $\\frac{4!}{2!2!} = 6$ possible combinations. There is a very easy way to get all the combinations using the `itertools.combinations` function.",
"_____no_output_____"
]
],
[
[
"from itertools import combinations\nall_pairs = list(combinations(num_cols, 2))\n\nfor pair in all_pairs:\n print(pair)",
"_____no_output_____"
]
],
[
[
"Let's plot the all the possbile scatter plots form the pairs above.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nfig, axs = plt.subplots(nrows = 3, ncols = 2, figsize = (18, 12))\nsns.scatterplot(*all_pairs[0], hue = 'cluster', data = census_rescaled, ax = axs[0, 0], legend = False);\nsns.scatterplot(*all_pairs[1], hue = 'cluster', data = census_rescaled, ax = axs[1, 0], legend = False);\nsns.scatterplot(*all_pairs[2], hue = 'cluster', data = census_rescaled, ax = axs[2, 0], legend = False);\nsns.scatterplot(*all_pairs[3], hue = 'cluster', data = census_rescaled, ax = axs[0, 1], legend = False);\nsns.scatterplot(*all_pairs[4], hue = 'cluster', data = census_rescaled, ax = axs[1, 1], legend = False);\nsns.scatterplot(*all_pairs[5], hue = 'cluster', data = census_rescaled, ax = axs[2, 1], legend = False);",
"_____no_output_____"
]
],
[
[
"### Exercise\n\nLet's say you are now charged with **profiling** the clusters. That means you need to come up with a brief description of each of the 4 clusters. You can rely on the above scatter plots or any other summaries you like. This is not necessarily an easy task, and the point is to show you the challenge of dealing with un-supervised learning algorithms. Imagine how much harder this would have been with a much larger $k$ or with more features in the data!\n\nIn the next notebook, we will use decision trees to help us with this task. Don't worry, there's better ways of doing this than having to look at tons of scatter plots...",
"_____no_output_____"
],
[
"### End of exercise",
"_____no_output_____"
],
[
"Our clusters so far only used the numeric columns in the data, but we also have a lot of categorical columns and we should be using them too, but we run into a problem: As we saw earlier k-means clustering relies on Euclidean distance to measure the similarity between the rows. So how do you measure Euclidean distance when you have categorical data? The answer is you one-hot encode your categorical data. The quick and easy way to do this is using `pd.get_dummies` function. One-hot encoded (binary) features are also called **dummy variables**, which explains why the function is named `get_dummies`.",
"_____no_output_____"
]
],
[
[
"census_onehot = pd.get_dummies(census[cat_vars])\ncensus_onehot.head()",
"_____no_output_____"
]
],
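[
[
"To see why one-hot encoding makes Euclidean distance meaningful for categories, here is a tiny made-up example (a toy `color` column, not part of the census data): two rows with the same category end up at distance 0, while two rows with different categories end up at distance $\\sqrt{2}$.\n\n```python\nimport numpy as np\nimport pandas as pd\n\ntoy = pd.DataFrame({'color': ['red', 'red', 'blue']})\ntoy_onehot = pd.get_dummies(toy).values.astype(float)\n\ndef euclid(a, b):\n    return np.sqrt(((a - b) ** 2).sum())\n\nprint(euclid(toy_onehot[0], toy_onehot[1]))  # same category -> 0.0\nprint(euclid(toy_onehot[0], toy_onehot[2]))  # different categories -> about 1.414\n```",
"_____no_output_____"
]
],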
[
[
"Let's combine our standardized numeric features and our one-hot-encoded categorical features into one data and train `KMeans` on it.",
"_____no_output_____"
]
],
[
[
"census_featurized = pd.concat([census_rescaled, census_onehot], axis = 1)",
"_____no_output_____"
]
],
[
[
"We're going to train with $k=5$ just to use a bigger number.",
"_____no_output_____"
]
],
[
[
"from sklearn.cluster import KMeans\nimport numpy as np\n\nkmeans = KMeans(n_clusters = n_clusters, random_state = 0)\nkmeans.fit(census_featurized)",
"_____no_output_____"
]
],
[
[
"Instead of adding the clusters as a new column to the featurized data, we add them to the original data. This way we can get summaries on the original (non-normalized) numeric features which makes it easier to interpret results.",
"_____no_output_____"
]
],
[
[
"census['cluster'] = kmeans.predict(census_featurized)",
"_____no_output_____"
]
],
[
[
"For example, here's the average of the numeric features grouped by each cluster. It's definitely easier to compare clusters this way than having to look at all those scatter plots.",
"_____no_output_____"
]
],
[
[
"census.groupby('cluster').mean()",
"_____no_output_____"
]
],
[
[
"In addition to comparing averages, it's also important to compare the variability within each cluster. However, if we want to compare variability across features, we need to use standardized features otherwise features on a larger scale will always have more variability.",
"_____no_output_____"
]
],
[
[
"census_rescaled.groupby('cluster').std()",
"_____no_output_____"
]
],
[
[
"How do we check how clusters compare across the categories of our categorical variables? One way is to look at two-way tables. We can use `pd.crosstab` for that.",
"_____no_output_____"
]
],
[
[
"pd.crosstab(census['cluster'], census['workclass'])",
"_____no_output_____"
]
],
[
[
"### Exercise\n\n- How can we make the above information more useful. We can turn the counts into percentages by cluster. Find how you can use the `normalize` argument, to turn the counts into percentages. Note that `normalize` here has nothing to do with Z-normalization we learned earlier.",
"_____no_output_____"
],
[
"- Even more useful would be to display the above table with the percentages as a **heat map**, so that we can quickly compare the distribution of the clusters across different occupations. Turn the above table into a heat map using `seaborn`.",
"_____no_output_____"
],
[
"- Does anything particularly stand out? Can you refine your profile of each of the clusters based on what you see? It might help to also try other categorical features like `education`, `marital_status`, or `income`.\n\n### End of exercise",
"_____no_output_____"
],
[
"There's so much more to say about clustering. Here are two examples:\n\n- We could try to solve the problem of clustering when we have categorical data by defining a distance function that works for categorical data. \n- We could try to find a way to cluster the data **hierarchically**, so that we depend less on a specific choice of $k$. Instead we narrow our choice later by choosing the level of hierarchy we want to stop at. \n\nThe topic of clustering can be its own course!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
ec8ecb99de3eadbc97ad3a47e0d2e215068b55eb | 13,333 | ipynb | Jupyter Notebook | 2021_11_01_weird_psfs/fit_t.ipynb | beckermr/des-y6-analysis | 6a3281130c929cf3e74e370a2fa29174ba2a0d68 | [
"BSD-3-Clause"
] | null | null | null | 2021_11_01_weird_psfs/fit_t.ipynb | beckermr/des-y6-analysis | 6a3281130c929cf3e74e370a2fa29174ba2a0d68 | [
"BSD-3-Clause"
] | null | null | null | 2021_11_01_weird_psfs/fit_t.ipynb | beckermr/des-y6-analysis | 6a3281130c929cf3e74e370a2fa29174ba2a0d68 | [
"BSD-3-Clause"
] | null | null | null | 25.939689 | 126 | 0.486012 | [
[
[
"import ngmix\nimport fitsio\nimport proplot as pplt\nimport numpy as np\nimport piff\nimport galsim\nimport yaml",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"TNAMES = [\"DES0131-3206\", \"DES0137-3749\", \"DES0221-0750\", \"DES0229-0416\"]\nBANDS = [\"g\", \"r\", \"i\", \"z\"]\n\nwith open(\n \"/Users/beckermr/MEDS_DIR/des-pizza-slices-y6-test/pizza_cutter_info/\"\n \"%s_%s_pizza_cutter_info.yaml\" % (TNAMES[1], BANDS[1])\n) as fp:\n yml = yaml.safe_load(fp.read())",
"_____no_output_____"
],
[
"from des_y6utils.piff import (\n measure_star_t_for_piff_model,\n map_star_t_to_grid,\n measure_t_grid_for_piff_model,\n make_good_regions_for_piff_model_gal_grid,\n make_good_regions_for_piff_model_star_and_gal_grid,\n nanmad,\n)",
"_____no_output_____"
],
[
"# find a bad model\nfor sind, src in enumerate(yml[\"src_info\"]):\n print(sind)\n piff_mod = piff.read(src[\"piff_path\"])\n res = make_good_regions_for_piff_model(piff_mod, piff_kwargs={\"GI_COLOR\": 0.61}, seed=11, verbose=False)\n \n b = res[\"bbox\"]\n flags = res[\"flags\"]\n if flags == 0:\n bf = np.mean(res[\"bad\"])\n else:\n bf = 1.0\n if bf > 1/512:\n break",
"_____no_output_____"
],
[
"sind = 40\n\nsrc = yml[\"src_info\"][sind]\npiff_mod = piff.read(src[\"piff_path\"])\n\nimg = (\n fitsio.read(src[\"image_path\"], ext=src[\"image_ext\"])\n - fitsio.read(src[\"bkg_path\"], ext=src[\"bkg_ext\"])\n)\nwgt = fitsio.read(src[\"weight_path\"], ext=src[\"weight_ext\"])",
"_____no_output_____"
],
[
"thresh = 5\nt_arr = measure_t_grid_for_piff_model(piff_mod, {\"GI_COLOR\": 0.61})\ndata = measure_star_t_for_piff_model(piff_mod, img, wgt, piff_prop=\"GI_COLOR\")\nts_arr = map_star_t_to_grid(data)\n\nres_g = make_good_regions_for_piff_model_gal_grid(piff_mod, piff_kwargs={\"GI_COLOR\": 0.61}, seed=11, verbose=False)\nres_sg = make_good_regions_for_piff_model_star_and_gal_grid(\n piff_mod, img, wgt, piff_kwargs={\"GI_COLOR\": 0.61}, seed=11, verbose=False,\n flag_bad_thresh=2, any_bad_thresh=5,\n)",
"_____no_output_____"
],
[
"vmin = min(np.nanmin(ts_arr), np.nanmin(t_arr))\nvmax = max(np.nanmax(ts_arr), np.nanmax(t_arr))\n\nfig, axs = pplt.subplots(ncols=4, share=0)\nh0 = axs[0].imshow(ts_arr, vmin=vmin, vmax=vmax, cmap=\"rocket\")\naxs[0].grid(False)\naxs[0].set_title(\"stars on grid w/ 2d poly\")\naxs[0].colorbar(h0, loc='l')\n\naxs[1].imshow(t_arr, vmin=vmin, vmax=vmax, cmap=\"rocket\")\naxs[1].grid(False)\naxs[1].set_title(\"grid of shapes at gal color\")\n\nb = res_g[\"bbox\"]\naxs[1].plot([b[\"xmin\"]/128, b[\"xmin\"]/128], [b[\"ymin\"]/128, b[\"ymax\"]/128 - 1], color=\"red\")\naxs[1].plot([b[\"xmax\"]/128 - 1, b[\"xmax\"]/128 - 1], [b[\"ymin\"]/128, b[\"ymax\"]/128 - 1], color=\"red\")\naxs[1].plot([b[\"xmin\"]/128, b[\"xmax\"]/128 - 1], [b[\"ymin\"]/128, b[\"ymin\"]/128], color=\"red\")\naxs[1].plot([b[\"xmin\"]/128, b[\"xmax\"]/128 - 1], [b[\"ymax\"]/128 - 1, b[\"ymax\"]/128 - 1], color=\"red\")\n\nb = res_sg[\"bbox\"]\naxs[1].plot([b[\"xmin\"]/128, b[\"xmin\"]/128], [b[\"ymin\"]/128, b[\"ymax\"]/128 - 1], color=\"blue\")\naxs[1].plot([b[\"xmax\"]/128 - 1, b[\"xmax\"]/128 - 1], [b[\"ymin\"]/128, b[\"ymax\"]/128 - 1], color=\"blue\")\naxs[1].plot([b[\"xmin\"]/128, b[\"xmax\"]/128 - 1], [b[\"ymin\"]/128, b[\"ymin\"]/128], color=\"blue\")\naxs[1].plot([b[\"xmin\"]/128, b[\"xmax\"]/128 - 1], [b[\"ymax\"]/128 - 1, b[\"ymax\"]/128 - 1], color=\"blue\")\n\naxs[2].imshow(res_sg[\"bad_msk\"], cmap=\"rocket\")\n\nharr = (t_arr-ts_arr).ravel()\nstd5 = nanmad(harr) * 2\nh = axs[3].hist(harr, bins=50)\naxs[3].vlines([-std5, np.nanmedian(harr)+std5], 0, np.max(h[0]), color=\"k\")\naxs[3].set_xlabel(\"gal T - star T\")",
"_____no_output_____"
],
[
"np.mean(res_sg[\"bad_msk\"])",
"_____no_output_____"
],
[
"import ngmix\n\ndef get_star_stamp_pos(s, img, wgt):\n xint = int(np.floor(s.x - 1 + 0.5))\n yint = int(np.floor(s.y - 1 + 0.5))\n bbox = 17\n bbox_2 = (bbox - 1)//2\n \n return dict(\n img=img[yint-bbox_2: yint+bbox_2+1, xint-bbox_2: xint+bbox_2+1],\n wgt=wgt[yint-bbox_2: yint+bbox_2+1, xint-bbox_2: xint+bbox_2+1],\n xstart=xint-bbox_2, \n ystart=yint-bbox_2,\n dim=bbox,\n x=s.x - 1,\n y=s.y - 1,\n )\n\ndef get_star_piff_obs(piff_mod, s, img, wgt):\n \n sres = get_star_stamp_pos(s, img, wgt)\n \n xv = sres[\"x\"]+1\n yv = sres[\"y\"]+1\n wcs = list(piff_mod.wcs.values())[0].local(\n image_pos=galsim.PositionD(x=xv, y=yv)\n ).jacobian()\n img = galsim.ImageD(sres[\"dim\"], sres[\"dim\"], wcs=wcs)\n cen = (\n sres[\"x\"] - sres[\"xstart\"] + 1,\n sres[\"y\"] - sres[\"ystart\"] + 1,\n )\n img = piff_mod.draw(\n x=xv, y=yv, chipnum=list(piff_mod.wcs.keys())[0],\n GI_COLOR=s.data.properties[\"GI_COLOR\"],\n image=img, center=cen,\n )\n model_obs = ngmix.Observation(\n image=img.array,\n jacobian=ngmix.Jacobian(\n y=cen[1]-1,\n x=cen[0]-1,\n wcs=wcs,\n )\n )\n star_obs = ngmix.Observation(\n image=sres[\"img\"],\n weight=sres[\"wgt\"],\n jacobian=ngmix.Jacobian(\n y=cen[1]-1,\n x=cen[0]-1,\n wcs=wcs,\n )\n )\n return model_obs, star_obs, sres",
"_____no_output_____"
],
[
"\nx = []\ny = []\nt = []\n\nfor s in piff_mod.stars:\n\n mobs, sobs, sres = get_star_piff_obs(piff_mod, s, img, wgt)\n \n res = ngmix.admom.AdmomFitter(\n rng=np.random.RandomState(seed=10)\n ).go(mobs, ngmix.moments.fwhm_to_T(1))\n t.append(res[\"T\"])\n x.append(sres[\"x\"])\n y.append(sres[\"y\"])\n ",
"_____no_output_____"
],
[
"from sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LinearRegression\n\ndegree = 2\npolyreg = make_pipeline(PolynomialFeatures(degree), LinearRegression())\npolyreg.fit(np.array([x, y]).T, np.array(t))",
"_____no_output_____"
],
[
"y, x = np.mgrid[0:4096:128, 0:2048:128] + 64\ntg = polyreg.predict(np.array([x.ravel(), y.ravel()]).T)",
"_____no_output_____"
],
[
"tg = tg.reshape(x.shape)",
"_____no_output_____"
],
[
"fig, axs = pplt.subplots(ncols=2)\naxs[0].imshow(tg)\naxs[1].imshow(res[\"t_arr\"])",
"_____no_output_____"
],
[
"def _nanmad(x, axis=None):\n \"\"\"\n median absolute deviation - scaled like a standard deviation\n\n mad = 1.4826*median(|x-median(x)|)\n\n Parameters\n ----------\n x: array-like\n array to take MAD of\n axis : {int, sequence of int, None}, optional\n `axis` keyword for\n\n Returns\n -------\n mad: float\n MAD of array x\n \"\"\"\n return 1.4826*np.nanmedian(np.abs(x - np.nanmedian(x, axis=axis)), axis=axis)\n",
"_____no_output_____"
],
[
"print(_nanmad(t), _nanmad(tg))",
"_____no_output_____"
],
[
"from des_y6utils.piff import make_good_regions_for_piff_model",
"_____no_output_____"
],
[
"res = make_good_regions_for_piff_model(piff_mod, piff_kwargs={\"GI_COLOR\": 0.61}, seed=10, verbose=False)",
"_____no_output_____"
],
[
"res[\"t_std\"]",
"_____no_output_____"
],
[
"fig, axs = pplt.subplots()\n\naxs.hist((res[\"t_arr\"] - tg).ravel(), bins=50)",
"_____no_output_____"
],
[
"np.std((res[\"t_arr\"] - tg).ravel()) * 5",
"_____no_output_____"
],
[
"np.max(np.abs(np.max(t) - np.median(t)))",
"_____no_output_____"
],
[
"_nanmad(t)",
"_____no_output_____"
],
[
"g = galsim.Gaussian(fwhm=0.5).dilate(1.1)",
"_____no_output_____"
],
[
"g",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8ed0cf15c46cf6263c98984ffa849f5e7b86f5 | 44,759 | ipynb | Jupyter Notebook | lectures/L5/Exercise_1-final.ipynb | xuwd11/cs207_Weidong_Xu | 00442657239c7a4040501bf7fa0f6697c731fe94 | [
"MIT"
] | null | null | null | lectures/L5/Exercise_1-final.ipynb | xuwd11/cs207_Weidong_Xu | 00442657239c7a4040501bf7fa0f6697c731fe94 | [
"MIT"
] | null | null | null | lectures/L5/Exercise_1-final.ipynb | xuwd11/cs207_Weidong_Xu | 00442657239c7a4040501bf7fa0f6697c731fe94 | [
"MIT"
] | null | null | null | 399.633929 | 31,030 | 0.928551 | [
[
[
"# Exercise 1\nThis exercise requires you to plot a few functions in Python.\n1. Plot any three functions of your choosing of the form $y=f(x)$. $x$ should be an array of equally spaced numbers generated using the `numpy` linspace command (see [`numpy.linspace()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)). Be sure to include a legend and label the $x$ and $y$ axes.\n2. Use `numpy` to draw $N$ samples (you choose $N$) from a probability distribution of your choosing. See [Random sampling](https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.random.html) for a bunch of `numpy` functions that draw random samples for you. See [Statistical functions](https://docs.scipy.org/doc/scipy/reference/stats.html) for an alternative to `numpy`. Plot a histogram of the data you generated.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# Q1\ndef f1(x):\n return x/3\n\ndef f2(x):\n return np.sin(x)\n\ndef f3(x):\n return np.cos(x)\n\nx = np.linspace(-np.pi, np.pi)\nplt.plot(x, f1(x), label='$y=x/3$');\nplt.plot(x, f2(x), label='$y=sin(x)$');\nplt.plot(x, f3(x), label='$y=cos(x)$');\nplt.xlabel('$x$');\nplt.ylabel('$y$');\nplt.legend();\nplt.show()",
"_____no_output_____"
],
[
"# Q2\nN = 100\nsamples = np.random.poisson(5, 10000)\ncount, bins, ignored = plt.hist(samples, 15, normed=True)\nplt.xlabel('$x$');\nplt.ylabel('Probability');\nplt.title('Histogram of a Poisson distribution ($\\lambda = 5$)');",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ec8eddc4b14623d09c34df8f53af5705f49c0360 | 9,557 | ipynb | Jupyter Notebook | rapport/_build/html/_sources/notebooks/5- Enseignements.ipynb | Fabien-DS/DSA_Sentiment | 27c68909d87aea9ec034792a376f4cd9be10feff | [
"RSA-MD"
] | 1 | 2021-05-08T16:32:01.000Z | 2021-05-08T16:32:01.000Z | rapport/notebooks/5- Enseignements.ipynb | Fabien-DS/DSA_Sentiment | 27c68909d87aea9ec034792a376f4cd9be10feff | [
"RSA-MD"
] | null | null | null | rapport/notebooks/5- Enseignements.ipynb | Fabien-DS/DSA_Sentiment | 27c68909d87aea9ec034792a376f4cd9be10feff | [
"RSA-MD"
] | null | null | null | 37.042636 | 516 | 0.496704 | [
[
[
"-------------------------------------------------------------------\n**TD DSA 2021 de Antoine Ly - rapport de Fabien Faivre**\n------------------------- -------------------------------------",
"_____no_output_____"
],
[
"# Enseignements et pistes d'amélioration",
"_____no_output_____"
],
[
"Plusieurs modèle ont été testés. Le modèle champion est un XGBoost optimisé s'appuyant sur une combinaison de features créées à partir de modèles pré entrainé dont roBERTa tweet.\nCe modèle permet d'atteindre un f1 macro de **76%** sur le jeu test",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"res_fin3 = pd.read_parquet('/mnt/data/processed/res_fin3.gzip')\nres_fin3",
"_____no_output_____"
]
],
[
[
"Ce projet a été une constant source d'étonnement.\n\nLe fait de disposer de 3 classes à prédire a été un élément complexifiant par rapport au cas binaire. L'absence de courbe ROC nous rend beaucoup plus dépendant des chiffres.\n\nAprès la découverte de [twitter-roberta-base-sentiment](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment), je pensais que le sujet serait plié. Après tout ce modèle a été entrainé explicitement pour ce cas (58 millions de tweets, en anglais et optimisé pour l'analyse de sentiments). Je pensais voir le f1 macro s'envoler, ce qui n'a pas été le cas. Le modèle a bien aidé, mais le gain est resté modeste (6 points de f1 macro par rapport aux approches fréquentistes classiques).\n\nAu final en analysant les fausses prédictions, on réalise que la labelisation de plusieurs tweets laisse songeur.\nCeci met en lumière le fait que l'appréciation de la tonalité n'est pas toujours évidente et que des erreurs humaines peuvent en plus se glisser.\nSi ce phénomène existe déjà pour les catégories extrèmes (`positif` et `négatif`) on imagine la sensibilité pour la classe générique `neutre`...\n\nPar ailleurs rien n'indique que la stratégie de labellisation utilisé dans ce cas corresponde à celle utilisée pour le pré entrainement de roBERTa tweet.\n\nAprès un acquis de conscience, un entrainement réel d'un modèle de deep learning s'appuyant sur les modèles préentrainées `BERT`, `DistilBERT` et surtout `RoBERTa` en en réentrainant la dernière couche pour le sujet étudié a de loin présenté le meilleur gain (+3,4% de f1_macro par rapport au modèle RoBERTa utilisé directement en entrée d'un modèle classique). Ce gain est vraisemblablement à mettre au crédit de la dimention plus élevée de l'avant dernière couche et à la labellisation spéciale du projet.\n\nDeux pistes auraient pu être explorées pour améliorer la performance :\n- potentiellement modéliser le sujet discuté dans les tweets et le rajouter comme feature. On avait en effet vu que les tweets positifs par exemple se rapportaient principalement à la fête des mère et au `star wars day`\n- effectivement utiliser un modèle ensembliste y compris avec `RoBERTa` rendu impossible ici du fait du temps d'exécution et de la limiattion du matériel utilisé.\n\nEnfin ce sujet a été l'occasion de se frotter à plusieurs difficultés techniques liées principalement :\n- à l'utilisation de ressources GPU depuis docker\n- à l'utilisation des GPU pour XGBoost (non pris en compte par défaut)\n- aux pipelines sklearn, pratiques mais pas toujours compatibles avec les packages (ex SHAP) et nécessitant souvent des créations de classes ad-hoc\n- une première confrontation réelle avec les modèles de Deep Learning.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
ec8ede4875c4a97d964a48b52076e24f35620f01 | 22,601 | ipynb | Jupyter Notebook | thesis_omniglot20w5s.ipynb | wngud0811/KJH_graduation | 66c995deb274df38d6fac64d8d59dbc333127975 | [
"MIT"
] | null | null | null | thesis_omniglot20w5s.ipynb | wngud0811/KJH_graduation | 66c995deb274df38d6fac64d8d59dbc333127975 | [
"MIT"
] | null | null | null | thesis_omniglot20w5s.ipynb | wngud0811/KJH_graduation | 66c995deb274df38d6fac64d8d59dbc333127975 | [
"MIT"
] | null | null | null | 47.183716 | 156 | 0.534091 | [
[
[
"import numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.init as init\nimport torch.nn.functional as F\nimport torch.utils as utils\nimport torch.utils.data\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.transforms.functional as TF\nimport torchvision.datasets as dsets\nfrom torchvision import models\nfrom torchvision.utils import save_image\nfrom torch.utils.tensorboard import SummaryWriter\n\nwriter = SummaryWriter('runs/thesis06_20w_1s')\n\n\nfrom tqdm import tqdm\n\nfrom flows import PlanarFlow\nfrom utils import Binarize\nfrom codes import Linear_flipout, Flatten, count_parameters, EfficientNet\n\nfrom torchmeta.datasets import Omniglot, CIFARFS\nfrom torchmeta.transforms import Categorical, ClassSplitter, Rotation\nfrom torchvision.transforms import Compose, Resize, ToTensor\nfrom torchmeta.utils.data import BatchMetaDataLoader\n\n\n#from __future__ import print_function\nimport argparse\nimport cv2\nimport matplotlib.pyplot as plt\n\nimport os\ncur_dir = \"C:/Users/KJH/OneDrive - skku.edu/KJH/Projects/2019winter_research\"\n#cur_dir = \"C:/Users/KJH-Laptop/OneDrive - skku.edu/KJH/Projects/2019winter_research/\"\nos.chdir(cur_dir)\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nos.environ[\"CUDA_LAUNCH_BLOCKING\"] = \"1\"\n\nimport time\nimport copy\nimport random as rd\n\ndevice = torch.device('cuda')\n\nclass net(nn.Module):\n def __init__(self, num_classes):\n super(net, self).__init__()\n self.input_dim = [1, 28, 28]\n self.num_classes = num_classes\n \n self.ctx = torch.hub.load('rwightman/gen-efficientnet-pytorch', 'efficientnet_b0', pretrained=True)\n self.ctx.conv_stem = nn.Conv2d(1, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n self.ctx.classifier = nn.Sequential(\n nn.BatchNorm1d(1280),\n nn.Linear(1280, 320, bias = True),\n nn.ReLU()\n )\n \n self.layer = nn.LSTM(input_size=1280, hidden_size=1280, num_layers=5, bias=True, batch_first = True).to(device)\n self.dec_mu = nn.ModuleList()\n self.dec_logvar = nn.ModuleList()\n \n self.num_params = [[200 * 200, 200], [200 * 200, 200], [200 * self.num_classes, self.num_classes]]\n \n for layer_size in self.num_params:\n self.dec_mu.append(\n nn.Sequential(\n nn.ELU(),\n nn.Linear(1280, 1280, bias = True),\n nn.ELU(),\n nn.Linear(1280, layer_size[0] + layer_size[1], bias = True),\n ).to(device))\n \n self.dec_logvar.append(\n nn.Sequential(\n nn.ELU(),\n nn.Linear(1280, 1280, bias = True),\n nn.ELU(),\n nn.Linear(1280, layer_size[0] + layer_size[1], bias = True),\n ).to(device))\n \n self.encoder = nn.Sequential(\n nn.BatchNorm2d(1),\n Flatten(), \n nn.Linear(784, 200, bias = False),\n nn.ELU()\n )\n\n \n \n def forward(self, input_train, label_train, input_test, label_test, adapt_lr, adapt_step = 1):\n ctx = self.ctx(input_train).view(input_train.shape[0], 5, -1)\n h = torch.stack([ctx[torch.where(label_train == x)].mean(dim = 0) for x in range(self.num_classes)], dim = 1)\n h = h.view(5, 1, -1)\n \n params = nn.ParameterList()\n param_vals = []\n kld = torch.tensor(0., device=device)\n c = torch.empty([5, 1, 1280], requires_grad = False, device = device).fill_(0)\n x_test_init = self.encoder(input_test)\n x_test = x_test_init\n\n for ind, (dec_mu, dec_logvar) in enumerate(zip(self.dec_mu, self.dec_logvar)):\n x = torch.empty([1, 1, 1280], requires_grad=False, device = device).normal_(0, 1)\n f, (h, c) = self.layer(x, (h, c))\n \n mu = dec_mu(f.view(1, -1)).squeeze()\n logvar = dec_logvar(f.view(1, 
-1)).squeeze()\n params.append(nn.Parameter(torch.stack((mu, logvar), dim = 0), requires_grad = True))\n \n optimizer = optim.SGD(params, lr = adapt_lr)\n \n for step in range(adapt_step + 1):\n x_test = x_test_init\n kld = torch.tensor(0., device=device)\n for ind, param in enumerate(params):\n param.retain_grad()\n\n weight_mu = param[0, :self.num_params[ind][0]].view(-1, self.num_params[ind][1])\n bias_mu = param[0, self.num_params[ind][0]:]\n\n weight_logvar = param[1, :self.num_params[ind][0]].view(-1, self.num_params[ind][1])\n bias_logvar = param[1, self.num_params[ind][0]:]\n\n weight_noise = torch.empty(weight_mu.shape, requires_grad = False, device = device).normal_(0,1)\n bias_noise = torch.empty(bias_mu.shape, requires_grad = False, device = device).normal_(0,1)\n in_sign = torch.empty(x_test.shape, requires_grad = False, device = device).uniform_(-1,1).sign()\n out_sign = torch.empty([x_test.shape[0], self.num_params[ind][1]], requires_grad = False, device = device).uniform_(-1,1).sign()\n\n x_test = torch.mm(x_test, weight_mu) + torch.mm(in_sign * x_test, weight_noise * weight_mu * weight_logvar.div(2).exp()) * out_sign\n x_test += (1 + bias_noise * bias_logvar.div(2).exp()) * bias_mu\n x_test = F.elu(x_test)\n\n kld += (mu.pow(2) - logvar + logvar.exp() - 1).mean()/2\n \n if step < adapt_step:\n optimizer.zero_grad()\n loss = F.cross_entropy(x_test, label_test) + 1e-6 * kld\n loss.backward(retain_graph = True)\n optimizer.step()\n\n return x_test, kld",
"_____no_output_____"
],
[
"batch_size = 16\nmeta_trainset = Omniglot('./data/',\n # Number of ways\n num_classes_per_task=20,\n # Resize the images to 28x28 and converts them to PyTorch tensors (from Torchvision)\n transform=Compose([Resize(28), ToTensor()]),\n # Transform the labels to integers (e.g. (\"Glagolitic/character01\", \"Sanskrit/character14\", ...) to (0, 1, ...))\n target_transform=Categorical(num_classes=20),\n # Creates new virtual classes with rotated versions of the images (from Santoro et al., 2016)\n class_augmentations=[Rotation([90, 180, 270])],\n meta_train=True,\n download=True)\nmeta_trainset = ClassSplitter(meta_trainset, shuffle=True, num_train_per_class=5, num_test_per_class=15)\nmeta_trainloader = BatchMetaDataLoader(meta_trainset, batch_size=batch_size, num_workers=0)\n\nmodel = net(20).cuda()\noptimizer = optim.Adam(model.parameters(), lr=1e-2)\ncriterion = nn.CrossEntropyLoss()\n\nnum_batches = 200\nfor batch_idx, meta_train_batch in zip(range(num_batches), meta_trainloader):\n start = time.time()\n\n train_inputs, train_targets = [x.to(device) for x in meta_train_batch[\"train\"]]\n test_inputs, test_targets = [x.to(device) for x in meta_train_batch[\"test\"]]\n \n cum_loss = torch.tensor(0., device=device)\n accuracy = torch.tensor(0., device=device)\n reg = torch.tensor(0., device=device)\n\n for task_idx, (train_input, train_target, test_input, test_target) in enumerate(\n zip(train_inputs, train_targets, test_inputs, test_targets)):\n optimizer.zero_grad()\n pred, kld = model(train_input, train_target, test_input, test_target, 0.5, 5)\n loss = criterion(pred, test_target)\n (loss + 1e-6 * kld).backward()\n optimizer.step()\n with torch.no_grad():\n cum_loss += loss\n accuracy += torch.sum(pred.argmax(1) == test_target.cuda())\n reg += kld\n cum_loss /= batch_size\n accuracy /= batch_size * 20 * 15\n reg /= batch_size\n\n if batch_idx % 10 == 0:\n print(\"%3d) loss = %f, kld = %f, acc = %f, time = %.3f sec\" %(batch_idx, cum_loss, reg, accuracy, time.time() - start))",
"Using cache found in C:\\Users\\KJH/.cache\\torch\\hub\\rwightman_gen-efficientnet-pytorch_master\n"
],
[
"optimizer = optim.Adam(model.parameters(), lr=1e-4)\n\nnum_batches = 200\nfor batch_idx, meta_train_batch in zip(range(num_batches), meta_trainloader):\n start = time.time()\n\n train_inputs, train_targets = [x.to(device) for x in meta_train_batch[\"train\"]]\n test_inputs, test_targets = [x.to(device) for x in meta_train_batch[\"test\"]]\n \n cum_loss = torch.tensor(0., device=device)\n accuracy = torch.tensor(0., device=device)\n reg = torch.tensor(0., device=device)\n\n for task_idx, (train_input, train_target, test_input, test_target) in enumerate(\n zip(train_inputs, train_targets, test_inputs, test_targets)):\n optimizer.zero_grad()\n pred, kld = model(train_input, train_target, test_input, test_target, 0.5, 5)\n loss = criterion(pred, test_target)\n (loss + 1e-6 * kld).backward()\n optimizer.step()\n with torch.no_grad():\n cum_loss += loss\n accuracy += torch.sum(pred.argmax(1) == test_target.cuda())\n reg += kld\n cum_loss /= batch_size\n accuracy /= batch_size * 20 * 15\n reg /= batch_size\n\n if batch_idx % 10 == 0:\n print(\"%3d) loss = %f, kld = %f, acc = %f, time = %.3f sec\" %(batch_idx, cum_loss, reg, accuracy, time.time() - start))",
" 0) loss = 2.531750, kld = 0.000010, acc = 0.418125, time = 29.869 sec\n 10) loss = 2.003087, kld = 0.000010, acc = 0.461875, time = 30.248 sec\n 20) loss = 1.943420, kld = 0.000010, acc = 0.492083, time = 30.127 sec\n 30) loss = 1.942439, kld = 0.000010, acc = 0.503750, time = 30.166 sec\n 40) loss = 1.869648, kld = 0.000010, acc = 0.517083, time = 30.000 sec\n 50) loss = 1.982632, kld = 0.000010, acc = 0.490833, time = 30.098 sec\n 60) loss = 1.494840, kld = 0.000010, acc = 0.585417, time = 30.877 sec\n 70) loss = 1.660182, kld = 0.000010, acc = 0.551667, time = 31.406 sec\n 80) loss = 1.659267, kld = 0.000010, acc = 0.568750, time = 30.541 sec\n 90) loss = 1.527660, kld = 0.000010, acc = 0.585417, time = 31.039 sec\n100) loss = 1.698551, kld = 0.000010, acc = 0.542917, time = 31.140 sec\n110) loss = 1.744369, kld = 0.000010, acc = 0.541458, time = 31.131 sec\n120) loss = 2.010949, kld = 0.000010, acc = 0.481458, time = 30.920 sec\n130) loss = 1.537902, kld = 0.000010, acc = 0.582292, time = 31.205 sec\n140) loss = 1.745841, kld = 0.000010, acc = 0.548542, time = 31.026 sec\n150) loss = 1.726605, kld = 0.000010, acc = 0.540833, time = 30.433 sec\n160) loss = 1.524956, kld = 0.000010, acc = 0.576458, time = 31.398 sec\n170) loss = 1.610029, kld = 0.000010, acc = 0.560208, time = 31.050 sec\n180) loss = 1.643855, kld = 0.000010, acc = 0.565417, time = 31.217 sec\n190) loss = 1.718126, kld = 0.000010, acc = 0.538542, time = 30.368 sec\n"
],
[
"optimizer = optim.Adam(model.parameters(), lr=1e-6)\n\nnum_batches = 200\nfor batch_idx, meta_train_batch in zip(range(num_batches), meta_trainloader):\n start = time.time()\n\n train_inputs, train_targets = [x.to(device) for x in meta_train_batch[\"train\"]]\n test_inputs, test_targets = [x.to(device) for x in meta_train_batch[\"test\"]]\n \n cum_loss = torch.tensor(0., device=device)\n accuracy = torch.tensor(0., device=device)\n reg = torch.tensor(0., device=device)\n\n for task_idx, (train_input, train_target, test_input, test_target) in enumerate(\n zip(train_inputs, train_targets, test_inputs, test_targets)):\n optimizer.zero_grad()\n pred, kld = model(train_input, train_target, test_input, test_target, 0.5, 5)\n loss = criterion(pred, test_target)\n (loss + 1e-6 * kld).backward()\n optimizer.step()\n with torch.no_grad():\n cum_loss += loss\n accuracy += torch.sum(pred.argmax(1) == test_target.cuda())\n reg += kld\n cum_loss /= batch_size\n accuracy /= batch_size * 20 * 15\n reg /= batch_size\n\n if batch_idx % 10 == 0:\n print(\"%3d) loss = %f, kld = %f, acc = %f, time = %.3f sec\" %(batch_idx, cum_loss, reg, accuracy, time.time() - start))",
" 0) loss = 1.651697, kld = 0.000010, acc = 0.552083, time = 30.315 sec\n 10) loss = 1.846209, kld = 0.000010, acc = 0.525417, time = 31.014 sec\n 20) loss = 1.680036, kld = 0.000010, acc = 0.565625, time = 31.535 sec\n 30) loss = 1.593975, kld = 0.000010, acc = 0.561667, time = 31.352 sec\n 40) loss = 1.688189, kld = 0.000010, acc = 0.541875, time = 30.849 sec\n 50) loss = 1.697957, kld = 0.000010, acc = 0.551458, time = 30.337 sec\n 60) loss = 1.623374, kld = 0.000010, acc = 0.561667, time = 30.434 sec\n 70) loss = 1.761743, kld = 0.000010, acc = 0.541875, time = 30.296 sec\n 80) loss = 1.679443, kld = 0.000010, acc = 0.565000, time = 30.201 sec\n 90) loss = 1.729807, kld = 0.000010, acc = 0.535625, time = 30.275 sec\n100) loss = 1.851496, kld = 0.000010, acc = 0.495625, time = 30.841 sec\n110) loss = 1.534651, kld = 0.000010, acc = 0.571042, time = 30.097 sec\n120) loss = 1.737582, kld = 0.000010, acc = 0.535833, time = 29.730 sec\n130) loss = 1.667550, kld = 0.000010, acc = 0.544375, time = 30.140 sec\n140) loss = 1.800410, kld = 0.000010, acc = 0.534792, time = 30.761 sec\n150) loss = 1.648618, kld = 0.000010, acc = 0.563333, time = 30.585 sec\n160) loss = 1.729128, kld = 0.000010, acc = 0.532917, time = 30.504 sec\n170) loss = 1.986174, kld = 0.000010, acc = 0.511667, time = 30.743 sec\n180) loss = 1.772293, kld = 0.000010, acc = 0.533542, time = 30.736 sec\n190) loss = 1.767555, kld = 0.000010, acc = 0.546458, time = 30.822 sec\n"
],
[
"meta_testset = Omniglot('./data/',\n # Number of ways\n num_classes_per_task=20,\n # Resize the images to 28x28 and converts them to PyTorch tensors (from Torchvision)\n transform=Compose([Resize(28), ToTensor()]),\n # Transform the labels to integers (e.g. (\"Glagolitic/character01\", \"Sanskrit/character14\", ...) to (0, 1, ...))\n target_transform=Categorical(num_classes=20),\n # Creates new virtual classes with rotated versions of the images (from Santoro et al., 2016)\n class_augmentations=[Rotation([90, 180, 270])],\n meta_test=True,\n download=True)\nmeta_testset = ClassSplitter(meta_testset, shuffle=True, num_train_per_class=5, num_test_per_class=15)\nmeta_testloader = BatchMetaDataLoader(meta_testset, batch_size=batch_size, num_workers=0)\n\ntot_loss = torch.tensor(0., device=device)\ntot_acc = torch.tensor(0., device=device)\ntot_reg = torch.tensor(0., device=device)\n\nfor batch_idx, meta_test_batch in zip(range(num_batches), meta_testloader):\n start = time.time()\n train_inputs, train_targets = [x.to(device) for x in meta_test_batch[\"train\"]]\n test_inputs, test_targets = [x.to(device) for x in meta_test_batch[\"test\"]]\n \n cum_loss = torch.tensor(0., device=device)\n accuracy = torch.tensor(0., device=device)\n reg = torch.tensor(0., device=device)\n\n for task_idx, (train_input, train_target, test_input, test_target) in enumerate(\n zip(train_inputs, train_targets, test_inputs, test_targets)):\n optimizer.zero_grad()\n pred, kld = model(train_input, train_target, test_input, test_target, 0.5, 5)\n with torch.no_grad(): \n loss = criterion(pred, test_target)\n cum_loss += loss\n accuracy += torch.sum(pred.argmax(1) == test_target.cuda())\n reg += kld\n\n tot_loss += cum_loss / batch_size\n tot_acc += accuracy / (batch_size * 20 * 15)\n tot_reg += reg / batch_size\n \ntot_loss /= num_batches\ntot_acc /= num_batches\ntot_reg /= num_batches\n \nprint(\"loss = %f, test_kld = %f, meta_test_acc = %f, time = %.3f sec\" %(tot_loss, tot_reg, tot_acc, time.time() - start))",
"loss = 1.743715, test_kld = 0.000010, meta_test_acc = 0.543837, time = 24.121 sec\n"
],
[
"torch.save(model.state_dict(), \"./save/thesis06_omniglot20w5s_5step\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8ee9963d1481a367f7d2dc9c233dbdd57444a5 | 169,588 | ipynb | Jupyter Notebook | MAIN-FNTA.ipynb | SABS-R3-projects/JA-ML | 6d43c1a15befde877c89203218d502e9d8a4b01f | [
"BSD-3-Clause"
] | null | null | null | MAIN-FNTA.ipynb | SABS-R3-projects/JA-ML | 6d43c1a15befde877c89203218d502e9d8a4b01f | [
"BSD-3-Clause"
] | 1 | 2019-12-10T10:24:19.000Z | 2019-12-10T10:24:19.000Z | MAIN-FNTA.ipynb | SABS-R3-projects/JA-ML | 6d43c1a15befde877c89203218d502e9d8a4b01f | [
"BSD-3-Clause"
] | null | null | null | 169,588 | 169,588 | 0.882191 | [
[
[
"# Machine Learning - FNTA\n",
"_____no_output_____"
],
[
"**By Jakke Neiro & Andrei Roibu** ",
"_____no_output_____"
],
[
"## 1. Importing All Required Dependencies",
"_____no_output_____"
],
[
"This script imports all the required dependencies for running the different functions and the codes. Also, by using the _run_ command, the various notebooks are imprted into the main notebook.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import interp\n\nimport glob, os\n\nfrom sklearn.metrics import accuracy_score, roc_curve, roc_auc_score, auc, roc_auc_score, confusion_matrix, classification_report, log_loss\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis\n\nfrom sklearn import svm, datasets, tree\nfrom sklearn.ensemble import RandomForestClassifier\n\n\nfrom sklearn.neural_network import MLPClassifier, MLPRegressor\nfrom sklearn.preprocessing import StandardScaler\n",
"_____no_output_____"
]
],
[
[
"In order to take advantage of the speed increases provided by GPUs, this code has been modified in order to run on Google Colab notebooks. In order to do this, the user needs to set the *google_colab_used* parameter to **True**. For usage on a local machine, this needs to be set to **False**\n\nIf used on a google colab notebook, the user will need to follow the instructions to generate and then copy an autorisation code from a generated link.\n",
"_____no_output_____"
]
],
[
[
"google_colab_used = True\n\nif google_colab_used == True:\n # Load the Drive helper and mount\n from google.colab import drive\n\n # This will prompt for authorization.\n drive.mount('/content/drive')\n\n data_drive = '/content/drive/My Drive/JA-ML/data'\n\n os.chdir(data_drive)\n\nelse:\n os.chdir(\"./data\")",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n"
]
],
[
[
"## 2. Data Pre-Processing",
"_____no_output_____"
],
[
"This section imports all the required datasets as pandas dataframes, concatenates them, after which it pre-processes them by eliminating all non-numerical data and columns which contain the same data-values. This script also creates the input dataset and the labeled output dataset.",
"_____no_output_____"
]
],
[
[
"def data_preprocessing():\n \n '''\n This reads all the input datasets, pre-processes them and then generates the input dataset and the labelled dataset.\n \n Args:\n None\n \n Returns:\n X (ndarray): A 2D array containing the input processed data\n y (ndarray): A 1D array containing a list of labels, with 1 corresponding to \"active\" and 0 corresponding to \"dummy\"\n \n '''\n \n df_list = []\n y = np.array([])\n for file in glob.glob(\"fnta*.csv\"):\n df = pd.read_csv(file, header = 0)\n\n cols = df.shape[0]\n if \"actives\" in file:\n y_df = np.ones((cols))\n else:\n y_df = np.zeros((cols))\n y = np.concatenate((y,y_df), axis=0)\n\n df_list.append(df)\n\n global_df = pd.concat(df_list, axis=0, ignore_index=True)\n global_df = global_df._get_numeric_data() # removes any non-numeric data\n global_df = global_df.loc[:, (global_df != global_df.iloc[0]).any()] # modifies the dataframe to remove columns with only 0s\n\n X_headers = list(global_df.columns.values)\n X = global_df.values\n \n return X,y",
"_____no_output_____"
],
[
"X,y = data_preprocessing()",
"_____no_output_____"
],
[
"def data_split(X,y,random_state=42):\n \n '''\n This function takes the original datasets and splits them into training and testing datasets. For consistency, the function employs a 80-20 split for the train and test sets.\n \n Args:\n X (ndarray): A 2D array containing the input processed data\n y (ndarray): A 1D array containing a list of labels, with 1 corresponding to \"active\" and 0 corresponding to \"dummy\"\n random_state (int): An integer, representing the seed to be used by the random number generator; if not provided, the default value goes to 42\n \n Returns:\n X_train (ndarray): 2D array of input dataset used for training\n X_test (ndarray): 2D array of input dataset used for testing\n y_train (ndarray): 1D array of train labels \n y_test (ndarray): 1D array of test labels \n \n '''\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=random_state)\n \n return X_train, X_test, y_train, y_test",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = data_split(X,y)",
"_____no_output_____"
]
],
[
[
"## 3. Model Evaluation\n",
"_____no_output_____"
],
[
"This section produces the ROC plot, as well as several other performance metrics, including the classifier scores, the log-loss for each classifier, the confusion matrix and the classification report including the f1 score. The f1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0.",
"_____no_output_____"
]
],
[
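[
"# Small illustrative check (the precision and recall values are made up):\n# the f1 score reported by classification_report is the harmonic mean of precision and recall.\nprecision, recall = 0.95, 0.89\nf1_manual = 2 * precision * recall / (precision + recall)\nprint('f1 = 2*P*R/(P+R) = %.3f' % f1_manual)",
"_____no_output_____"
],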
[
"def ROC_plotting(title, y_test, y_score):\n \n '''\n This function generates the ROC plot for a given model.\n \n Args:\n title (string): String represending the name of the model.\n y_test (ndarray): 1D array of test dataset \n y_score (ndarray): 1D array of model-predicted labels\n \n Returns:\n ROC Plot\n \n '''\n \n\n fpr, tpr, _ = roc_curve(y_test, y_score)\n roc_auc = auc(fpr, tpr)\n \n plt.figure()\n lw = 2 # linewidth\n plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(title)\n plt.legend(loc=\"lower right\")\n plt.show()",
"_____no_output_____"
],
[
"def performance_evaluation(X_train, X_test, y_train, y_test, predicted_train, predicted_test, y_score, title=\"model\"):\n \n '''\n This function prints the results of the different classifiers,a s well as several performance metrics\n \n Args:\n X_train (ndarray): 2D array of input dataset used for training\n X_test (ndarray): 2D array of input dataset used for testing\n y_train (ndarray): 1D array of train labels \n y_test (ndarray): 1D array of test labels \n title (string): the classifier name\n predicted_train (ndarray): 1D array of model-predicted labels for the train dataset \n predicted_test (ndarray): 1D array of model-predicted labels for the test dataset\n \n Returns:\n ROC Plot\n \n '''\n \n print(\"For the \", title, \" classifier:\")\n print(\"Training set score: %f\" % accuracy_score(y_train,predicted_train ))\n print(\"Training log-loss: %f\" % log_loss(y_train, predicted_train))\n print(\"Training set confusion matrix:\")\n print(confusion_matrix(y_train,predicted_train))\n print(\"Training set classification report:\")\n print(classification_report(y_train,predicted_train))\n \n print(\"Test set score: %f\" % accuracy_score(y_test, predicted_test))\n print(\"Test log-loss: %f\" % log_loss(y_test, predicted_test))\n print(\"Test set confusion matrix:\")\n print(confusion_matrix(y_test,predicted_test))\n print(\"Test set classification report:\")\n print(classification_report(y_test,predicted_test))\n\n ROC_plotting(\"ROC for \"+ title,y_test, y_score)\n fpr, tpr, _ = roc_curve(y_test, y_score)\n roc_auc = auc(fpr, tpr)\n print(\"AUC:\" + str(roc_auc))",
"_____no_output_____"
],
[
"def model_evaluation(function_name, X_train, X_test, y_train, y_test, title):\n\n '''\n This function evaluates the propoesed model the results of the different classifiers,a s well as several performance metrics\n\n Args:\n function_name (function): the function describing the employed model\n X_train (ndarray): 2D array of input dataset used for training\n X_test (ndarray): 2D array of input dataset used for testing\n y_train (ndarray): 1D array of train labels \n y_test (ndarray): 1D array of test labels \n title (string): the classifier name\n \n Returns:\n ROC Plot\n \n '''\n\n if title == 'Neural Network':\n y_predicted_train, y_predicted_test, y_score = neural_network(X_train, X_test, y_train, y_test)\n else:\n y_predicted_train, y_predicted_test, y_score = function_name(X_train, y_train, X_test)\n\n performance_evaluation(X_train, X_test, y_train, y_test, y_predicted_train, y_predicted_test, y_score, title)\n",
"_____no_output_____"
],
[
"def multiple_model_evaluation(function_name, X, y, title):\n\n '''\n This function takes the proposed model and original datasets and evaluates the proposed model by splitting the datasets randomly for 5 times.\n \n Args:\n function_name (function): the function describing the employed model\n X (ndarray): A 2D array containing the input processed data\n y (ndarray): A 1D array containing a list of labels, with 1 corresponding to \"active\" and 0 corresponding to \"dummy\"\n title (string): the classifier name\n\n '''\n\n random_states = [1, 10, 25, 42, 56]\n\n test_set_scores = []\n test_log_losses = []\n roc_aucs = []\n\n for random_state in random_states:\n X_train, X_test, y_train, y_test = data_split(X,y,random_state=random_state)\n\n if title == 'Neural Network':\n y_predicted_train, y_predicted_test, y_score = neural_network(X_train, X_test, y_train, y_test)\n else:\n y_predicted_train, y_predicted_test, y_score = function_name(X_train, y_train, X_test)\n\n test_set_score = accuracy_score(y_test, y_predicted_test)\n test_log_loss = log_loss(y_test, y_predicted_test)\n\n fpr, tpr, _ = roc_curve(y_test, y_score)\n roc_auc = auc(fpr, tpr)\n\n test_set_scores.append(test_set_score)\n test_log_losses.append(test_log_loss)\n roc_aucs.append(roc_auc)\n\n print(\"The average test set score for \", title, \"is: \", str(np.mean(test_set_scores)))\n print(\"The average test log-loss for \", title, \"is: \", str(np.mean(test_log_loss)))\n print(\"The average AUC for \", title, \"is: \", str(np.mean(roc_auc)))\n \n",
"_____no_output_____"
]
],
[
[
"## 4. Logistic regression, linear and quadratic discriminant analysis",
"_____no_output_____"
],
[
"### 4.1. Logistic regression",
"_____no_output_____"
],
[
"Logistic regression (logit regression, log-liner classifier) is a generalized linear model used for classification that uses a log-linear link function to model the outcome of a binary reponse variable $\\mathbf{y}$ using a single or multiple predictors $\\mathbf{X}$. Mathematically, the logistic regression primarily computes the probability of the value of a response variable given a value of the predictor, and this probability is then used for predicting the most probable outcome. The logistic regression has several advantages: it is easy to implement, it is efficient to train and it does not require input features to be scaled. However, the logistic regression can only produce a non-linear decision boundary. Therefore, with a complex dataset as ours, we do not expect it to perform particularly well.",
"_____no_output_____"
]
],
[
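[
"# Illustrative sketch (toy numbers): logistic regression models the class probability through\n# the logistic (sigmoid) link, p = 1 / (1 + exp(-z)), where z is a linear combination of the predictors.\nz = np.array([-2.0, 0.0, 2.0])\nprint(1 / (1 + np.exp(-z)))  # probabilities increase monotonically with z: approx. [0.12, 0.5, 0.88]",
"_____no_output_____"
],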
[
"def LogReg(X_train, y_train, X_test):\n \"\"\"Classification using logistic regression \n\n Args:\n X_train: Predictor or feature values used for training\n y_train: Response values used for training\n X_test: Predictor or feature values used for predicting the response values using the classifier\n\n Returns:\n y_predicted: The predicted response values\n\n \"\"\"\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n \n #Define and train the model\n classifier = LogisticRegression(max_iter = 500).fit(X_train, y_train)\n \n #Predict the response values using the test predictor data\n y_predicted_test = classifier.predict(X_test)\n y_predicted_train = classifier.predict(X_train)\n y_score = classifier.predict_proba(X_test)[:,1]\n return y_predicted_train, y_predicted_test, y_score",
"_____no_output_____"
],
[
"model_evaluation(LogReg, X_train, X_test, y_train, y_test, title='Logistic Regression')",
"For the Logistic Regression classifier:\nTraining set score: 0.999351\nTraining log-loss: 0.022408\nTraining set confusion matrix:\n[[41135 5]\n [ 22 455]]\nTraining set classification report:\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 41140\n 1.0 0.99 0.95 0.97 477\n\n accuracy 1.00 41617\n macro avg 0.99 0.98 0.99 41617\nweighted avg 1.00 1.00 1.00 41617\n\nTest set score: 0.998270\nTest log-loss: 0.059750\nTest set confusion matrix:\n[[10285 5]\n [ 13 102]]\nTest set classification report:\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 10290\n 1.0 0.95 0.89 0.92 115\n\n accuracy 1.00 10405\n macro avg 0.98 0.94 0.96 10405\nweighted avg 1.00 1.00 1.00 10405\n\n"
],
[
"multiple_model_evaluation(LogReg, X, y, title='Logistic Regression')",
"The average test set score for Logistic Regression is: 0.9984238346948583\nThe average test log-loss for Logistic Regression is: 0.0531115059974207\nThe average AUC for Logistic Regression is: 0.9981679131279841\n"
]
],
[
[
"### 4.2. Linear discriminant analysis",
"_____no_output_____"
],
[
"LDA employs Bayes' theorem to fit a Gaussian density to each class of data. The classes are assumed to have the same covariance matrix. This generates a linear decision boundry. ",
"_____no_output_____"
]
],
[
[
"def LDA(X_train, y_train, X_test):\n \n \"\"\"Classification using LDA \n\n Args:\n X_train: Predictor or feature values used for training\n y_train: Response values used for training\n X_test: Predictor or feature values used for predicting the response values using the classifier\n\n Returns:\n y_predicted_train: The predicted response values for the training dataset\n y_predicted_test: The predicted response values for the test dataset\n\n \"\"\"\n \n classifier = LinearDiscriminantAnalysis()\n classifier = classifier.fit(X_train, y_train)\n y_predicted_test = classifier.predict(X_test)\n y_predicted_train = classifier.predict(X_train)\n y_score = classifier.predict_proba(X_test)[:,1]\n return y_predicted_train, y_predicted_test, y_score",
"_____no_output_____"
],
[
"model_evaluation(LDA, X_train, X_test, y_train, y_test, title='Linear Discriminant')",
"For the Linear Discriminant classifier:\nTraining set score: 0.995459\nTraining log-loss: 0.156857\nTraining set confusion matrix:\n[[41046 94]\n [ 95 382]]\nTraining set classification report:\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 41140\n 1.0 0.80 0.80 0.80 477\n\n accuracy 1.00 41617\n macro avg 0.90 0.90 0.90 41617\nweighted avg 1.00 1.00 1.00 41617\n\nTest set score: 0.995963\nTest log-loss: 0.139418\nTest set confusion matrix:\n[[10266 24]\n [ 18 97]]\nTest set classification report:\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 10290\n 1.0 0.80 0.84 0.82 115\n\n accuracy 1.00 10405\n macro avg 0.90 0.92 0.91 10405\nweighted avg 1.00 1.00 1.00 10405\n\n"
],
[
"multiple_model_evaluation(LDA, X, y, title='Linear Discriminant')",
"The average test set score for Linear Discriminant is: 0.9950985103315715\nThe average test log-loss for Linear Discriminant is: 0.12282021353011703\nThe average AUC for Linear Discriminant is: 0.9908209743524741\n"
]
],
[
[
"### 4.3. Quadratic discriminant analysis",
"_____no_output_____"
],
[
"QDA is similar to LDA, however it employs a quadratic decision boundary, rather than a linear one.",
"_____no_output_____"
]
],
[
[
"def QDA(X_train, y_train, X_test):\n \"\"\"Classification using QDA \n\n Args:\n X_train: Predictor or feature values used for training\n y_train: Response values used for training\n X_test: Predictor or feature values used for predicting the response values using the classifier\n\n Returns:\n y_predicted_train: The predicted response values for the training dataset\n y_predicted_test: The predicted response values for the test dataset\n\n \"\"\"\n classifier = QuadraticDiscriminantAnalysis()\n classifier = classifier.fit(X_train, y_train)\n y_predicted_test = classifier.predict(X_test)\n y_predicted_train = classifier.predict(X_train)\n y_score = classifier.predict_proba(X_test)[:,1]\n return y_predicted_train, y_predicted_test, y_score",
"_____no_output_____"
],
[
"model_evaluation(QDA, X_train, X_test, y_train, y_test, title='Quadratic Discriminant')",
"For the Quadratic Discriminant classifier:\nTraining set score: 0.011798\nTraining log-loss: 34.132076\nTraining set confusion matrix:\n[[ 14 41126]\n [ 0 477]]\nTraining set classification report:\n precision recall f1-score support\n\n 0.0 1.00 0.00 0.00 41140\n 1.0 0.01 1.00 0.02 477\n\n accuracy 0.01 41617\n macro avg 0.51 0.50 0.01 41617\nweighted avg 0.99 0.01 0.00 41617\n\nTest set score: 0.011148\nTest log-loss: 34.154512\nTest set confusion matrix:\n[[ 1 10289]\n [ 0 115]]\nTest set classification report:\n precision recall f1-score support\n\n 0.0 1.00 0.00 0.00 10290\n 1.0 0.01 1.00 0.02 115\n\n accuracy 0.01 10405\n macro avg 0.51 0.50 0.01 10405\nweighted avg 0.99 0.01 0.00 10405\n\n"
],
[
"multiple_model_evaluation(QDA, X, y, title='Quadratic Discriminant')",
"The average test set score for Quadratic Discriminant is: 0.011513695338779432\nThe average test log-loss for Quadratic Discriminant is: 34.14787296811251\nThe average AUC for Quadratic Discriminant is: 0.5001457725947522\n"
]
],
[
[
"## 5. Decision trees and random forest",
"_____no_output_____"
],
[
"### 5.1. Single decision tree",
"_____no_output_____"
],
[
"Decision trees are a non-parametric learning method used for both classification and regression. The advantages of decision trees are that they are easy to understand and they can be used for a broad range of data. However, the main disadvantages are that a single decision tree is easily overfitted and hence even small perturbations in the data might result in a markedly different classifier. This problem is tackled by generating several decision trees for deriving the final classifier. Here, we first train a single decision tree before we looking into more sophisticated ensemble methods.",
"_____no_output_____"
],
[
"We fit a single decision tree with default parameters and predict the values of $\\mathbf{y}$ based on the test data.",
"_____no_output_____"
]
],
[
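[
"# Illustrative sketch only (not used by the pipeline below): the overfitting mentioned above can be\n# limited by constraining the tree, for example via max_depth or min_samples_leaf.\nshallow_tree = tree.DecisionTreeClassifier(max_depth=5, min_samples_leaf=10)\nshallow_tree",
"_____no_output_____"
],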
[
"def DecisionTree(X_train, y_train, X_test):\n \n \"\"\"Classification using Decision Tree \n\n Args:\n X_train: Predictor or feature values used for training\n y_train: Response values used for training\n X_test: Predictor or feature values used for predicting the response values using the classifier\n\n Returns:\n y_predicted_train: The predicted response values for the training dataset\n y_predicted_test: The predicted response values for the test dataset\n\n \"\"\"\n \n classifier = tree.DecisionTreeClassifier()\n classifier = classifier.fit(X_train, y_train)\n y_predicted_test = classifier.predict(X_test)\n y_predicted_train = classifier.predict(X_train)\n y_score = classifier.predict_proba(X_test)[:,1]\n return y_predicted_train, y_predicted_test, y_score",
"_____no_output_____"
],
[
"model_evaluation(DecisionTree, X_train, X_test, y_train, y_test, title='Decision Tree')",
"For the Decision Tree classifier:\nTraining set score: 1.000000\nTraining log-loss: 0.000000\nTraining set confusion matrix:\n[[41140 0]\n [ 0 477]]\nTraining set classification report:\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 41140\n 1.0 1.00 1.00 1.00 477\n\n accuracy 1.00 41617\n macro avg 1.00 1.00 1.00 41617\nweighted avg 1.00 1.00 1.00 41617\n\nTest set score: 0.997117\nTest log-loss: 0.099584\nTest set confusion matrix:\n[[10274 16]\n [ 14 101]]\nTest set classification report:\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 10290\n 1.0 0.86 0.88 0.87 115\n\n accuracy 1.00 10405\n macro avg 0.93 0.94 0.93 10405\nweighted avg 1.00 1.00 1.00 10405\n\n"
],
[
"multiple_model_evaluation(DecisionTree, X, y, title='Decision Tree')",
"The average test set score for Decision Tree is: 0.9968092263334934\nThe average test log-loss for Decision Tree is: 0.10622347307937488\nThe average AUC for Decision Tree is: 0.9382557992140955\n"
]
],
[
[
"### 5.2. Random forest",
"_____no_output_____"
],
[
"Radnom forest explanation...",
"_____no_output_____"
]
],
[
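[
"# Illustrative sketch only (hyper-parameter values chosen arbitrarily, not used by the pipeline below):\n# a random forest grows n_estimators randomized trees and combines their votes; after fitting,\n# feature_importances_ can be inspected to rank the input descriptors.\nforest_sketch = RandomForestClassifier(n_estimators=50, max_depth=10)\nforest_sketch",
"_____no_output_____"
],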
[
"def RandomForest(X_train, y_train, X_test):\n \n \"\"\"Classification using Random Forest \n\n Args:\n X_train: Predictor or feature values used for training\n y_train: Response values used for training\n X_test: Predictor or feature values used for predicting the response values using the classifier\n\n Returns:\n y_predicted_train: The predicted response values for the training dataset\n y_predicted_test: The predicted response values for the test dataset\n\n \"\"\"\n \n rf_classifier = RandomForestClassifier(n_estimators=200)\n rf_classifier = rf_classifier.fit(X_train, y_train)\n y_predicted_test = rf_classifier.predict(X_test)\n y_predicted_train = rf_classifier.predict(X_train)\n y_score = rf_classifier.predict_proba(X_test)[:,1]\n return y_predicted_train, y_predicted_test, y_score",
"_____no_output_____"
],
[
"model_evaluation(RandomForest, X_train, X_test, y_train, y_test, title='Random Forest')",
"For the Random Forest classifier:\nTraining set score: 1.000000\nTraining log-loss: 0.000000\nTraining set confusion matrix:\n[[41140 0]\n [ 0 477]]\nTraining set classification report:\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 41140\n 1.0 1.00 1.00 1.00 477\n\n accuracy 1.00 41617\n macro avg 1.00 1.00 1.00 41617\nweighted avg 1.00 1.00 1.00 41617\n\nTest set score: 0.997790\nTest log-loss: 0.076347\nTest set confusion matrix:\n[[10290 0]\n [ 23 92]]\nTest set classification report:\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 10290\n 1.0 1.00 0.80 0.89 115\n\n accuracy 1.00 10405\n macro avg 1.00 0.90 0.94 10405\nweighted avg 1.00 1.00 1.00 10405\n\n"
],
[
"multiple_model_evaluation(RandomForest, X, y, title='Random Forest')",
"The average test set score for Random Forest is: 0.9977703027390676\nThe average test log-loss for Random Forest is: 0.08298600767638417\nThe average AUC for Random Forest is: 0.9997160603371784\n"
]
],
[
[
"## 6. Neural Network",
"_____no_output_____"
],
[
"A Neural Network, also known as a multi-layered perceptron, is a supervised learning algorithm that learns a function, which is trained using a set of features and targets. A neural network can learns a non-linear function approximator, allowing classification of data. Between the input and output layers, there are a set of non-linear hidden layers. The advantages of a neural network are it's ability to learn non-linear models and perform learning in real-time. However, a NN can suffer from different validation accuracy induced by random weight initialization, has a large number of hyper parameters which require tunning and is sensitive to feature scaling. \n\nThe neural_network function below makes use of inbuilt MLPClassifier, which implements a multi-layer perceptron (MLP) algorithm that trains using Backpropagation.\n\nAs MLPs are sensitive to feature scaling, the data is scaled using the built-in StandardScaler for standardization. The same scaling s applied to the test set for meaningful results.\n\nMost of the MLPClassifier's parameters where left to random. However, several were modifed in order to enhance performance. Firstly, the solver was set to _adam_, which reffers to a stochastic gradient-based optimizer, the alpha regularization parameter was set to 1e-5, the number of hidden layers was set to 2, each with 70 neurons (numbers determined through experimentation throughout the day), and the max_iterations was set to 1500.",
"_____no_output_____"
]
],
[
[
"def neural_network(X_train, X_test, y_train, y_test):\n \n '''\n This function takes in the input datasets, creates a neural network, trains and then tests it.\n \n Written by AndreiRoibu\n \n Args:\n X_train (ndarray): 2D array of input dataset used for training\n X_test (ndarray): 2D array of input dataset used for testing\n y_train (ndarray): 1D array of train labels \n y_test (ndarray): 1D array of test labels \n \n Returns:\n predicted_train (ndarray): 1D array of model-predicted labels for the train dataset \n predicted_test (ndarray): 1D array of model-predicted labels for the test dataset\n \n '''\n \n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n \n classifier = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(70,70), random_state=1, max_iter=1500)\n \n classifier.fit(X_train, y_train)\n \n predicted_train = classifier.predict(X_train)\n predicted_test = classifier.predict(X_test)\n y_score = classifier.predict_proba(X_test)[:,1]\n return predicted_train, predicted_test, y_score",
"_____no_output_____"
],
[
"model_evaluation(neural_network, X_train, X_test, y_train, y_test, title='Neural Network')",
"For the Neural Network classifier:\nTraining set score: 1.000000\nTraining log-loss: 0.000000\nTraining set confusion matrix:\n[[41140 0]\n [ 0 477]]\nTraining set classification report:\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 41140\n 1.0 1.00 1.00 1.00 477\n\n accuracy 1.00 41617\n macro avg 1.00 1.00 1.00 41617\nweighted avg 1.00 1.00 1.00 41617\n\nTest set score: 0.998654\nTest log-loss: 0.046472\nTest set confusion matrix:\n[[10287 3]\n [ 11 104]]\nTest set classification report:\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 10290\n 1.0 0.97 0.90 0.94 115\n\n accuracy 1.00 10405\n macro avg 0.99 0.95 0.97 10405\nweighted avg 1.00 1.00 1.00 10405\n\n"
],
[
"multiple_model_evaluation(neural_network, X, y, title='Neural Network')",
"The average test set score for Neural Network is: 0.9991542527630948\nThe average test log-loss for Neural Network is: 0.026555752998710852\nThe average AUC for Neural Network is: 0.998776355262602\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
ec8ef639de3ea6a9bd79cf73dec6878cbb73d3c1 | 2,484 | ipynb | Jupyter Notebook | ipython_notebooks/regression line.ipynb | genkioffice/prettyplotlib | aa964ff777e60d26f078d8ace386936bf41cbd15 | [
"MIT"
] | 960 | 2015-01-04T02:25:42.000Z | 2022-03-31T00:33:20.000Z | ipython_notebooks/regression line.ipynb | Gonaco/prettyplotlib | aa964ff777e60d26f078d8ace386936bf41cbd15 | [
"MIT"
] | 10 | 2015-03-24T21:43:22.000Z | 2018-04-03T06:47:08.000Z | ipython_notebooks/regression line.ipynb | Gonaco/prettyplotlib | aa964ff777e60d26f078d8ace386936bf41cbd15 | [
"MIT"
] | 117 | 2015-01-04T10:41:43.000Z | 2021-11-05T12:51:36.000Z | 29.223529 | 111 | 0.501208 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ec8efcab0035e7aca61318d2dc48711e5f1009a6 | 7,784 | ipynb | Jupyter Notebook | tests/data/naics/create_alembic_import_data_from_naics_csvs.ipynb | saravanpa-aot/lear | 349595098a15f441ba05214cf16ea861eac75606 | [
"Apache-2.0"
] | null | null | null | tests/data/naics/create_alembic_import_data_from_naics_csvs.ipynb | saravanpa-aot/lear | 349595098a15f441ba05214cf16ea861eac75606 | [
"Apache-2.0"
] | null | null | null | tests/data/naics/create_alembic_import_data_from_naics_csvs.ipynb | saravanpa-aot/lear | 349595098a15f441ba05214cf16ea861eac75606 | [
"Apache-2.0"
] | null | null | null | 36.373832 | 327 | 0.561665 | [
[
[
"# Create Bulk Import Output for Alembic Scripts from StatsCan NAICS Data\nThis notebook contains code snippets to generate output for alembic scripts.\n\nThe output generation by default generates data for production purposes. But if `generate_subset_of_naics_data` is set to `True`, a subset of the codes can be generated for test purposes. This option was added as unit tests were taking too long to load the production set of naics structures/codes and naics elements.\n",
"_____no_output_____"
]
],
[
[
"%run /workspaces/lear/tests/data/common/naics_utils.ipynb\n\nimport os\nimport json\nimport csv\nimport chardet\nimport uuid",
"_____no_output_____"
],
[
"naics_structure_filename = '../data/naics/naics-scian-2017-structure-v3-eng.csv'\nnaics_element_filename = '../data/naics/naics-scian-2017-element-v3-eng.csv'\nnaics_year = 2017\nnaics_version = 3\n# set to generate_subset_of_naics_data to True if need to generate a subset of NAICS data by filtering on codes.\n# This is needed so that lear services/jobs/apis are able to apply alembic migrations scripts quickly when running unit tests.\n# Leave as False if all NAICS data needs to be generated.\ngenerate_subset_of_naics_data = False\n# only used if generate_subset_of_naics_data is set to True to filter on which codes to generate output for\n# for example, codes values of ['112320', '311351', '311911', '311920', '327410', '333248', '335223', '413130', '413190'] contain data relevant to the search term 'roast'\ncodes_to_add = ['112320', '311351', '311911', '311920', '327410', '333248', '335223', '413130', '413190']",
"_____no_output_____"
]
],
[
[
"### Create dict from NAICS structure data",
"_____no_output_____"
]
],
[
[
"char_encoding_result = None\nwith open(naics_structure_filename, 'rb') as rawdata:\n char_encoding_result = chardet.detect(rawdata.read(100000))\nchar_encoding_result\nassert char_encoding_result\nencoding = char_encoding_result['encoding']\nassert encoding\nencoding",
"_____no_output_____"
],
[
"structure_file = open(naics_structure_filename, encoding=encoding)\ncsvreader = csv.reader(structure_file)\nheader = []\nheader = next(csvreader)\nheader",
"_____no_output_____"
],
[
"with open(naics_structure_filename, newline='', encoding=encoding) as csvfile:\n # map custom field names that match database field names\n field_names = ['level', 'hierarchical_structure', 'code', 'class_title', 'superscript', 'class_definition']\n reader = csv.DictReader(csvfile, fieldnames=field_names)\n # The line will skip the first row of the csv file (Header row)\n next(reader)\n structure_dict_arr = []\n\n try:\n if generate_subset_of_naics_data:\n for row in reader:\n code = row['code']\n if code in codes_to_add:\n # print(f'code is match: {code}')\n # add custom properties to data row\n row['year'] = naics_year\n row['version'] = naics_version\n row['naics_key'] = str(uuid.uuid4())\n structure_dict_arr.append(row)\n print(structure_dict_arr)\n else:\n for row in reader:\n # add custom properties to data row\n row['year'] = naics_year\n row['version'] = naics_version\n row['naics_key'] = str(uuid.uuid4())\n structure_dict_arr.append(row)\n print(structure_dict_arr)\n except csv.Error as e:\n sys.exit('file {}, line {}: {}'.format(filename, reader.line_num, e))\n",
"_____no_output_____"
]
],
[
[
"### Create dict from NAICS element data",
"_____no_output_____"
]
],
[
[
"structure_file = open(naics_element_filename)\ncsvreader = csv.reader(structure_file)\nheader = []\nheader = next(csvreader)\nheader",
"_____no_output_____"
],
[
"# note in order for this to work, need to update ~/.jupyter/jupyter_notebook_config.py by adding c.NotebookApp.iopub_data_rate_limit = 10000000\n# if not done, an error message is thrown indicating streaming data rate exceeded\nwith open(naics_element_filename, newline='') as csvfile:\n # map custom field names that match database field names\n field_names = ['level', 'code', 'class_title', 'element_type_label', 'element_description']\n reader = csv.DictReader(csvfile, fieldnames=field_names)\n # The line will skip the first row of the csv file (Header row)\n next(reader)\n element_dict_arr = []\n\n try:\n if generate_subset_of_naics_data:\n for row in reader:\n code = row['code']\n if code in codes_to_add:\n # print(f'code is match: {code}')\n # add custom properties to data row\n row['year'] = naics_year\n row['version'] = naics_version\n row['element_type'] = get_element_type_from_label(row['element_type_label'])\n del row['element_type_label']\n element_dict_arr.append(row)\n print(element_dict_arr)\n else:\n for row in reader:\n # add custom properties to data row\n row['year'] = naics_year\n row['version'] = naics_version\n row['element_type'] = get_element_type_from_label(row['element_type_label'])\n del row['element_type_label']\n element_dict_arr.append(row)\n print(element_dict_arr)\n except csv.Error as e:\n sys.exit('file {}, line {}: {}'.format(filename, reader.line_num, e))\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ec8f003cadb8c4deb9f558afdca74f70b24bba58 | 273,120 | ipynb | Jupyter Notebook | lessons/07_Step_5.ipynb | iafleischer/CFDPython | 02e1959e483b4503e85ccfe1f4fdb39e9b1601f8 | [
"CC-BY-3.0"
] | null | null | null | lessons/07_Step_5.ipynb | iafleischer/CFDPython | 02e1959e483b4503e85ccfe1f4fdb39e9b1601f8 | [
"CC-BY-3.0"
] | null | null | null | lessons/07_Step_5.ipynb | iafleischer/CFDPython | 02e1959e483b4503e85ccfe1f4fdb39e9b1601f8 | [
"CC-BY-3.0"
] | 1 | 2021-05-01T13:45:12.000Z | 2021-05-01T13:45:12.000Z | 529.302326 | 85,018 | 0.928808 | [
[
[
"Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Lorena A. Barba, Gilbert F. Forsyth 2015. Thanks to NSF for support via CAREER award #1149784.",
"_____no_output_____"
]
],
[
[
"[@LorenaABarba](https://twitter.com/LorenaABarba)",
"_____no_output_____"
],
[
"12 steps to Navier-Stokes\n=====\n***",
"_____no_output_____"
],
[
"Up to now, all of our work has been in one spatial dimension (Steps [1](./01_Step_1.ipynb) to [4](./05_Step_4.ipynb)). We can learn a lot in just 1D, but let's grow up to flatland: two dimensions. \n\nIn the following exercises, you will extend the first four steps to 2D. To extend the 1D finite-difference formulas to partial derivatives in 2D or 3D, just apply the definition: a partial derivative with respect to $x$ is the variation in the $x$ direction *at constant* $y$.\n\nIn 2D space, a rectangular (uniform) grid is defined by the points with coordinates:\n\n$$x_i = x_0 +i \\Delta x$$\n\n$$y_i = y_0 +i \\Delta y$$\n\nNow, define $u_{i,j} = u(x_i,y_j)$ and apply the finite-difference formulas on either variable $x,y$ *acting separately* on the $i$ and $j$ indices. All derivatives are based on the 2D Taylor expansion of a mesh point value around $u_{i,j}$.\n\nHence, for a first-order partial derivative in the $x$-direction, a finite-difference formula is:\n\n$$ \\frac{\\partial u}{\\partial x}\\biggr\\rvert_{i,j} = \\frac{u_{i+1,j}-u_{i,j}}{\\Delta x}+\\mathcal{O}(\\Delta x)$$\n\nand similarly in the $y$ direction. Thus, we can write backward-difference, forward-difference or central-difference formulas for Steps 5 to 12. Let's get started!",
"_____no_output_____"
],
[
"Step 5: 2-D Linear Convection\n----\n***",
"_____no_output_____"
],
[
"The PDE governing 2-D Linear Convection is written as\n\n$$\\frac{\\partial u}{\\partial t}+c\\frac{\\partial u}{\\partial x} + c\\frac{\\partial u}{\\partial y} = 0$$\n\nThis is the exact same form as with 1-D Linear Convection, except that we now have two spatial dimensions to account for as we step forward in time. \n\nAgain, the timestep will be discretized as a forward difference and both spatial steps will be discretized as backward differences. \n\nWith 1-D implementations, we used $i$ subscripts to denote movement in space (e.g. $u_{i}^n-u_{i-1}^n$). Now that we have two dimensions to account for, we need to add a second subscript, $j$, to account for all the information in the regime. \n\nHere, we'll again use $i$ as the index for our $x$ values, and we'll add the $j$ subscript to track our $y$ values. ",
"_____no_output_____"
],
[
"With that in mind, our discretization of the PDE should be relatively straightforward. \n\n$$\\frac{u_{i,j}^{n+1}-u_{i,j}^n}{\\Delta t} + c\\frac{u_{i, j}^n-u_{i-1,j}^n}{\\Delta x} + c\\frac{u_{i,j}^n-u_{i,j-1}^n}{\\Delta y}=0$$\n\nAs before, solve for the only unknown:\n\n$$u_{i,j}^{n+1} = u_{i,j}^n-c \\frac{\\Delta t}{\\Delta x}(u_{i,j}^n-u_{i-1,j}^n)-c \\frac{\\Delta t}{\\Delta y}(u_{i,j}^n-u_{i,j-1}^n)$$\n\nWe will solve this equation with the following initial conditions:\n\n$$u(x,y) = \\begin{cases}\n\\begin{matrix}\n2\\ \\text{for} & 0.5 \\leq x, y \\leq 1 \\cr\n1\\ \\text{for} & \\text{everywhere else}\\end{matrix}\\end{cases}$$\n\nand boundary conditions:\n\n$$u = 1\\ \\text{for } \\begin{cases}\n\\begin{matrix}\nx = 0,\\ 2 \\cr\ny = 0,\\ 2 \\end{matrix}\\end{cases}$$",
"_____no_output_____"
]
],
[
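[
"import numpy\n\n# Small illustrative check (the values of u_demo and dx_demo are made up): a backward difference\n# in x is taken at constant y, i.e. along the second index of u[j, i].\ndx_demo = 0.5\nu_demo = numpy.array([[1.0, 2.0, 4.0],\n                      [1.0, 3.0, 9.0]])\ndudx = (u_demo[:, 1:] - u_demo[:, :-1]) / dx_demo   # du/dx at i = 1, 2 for each j\nprint(dudx)",
"_____no_output_____"
],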
[
"from mpl_toolkits.mplot3d import Axes3D ##New Library required for projected 3d plots\n\nimport numpy\nfrom matplotlib import pyplot, cm\n%matplotlib inline\n\n###variable declarations\nnx = 81\nny = 81\nnt = 100\nc = 1\ndx = 2 / (nx - 1)\ndy = 2 / (ny - 1)\nsigma = .2\ndt = sigma * dx\n\nx = numpy.linspace(0, 2, nx)\ny = numpy.linspace(0, 2, ny)\n\nu = numpy.ones((ny, nx)) ##create a 1xn vector of 1's\nun = numpy.ones((ny, nx)) ##\n\n###Assign initial conditions\n\n##set hat function I.C. : u(.5<=x<=1 && .5<=y<=1 ) is 2\nu[int(.5 / dy):int(1 / dy + 1),int(.5 / dx):int(1 / dx + 1)] = 2 \n\n###Plot Initial Condition\n##the figsize parameter can be used to produce different sized images\nfig = pyplot.figure(figsize=(11, 7), dpi=100)\nax = fig.gca(projection='3d') \nX, Y = numpy.meshgrid(x, y) \nsurf = ax.plot_surface(X, Y, u[:], cmap=cm.viridis)\n\n",
"_____no_output_____"
]
],
[
[
"### 3D Plotting Notes",
"_____no_output_____"
],
[
"To plot a projected 3D result, make sure that you have added the Axes3D library. ",
"_____no_output_____"
],
[
" from mpl_toolkits.mplot3d import Axes3D",
"_____no_output_____"
],
[
"The actual plotting commands are a little more involved than with simple 2d plots.",
"_____no_output_____"
],
[
"```python\nfig = pyplot.figure(figsize=(11, 7), dpi=100)\nax = fig.gca(projection='3d')\nsurf2 = ax.plot_surface(X, Y, u[:])\n```",
"_____no_output_____"
],
[
"The first line here is initializing a figure window. The **figsize** and **dpi** commands are optional and simply specify the size and resolution of the figure being produced. You may omit them, but you will still require the \n \n fig = pyplot.figure()\n\nThe next line assigns the plot window the axes label 'ax' and also specifies that it will be a 3d projection plot. The final line uses the command\n \n plot_surface()\n\nwhich is equivalent to the regular plot command, but it takes a grid of X and Y values for the data point positions. \n",
"_____no_output_____"
],
[
"##### Note\n\n\nThe `X` and `Y` values that you pass to `plot_surface` are not the 1-D vectors `x` and `y`. In order to use matplotlibs 3D plotting functions, you need to generate a grid of `x, y` values which correspond to each coordinate in the plotting frame. This coordinate grid is generated using the numpy function `meshgrid`.\n\n X, Y = numpy.meshgrid(x, y)\n\n ",
"_____no_output_____"
],
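[
"For example, a minimal sketch of what `meshgrid` produces (the values are chosen only for illustration):\n\n```python\nimport numpy\nx = numpy.array([0, 1, 2])\ny = numpy.array([0, 10])\nX, Y = numpy.meshgrid(x, y)\n# X repeats x along the rows, Y repeats y along the columns:\n# X = [[0, 1, 2],      Y = [[ 0,  0,  0],\n#      [0, 1, 2]]           [10, 10, 10]]\n```",
"_____no_output_____"
],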
[
"### Iterating in two dimensions",
"_____no_output_____"
],
[
"To evaluate the wave in two dimensions requires the use of several nested for-loops to cover all of the `i`'s and `j`'s. Since Python is not a compiled language there can be noticeable slowdowns in the execution of code with multiple for-loops. First try evaluating the 2D convection code and see what results it produces. ",
"_____no_output_____"
]
],
[
[
"u = numpy.ones((ny, nx))\nu[int(.5 / dy):int(1 / dy + 1), int(.5 / dx):int(1 / dx + 1)] = 2\n\nfor n in range(nt + 1): ##loop across number of time steps\n un = u.copy()\n row, col = u.shape\n for j in range(1, row):\n for i in range(1, col):\n u[j, i] = (un[j, i] - (c * dt / dx * (un[j, i] - un[j, i - 1])) -\n (c * dt / dy * (un[j, i] - un[j - 1, i])))\n u[0, :] = 1\n u[-1, :] = 1\n u[:, 0] = 1\n u[:, -1] = 1\n\nfig = pyplot.figure(figsize=(11, 7), dpi=100)\nax = fig.gca(projection='3d')\nsurf2 = ax.plot_surface(X, Y, u[:], cmap=cm.viridis)",
"_____no_output_____"
]
],
[
[
"Array Operations\n----------------\n\nHere the same 2D convection code is implemented, but instead of using nested for-loops, the same calculations are evaluated using array operations. ",
"_____no_output_____"
]
],
[
[
"u = numpy.ones((ny, nx))\nu[int(.5 / dy):int(1 / dy + 1), int(.5 / dx):int(1 / dx + 1)] = 2\n\nfor n in range(nt + 1): ##loop across number of time steps\n un = u.copy()\n u[1:, 1:] = (un[1:, 1:] - (c * dt / dx * (un[1:, 1:] - un[1:, :-1])) -\n (c * dt / dy * (un[1:, 1:] - un[:-1, 1:])))\n u[0, :] = 1\n u[-1, :] = 1\n u[:, 0] = 1\n u[:, -1] = 1\n\nfig = pyplot.figure(figsize=(11, 7), dpi=100)\nax = fig.gca(projection='3d')\nsurf2 = ax.plot_surface(X, Y, u[:], cmap=cm.viridis)\n\n ",
"_____no_output_____"
]
],
[
[
"## Learn More",
"_____no_output_____"
],
[
"The video lesson that walks you through the details for Step 5 (and onwards to Step 8) is **Video Lesson 6** on You Tube:",
"_____no_output_____"
]
],
[
[
"from IPython.display import YouTubeVideo\nYouTubeVideo('tUg_dE3NXoY')",
"_____no_output_____"
],
[
"from IPython.core.display import HTML\ndef css_styling():\n styles = open(\"../styles/custom.css\", \"r\").read()\n return HTML(styles)\ncss_styling()",
"_____no_output_____"
]
],
[
[
"> (The cell above executes the style for this notebook.)",
"_____no_output_____"
]
]
] | [
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"raw"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
ec8f02ef3046047adecf3948d320361d3e478fa7 | 83,135 | ipynb | Jupyter Notebook | Assignment_9_Plotting_Vector_using_NumPy_and_MatPlotLib.ipynb | emmataguinod/Linear-Algebra_2ndSem | e64f936e5b002cb23346098435f7861a14644555 | [
"Apache-2.0"
] | null | null | null | Assignment_9_Plotting_Vector_using_NumPy_and_MatPlotLib.ipynb | emmataguinod/Linear-Algebra_2ndSem | e64f936e5b002cb23346098435f7861a14644555 | [
"Apache-2.0"
] | null | null | null | Assignment_9_Plotting_Vector_using_NumPy_and_MatPlotLib.ipynb | emmataguinod/Linear-Algebra_2ndSem | e64f936e5b002cb23346098435f7861a14644555 | [
"Apache-2.0"
] | null | null | null | 82.886341 | 18,345 | 0.795116 | [
[
[
"<a href=\"https://colab.research.google.com/github/emmataguinod/Linear-Algebra_2ndSem/blob/main/Assignment_9_Plotting_Vector_using_NumPy_and_MatPlotLib.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"#Assignment 9 - Plotting Vector using NumPy and MatPlotLib",
"_____no_output_____"
],
[
"In this laboratory we will be discussing the basics of numerical and scientific programming by working with Vectors using NumPy and MatPlotLib.",
"_____no_output_____"
],
[
"### Objectives\nAt the end of this activity you will be able to:\n1. Be familiar with the libraries in Python for numerical and scientific programming.\n2. Visualize vectors through Python programming.\n3. Perform simple vector operations through code.",
"_____no_output_____"
],
[
"## Discussion",
"_____no_output_____"
],
[
"### NumPy",
"_____no_output_____"
],
[
"NumPy stands for Numerical Python and is a Python package for scientific computing. It requires a practical multi-dimensional array of objects and a variety of functions for working with them.",
"_____no_output_____"
],
[
"Scalars are quantities that can be defined entirely by a single magnitude (or numerical value).\n\nVectors are quantities with a magnitude and a direction that are completely specified.",
"_____no_output_____"
],
[
"#### Representing Vectors",
"_____no_output_____"
],
[
"$$ A = 7\\hat{x} + 6\\hat{y} \\\\\nB = 9\\hat{x} - 3\\hat{y}\\\\\nC = 7\\hat{i} + 8\\hat{j} - 4\\hat{k} \\\\\nD = 6\\hat{i} - 7\\hat{j} + 5\\hat{k}$$",
"_____no_output_____"
],
[
"In which it's matrix equivalent is:",
"_____no_output_____"
],
[
"$$ A = \\begin{bmatrix} 7 \\\\ 6\\end{bmatrix} , B = \\begin{bmatrix} 9 \\\\ -3\\end{bmatrix} , C = \\begin{bmatrix} 7 \\\\ 8 \\\\ -4 \\end{bmatrix}, D = \\begin{bmatrix} 6 \\\\ -7 \\\\ 5\\end{bmatrix}\n$$\n$$ A = \\begin{bmatrix} 7 & 6\\end{bmatrix} , B = \\begin{bmatrix} 9 & -3\\end{bmatrix} , C = \\begin{bmatrix} 7 & 8 & -4\\end{bmatrix} , D = \\begin{bmatrix} 6 & -7 & 5\\end{bmatrix} \n$$",
"_____no_output_____"
],
[
"We can then start doing numpy code with this by:",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"A = np.array([7, 6])\nB = np.array([9, -3])\nC = np.array([\n [7],\n [8],\n [-4]\n])\nD = np.array ([[6],\n [-7],\n [5]])\nprint('Vector A is ', A)\nprint('Vector B is ', B)\nprint('Vector C is ', C)\nprint('Vector D is ', D)",
"Vector A is [7 6]\nVector B is [ 9 -3]\nVector C is [[ 7]\n [ 8]\n [-4]]\nVector D is [[ 6]\n [-7]\n [ 5]]\n"
]
],
[
[
"#### Describing vectors in NumPy",
"_____no_output_____"
],
[
"If we intend to execute basic to complex operations using vectors, we must first define them. Knowing the shape, size, and dimensions of vectors are the most basic ways to describe them.",
"_____no_output_____"
]
],
[
[
"### Checking shapes\n### Shapes tells us how many elements are there on each row and column\n\nA.shape\nL = np.array([4, 9, -8, 6, -2.1, 7])\nL.shape\n",
"_____no_output_____"
],
[
"### Checking size\n### Array/Vector sizes tells us many total number of elements are there in the vector\n\nL.size",
"_____no_output_____"
],
[
"### Checking dimensions\n### The dimensions or rank of a vector tells us how many dimensions are there for the vector.\nW = np.array([[[4, 9, -8, 6, -2.1, 7]]])\nW.ndim",
"_____no_output_____"
]
],
[
[
"#### Addition",
"_____no_output_____"
],
[
"The addition rule is straightforward: we simply add the matrices' elements according to their index. In this situation, combining vectors $C$ and $D$ yields the following vector:",
"_____no_output_____"
],
[
"$$R = 13\\hat{i}+\\hat{j}+\\hat{k} \\\\ \\\\or \\\\ \\\\ R = \\begin{bmatrix} 13 & 1 & 1\\end{bmatrix} \n$$",
"_____no_output_____"
]
],
[
[
"R = np.add(C, D) \nP = np.add(A, B)",
"_____no_output_____"
],
[
"R = C + D\n\nR",
"_____no_output_____"
],
[
"P = A + B \n\nP",
"_____no_output_____"
],
[
"pos1 = np.array([1,2,3])\npos2 = np.array([3,-10,13])\npos3 = np.array([2,-5,7])\npos4 = np.array([4,-0.3,4])\nR = pos1 + pos2 + pos3 + pos4\n#R = np.multiply(pos3, pos4)\n#R = pos2/ pos4\nR",
"_____no_output_____"
],
[
"pos1 = np.array([1,2,3])\npos2 = np.array([3,-10,13])\npos3 = np.array([2,-5,7])\npos4 = np.array([4,-0.3,4])\n#R = pos1 + pos2 + pos3 + pos4\nR = np.multiply(pos3, pos4)\n#R = pos2 / pos4\nR",
"_____no_output_____"
],
[
"pos1 = np.array([1,2,3])\npos2 = np.array([3,-10,13])\npos3 = np.array([2,-5,7])\npos4 = np.array([4,-0.3,4])\n#R = pos1 + pos2 + pos3 + pos4\n#R = np.multiply(pos1, pos4)\nR = pos2 / pos4\nR",
"_____no_output_____"
]
],
[
[
"##### Try for yourself!",
"_____no_output_____"
],
[
"Try to implement subtraction, multiplication, and division with vectors $A$ and $B$!",
"_____no_output_____"
]
],
[
[
"### Subtraction\nJ = np.subtract (A,B)\n\nJ",
"_____no_output_____"
],
[
"### Multiplication\nO = np.multiply (A,B)\n\nO",
"_____no_output_____"
],
[
"### Division\nN = np.divide (A,B)\n\nN",
"_____no_output_____"
]
],
[
[
"### Scaling",
"_____no_output_____"
],
[
"Scaling, also known as scalar multiplication, involves multiplying a scalar value by a vector. Consider the following case:",
"_____no_output_____"
],
[
"$$S = 7 \\cdot B$$",
"_____no_output_____"
],
[
"We can do this in numpy through:\n",
"_____no_output_____"
]
],
[
[
"#S = 7 * B\nS = np.multiply(7,B)\nS",
"_____no_output_____"
]
],
[
[
"Try to implement scaling with two vectors.",
"_____no_output_____"
]
],
[
[
"###Scaling with two vectors\n#S = 4 * A\n#S = 4 * B\nA = np.array ([7,4])\nB = np.array ([10,7])\nS = np.multiply (4,A)\nS",
"_____no_output_____"
],
[
"###Scaling with two vectors\n#S = 4 * A\n#S = 4 * B\nA = np.array ([7,4])\nB = np.array ([10,7])\nS = np.multiply (4,B)\nS",
"_____no_output_____"
]
],
[
[
"### MatPlotLib",
"_____no_output_____"
],
[
"MatPlotLib, also known as the MATLab Plotting Library, is Python's version of MATLab's plotting capability. MatPlotLib has a wide range of applications, from grabbing numbers to showing data in several dimensions.",
"_____no_output_____"
],
[
"#### Visualizing Data",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport matplotlib\n%matplotlib inline",
"_____no_output_____"
],
[
"A = [7, 4]\nB = [-4, 3]\nplt.scatter(A[0], A[1], label='A', c='blue')\nplt.scatter(B[0], B[1], label='B', c='red')\nplt.grid()\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"A = np.array([2, -3])\nB = np.array([2, 4])\nR = A + B\nMagnitude = np.sqrt(np.sum(R**2))\nplt.title(\"Resultant Vector\\nMagnitude:{}\" .format(Magnitude))\nplt.xlim(-4, 4)\nplt.ylim(-4, 4)\nplt.quiver(0, 0, A[0], A[1], angles='xy', scale_units='xy', scale=1, color='red')\nplt.quiver(A[0], A[1], B[0], B[1], angles='xy', scale_units='xy', scale=1, color='green')\nplt.quiver(0, 0, R[0], R[1], angles='xy', scale_units='xy', scale=1, color='black')\nplt.grid()\nplt.show()\nprint(R)\nprint(Magnitude)\nSlope = R[1]/R[0]\nprint(Slope)\nAngle = (np.arctan(Slope))*(180/np.pi)\nprint(Angle)",
"_____no_output_____"
]
],
[
[
"Try plotting Three Vectors and show the Resultant Vector as a result. Use Head to Tail Method.",
"_____no_output_____"
]
],
[
[
"### 2x2 vector\nA = np.array([1.5, -2])\nB = np.array([1.5, 4])\nR = A + B\nMagnitude = np.sqrt(np.sum(R**2))\nplt.title(\"Resultant Vector\\nMagnitude:{}\" .format(Magnitude))\nplt.xlim(-4, 4)\nplt.ylim(-4, 4)\nplt.quiver(0, 0, A[0], A[1], angles='xy', scale_units='xy', scale=1, color='purple')\nplt.quiver(A[0], A[1], B[0], B[1], angles='xy', scale_units='xy', scale=1, color='blue')\nplt.quiver(0, 0, R[0], R[1], angles='xy', scale_units='xy', scale=1, color='grey')\nplt.grid()\nplt.show()\nprint(R)\nprint(Magnitude)\nSlope = R[1]/R[0]\nprint(Slope)\nAngle = (np.arctan(Slope))*(180/np.pi)\nprint(Angle)",
"_____no_output_____"
],
[
"####Three vectors\nsheesh = np.array([8, 12])\nyikes = np.array([12, 8])\nmeh = sheesh + yikes\nnah = np.array([-15, 15])\nshoot = nah + sheesh + yikes\nmagnitude = np.sqrt(np.sum(shoot**2))\nplt.title(\"Resultant Vector\\nMagnitude: {} \\n Resultant: {}\".format(magnitude, shoot))\nplt.xlim(-10, 40)\nplt.ylim(-10, 40)\nplt.quiver(0, 0, sheesh[0], sheesh[1], angles='xy', scale_units='xy', scale=1, color='green')\nplt.quiver(sheesh[0], sheesh[1], yikes[0], yikes[1], angles='xy', scale_units='xy', scale=1, color='blue')\nplt.quiver(meh[0], meh[1], nah[0], nah[1], angles='xy', scale_units='xy', scale=1, color='purple')\nplt.quiver(0, 0, shoot[0], shoot[1], angles='xy', scale_units='xy', scale=1, color='red')\nplt.grid()\nplt.show()\nSlope = R[1]/R[0]\nprint(Slope)\nAngle = (np.arctan(Slope))*(180/np.pi)\nprint(Angle)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ec8f2aca9af6cc836cf3b98dfa3cbb6a210cbc31 | 73,204 | ipynb | Jupyter Notebook | code/smote_ipf/smote_ipf.ipynb | vs666/SMAI-Project | bf910e6417f861f302a1f3bed49dfcd7f6f7e695 | [
"MIT"
] | null | null | null | code/smote_ipf/smote_ipf.ipynb | vs666/SMAI-Project | bf910e6417f861f302a1f3bed49dfcd7f6f7e695 | [
"MIT"
] | null | null | null | code/smote_ipf/smote_ipf.ipynb | vs666/SMAI-Project | bf910e6417f861f302a1f3bed49dfcd7f6f7e695 | [
"MIT"
] | 1 | 2021-12-13T05:19:59.000Z | 2021-12-13T05:19:59.000Z | 228.7625 | 40,562 | 0.916685 | [
[
[
"# SMOTE IPF Implementation to oversample any dataset\n\nThe following code can be used to make create oversampled dataset using SMOTE-IPF. It also shows various details of the dataset created like the number of new samples in each class and shows a diagram showing the changes from the original dataset side by side.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport smote_variants as sv\nimport imbalanced_databases as imbd",
"_____no_output_____"
]
],
[
[
"Loading the IRIS dataset with binary classification. IRIS can be replaced with any dataset here.",
"_____no_output_____"
]
],
[
[
"dataset= imbd.load_iris0()\nX, y= dataset['data'], dataset['target']",
"_____no_output_____"
]
],
[
[
"## Illustrating the imbalance in the current dataset using a scatter plot",
"_____no_output_____"
],
[
"Finding the Majority class and the distribution of the dataset",
"_____no_output_____"
]
],
[
[
"for i in np.unique(y):\n print(\"class %d - samples: %d\" % (i, np.sum(y == i)))",
"class 0 - samples: 100\nclass 1 - samples: 50\n"
]
],
[
[
"We find that class 0 is the majority class. Using that information and making a scatter plot of the dataset.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10, 10))\nplt.scatter(X[y == 0][:,0], X[y == 0][:,1], label='Majority class', c='blue')\nplt.scatter(X[y == 1][:,0], X[y == 1][:,1], label='Minority class', c='olive')\nplt.title('Original dataset')\nplt.xlabel('Coordinate 0')\nplt.ylabel('Coordinate 1')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"## Oversampling using SMOTE_IPF\n\nWe use default paramaters with proportion `1.0`. This ensures that the oversampled dataset has equal number of samples. All the default parameters are \n```\n{\n 'proportion': 1.0, \n 'n_neighbors': 5, # number of neighbors in SMOTE sampling\n 'n_folds': 9, # the number of partitions\n 'k': 3, # used in stopping condition\n 'p': 0.01, # percentage value ([0,1]) used in stopping condition\n 'voting': 'majority', # 'majority'/'consensus'\n 'n_jobs': 1, # number of parallel jobs\n 'classifier': DecisionTreeClassifier(random_state=2), # classifier object\n 'random_state': None # initializer of random_state\n}\n```",
"_____no_output_____"
]
],
[
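[
"# Illustrative sketch (parameter values chosen arbitrarily; this object is not used below):\n# the defaults listed above can be overridden when constructing the oversampler.\noversampler_custom = sv.SMOTE_IPF(proportion=0.5, n_neighbors=5, voting='consensus')",
"_____no_output_____"
],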
[
"oversampler= sv.SMOTE_IPF()\nX_samp, y_samp= oversampler.sample(X, y)",
"2021-11-20 23:19:15,970:INFO:SMOTE_IPF: Running sampling via ('SMOTE_IPF', \"{'proportion': 1.0, 'n_neighbors': 5, 'n_folds': 9, 'k': 3, 'p': 0.01, 'voting': 'majority', 'n_jobs': 1, 'classifier': DecisionTreeClassifier(random_state=2), 'random_state': None}\")\n2021-11-20 23:19:15,971:INFO:SMOTE: Running sampling via ('SMOTE', \"{'proportion': 1.0, 'n_neighbors': 5, 'n_jobs': 1, 'random_state': <module 'numpy.random' from '/home/nmc/.local/lib/python3.9/site-packages/numpy/random/__init__.py'>}\")\n2021-11-20 23:19:15,979:INFO:SMOTE_IPF: Removing 0 elements\n2021-11-20 23:19:15,985:INFO:SMOTE_IPF: Removing 0 elements\n2021-11-20 23:19:15,992:INFO:SMOTE_IPF: Removing 0 elements\n"
]
],
[
[
"`X_samp` contains oversampled `X` data and `y_samp` contains oversampled `y` data. Now let's look at the number of samples in each class.",
"_____no_output_____"
]
],
[
[
"print('Majority class: %d' % np.sum(y_samp == 0))\nprint('Minority class: %d' % np.sum(y_samp == 1))",
"Majority class: 100\nMinority class: 100\n"
]
],
[
[
"Each class has equal number of datapoints as we have described above. Now we filters newly sampled datapoints to plot them distinctly below.",
"_____no_output_____"
]
],
[
[
"X_samp, y_samp= X_samp[len(X):], y_samp[len(y):]",
"_____no_output_____"
]
],
[
[
"Printing the number of new samples",
"_____no_output_____"
]
],
[
[
"for i in np.unique(y_samp):\n print(\"class %d - samples: %d\" % (i, np.sum(y_samp == i)))",
"class 1 - samples: 50\n"
]
],
[
[
"Now using a scatter plot, we plot Original dataset vs Oversampled dataset, showing the new samples distinctly.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(20, 10))\nax= plt.subplot(121) # Plotting original\nplt.scatter(X[y == 0][:,0], X[y == 0][:,1], label='Majority class', c='blue', marker='o')\nplt.scatter(X[y == 1][:,0], X[y == 1][:,1], label='Minority class', c='olive', marker='o')\n# plt.scatter(X_samp[y_samp == 1][:,0], X_samp[y_samp == 1][:,1], label='New minority samples', c='olive', marker='x')\nplt.title('Original dataset')\nplt.xlabel('Coordinate 0')\nplt.ylabel('Coordinate 1')\n\n\nax= plt.subplot(122)\nplt.scatter(X[y == 0][:,0], X[y == 0][:,1], label='Majority class', c='blue', marker='o')\nplt.scatter(X[y == 1][:,0], X[y == 1][:,1], label='Minority class', c='olive', marker='o')\nplt.scatter(X_samp[y_samp == 1][:,0], X_samp[y_samp == 1][:,1], label='New minority samples', c='olive', marker='x')\nplt.title('Oversampled dataset')\nplt.xlabel('Coordinate 0')\nplt.ylabel('Coordinate 1')\nplt.legend()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8f3480f421ce41e65304a8c06bb16247f33513 | 7,019 | ipynb | Jupyter Notebook | Linked List/.ipynb_checkpoints/Linked_List_1-checkpoint.ipynb | jarvis-1805/DSAwithPYTHON | 872073d1b8d0001ea8b1a54b5e327dd0c1c406f2 | [
"Apache-2.0"
] | 1 | 2021-03-21T18:54:34.000Z | 2021-03-21T18:54:34.000Z | Linked List/Linked_List_1.ipynb | jarvis-1805/DSAwithPYTHON | 872073d1b8d0001ea8b1a54b5e327dd0c1c406f2 | [
"Apache-2.0"
] | null | null | null | Linked List/Linked_List_1.ipynb | jarvis-1805/DSAwithPYTHON | 872073d1b8d0001ea8b1a54b5e327dd0c1c406f2 | [
"Apache-2.0"
] | null | null | null | 20.227666 | 62 | 0.411027 | [
[
[
"# Linked List Node",
"_____no_output_____"
]
],
[
[
"class Node:\n \n def __init__(self, data):\n self.data = data\n self.next = None\n \na = Node(13)\nb = Node(15)\na.next = b\nprint(a.data)\nprint(b.data)\nprint(a.next.data)\nprint(a)\nprint(a.next)\nprint(b)\n# print(b.next.data) ERROR",
"13\n15\n15\n<__main__.Node object at 0x000002DC602AE5E0>\n<__main__.Node object at 0x000002DC602AE0A0>\n<__main__.Node object at 0x000002DC602AE0A0>\n"
]
],
[
[
"# Linked List Input",
"_____no_output_____"
]
],
[
[
"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\ndef take_input():\n inputList = [int(ele) for ele in input().split()]\n head = None\n tail = None\n for currData in inputList:\n if currData == -1:\n break\n \n newNode = Node(currData)\n if head is None:\n head = newNode\n tail = newNode\n else:\n tail.next = newNode\n tail = newNode\n \n return head\n\ndef printLL(head):\n while head is not None:\n print(str(head.data) + ' -> ', end = '')\n head = head.next\n print('None')\n \n return\n\nhead = take_input()\nprintLL(head)\nprintLL(head)",
"1 2 3 2 1\n1 -> 2 -> 3 -> 2 -> 1 -> None\n1 -> 2 -> 3 -> 2 -> 1 -> None\n"
]
],
[
[
"# Insert At ith Position - Iteratively",
"_____no_output_____"
]
],
[
[
"def length(head):\n ctr = 0\n while head is not None:\n ctr += 1\n head = head.next\n return ctr\n\ndef insert(head, i, key):\n if i<0 or i>length(head):\n return head\n \n curr = head\n prev = None\n ctr = 0\n while ctr < i:\n prev = curr\n curr = curr.next\n ctr += 1\n \n newNode = Node(key)\n if prev is not None:\n prev.next = newNode\n \n else:\n head = newNode\n newNode.next = curr\n \n return head\n\nprintLL(head)\nhead = insert(head, 2, 6)\nprintLL(head)",
"1 -> 2 -> 3 -> 4 -> 5 -> None\n1 -> 2 -> 6 -> 3 -> 4 -> 5 -> None\n"
],
[
"head = insert(head, 0, 9)\nprintLL(head)",
"9 -> 1 -> 2 -> 6 -> 3 -> 4 -> 5 -> None\n"
],
[
"head = insert(head, 7, 10)\nprintLL(head)",
"9 -> 1 -> 2 -> 6 -> 3 -> 4 -> 5 -> 10 -> None\n"
],
[
"head = insert(head, 9, 13)\nprintLL(head)",
"9 -> 1 -> 2 -> 6 -> 3 -> 4 -> 5 -> 10 -> None\n"
]
],
[
[
"# Insert At Ith Position - Recursively",
"_____no_output_____"
]
],
[
[
"def insertRec(head, i, data):\n if i<0:\n return head\n \n if i == 0:\n newNode = Node(data)\n newNode.next = head\n return newNode\n \n if head is None:\n return None\n \n smallHead = insertRec(head.next, i-1, data)\n head.next = smallHead\n return head\n\nprintLL(head)\nhead = insertRec(head, 2, 6)\nprintLL(head)",
"1 -> 2 -> 3 -> 4 -> 5 -> None\n1 -> 2 -> 6 -> 3 -> 4 -> 5 -> None\n"
],
[
"head = insertRec(head, 0, 9)\nprintLL(head)",
"9 -> 1 -> 2 -> 6 -> 3 -> 4 -> 5 -> None\n"
],
[
"head = insertRec(head, 7, 10)\nprintLL(head)",
"9 -> 1 -> 2 -> 6 -> 3 -> 4 -> 5 -> 10 -> None\n"
],
[
"head = insertRec(head, 9, 13)\nprintLL(head)",
"9 -> 1 -> 2 -> 6 -> 3 -> 4 -> 5 -> 10 -> None\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
ec8f4e21052f21b56cd84bbdc6175a081c881deb | 10,668 | ipynb | Jupyter Notebook | examples/supercompressible/Supercompressible_example.ipynb | gawelk/F3DASM | 4a4e7233add608820de9ee0fd1c369c2fa1d24c1 | [
"BSD-3-Clause"
] | 45 | 2019-10-15T06:08:23.000Z | 2020-08-01T03:15:11.000Z | examples/supercompressible/Supercompressible_example.ipynb | gawelk/F3DASM | 4a4e7233add608820de9ee0fd1c369c2fa1d24c1 | [
"BSD-3-Clause"
] | 1 | 2020-02-28T10:35:41.000Z | 2020-03-09T13:19:54.000Z | examples/supercompressible/Supercompressible_example.ipynb | gawelk/F3DASM | 4a4e7233add608820de9ee0fd1c369c2fa1d24c1 | [
"BSD-3-Clause"
] | 10 | 2020-01-10T09:42:58.000Z | 2020-07-20T19:57:15.000Z | 32.229607 | 249 | 0.52231 | [
[
[
"import os\nimport pickle\nimport time\nimport shutil\nfrom f3dasm.doe.doevars import DoeVars\nfrom f3dasm.simulator.abaqus.utils import create_temp_dir\nfrom f3dasm.simulator.abaqus.steps import PreProc, RunJob, PostProc\nfrom f3dasm.simulator.abaqus.utils import clean_abaqus_dir",
"_____no_output_____"
]
],
[
[
"### Defining the simulation flow\n\nHere, we define a custom simulation, with pre-processing, run, and post-processing steps. In this case all 3 steps are executed with abaqus, but with due to structure of F3DASM any step could easily be replaced with any other software package.",
"_____no_output_____"
]
],
[
[
"class Simulation():\n def __init__(self, name, \n preproc_script = None, \n postproc_script = None,\n ): \n self.name = name\n self.preproc = PreProc(name = name, abq_script =preproc_script)\n self.job = RunJob(name)\n self.postproc = PostProc(name = name, abq_script = postproc_script)\n\n def write_configs(self, simdir, inputs = None):\n self.preproc.write_input_pkl(simdir = simdir, inputs = inputs )\n self.job.write_input_pkl(simdir = simdir)\n self.postproc.write_input_pkl(simdir = simdir)\n\n def execute(self, simdir, inputs):\n self.preproc.execute(simdir = simdir, inputs=inputs)\n self.job.execute(simdir = simdir)\n self.postproc.execute(simdir = simdir)\n\n def extract_results(self, simdir):\n file_name = self.name + '_postproc'\n file_name = os.path.join(simdir, file_name)\n with open(file_name, 'rb') as file:\n data = pickle.load(file, encoding='latin1')\n return data",
"_____no_output_____"
]
],
[
[
"Helper function extracting results from linear buckling analysis, necessary to define inputs to RIKS",
"_____no_output_____"
]
],
[
[
"def get_inputs_riks(inputs, sim_lin_bckl, i_doe_lin_buckle_path):\n data_lin_buckle = sim_lin_bckl.extract_results(simdir=i_doe_lin_buckle_path )\n inputs_riks = inputs.copy()\n inputs_riks['coilable'] = int(data_lin_buckle['post-processing']['coilable'])\n inputs_riks['lin_bckl_max_disp'] = data_lin_buckle['post-processing']['max_disps'][1]\n inputs_riks['lin_buckle_odb'] = sim_lin_bckl.name\n inputs_riks['imperfection'] = 0.001\n return inputs_riks",
"_____no_output_____"
]
],
[
[
"# Defining DoE",
"_____no_output_____"
]
],
[
[
"vars = {'ratio_d': 0.006, #[0.004, 0.073],\n 'ratio_pitch': [0.75, 0.9], #[.25, 1.5],\n 'ratio_top_diameter': 0.7, #[0., 0.8],\n 'n_longerons': 3, \n 'bottom_diameter': 100.,\n 'young_modulus': 3500.,\n 'shear_modulus': 1287.}\n\ndoe = DoeVars(vars)\nprint('DoEVars definition:')\nprint(doe)\n\nprint('\\n DoEVars summary information:')\nprint(doe.info())\n# Compute sampling and combinations\ndoe.do_sampling()\n\nprint('\\n Pandas dataframe with compbined-sampled values:')\nprint(doe.data)\ndoe_pd = doe.data\ndoe_list = doe_pd.index.values.tolist()",
"DoEVars definition:\nDoeVars(variables={'ratio_d': 0.006, 'ratio_pitch': [0.75, 0.9], 'ratio_top_diameter': 0.7, 'n_longerons': 3, 'bottom_diameter': 100.0, 'young_modulus': 3500.0, 'shear_modulus': 1287.0}, sampling_vars=[], data=None)\n\n DoEVars summary information:\n-----------------------------------------------------\n DOE VARIABLES \n-----------------------------------------------------\nratio_d : 0.006\nratio_pitch : [0.75, 0.9]\nratio_top_diameter : 0.7\nn_longerons : 3\nbottom_diameter : 100.0\nyoung_modulus : 3500.0\nshear_modulus : 1287.0\n\n\nNone\n\n Pandas dataframe with compbined-sampled values:\n ratio_d ratio_pitch ratio_top_diameter n_longerons bottom_diameter \\\n0 0.006 0.75 0.7 3.0 100.0 \n1 0.006 0.90 0.7 3.0 100.0 \n\n young_modulus shear_modulus \n0 3500.0 1287.0 \n1 3500.0 1287.0 \n"
]
],
[
[
"# Setting up simulations",
"_____no_output_____"
]
],
[
[
"sim_lb = Simulation(name = 'linear_buckle', \n preproc_script = 'abaqus_modules.supercompressible_fnc.lin_buckle', \n postproc_script = 'abaqus_modules.supercompressible_fnc.post_process_lin_buckle'\n )\n\nsim_riks = Simulation(name = 'riks', \n preproc_script = 'abaqus_modules.supercompressible_fnc.riks', \n postproc_script = 'abaqus_modules.supercompressible_fnc.post_process_riks'\n )\nsim_riks.job.config['job_info']['numCpus'] = 1\nsim_lb.job.config['job_info']['numCpus'] = 1",
"_____no_output_____"
]
],
[
[
"## Create necessary directories",
"_____no_output_____"
]
],
[
[
"example_name = 'example_1'\n\nif not os.path.exists(example_name):\n os.mkdir(example_name)\nanalysis_folder = os.path.join(example_name, 'analyses')\nos.mkdir(analysis_folder )\n\n\ntemp_dir_name = '_temp'\ncreate_temp_dir(temp_dir_name)\n\nsim_lb_path = os.path.join(analysis_folder, sim_lb.name )\nos.mkdir(sim_lb_path)\n\nsim_rx_path = os.path.join(analysis_folder, sim_riks.name )\nos.mkdir(sim_rx_path)",
"_____no_output_____"
]
],
[
[
"## Run the simulations",
"_____no_output_____"
]
],
[
[
"for i_doe in doe_list:\n\n #LINEAR BUCKLING\n i_doe_path = os.path.join(sim_lb_path, 'DoE_point%i' % i_doe)\n os.mkdir( i_doe_path)\n inputs = doe_pd.iloc[i_doe].to_dict()\n inputs['n_longerons'] = int(inputs['n_longerons'])\n sim_lb.execute(simdir=i_doe_path, inputs = inputs)\n\n print('Linear buckling, Doe ',i_doe, 'complete' )\n #RIKS \n inputs_riks = get_inputs_riks(inputs, sim_lb, i_doe_path) \n if inputs_riks['coilable']: \n i_doe_riks = os.path.join(sim_rx_path, 'DoE_point%i' % i_doe)\n os.mkdir( i_doe_riks)\n\n #Riks needs access to lin buckle odb file \n lb_odb = os.path.join(i_doe_path, sim_lb.name + '.odb')\n target = os.path.join(i_doe_riks, sim_lb.name + '.odb')\n shutil.copyfile(lb_odb, target, follow_symlinks=True)\n while not os.path.exists(target):\n print('copying odb')\n time.sleep(0.001)\n\n #with odb files we also need to pass prt file, in order \n # for odb to recognize the model instance\n lb_odb = os.path.join(i_doe_path, sim_lb.name + '.prt')\n target = os.path.join(i_doe_riks, sim_lb.name + '.prt')\n shutil.copyfile(lb_odb, target, follow_symlinks=True)\n while not os.path.exists(target):\n print('copying inp')\n time.sleep(0.001)\n\n\n sim_riks.write_configs(simdir = i_doe_riks, inputs = inputs_riks)\n sim_riks.execute(simdir = i_doe_riks, inputs = inputs_riks)\n print('RIKS, Doe ',i_doe, 'complete' )\n\n riks_data = sim_riks.extract_results(i_doe_riks)\n\n else:\n print('RIKS, Doe ',i_doe, 'failed' )\n print('DoE non-coilable')\n \n\n \n clean_abaqus_dir(ext2rem=('.abq', '.com', '.log', '.mdl', '.pac', '.rpy',\n '.sel', '.stt'),\n dir_path=None)",
"Linear buckling, Doe 0 complete\nRIKS, Doe 0 complete\nLinear buckling, Doe 1 complete\nRIKS, Doe 1 failed\nDoE non-coilable\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8f612289f62823d8df698c3a394bde0c435523 | 84,185 | ipynb | Jupyter Notebook | rigid_body_3DOF.ipynb | martinlarsalbert/motion-to-acceleration | b78d07033c5a6107d04bf693f88a7cbd77020c28 | [
"MIT"
] | null | null | null | rigid_body_3DOF.ipynb | martinlarsalbert/motion-to-acceleration | b78d07033c5a6107d04bf693f88a7cbd77020c28 | [
"MIT"
] | 1 | 2020-10-26T19:47:02.000Z | 2020-10-26T19:47:02.000Z | rigid_body_3DOF.ipynb | martinlarsalbert/motion-to-acceleration | b78d07033c5a6107d04bf693f88a7cbd77020c28 | [
"MIT"
] | 1 | 2020-10-26T09:17:00.000Z | 2020-10-26T09:17:00.000Z | 116.117241 | 17,920 | 0.869763 | [
[
[
"## Rigid body 3 DOF\nDevlop a system for a rigid body in 3 DOF and do a simualtion",
"_____no_output_____"
]
],
[
[
"import warnings\n#warnings.filterwarnings('ignore')\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import sympy as sp\nimport sympy.physics.mechanics as me\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom substitute_dynamic_symbols import substitute_dynamic_symbols, find_name, find_derivative_name, lambdify, find_derivatives\n\nfrom pydy.codegen.ode_function_generators import generate_ode_function\nfrom scipy.integrate import odeint\nfrom sympy import cos,sin\n",
"_____no_output_____"
],
[
"x0,y0,z0 = me.dynamicsymbols('x0 y0 z0')\nx01d,y01d,z01d = me.dynamicsymbols('x01d y01d z01d')\nu,v,w = me.dynamicsymbols('u v w')\nu1d,v1d,w1d = me.dynamicsymbols('u v w',1)\n\n\nphi,theta,psi = me.dynamicsymbols('phi theta psi')\nphi1d,theta1d,psi1d = me.dynamicsymbols('phi1d theta1d psi1d')\n\nt = sp.symbols('t')",
"_____no_output_____"
],
[
"N = me.ReferenceFrame('N')",
"_____no_output_____"
],
[
"S = N.orientnew('S', 'Axis', [psi,N.z])",
"_____no_output_____"
],
[
"S.ang_vel_in(N)",
"_____no_output_____"
],
[
"S.ang_acc_in(N)",
"_____no_output_____"
],
[
"M = me.Point('M')\nO = M.locatenew('P',0)\n\nM.set_vel(N,0)\nO.set_vel(S,u*S.x + v*S.y)\n\nO.v1pt_theory(M,N,S)",
"_____no_output_____"
],
[
"velocity_matrix = O.vel(N).to_matrix(N)\nvelocity_matrix",
"_____no_output_____"
]
],
[
[
"## Mass",
"_____no_output_____"
]
],
[
[
"mass = sp.symbols('m')",
"_____no_output_____"
]
],
[
[
"## Inertia",
"_____no_output_____"
]
],
[
[
"I_xx, I_yy, I_zz = sp.symbols('I_xx, I_yy, I_zz')\nbody_inertia_dyadic = me.inertia(S, ixx=I_xx, iyy=I_yy, izz=I_zz)\nbody_inertia_dyadic",
"_____no_output_____"
],
[
"body_inertia_dyadic.to_matrix(S)",
"_____no_output_____"
],
[
"body_central_inertia = (body_inertia_dyadic, O)",
"_____no_output_____"
],
[
"body = me.RigidBody('Rigid body', masscenter=O, frame = S,\n mass=mass, inertia=body_central_inertia)",
"_____no_output_____"
]
],
[
[
"## Forces",
"_____no_output_____"
]
],
[
[
"fx, fy, fz, mx, my, mz = sp.symbols('f_x f_y f_z m_x m_y m_z')",
"_____no_output_____"
],
[
"force_vector = fx*S.x + fy*S.y \ntorque_vector = mz*S.z",
"_____no_output_____"
],
[
"force = (O, force_vector)\ntorque = (S, torque_vector)",
"_____no_output_____"
]
],
[
[
"## Equations of Motion",
"_____no_output_____"
]
],
[
[
"coordinates = [x0, y0, psi]\nspeeds = [u, v, psi1d]",
"_____no_output_____"
],
[
"kinematical_differential_equations = [x0.diff() - velocity_matrix[0], \n y0.diff() - velocity_matrix[1],\n psi.diff() - psi1d,\n ]",
"_____no_output_____"
],
[
"kinematical_differential_equations",
"_____no_output_____"
],
[
"#?me.KanesMethod",
"_____no_output_____"
],
[
"kane = me.KanesMethod(N, coordinates, speeds, kinematical_differential_equations)",
"_____no_output_____"
],
[
"loads = [force,\n torque]",
"_____no_output_____"
],
[
"bodies = [body]\nfr, frstar = kane.kanes_equations(bodies, loads)",
"_____no_output_____"
],
[
"constants = [I_xx, I_yy, I_zz,mass]\n\nspecified = [fx, fy, mz] # External force/torque\n\nright_hand_side = generate_ode_function(kane.forcing_full, coordinates,\n speeds, constants,\n mass_matrix=kane.mass_matrix_full,specifieds=specified)",
"_____no_output_____"
],
[
"coordinates_ = [0, 0, 0,]\nspeeds_ = [0, 0, 0,]\n\nstart = np.array(coordinates_+speeds_)\n\nt_ = 0.\nforce_torque = [1,0,0]\nnumerical_specified = np.array(force_torque)\n\nI_xx_ = 1\nI_yy_ = 1\nI_zz_ = 1\nmass_ = 1\n\nnumerical_constants = np.array([I_xx_, I_yy_, I_zz_,mass_])\n\nright_hand_side(start, t_, numerical_specified, numerical_constants)",
"_____no_output_____"
],
[
"def simulate(t,force_torque, I_xx,I_yy,I_zz,mass, initial_coordinates = [0, 0, 0,], \n initial_speeds = [0, 0, 0,]):\n \n start = np.array(initial_coordinates+initial_speeds)\n \n numerical_specified = force_torque\n \n numerical_constants = np.array([I_xx, I_yy, I_zz, mass])\n \n df = pd.DataFrame(index=t)\n y = odeint(right_hand_side, start, t, args=(numerical_specified, numerical_constants))\n \n for i,symbol in enumerate(coordinates+speeds):\n name = symbol.name\n df[name] = y[:,i]\n \n return df",
"_____no_output_____"
],
[
"t = np.linspace(0,10,100)\n\ndf = simulate(t=t, force_torque=[1,0,0],I_xx=1, I_yy=1, I_zz=1, mass=1)\n\n\nfig,ax = plt.subplots()\ndf.plot(y='x0', ax=ax);\nax.set_xlabel('time [s]')\n\nfig,ax = plt.subplots()\ndf.plot(y='u', ax=ax);\nax.set_xlabel('time [s]');",
"_____no_output_____"
],
[
"def track_plot(df,ax, l=1, time_step='1S'):\n \n df.plot(x='y0', y='x0',ax = ax)\n \n df_ = df.copy()\n df_.index = pd.TimedeltaIndex(df_.index,unit='s')\n df_ = df_.resample(time_step).first() \n \n def plot_body(row):\n x = row['y0']\n y = row['x0']\n psi = row['psi']\n xs = [x-l/2*np.sin(psi),x+l/2*np.sin(psi)]\n ys = [y-l/2*np.cos(psi),y+l/2*np.cos(psi)]\n ax.plot(xs,ys,'k-')\n \n for index,row in df_.iterrows():\n plot_body(row)\n \n ax.set_xlabel('y0')\n ax.set_ylabel('x0')\n ax.axis('equal')\n \n \n \n \n ",
"_____no_output_____"
],
[
"t = np.linspace(0,10,100)\n\ndf = simulate(t=t, force_torque=[0,0,0],I_xx=1, I_yy=1, I_zz=1, mass=1,\n initial_speeds=[1,0,0,],initial_coordinates=[0, 0, 0,])\n\n\nfig,ax = plt.subplots()\ntrack_plot(df,ax)\n\n\nfig,ax = plt.subplots()\ndf.plot(y='psi', ax=ax);\nax.set_xlabel('time')\nax.set_ylabel('psi')\n\n",
"/Users/martinalexandersson/Dev/motion-to-acceleration/venv/lib/python3.6/site-packages/pandas/plotting/_matplotlib/core.py:1085: UserWarning: Attempting to set identical left == right == 0.0 results in singular transformations; automatically expanding.\n ax.set_xlim(left, right)\n"
],
[
"\nradius = 10 # Radius of rotation [m]\nw = 0.1 # Angle velocity [rad/s]\nV = radius*w # Speed of point [m/s]\nt = np.linspace(0,2*np.pi/w,100)\n\nmass=1\n\nexpected_acceleration = -radius*w**2\nexpected_force = mass*expected_acceleration\n\n\ndf = simulate(t=t, force_torque=[0,-expected_force,0],I_xx=1, I_yy=1, I_zz=1, mass=mass,\n initial_speeds=[V,0,w,],initial_coordinates=[0, 0, 0,])\n\n\nfig,ax = plt.subplots()\ntrack_plot(df,ax, time_step = '5S')\n\n\nfig,ax = plt.subplots()\ndf.plot(y='psi', ax=ax);\nax.set_xlabel('time')\nax.set_ylabel('psi')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8f612f027bb67c3230257a1123ac7984b6a62f | 3,124 | ipynb | Jupyter Notebook | project1/read_me.ipynb | wesleybeckner/UW_Direct | a6bbfd8360f7b971d94daefde149599b7b87b32e | [
"MIT"
] | null | null | null | project1/read_me.ipynb | wesleybeckner/UW_Direct | a6bbfd8360f7b971d94daefde149599b7b87b32e | [
"MIT"
] | null | null | null | project1/read_me.ipynb | wesleybeckner/UW_Direct | a6bbfd8360f7b971d94daefde149599b7b87b32e | [
"MIT"
] | null | null | null | 33.234043 | 155 | 0.582266 | [
[
[
"<a id='top'></a>\n\n# There are Two Projects Here\n\nProject 1 has more to do with data visualziation and project 2 with a litle more optimization/hardcore machine learning\n\nI think both have validity in the job market and are useful things to learn -- It just depends on what your interests are and what is of interest!\n\n# Corian Project (these are mostly just notes): \n\n## Pots of Gold\n\n1. here's your product wheel and the loss in transition sheets from your random\n production plan to one that is optimized. Potentially run through\n monte carlo\n * traveling salesman\n \n## Hypotheses\n* we have some anomalies in the data\n 1. [unusual Next Color Code](#color)\n \n* can we use the sequence of order numbers to isolate those for which we had failed transitions \n (i.e. we gave up) and determine the relationship between transition attempts and fail rate?\n 1. [fail rates](#fail)\n\n \n\n \n\n* ### Transition Distributions\n 1. [distributions](#dist) Let's fill in the cells in the matrix\n 1. [fraction of transitions explained](#terminate) When should we terminate a bad transition?\n 3. [sorted boxplots](#sorted)\n 2. are we learning as we do the transition more often? [median transition sheets vs number of transits](#transits)\n\n\n[combined data](#combinations)\n\n* if we combine with **family labels** what are the transition characteristics for (#2)\n \n 1. inter-family transitions\n 2. intra-family transitions\n 3. inter-subfamily transitions\n 4. intra-subfamily transitions\n 5. where are we having the most operational upsets in terms of product/family/subfamily\n\n\n* if we combine with **scheduling data**\n 1. what transitions are being scheduled today and do we have transition data for them\n\n\n* combined with **downtime data**\n 1. what is the relationship between products on the line and downtime\n \n # Gendorf Project\n \n [app-site](#http://gendorf-dev.herokuapp.com/)\n \n -- more dataviz focused and less ML\n \n It's a current project we're working on -- up to ya'lls interest level\n ",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
ec8f63a27c960de6e3b22039a0dc5a920ad09e0e | 8,078 | ipynb | Jupyter Notebook | Final Report.ipynb | Soumya44/Coursera_Capstone | 71066179b762f0d3f2093bf54b720e8e5d4e0f17 | [
"MIT"
] | 1 | 2020-01-03T20:10:26.000Z | 2020-01-03T20:10:26.000Z | Final Report.ipynb | Soumya44/Coursera_Capstone | 71066179b762f0d3f2093bf54b720e8e5d4e0f17 | [
"MIT"
] | 1 | 2020-01-13T15:01:53.000Z | 2020-01-13T15:01:53.000Z | Final Report.ipynb | Soumya44/Coursera_Capstone | 71066179b762f0d3f2093bf54b720e8e5d4e0f17 | [
"MIT"
] | 12 | 2019-02-02T11:43:30.000Z | 2020-08-01T19:17:51.000Z | 58.536232 | 756 | 0.702154 | [
[
[
"# A Recommender System for Groceries Contractor",
"_____no_output_____"
],
[
"# Brief Introduction",
"_____no_output_____"
],
[
"## Part 1: Problem Description\n### There is a groceries contractor in one of the boroughs of Toronto (Scarborough). This contractor provides places such as: Different types of Restaurants, Bakery, Breakfast Spot, Brewery and Café with fresh and high-quality groceries. The contractor wants to build a warehouse for the groceries it buys from villagers and farmers inside the borough, so that they will support more customers and also bring better \"Quality of Service\" to the old customers. \n### For example, if the warehouse is close to those old and famous restaurants, then the vegetables and other groceries would be delivered to the restaurant in the right time and there would be no delay so the restaurant cooks can start their job from the morning and the Quality of Service will be high and this contractor will gain more reputation and income.\n### The contractor should build this warehouse where it is closest to its customers in order to minimize the cost of transpotation in addition to the example above. which neighborhood (in that borough) would be a better choice for the contractor to build the warehouse in that neighborhood. Finding the right neighborhood is our mission and our recommender system will provide this contractor with a sorted list of neighborhoods in which the first elemnt of the list will be the best suggested neighborhood.\n",
"_____no_output_____"
],
[
"## Part 2: Data We Need\n### 1- We will need geo-locational information about that specific borough and the neighborhoods in that borough. We specifically and technically mean the latitude and longitude numbers of that borough. We assume that it is \"Scarborough\" in Toronto. This is easily provided for us by the contractor, because the contractor has already made up his mind about the borough. The Postal Codes that fall into that borough (Scarborough) would also be sufficient fo us. I fact we will first find neighborhoods inside Scarborough by their corresponding Postal Codes.\n\n### 2- We will need data about different venues in different neighborhoods of that specific borough. In order to gain that information we will use \"Foursquare\" locational information. By locational information for each venue we mean basic and advanced information about that venue. For example there is a venue in one of the neighborhoods. As basic information, we can obtain its precise latitude and longitude and also its distance from the center of the neighborhood. But we are looking for advanced information such as the category of that venue and whether this venue is a popular one in its category or maybe the average price of the services of this venue. A typical request from Foursquare will provide us with the following information:\n#### [Postal Code]\t[Neighborhood(s)]\t[Neighborhood Latitude]\t[Neighborhood Longitude]\t[Venue]\t[Venue Summary]\t[Venue Category]\t[Distance (meter)]\n#### [M1L]\t[Clairlea, Golden Mile, Oakridge]\t[43.711112]\t[-79.284577]\t[Tim Hortons]\t[This spot is popular]\t[Coffee Shop]\t[592]",
"_____no_output_____"
],
[
"### Some Notes about \"Foursquare\": <https://foursquare.com/>\n##### Foursquare is a local search-and-discovery service mobile app which provides search results for its users (Wikipedia).\n##### Founded: New York City, New York, U.S\n##### Users: 60 million\n##### Date launched: March 11, 2009\n##### Employees: Over 200\n##### Founders: Dennis Crowley, Naveen Selvadurai\n##### Owner: Foursquare Labs, Inc.",
"_____no_output_____"
],
[
"# Main Article\n## Part 1: Identifying Neighborhoods inside \"Scarborough\"\n### We will use Postal Codes of different regions inside Scarborough to find the list of neighborhoods. We will essentially obtain our information from <https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M> and then process the table inside this site. Images from dataframes and also from maps will be provided in the presentation. Here we only present our strategy and how we got the mission accomplished. ",
"_____no_output_____"
],
[
"## Part 2: Connecting to Foursquare and Retrieving Locational Data \n## for Each Venue in Every Neighborhood \n### After finding the list of neighborhoods, we then connect to the Foursquare API to gather information about venues inside each and every neighborhood. For each neighborhood, we have chosen the radius to be 1000 meter. It means that we have asked Foursquare to find venues that are at most 1000 meter far from the center of the neighborhood. (I think distance is measured by latitude and longitude of venues and neighborhoods, and it is not the walking distance for venues.)",
"_____no_output_____"
],
[
"## Part 3: Processing the Retrieved Data and Creating a DataFrome for All the Venues inside the Scarborough\n### When the data is completely gathered, we will perform processing on that raw data to find our desirable features for each venue. Our main feature is the category of that venue. After this stage, the column \"Venue's Category\" wil be One-hot encoded and different venues will have different feature-columns. After On-hot encoding we will integrate all restaurant columns to one column \"Total Restaurants\" and all food joint columns to \"Total Joints\" column. We assumed that different resaturants use the Same raw groceries. This assumption is made for simplicity and due to not having a very detailed dataset about different venues. \n### Now, the dataset is fully ready to be used for machine learning (and statistical analysis) purposes.",
"_____no_output_____"
],
[
"## Part 4: Applying one of Machine Learning Techniques (K-Means Clustering)\n### Here we cluster neighborhoods via K-means clustering method. We think that 5 clusters is enough and can cover the complexity of our problem. After clustering we will update our dataset and create a column representing the group for each neighborhood. ",
"_____no_output_____"
],
[
"# Decision Making and Reporting Results\n### Now, we focus on the centers of clusters and compare them for their \"Total Restaurants\" and their \"Total Joints\". The group which its center has the highest \"Total Sum\" will be our best recommendation to the contractor. {Note: Total Sum = Total Restaurants + Total Joints + Other Venues.} This algorithm although is pretty straightforward yet is strongly powerful. \n\n## Results:\n### Based on this analysis, the best recommended neighborhood will be:\n### {'Neighborhood': 'Agincourt',\n### 'Postal Code': 'M1S',\n### 'Neighborhood Latitude': 43.7942003,\n### 'Neighborhood Longitude': -79.26202940000002}\n",
"_____no_output_____"
],
[
"## Thank You !",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
ec8f6b71b88c3d94ee0963ac2a3a4a1089d6a31c | 22,493 | ipynb | Jupyter Notebook | probability_sampling.ipynb | saiddddd/probability-sampling | 2821c3e838b55f1c33c1cca59b223bf053d5479f | [
"MIT"
] | 1 | 2021-05-18T02:40:47.000Z | 2021-05-18T02:40:47.000Z | probability_sampling.ipynb | saiddddd/probability-sampling | 2821c3e838b55f1c33c1cca59b223bf053d5479f | [
"MIT"
] | null | null | null | probability_sampling.ipynb | saiddddd/probability-sampling | 2821c3e838b55f1c33c1cca59b223bf053d5479f | [
"MIT"
] | null | null | null | 26.124274 | 117 | 0.383942 | [
[
[
"# Import required libraries\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"# Set random seed\nnp.random.seed(42)",
"_____no_output_____"
],
[
"# Define total number of products\nnumber_of_products = 10\n\n# Create data dictionary\ndata = {'product_id':np.arange(1, number_of_products+1).tolist(),\n 'measure':np.round(np.random.normal(loc=10, scale=0.5, size=number_of_products),3)}\n\n# Transform dictionary into a data frame\ndf = pd.DataFrame(data)",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"# Store the real mean in a separate variable\nreal_mean = round(df['measure'].mean(),3)\nprint(real_mean)",
"10.224\n"
]
],
[
[
"# SIMPLE RANDOM SAMPLING",
"_____no_output_____"
]
],
[
[
"# Obtain simple random sample\nsimple_random_sample = df.sample(n=4).sort_values(by='product_id')\n\n# Save the sample mean in a separate variable\nsimple_random_mean = round(simple_random_sample['measure'].mean(),3)\n\n# View sampled data frame\nsimple_random_sample",
"_____no_output_____"
]
],
[
[
"# SYSTEMATIC SAMPLING",
"_____no_output_____"
]
],
[
[
"# Define systematic sampling function\ndef systematic_sampling(df, step):\n \n indexes = np.arange(0,len(df),step=step)\n systematic_sample = df.iloc[indexes]\n return systematic_sample\n \n # Obtain a systematic sample and save it in a new variable\nsystematic_sample = systematic_sampling(df, 3)\n\n# Save the sample mean in a separate variable\nsystematic_mean = round(systematic_sample['measure'].mean(),3)\n\n# View sampled data frame\nsystematic_sample",
"_____no_output_____"
]
],
[
[
"# CLUSTER SAMPLING",
"_____no_output_____"
]
],
[
[
"def cluster_sampling(df, number_of_clusters):\n \n try:\n # Divide the units into cluster of equal size\n df['cluster_id'] = np.repeat([range(1,number_of_clusters+1)],len(df)/number_of_clusters)\n\n # Create an empty list\n indexes = []\n\n # Append the indexes from the clusters that meet the criteria\n # For this formula, clusters id must be an even number\n for i in range(0,len(df)):\n if df['cluster_id'].iloc[i]%2 == 0:\n indexes.append(i)\n cluster_sample = df.iloc[indexes]\n return(cluster_sample)\n \n except:\n print(\"The population cannot be divided into clusters of equal size!\")\n \n# Obtain a cluster sample and save it in a new variable\ncluster_sample = cluster_sampling(df,5)\n# Save the sample mean in a separate variable\ncluster_mean = round(cluster_sample['measure'].mean(),3)",
"_____no_output_____"
],
[
"cluster_sample",
"_____no_output_____"
]
],
[
[
"# STRATIFIED RANDOM SAMPLING (using class)",
"_____no_output_____"
]
],
[
[
"# Create data dictionary\ndata = {'product_id':np.arange(1, number_of_products+1).tolist(),\n 'product_strata':np.repeat([1,2], number_of_products/2).tolist(),\n 'measure':np.round(np.random.normal(loc=10, scale=0.5, size=number_of_products),3)}\n\n# Transform dictionary into a data frame\ndf = pd.DataFrame(data)\n\n# View data frame\ndf",
"_____no_output_____"
],
[
"# # Import StratifiedShuffleSplit\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n# Set the split criteria\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=4)\n\n# Perform data frame split\nfor x, y in split.split(df, df['product_strata']):\n stratified_random_sample = df.iloc[y].sort_values(by='product_id')\n\n# View sampled data frame\nstratified_random_sample\n\n# Obtain the sample mean for each group\nstratified_random_sample.groupby('product_strata').mean().drop(['product_id'],axis=1)",
"_____no_output_____"
]
],
[
[
"# MEAURE MEAN COMPARISON PER SAMPLING METHOD",
"_____no_output_____"
]
],
[
[
"# Create a dictionary with the mean outcomes for each sampling method and the real mean\noutcomes = {'sample_mean':[simple_random_mean,systematic_mean,cluster_mean],\n 'real_mean':real_mean}\n\n# Transform dictionary into a data frame\noutcomes = pd.DataFrame(outcomes, index=['Simple Random Sampling','Systematic Sampling','Cluster Sampling'])\n\n# Add a value corresponding to the absolute error\noutcomes['abs_error'] = abs(outcomes['real_mean'] - outcomes['sample_mean'])\n\n# Sort data frame by absolute error\noutcomes.sort_values(by='abs_error')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8f82ffb561c8cdabcbda44b3107b171851cfef | 106,216 | ipynb | Jupyter Notebook | Module5/Module5 - Lab1.ipynb | joben/DAT210x | bc047fcefc2bd4510c9a806200cbdfb891a3c5c5 | [
"MIT"
] | null | null | null | Module5/Module5 - Lab1.ipynb | joben/DAT210x | bc047fcefc2bd4510c9a806200cbdfb891a3c5c5 | [
"MIT"
] | null | null | null | Module5/Module5 - Lab1.ipynb | joben/DAT210x | bc047fcefc2bd4510c9a806200cbdfb891a3c5c5 | [
"MIT"
] | null | null | null | 268.222222 | 54,052 | 0.912678 | [
[
[
"# DAT210x - Programming with Python for DS",
"_____no_output_____"
],
[
"## Module5- Lab1",
"_____no_output_____"
],
[
"Start by importing whatever you need to import in order to make this lab work:",
"_____no_output_____"
]
],
[
[
"# .. your code here ..\nimport pandas as pd\nfrom sklearn.cluster import KMeans\n\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n",
"_____no_output_____"
]
],
[
[
"### How to Get The Dataset",
"_____no_output_____"
],
[
"1. Open up the City of Chicago's [Open Data | Crimes](https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2) page.\n1. In the `Primary Type` column, click on the `Menu` button next to the info button, and select `Filter This Column`. It might take a second for the filter option to show up, since it has to load the entire list first.\n1. Scroll down to `GAMBLING`\n1. Click the light blue `Export` button next to the `Filter` button, and select `Download As CSV`",
"_____no_output_____"
],
[
"Now that you have th dataset stored as a CSV, load it up being careful to double check headers, as per usual:",
"_____no_output_____"
]
],
[
[
"# .. your code here ..\ndf = pd.read_csv('Datasets/Crimes.csv', index_col='ID')",
"_____no_output_____"
]
],
[
[
"Get rid of any _rows_ that have nans in them:",
"_____no_output_____"
]
],
[
[
"# .. your code here ..\ndf = df.dropna()",
"_____no_output_____"
]
],
[
[
"Display the `dtypes` of your dset:",
"_____no_output_____"
]
],
[
[
"# .. your code here ..\ndf.dtypes",
"_____no_output_____"
]
],
[
[
"Coerce the `Date` feature (which is currently a string object) into real date, and confirm by displaying the `dtypes` again. This might be a slow executing process...",
"_____no_output_____"
]
],
[
[
"# .. your code here ..\ndf['Date'] = pd.to_datetime(df['Date'])",
"_____no_output_____"
],
[
"df.dtypes",
"_____no_output_____"
],
[
"def doKMeans(df):\n # Let's plot your data with a '.' marker, a 0.3 alpha at the Longitude,\n # and Latitude locations in your dataset. Longitude = x, Latitude = y\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(df.Longitude, df.Latitude, marker='.', alpha=0.3)\n\n \n # TODO: Filter `df` using indexing so it only contains Longitude and Latitude,\n # since the remaining columns aren't really applicable for this lab:\n #\n # .. your code here ..\n df = df[['Longitude','Latitude']]\n\n # TODO: Use K-Means to try and find seven cluster centers in this df.\n # Be sure to name your kmeans model `model` so that the printing works.\n #\n # .. your code here ..\n model = KMeans(n_clusters=7)\n model.fit(df)\n \n # Now we can print and plot the centroids:\n centroids = model.cluster_centers_\n print(centroids)\n ax.scatter(centroids[:,0], centroids[:,1], marker='x', c='red', alpha=0.5, linewidths=3, s=169)\n plt.show()",
"_____no_output_____"
],
[
"# Print & Plot your data\ndoKMeans(df)",
"[[-87.68543715 41.98128232]\n [-87.61947513 41.8033442 ]\n [-87.63108338 41.69664745]\n [-87.75695675 41.8933103 ]\n [-87.58248336 41.7524872 ]\n [-87.66472793 41.77273546]\n [-87.70959262 41.8781271 ]]\n"
]
],
[
[
"Filter out the data so that it only contains samples that have a `Date > '2011-01-01'`, using indexing. Then, in a new figure, plot the crime incidents, as well as a new K-Means run's centroids.",
"_____no_output_____"
]
],
[
[
"# .. your code here ..\n\ndf = df[df.Date > '2011-01-01 01:00:00']",
"_____no_output_____"
],
[
"# Print & Plot your data\ndoKMeans(df)",
"[[-87.63457739 41.70588603]\n [-87.7548138 41.88998061]\n [-87.66505354 41.77571527]\n [-87.71056213 41.87546076]\n [-87.57983025 41.75061222]\n [-87.61923242 41.79163517]\n [-87.68715349 41.98350221]]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ec8f878498b5973c01264cb2ad91a33335aa41d2 | 191,469 | ipynb | Jupyter Notebook | .ipynb_checkpoints/02. Understand your data with Visualization-checkpoint.ipynb | Akshat4112/Predictive-Modeling-with-Python | 38de89d7b60e7d18206b93260aea53e4cf44e8b0 | [
"MIT"
] | 1 | 2022-02-03T05:46:24.000Z | 2022-02-03T05:46:24.000Z | 02. Understand your data with Visualization.ipynb | Akshat4112/Predictive-Modeling-with-Python | 38de89d7b60e7d18206b93260aea53e4cf44e8b0 | [
"MIT"
] | null | null | null | 02. Understand your data with Visualization.ipynb | Akshat4112/Predictive-Modeling-with-Python | 38de89d7b60e7d18206b93260aea53e4cf44e8b0 | [
"MIT"
] | 3 | 2020-05-17T13:23:07.000Z | 2021-05-22T13:55:11.000Z | 821.755365 | 99,816 | 0.957022 | [
[
[
"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy",
"_____no_output_____"
],
[
"names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class'] \ndata = pd.read_csv('datasets/pima-indians-diabetes.csv', names=names)",
"_____no_output_____"
],
[
"#Univariate Plots\ndata.hist()\nplt.show()",
"_____no_output_____"
],
[
"#Density Plots",
"_____no_output_____"
],
[
"data.plot(kind = 'density', subplots=True, layout=(3,3), sharex=False)\nplt.show()",
"_____no_output_____"
],
[
"#Box and Whisker Plots",
"_____no_output_____"
],
[
"data.plot(kind = 'box', subplots=True, layout=(3,3), sharex=False, sharey=False)\nplt.show()",
"_____no_output_____"
],
[
"#Multivariate Plots",
"_____no_output_____"
],
[
"#Correlationn Matrix Plot",
"_____no_output_____"
],
[
"corr = data.corr()\nfig = plt.figure()\nax = fig.add_subplot(111)\ncax = ax.matshow(corr, vmin = -1, vmax = 1)\nfig.colorbar(cax)\nticks = numpy.arange(0,9,1)\nax.set_xticks(ticks) \nax.set_yticks(ticks) \nax.set_xticklabels(names) \nax.set_yticklabels(names) \nplt.show()",
"_____no_output_____"
],
[
"#Scatter Plot Matrix",
"_____no_output_____"
],
[
"from pandas.plotting import scatter_matrix",
"_____no_output_____"
],
[
"scatter_matrix(data)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8f8d36327e7843d362eaca08cbb5f2b0f556f3 | 12,678 | ipynb | Jupyter Notebook | Modulo4/Clase17_CAPMII.ipynb | AndresLaresBarragan/porinvp2022 | 22ddaa9cc6fad4a42bcbe923670c29a1f8a2e01c | [
"MIT"
] | null | null | null | Modulo4/Clase17_CAPMII.ipynb | AndresLaresBarragan/porinvp2022 | 22ddaa9cc6fad4a42bcbe923670c29a1f8a2e01c | [
"MIT"
] | null | null | null | Modulo4/Clase17_CAPMII.ipynb | AndresLaresBarragan/porinvp2022 | 22ddaa9cc6fad4a42bcbe923670c29a1f8a2e01c | [
"MIT"
] | null | null | null | 30.330144 | 471 | 0.543067 | [
[
[
"# Estimando $\\beta$ de los activos",
"_____no_output_____"
],
[
"<img style=\"float: right; margin: 0px 0px 15px 15px;\" src=\"https://upload.wikimedia.org/wikipedia/commons/4/48/Capital_market_line_of_CAPM.svg\" width=\"400px\" height=\"400px\" />\n\nEn la clase anterior aprendimos \n- ¿qué es el CAPM?; \n- ¿cuáles son los supuestos sobre los que se funda el CAPM?;\n- derivamos la fórmula del CAPM; y\n- aprendimos como obtener la $\\beta$ de un portafolio a partir de la $\\beta$ de activos individuales.\n\nEn la clase de hoy estudiaremos una forma de estimar los $\\beta$ de activos individuales.\n\n**Objetivos:**\n- Revisitar riesgo sistemático y no sistemático.\n- Estudiar un método para estimar las $\\beta$ de los activos.\n\n*Referencia:*\n- Notas del curso \"Portfolio Selection and Risk Management\", Rice University, disponible en Coursera.\n- [Notas del curso \"Financial Engineering\", Columbia University](http://www.columbia.edu/~ks20/FE-Notes/FE-Notes-Sigman.html)\n___",
"_____no_output_____"
],
[
"## <font color=green> Conclusiones de la [lectura](https://web.stanford.edu/~wfsharpe/art/parable/parable.htm). </font>",
"_____no_output_____"
],
[
"## 1. Riesgo sistemático y no sistemático.",
"_____no_output_____"
],
[
"Recordamos la fórmula de CAPM:\n\n$$E[r_i]-r_f=\\beta_i(E[r_M]-r_f),$$\n\ndonde $\\beta_i=\\frac{\\sigma_{M,i}}{\\sigma_M^2}$ y $\\sigma_{M,i}$ es la covarianza del portafolio de mercado con el activo individual $i$.",
"_____no_output_____"
],
[
"Todas las anteriores son variables determinísiticas.\n\n- ¿Qué pasa si usamos el CAPM como un modelo de rendimientos? Es decir,\n\n$$r_i=r_f+\\beta_i(r_M-r_f)+\\epsilon_i,$$\n\ndonde $\\epsilon_i$ es un término de error.",
"_____no_output_____"
],
[
"<font color=green> Despejando $\\epsilon_i$, tenemos que: </font>\n\n- $E[\\epsilon_i]=0$, y\n- $cov(\\epsilon_i,r_M)=0$.\n\n<font color=green> Ver en el tablero. </font>",
"_____no_output_____"
],
[
"Entonces, la varianza del activo $i$ es:\n\n$$\\sigma_i^2=\\beta_i^2\\sigma_M^2+var(\\epsilon_i),$$\n\ndonde el primer término corresponde al riesgo sistemático (de mercado) y el segundo al riesgo idiosincrático.\n___",
"_____no_output_____"
],
[
"## 2. Estimando $\\beta$ para un activo.",
"_____no_output_____"
],
[
"- En el mercado real, la cantidad de activos es ENORME, y tratar de construir el portafolio de mercado sería una tarea grandiosa, pero poco realista para un analista financiero. \n- Por lo tanto, los índices de mercado han sido creados para intentar aproximar el portafolio de mercado.\n- Dicho índice es un portafolio más pequeño que el de mercado, construido por los que se consideran los activos más dominantes, y que capturan la esencia de el portafolio de mercado.",
"_____no_output_____"
],
[
"- El índice de mercado más conocido es el Standard & Poor’s 500-stock index (S&P), compuesto de 500 activos.\n- Un $\\beta$ para un activo dado, se puede estimar utilizando el S&P en reemplazo de M, y utilizando datos históricos para ambos rendimientos (el del activo y el del S&P500).\n- Por ejemplo, considere un activo $i$ para el cual queremos estimar su $\\beta_i$.\n- Este estimado se construye usando medias, varianzas y covarianzas muestrales como sigue:\n - Escogemos $N$ rendimientos históricos, tales como los reportados mensualmente hace tres años.\n - Para $k=1,2,\\dots,N$, $r_{ik}$ y $r_{S\\&Pk}$ denotan el $k-$ésimo valor muestral de los rendimietos.\n\nEntonces\n\n$$\\hat{E[r_i]}=\\frac{1}{N}\\sum_{k=1}^{N}r_{ik}, \\text{ y}$$\n\n$$\\hat{E[r_{S\\&P}]}=\\frac{1}{N}\\sum_{k=1}^{N}r_{S\\&Pk}.$$",
"_____no_output_____"
],
[
"Además, la varianza $\\sigma_{S\\&P}^2$ se estima como\n\n$$\\hat{\\sigma_{S\\&P}^2}=\\frac{1}{N-1}\\sum_{k=1}^{N}(r_{S\\&Pk}-\\hat{E[r_{S\\&P}]})^2,$$\n\ny la covarianza $\\sigma_{S\\&P,i}$\n\n$$\\hat{\\sigma_{S\\&P,i}}=\\frac{1}{N-1}\\sum_{k=1}^{N}(r_{S\\&Pk}-\\hat{E[r_{S\\&P}]})(r_{ik}-\\hat{E[r_i]})$$",
"_____no_output_____"
],
[
"Finalmente, \n\n$$\\hat{\\beta_i}=\\frac{\\hat{\\sigma}_{S\\&P,i}}{\\hat{\\sigma}_{S\\&P}^2}.$$",
"_____no_output_____"
],
[
"### Ejemplo...\n\nEntrar a yahoo finance, a la información de MSFT, AAPL y ^GSPC.",
"_____no_output_____"
]
],
[
[
"# Importar paquetes\nimport pandas as pd\nimport pandas_datareader.data as web",
"_____no_output_____"
],
[
"# Función para descargar precios de cierre ajustados:\ndef get_adj_closes(tickers, start_date=None, end_date=None, freq='m'):\n # Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)\n # Descargamos DataFrame con todos los datos\n closes = web.YahooDailyReader(symbols=tickers, start=start_date, end=end_date, interval=freq).read()['Adj Close']\n # Se ordenan los índices de manera ascendente\n closes.sort_index(inplace=True)\n return closes",
"_____no_output_____"
],
[
"# Importar datos de AAPL, MSFT, y ^GSPC",
"_____no_output_____"
],
[
"# Obtener rendimientos mensuales\n",
"_____no_output_____"
],
[
"# Obtener matriz de covarianza\n",
"_____no_output_____"
],
[
"# Beta de Microsoft\n",
"_____no_output_____"
],
[
"# Beta de Apple\n",
"_____no_output_____"
]
],
[
[
"## 3. Ejercicios",
"_____no_output_____"
],
[
"Su portafolio de inversión consiste de $\\$150,000$ invertidos en un solo activo. Suponga que la tasa de interés libre de riesgo es $3\\%$. Este activo tiene un rendimiento esperado del $12\\%$, y una volatilidad del $40\\%$, y el portafolio de mercado tiene un rendimiento esperado del $10\\%$ y una volatilidad del $18\\%$. Bajo las suposiciones del CAPM, ¿cuál es la volatilidad de un mejor portafolio alternativo que tiene el mismo rendimiento que el activo?\n\nA. $\\sigma = 15.52\\%$\n\nB. $\\sigma = 23.14\\%$\n\nC. $\\sigma = 30.25\\%$ \n\nD. $\\sigma = 35.13\\%$ ",
"_____no_output_____"
],
[
"Con los mismos datos del ejercicio anterior, y suponiendo que usted está conforme con el riesgo del activo, bajo las suposiciones de CAPM, ¿cuál sería un mejor rendimiento esperado que debería ganar?\n\nA. $E[r] = 13.65\\%$\n\nB. $E[r] = 15.30\\%$\n\nC. $E[r] = 18.56\\%$\n\nD. $E[r] = 20.38\\%$ ",
"_____no_output_____"
],
[
"Suponga que usted tiene invertidos $\\$30,000$ en los siguientes cuatro activos:\n\n| Activo | Cantidad invertida | $\\beta$ |\n| ------------- | ------------------ | ------- |\n| Activo A | $\\$5,000$ | 0.75 |\n| Activo B | $\\$10,000$ | 1.10 |\n| Activo C | $\\$8,000$ | 1.36 |\n| Activo D | $\\$7,000$ | 1.88 |\n\nLa tasa libre de riesgo es del $2\\%$ y el rendimiento esperado del portafolio de mercado es del $8\\%$. Con base en el CAPM, ¿cuál es la $\\beta$ del portafolio?\n\nA. $\\beta_P = 0.95$\n\nB. $\\beta_P = 1.19$\n\nC. $\\beta_P = 1.29$\n\nD. $\\beta_P = 1.62$",
"_____no_output_____"
],
[
"Considere la siguiente distribución de rendimientos:\n\n| Estado de la economía | Probabilidad | $R_A$ |\n| --------------------- | ------------------ | ------- |\n| Depresión | 0.30 | 0.10 |\n| Normal | 0.50 | 0.15 |\n| Expansión | 0.20 | 0.25 |\n\nSuponiendo que el CAPM es válido, que la volatilidad del portafolio de mercado es $\\sigma_M=0.10$ y que la correlación del activo A y el portafolio de mercado es $\\rho_{A,M}=0.9$, ¿cuál es el valor de $\\beta_A$?\n\nA. $\\beta_A=0.25$\n\nB. $\\beta_A=0.47$\n\nC. $\\beta_A=0.55$\n\nD. $\\beta_A=1.15$",
"_____no_output_____"
],
[
"Un fondo de inversiones con $\\beta=0.8$ tiene un rendimiento esperado del $16\\%$. Si la tasa libre de riesgo es $4\\%$ y el rendimiento esperado del portafolio de mercado es del $13\\%$, ¿debería invertir en este fondo?\n\nA. Si.\n\nB. No.",
"_____no_output_____"
],
[
"Suponga que encontramos un activo el cual, con base en su precio al día de hoy, tiene un rendimiento esperado más bajo que lo que sugiere la línea de mercado de activos. Si el CAPM es válido, ¿cuál(es) de las siguientes proposiciones es (son) verdaderas?\n\nA. Si el activo tiene un rendimiento esperado más bajo que el sugerido por la línea, significa que su precio es muy bajo.\n\nB. Si el activo tiene un rendimiento esperado más bajo que el sugerido por la línea, significa que su precio es muy alto.\n\nC. El activo se grafica por encima de la línea.\n\nD. El activo se grafica bajo la línea.",
"_____no_output_____"
],
[
"# Anuncios parroquiales\n\n## 1. Último quiz\n## 2. Revisar archivo Tarea7",
"_____no_output_____"
],
[
"<script>\n $(document).ready(function(){\n $('div.prompt').hide();\n $('div.back-to-top').hide();\n $('nav#menubar').hide();\n $('.breadcrumb').hide();\n $('.hidden-print').hide();\n });\n</script>\n\n<footer id=\"attribution\" style=\"float:right; color:#808080; background:#fff;\">\nCreated with Jupyter by Esteban Jiménez Rodríguez.\n</footer>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
ec8f9e7df88b6a2e5a8e6870b1123fffbe021ab2 | 23,005 | ipynb | Jupyter Notebook | demoFiles/plotDemo.ipynb | twosigma/beaker-notebook-archive | 5f2ac727e4cec5ce60a3664adbfeab20346a9c5b | [
"Apache-2.0"
] | 7 | 2017-07-17T03:47:43.000Z | 2020-02-18T07:32:41.000Z | demoFiles/plotDemo.ipynb | twosigma/beaker-notebook-archive | 5f2ac727e4cec5ce60a3664adbfeab20346a9c5b | [
"Apache-2.0"
] | null | null | null | demoFiles/plotDemo.ipynb | twosigma/beaker-notebook-archive | 5f2ac727e4cec5ce60a3664adbfeab20346a9c5b | [
"Apache-2.0"
] | 5 | 2017-05-17T02:19:51.000Z | 2018-11-15T11:17:26.000Z | 26.261416 | 175 | 0.511367 | [
[
[
"# Interactive Plot Widget",
"_____no_output_____"
]
],
[
[
"def plot = new Plot(title: \"Setting line properties\")\ndef ys = [0, 1, 6, 5, 2, 8]\ndef ys2 = [0, 2, 7, 6, 3, 8]\nplot << new Line(y: ys, width: 10, color: Color.red)\nplot << new Line(y: ys, width: 3, color: Color.yellow)\nplot << new Line(y: ys, width: 4, color: new Color(33, 87, 141), style: StrokeType.DASH, interpolation: 0)\nplot << new Line(y: ys2, width: 2, color: new Color(212, 57, 59), style: StrokeType.DOT)\nplot << new Line(y: [5, 0], x: [0, 5], style: StrokeType.LONGDASH)\nplot << new Line(y: [4, 0], x: [0, 5], style: StrokeType.DASHDOT)",
"_____no_output_____"
],
[
"new Plot().add(new Line(x: [0, 1, 2, 3, 4, 5], y: [0, 1, 6, 5, 2, 8]))\nnew Plot().add(new Line(x: (0..5), y: [0, 1, 6, 5, 2, 8]))\nnew Plot() << new Line(x: (0..5), y: [0, 1, 6, 5, 2, 8])",
"_____no_output_____"
],
[
"def plot = new Plot();\ndef y1 = [1.5, 1, 6, 5, 2, 8]\ndef cs = [Color.black, Color.red, Color.gray, Color.green, Color.blue, Color.pink]\ndef ss = [StrokeType.SOLID, StrokeType.SOLID, StrokeType.DASH, StrokeType.DOT, StrokeType.DASHDOT, StrokeType.LONGDASH]\nplot << new Stems(y: y1, color: cs, style: ss, width: 5)",
"_____no_output_____"
],
[
"def plot = new Plot(title: \"Bars\")\ndef cs = [new Color(255, 0, 0, 128)] * 5 // transparent bars\ncs[3] = Color.red // set color of a single bar, solid colored bar\nplot << new Bars(x: (1..5), y: [3, 5, 2, 3, 7], color: cs, outlineColor: Color.black, width: 0.3)",
"_____no_output_____"
],
[
"def plot = new Plot(title: \"Changing Point Size, Color, Shape\")\ndef y1 = [6, 7, 12, 11, 8, 14]\ndef y2 = y1.collect { it - 2 }\ndef y3 = y2.collect { it - 2 }\ndef y4 = y3.collect { it - 2 }\nplot << new Points(y: y1)\nplot << new Points(y: y2, shape: ShapeType.CIRCLE)\nplot << new Points(y: y3, size: 8.0, shape: ShapeType.DIAMOND)\nplot << new Points(y: y4, size: 12.0, color: Color.orange, outlineColor: Color.red)",
"_____no_output_____"
],
[
"def plot = new Plot(title: \"Changing point properties with list\")\ndef cs = [Color.black, Color.red, Color.orange, Color.green, Color.blue, Color.pink]\ndef ss = [6.0, 9.0, 12.0, 15.0, 18.0, 21.0]\ndef fs = [false, false, false, true, false, false]\nplot << [new Points(y: [5] * 6, size: 12.0, color: cs),\n new Points(y: [4] * 6, size: 12.0, color: Color.gray, outlineColor: cs),\n new Points(y: [3] * 6, size: ss, color: Color.red),\n new Points(y: [2] * 6, size: 12.0, color: Color.black, fill: fs, outlineColor: Color.black)]",
"_____no_output_____"
],
[
"def plot = new Plot()\ndef y = [3, 5, 2, 3]\ndef x0 = [0, 1, 2, 3]\ndef x1 = [3, 4, 5, 8]\nplot << new Area(x: x0, y: y)\nplot << new Area(x: x1, y: y, color: new Color(128, 128, 128, 50), interpolation: 0)",
"_____no_output_____"
],
[
"def p = new Plot()\np << new Line(y: [3, 6, 12, 24], displayName: \"Median\")\np << new Area(y: [4, 8, 16, 32], base: [2, 4, 8, 16],\n color: new Color(255, 0, 0, 50), displayName: \"Q1 to Q3\")",
"_____no_output_____"
],
[
"def y1 = [1,5,3,2,3]\ndef y2 = [7,2,4,1,3]\ndef p = new Plot(title: 'Plot with XYStacker', initHeight: 200)\ndef a1 = new Area(y: y1, displayName: 'y1')\ndef a2 = new Area(y: y2, displayName: 'y2')\np << XYStacker.stack([a1, a2])",
"_____no_output_____"
],
[
"def p = new Plot ()\np << new Line(y: [-1, 1])\np << new ConstantLine(x: 0.65, style: StrokeType.DOT, color: Color.blue)\np << new ConstantLine(y: 1, style: StrokeType.DASHDOT, color: Color.blue)\np << new ConstantLine(x: 1, y: 0.4, color: Color.gray, width: 5, showLabel: true)",
"_____no_output_____"
],
[
"new Plot() << new Line(y: [-3, 1, 3, 4, 5]) << new ConstantBand(x: [1, 2], y: [1, 3])",
"_____no_output_____"
],
[
"def p = new Plot() \np << new Line(x: [-3, 1, 2, 4, 5], y: [4, 2, 6, 1, 5])\np << new ConstantBand(x: [Double.NEGATIVE_INFINITY, 1], color: new Color(128, 128, 128, 50))\np << new ConstantBand(x: [1, 2])\np << new ConstantBand(x: [4, Double.POSITIVE_INFINITY])",
"_____no_output_____"
],
[
"def plot = new Plot()\ndef xs = (1..10)\ndef ys = [8.6, 6.1, 7.4, 2.5, 0.4, 0.0, 0.5, 1.7, 8.4, 1]\ndef label = { i ->\n if (ys[i] > ys[i+1] && ys[i] > ys[i-1]) return \"max\"\n if (ys[i] < ys[i+1] && ys[i] < ys[i-1]) return \"min\"\n if (ys[i] > ys[i-1]) return \"rising\"\n if (ys[i] < ys[i-1]) return \"falling\"\n return \"\"\n}\nfor (i = 0; i < xs.size(); i++) {\n if (i > 0 && i < xs.size()-1)\n plot << new Text(x: xs[i], y: ys[i], text: label(i), pointerAngle: -i/3.0)\n}\nplot << new Line(x: xs, y: ys)\nplot << new Points(x: xs, y: ys)",
"_____no_output_____"
],
[
"def ch = new Crosshair(color: new Color(255, 128, 5), width: 2, style: StrokeType.DOT)\ndef pp = new Plot(crosshair: ch, omitCheckboxes: true,\n legendLayout: LegendLayout.HORIZONTAL, legendPosition: LegendPosition.TOP)\ndef x = [1, 4, 6, 8, 10]\ndef y = [3, 6, 4, 5, 9]\npp << new Line(displayName: \"Line\", x: x, y: y, width: 3)\npp << new Bars(displayName: \"Bar\", x: (1..10), y: [2, 2, 4, 4, 2, 2, 0, 2, 2, 4], width: 0.5)\npp << new Points(x: x, y: y, size: 10, toolTip: {xs, ys -> \"x = \" + xs + \", y = \" + ys })",
"_____no_output_____"
],
[
"import com.twosigma.beaker.fileloader.CsvPlotReader\n\ndef rates = new CsvPlotReader().readAsList(\"tableRows.csv\")\n\nnew SimpleTimePlot(rates, [\"y1\", \"y10\"],\n yLabel: \"Price\", \n displayNames: [\"1 Year\", \"10 Year\"])",
"_____no_output_____"
],
[
"def points = 100;\ndef logBase = 10;\ndef expys = [];\ndef xs = [];\nfor(int i = 0; i < points; i++){\n xs[i] = i / 15.0;\n expys[i] = Math.exp(xs[i]); \n}\n\ndef cplot = new CombinedPlot(xLabel: \"Linear\");\ndef logYPlot = new Plot(title: \"Linear x, Log y\", yLabel: \"Log\", logY: true, yLogBase: logBase);\nlogYPlot << new Line(x: xs, y: expys, displayName: \"f(x) = exp(x)\");\nlogYPlot << new Line(x: xs, y: xs, displayName: \"g(x) = x\");\ncplot.add(logYPlot, 3);\n\n// works for 2nd Y axis too:\n// logYPlot << new YAxis(label: \"Right Log Y-Axis\", log: true, logBase: logBase);\n\ndef linearYPlot = new Plot(title: \"Linear x, Linear y\", yLabel: \"Linear\");\nlinearYPlot << new Line(x: xs, y: expys, displayName: \"f(x) = exp(x)\");\nlinearYPlot << new Line(x: xs, y: xs, displayName: \"g(x) = x\");\ncplot.add(linearYPlot, 3);\n\ncplot",
"_____no_output_____"
],
[
"def points = 100;\ndef logBase = 10;\ndef expys = [];\ndef xs = [];\nfor(int i = 0; i < points; i++){\n xs[i] = i /15\n expys[i] = Math.exp(xs[i]);\n}\n\ndef plot = new Plot(title: \"Log x, Log y\", xLabel: \"Log\", yLabel: \"Log\",\n logX: true, xLogBase: logBase, logY: true, yLogBase: logBase);\n\nplot << new Line(x: xs, y: expys, displayName: \"f(x) = exp(x)\");\nplot << new Line(x: xs, y: xs, displayName: \"f(x) = x\");\n\nplot",
"_____no_output_____"
],
[
"def cal = Calendar.getInstance();\ncal.add(Calendar.HOUR, -1)\n\ndef today = new Date();\ndef millis = today.time;\ndef hour = 1000 * 60 * 60;\n\ndef plot = new TimePlot(\n timeZone: new SimpleTimeZone(10800000, \"America/New_York\")\n);\n//list of milliseconds\nplot << new Points(x:(0..10).collect{millis + hour * it}, y:(0..10), size: 10, displayName: \"milliseconds\");\n//list of java.util.Date objects\nplot << new Points(x:(0..10).collect{cal.add(Calendar.HOUR, 1); cal.getTime()}, y:(0..10), size: 4, displayName: \"date objects\");",
"_____no_output_____"
],
[
"def today = new Date()\ndef millis = today.time\ndef nanos = millis * 1000 * 1000g // g makes it arbitrary precision\ndef np = new NanoPlot()\nnp << new Points(x:(0..10).collect{nanos + 7 * it}, y:(0..10))",
"_____no_output_____"
],
[
"import com.twosigma.beaker.chart.xychart.*\nimport com.twosigma.beaker.chart.legend.*\nimport com.twosigma.beaker.chart.xychart.plotitem.*\nimport com.twosigma.beaker.chart.Color\n\nr = new Random()\np = new Plot(title: \"Advanced Plot Styling\",\n labelStyle: \"font-size:32px; font-weight: bold; font-family: courier; fill: green;\",\n gridLineStyle: \"stroke: purple; stroke-width: 3;\",\n titleStyle: \"color: green;\"\n )\np << new Points(x: (1..1000).collect { r.nextGaussian() * 10.0d },\n y: (1..1000).collect { r.nextGaussian() * 20.0d })",
"_____no_output_____"
],
[
"import java.nio.file.Files\nbyte[] picture = Files.readAllBytes(new File(\"widgetArch.png\").toPath());\ndef p = new Plot();\n// x y width height are coordinates, opacity is a double in 0~1\n\n// image can be loaded via bytes, filepath, or url\np << new Rasters(x: [-10,3], y: [3,1.5], width: [6,5], height:[10,8], opacity: [1,0.5], dataString: picture);\n//p << new Rasters(x: -1, y: 4.5, width: 5, height: 8, opacity:0.5, filePath: \"widgetArch.png\");\np << new Rasters(x: [-4], y: [10.5], width: [7], height: [2], opacity:[1], fileUrl: \"https://www.twosigma.com/static/img/twosigma.png\");\n\n// a list of images!\ndef x = [-8, -5, -3, -2, -1, 1, 2, 4, 6, 8]\ndef y = [4, 5, 1, 2, 0 ,3, 6, 4, 5, 9]\ndef width = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\ndef opacity = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\np << new Rasters(x: x, y: y, width:width, height:width, opacity:opacity,fileUrl: \"http://icons.iconarchive.com/icons/paomedia/small-n-flat/1024/sign-check-icon.png\")\n",
"_____no_output_____"
],
[
"def plot = new Plot(title: \"Setting 2nd Axis bounds\")\ndef ys = [0, 2, 4, 6, 15, 10]\ndef ys2 = [-40, 50, 6, 4, 2, 0]\ndef ys3 = [3, 6, 3, 6, 70, 6]\nplot << new YAxis(label:\"Spread\")\nplot << new Line(y: ys)\nplot << new Line(y: ys2, yAxis: \"Spread\")\nplot.getYAxes()[0].setBound(1,5);\nplot.getYAxes()[1].setBound(3,6) // this should change the bounds of the 2nd, right axis\nplot\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8f9ea78f7fe2f3abb10ccf867420fd7f337729 | 144,487 | ipynb | Jupyter Notebook | Pyber HW.ipynb | atalaykozsoy/Pyber | 889e0ca815b3d8c0567d6e47de39d2c1a1d8a1cf | [
"MIT"
] | null | null | null | Pyber HW.ipynb | atalaykozsoy/Pyber | 889e0ca815b3d8c0567d6e47de39d2c1a1d8a1cf | [
"MIT"
] | null | null | null | Pyber HW.ipynb | atalaykozsoy/Pyber | 889e0ca815b3d8c0567d6e47de39d2c1a1d8a1cf | [
"MIT"
] | null | null | null | 389.45283 | 73,268 | 0.929938 | [
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"#Pyber Homework\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n# create the pathways for the files to be read\ncity_data_csv = \"~/Desktop/Pyber/data/city_data.csv\"\nride_data_csv = \"~/Desktop/Pyber/data/ride_data.csv\"",
"_____no_output_____"
],
[
"#read and label the data sets\ncity_data_df = pd.read_csv(city_data_csv)\nride_data_df = pd.read_csv(ride_data_csv)\n",
"_____no_output_____"
],
[
"#combine/merge the two datasets\nPyber_data = pd.merge(ride_data_df, city_data_df, on =\"city\", how=\"left\")\n\nPyber_data.head()",
"_____no_output_____"
],
[
"# Obtain the x and y coordinates for each of the three city types\nurban = Pyber_data[Pyber_data[\"type\"]==\"Urban\"].groupby([Pyber_data[\"city\"]])\nsuburban = Pyber_data[Pyber_data[\"type\"]==\"Suburban\"].groupby([Pyber_data[\"city\"]])\nrural = Pyber_data[Pyber_data[\"type\"]==\"Rural\"].groupby([Pyber_data[\"city\"]])\n \nurban_fare = urban[\"fare\"].mean()\nsuburban_fare = suburban[\"fare\"].mean()\nrural_fare = rural[\"fare\"].mean()\n \nurban_ride = urban[\"ride_id\"].count()\nsuburban_ride = suburban[\"ride_id\"].count()\nrural_ride = rural[\"ride_id\"].count()\n \nurban_driver = urban[\"driver_count\"].mean()\nsuburban_driver = suburban[\"driver_count\"].mean()\nrural_driver = rural[\"driver_count\"].mean()\n \n\n#urban = Pyber_data.loc[Pyber_data['type']=='Urban']\n#suburban = Pyber_data.loc[Pyber_data['type']=='Suburban']\n#rural = Pyber_data.loc[Pyber_data['type']=='Rural']\n",
"_____no_output_____"
],
[
"# Build the scatter plots for each city types\nplt.scatter(urban_ride, urban_fare, label=\"urban\", s=urban_driver * 10, color=[\"coral\"], edgecolor=\"black\", alpha = 0.75, marker=\"o\" )\nplt.scatter(suburban_ride, suburban_fare, label=\"suburban\", s=suburban_driver * 10, color=[\"lightskyblue\"], edgecolor=\"black\", alpha=0.75, marker=\"o\")\nplt.scatter(rural_ride, rural_fare, label=\"rural\", s=rural_driver * 10, color=[\"gold\"], edgecolor=\"black\", alpha=0.75, marker=\"o\")\n\n# Add the background\nplt.grid()\n\n# Incorporate the other graph properties\nplt.xlabel(\"Total Number of Rides (Per City)\")\nplt.ylabel(\"Average Fare ($)\")\nplt.title(\"Pyber Ride Sharing Data (2016)\")\n\n# Create a legend\nlegend=plt.legend(fontsize = 8, title=\"City Types\", loc=\"best\")\n\n# Equal the size of all markers on the legend\nlegend.legendHandles[0]._sizes = [30]\nlegend.legendHandles[1]._sizes = [30]\nlegend.legendHandles[2]._sizes = [30]\n\n# Add text label regarding circle size\nplt.text(42,35,\"Note: \\nCircle size correlates with driver count per city.\", fontsize = 10)\n\n# Save Figure\n#plt.savefig(\"Images/PyberRideSharingData.png\", bbox_inches=\"tight\")\n\nplt.show()",
"_____no_output_____"
],
[
"# filter down the data\ncity_type = Pyber_data.groupby([\"type\"])\nfare_total = city_type[\"fare\"].sum()\n\n# create shortcuts for the graph\nlabels=[\"Rural\", \"Suburban\", \"Urban\"]\nexplode=(0.05, 0.05, 0.05)\ncolors=[\"gold\", \"lightskyblue\", \"lightcoral\"]\n\n#create the graph\nplt.pie(fare_total, explode=explode, labels=labels, colors=colors, autopct=\"%1.1f%%\", shadow=True, startangle=150)\nplt.title(\"% of Total Fares by City Type\")\n\nplt.show()",
"_____no_output_____"
],
[
"rides_total = city_type[\"ride_id\"].count()\n\nplt.pie(rides_total, explode=explode, labels=labels, colors=colors, autopct=\"%1.1f%%\", shadow=True, startangle=150)\nplt.title(\"% of Total Rides by City\")\n\nplt.show()",
"_____no_output_____"
],
[
"drivers_total = city_type[\"driver_count\"].sum()\n\nplt.pie(drivers_total, explode=explode, labels=labels, colors=colors, autopct=\"%1.1f%%\", shadow=True, startangle=150)\nplt.title(\"% of Total Drivers by City Type\")\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8faf90fcd27618c10a8d5c3d99c2e3bb024d60 | 49,959 | ipynb | Jupyter Notebook | clrp_pytorch_roberta_large_finetune.ipynb | ajenningsfrankston/Dynamic-Memory-Network-Plus-master | af9948b11ae623dd9eb2254e3cf10d0feff03349 | [
"MIT"
] | null | null | null | clrp_pytorch_roberta_large_finetune.ipynb | ajenningsfrankston/Dynamic-Memory-Network-Plus-master | af9948b11ae623dd9eb2254e3cf10d0feff03349 | [
"MIT"
] | null | null | null | clrp_pytorch_roberta_large_finetune.ipynb | ajenningsfrankston/Dynamic-Memory-Network-Plus-master | af9948b11ae623dd9eb2254e3cf10d0feff03349 | [
"MIT"
] | null | null | null | 64.132221 | 10,466 | 0.616445 | [
[
[
"<a href=\"https://colab.research.google.com/github/ajenningsfrankston/Dynamic-Memory-Network-Plus-master/blob/master/clrp_pytorch_roberta_large_finetune.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Now using roberta-large. ",
"_____no_output_____"
],
[
"This notebook uses the model created in pretrain any model notebook.\n\n1. Pretrain Roberta Model: https://www.kaggle.com/maunish/clrp-pytorch-roberta-pretrain\n2. Finetune Roberta Model: this notebook, <br/>\n Finetune Roberta Model TPU: https://www.kaggle.com/maunish/clrp-pytorch-roberta-finetune-tpu\n3. Inference Notebook: https://www.kaggle.com/maunish/clrp-pytorch-roberta-inference\n4. Roberta + SVM: https://www.kaggle.com/maunish/clrp-roberta-svm",
"_____no_output_____"
]
],
[
[
"!pip install accelerate",
"Requirement already satisfied: accelerate in /usr/local/lib/python3.7/dist-packages (0.3.0)\nRequirement already satisfied: torch>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from accelerate) (1.9.0+cu102)\nRequirement already satisfied: pyaml>=20.4.0 in /usr/local/lib/python3.7/dist-packages (from accelerate) (20.4.0)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch>=1.4.0->accelerate) (3.7.4.3)\nRequirement already satisfied: PyYAML in /usr/local/lib/python3.7/dist-packages (from pyaml>=20.4.0->accelerate) (3.13)\n"
],
[
"from google.colab import drive\ndrive.mount('/content/gdrive')",
"Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount(\"/content/gdrive\", force_remount=True).\n"
],
[
"FINAL = True",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"import os,shutil\nfrom os import path\n\ndef move_files(source_dir,target_dir,show_dir=False):\n if not path.isdir(target_dir):\n os.makedirs(target_dir)\n # \n file_names = os.listdir(source_dir)\n #\n for file_name in file_names:\n shutil.copy(os.path.join(source_dir, file_name), target_dir)\n #\n if show_dir:\n print(os.listdir(target_dir))\n \nsource_dir = '/content/gdrive/MyDrive/kaggle_datasets/commonlitreadability'\ntarget_dir = '../input/commonlitreadabilityprize'\n\nmove_files(source_dir,target_dir,True)\n\n\n\n",
"['train.csv', 'test.csv', 'sample_submission.csv']\n"
],
[
"!pip install transformers\n!pip install colorama\n\nimport os\nimport gc\nimport sys\nimport math\nimport time\nimport tqdm\nimport random\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import StratifiedKFold\n\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom accelerate import Accelerator\nfrom transformers import (RobertaTokenizer,RobertaModel,RobertaConfig,AutoModel,AutoTokenizer,get_cosine_schedule_with_warmup,AutoConfig)\n\nfrom colorama import Fore, Back, Style\nr_ = Fore.RED\nb_ = Fore.BLUE\nc_ = Fore.CYAN\ng_ = Fore.GREEN\ny_ = Fore.YELLOW\nm_ = Fore.MAGENTA\nsr_ = Style.RESET_ALL",
"Requirement already satisfied: transformers in /usr/local/lib/python3.7/dist-packages (4.8.2)\nRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers) (3.0.12)\nRequirement already satisfied: tokenizers<0.11,>=0.10.1 in /usr/local/lib/python3.7/dist-packages (from transformers) (0.10.3)\nRequirement already satisfied: huggingface-hub==0.0.12 in /usr/local/lib/python3.7/dist-packages (from transformers) (0.0.12)\nRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers) (2.23.0)\nRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from transformers) (4.6.0)\nRequirement already satisfied: sacremoses in /usr/local/lib/python3.7/dist-packages (from transformers) (0.0.45)\nRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from transformers) (20.9)\nRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers) (4.41.1)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from transformers) (3.13)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (2019.12.20)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (1.19.5)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from huggingface-hub==0.0.12->transformers) (3.7.4.3)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2021.5.30)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->transformers) (3.4.1)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.15.0)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.0.1)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (7.1.2)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->transformers) (2.4.7)\nRequirement already satisfied: colorama in /usr/local/lib/python3.7/dist-packages (0.4.4)\n"
],
[
"train_data = pd.read_csv('../input/commonlitreadabilityprize/train.csv')\ntest_data = pd.read_csv('../input/commonlitreadabilityprize/test.csv')\nsample = pd.read_csv('../input/commonlitreadabilityprize/sample_submission.csv')\n\nnum_bins = int(np.floor(1 + np.log2(len(train_data))))\ntrain_data.loc[:,'bins'] = pd.cut(train_data['target'],bins=num_bins,labels=False)\n\nbins = train_data.bins.to_numpy()\ntarget = train_data.target.to_numpy()\n\ndef rmse_score(y_true,y_pred):\n return np.sqrt(mean_squared_error(y_true,y_pred))",
"_____no_output_____"
],
[
"config = {\n 'lr': 2e-5,\n 'wd':0.01,\n 'batch_size':16,\n 'valid_step':10,\n 'max_len':256,\n 'epochs':3,\n 'nfolds':5,\n 'seed':42\n}\n\nos.makedirs('models',exist_ok=True)\nfor i in range(config['nfolds']):\n os.makedirs(f'models/model{i}',exist_ok=True)\n\ndef seed_everything(seed=42):\n random.seed(seed)\n os.environ['PYTHONASSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n\nseed_everything(seed=config['seed'])\n\ntrain_data['Fold'] = -1\nkfold = StratifiedKFold(n_splits=config['nfolds'],shuffle=True,random_state=config['seed'])\nfor k , (train_idx,valid_idx) in enumerate(kfold.split(X=train_data,y=bins)):\n train_data.loc[valid_idx,'Fold'] = k",
"_____no_output_____"
],
[
"train_data.head()",
"_____no_output_____"
],
[
"plt.figure(dpi=100)\nsns.countplot(train_data.bins);",
"_____no_output_____"
],
[
"class CLRPDataset(Dataset):\n def __init__(self,df,tokenizer,max_len=128):\n self.excerpt = df['excerpt'].to_numpy()\n self.targets = df['target'].to_numpy()\n self.max_len = max_len\n self.tokenizer = tokenizer\n \n def __getitem__(self,idx):\n encode = self.tokenizer(self.excerpt[idx],\n return_tensors='pt',\n max_length=self.max_len,\n padding='max_length',\n truncation=True)\n \n target = torch.tensor(self.targets[idx],dtype=torch.float) \n return encode, target\n \n def __len__(self):\n return len(self.excerpt)",
"_____no_output_____"
],
[
"class AttentionHead(nn.Module):\n def __init__(self, in_features, hidden_dim, num_targets):\n super().__init__()\n self.in_features = in_features\n self.middle_features = hidden_dim\n self.W = nn.Linear(in_features, hidden_dim)\n self.V = nn.Linear(hidden_dim, 1)\n self.out_features = hidden_dim\n\n def forward(self, features):\n att = torch.tanh(self.W(features))\n score = self.V(att)\n attention_weights = torch.softmax(score, dim=1)\n context_vector = attention_weights * features\n context_vector = torch.sum(context_vector, dim=1)\n\n return context_vector",
"_____no_output_____"
],
[
"class Model(nn.Module):\n def __init__(self):\n super(Model,self).__init__()\n config = RobertaConfig.from_pretrained(\"roberta-large\")\n config.output_hidden_states = True\n\n self.roberta = RobertaModel.from_pretrained('roberta-large',config=config) \n self.head = AttentionHead(768,768,1)\n self.dropout = nn.Dropout(0.05) #original dropout 0.1\n self.linear = nn.Linear(768,1)\n self.attention = nn.Sequential( \n nn.Linear(768, 512), \n nn.Tanh(), \n nn.Linear(512, 1),\n nn.Softmax(dim=1)\n )\n self.regressor = nn.Sequential( \n nn.Linear(768, 1) \n ) \n\n def forward(self,**xb):\n x = self.roberta(**xb)\n last_layer_hidden_states = x.hidden_states[-2] # previously -1\n weights = self.attention(last_layer_hidden_states)\n context_vector = torch.sum(weights * last_layer_hidden_states, dim=1) \n x = self.regressor(context_vector)\n # return x[0] ?\n return x",
"_____no_output_____"
],
[
"def run(fold,verbose=True):\n \n def loss_fn(outputs,targets):\n #outputs = outputs.view(-1)\n #targets = targets.view(-1)\n return torch.sqrt(nn.MSELoss(outputs,targets))\n \n def train_and_evaluate_loop(train_loader,valid_loader,model,loss_fn,optimizer,epoch,fold,best_loss,valid_step=10,lr_scheduler=None):\n train_loss = 0\n for i, (inputs1,targets1) in enumerate(train_loader):\n model.train()\n optimizer.zero_grad()\n inputs1 = {key:val.reshape(val.shape[0],-1) for key,val in inputs1.items()}\n outputs1 = model(**inputs1)\n print(outputs1)\n print(targets1)\n loss1 = loss_fn(outputs1,targets1)\n loss1\n loss1.backward()\n optimizer.step()\n \n train_loss += loss1.item()\n \n if lr_scheduler:\n lr_scheduler.step()\n \n #evaluating for every valid_step\n if (i % valid_step == 0) or ((i + 1) == len(train_loader)):\n model.eval()\n valid_loss = 0\n with torch.no_grad():\n for j, (inputs2,targets2) in enumerate(valid_loader):\n inputs2 = {key:val.reshape(val.shape[0],-1) for key,val in inputs2.items()}\n outputs2 = model(**inputs2)\n loss2 = loss_fn(outputs2,targets2)\n valid_loss += loss2.item()\n \n valid_loss /= len(valid_loader)\n if valid_loss <= best_loss:\n if verbose:\n print(f\"epoch:{epoch} | Train Loss:{train_loss/(i+1)} | Validation loss:{valid_loss}\")\n print(f\"{g_}Validation loss Decreased from {best_loss} to {valid_loss}{sr_}\")\n\n best_loss = valid_loss\n torch.save(model.state_dict(),f'./models/model{fold}/model{fold}.bin')\n tokenizer.save_pretrained(f'./models/model{fold}')\n \n return best_loss\n \n accelerator = Accelerator()\n print(f\"{accelerator.device} is used\")\n \n x_train,x_valid = train_data.query(f\"Fold != {fold}\"),train_data.query(f\"Fold == {fold}\")\n \n tokenizer = RobertaTokenizer.from_pretrained('roberta-large')\n model = Model()\n\n train_ds = CLRPDataset(x_train,tokenizer,config['max_len'])\n train_dl = DataLoader(train_ds,\n batch_size = config[\"batch_size\"],\n shuffle=True,\n num_workers = 4,\n pin_memory=True,\n drop_last=False)\n\n valid_ds = CLRPDataset(x_valid,tokenizer,config['max_len'])\n valid_dl = DataLoader(valid_ds,\n batch_size = config[\"batch_size\"],\n shuffle=False,\n num_workers = 4,\n pin_memory=True,\n drop_last=False)\n\n optimizer = optim.AdamW(model.parameters(),lr=config['lr'],weight_decay=config['wd'])\n lr_scheduler = get_cosine_schedule_with_warmup(optimizer,num_warmup_steps=0,num_training_steps= 10 * len(train_dl))\n \n model,train_dl,valid_dl,optimizer,lr_scheduler = accelerator.prepare(model,train_dl,valid_dl,optimizer,lr_scheduler)\n\n print(f\"Fold: {fold}\")\n best_loss = 9999\n for epoch in range(config[\"epochs\"]):\n print(f\"Epoch Started:{epoch}\")\n best_loss = train_and_evaluate_loop(train_dl,valid_dl,model,loss_fn,\n optimizer,epoch,fold,best_loss,\n valid_step=config['valid_step'],lr_scheduler=lr_scheduler)",
"_____no_output_____"
],
[
"for f in range(config['nfolds']):\n run(f)",
"cuda is used\n"
],
[
"gdrive_model_dir = '/content/gdrive/MyDrive/kaggle_models/commonlitreadability'\n\nif FINAL:\n shutil.make_archive(gdrive_model_dir + '/' + 'roberta_large_ft','zip','./models')\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8fbb57ceb02c7a524ca1f6776299e7b98dc073 | 39,811 | ipynb | Jupyter Notebook | analise-de-dados-com-pandas/Notebooks/Aula3_Pandas.ipynb | edrmonteiro/DIO-Data-Challenges | 70d02eee2a163fe8fb28af89a4a9113db9c4da0a | [
"MIT"
] | null | null | null | analise-de-dados-com-pandas/Notebooks/Aula3_Pandas.ipynb | edrmonteiro/DIO-Data-Challenges | 70d02eee2a163fe8fb28af89a4a9113db9c4da0a | [
"MIT"
] | null | null | null | analise-de-dados-com-pandas/Notebooks/Aula3_Pandas.ipynb | edrmonteiro/DIO-Data-Challenges | 70d02eee2a163fe8fb28af89a4a9113db9c4da0a | [
"MIT"
] | null | null | null | 33.939471 | 463 | 0.332672 | [
[
[
"# **Python para análise de dados(Pandas)** ",
"_____no_output_____"
]
],
[
[
"#importando a biblioteca pandas\nimport pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_csv(\"/content/drive/My Drive/Datasets/Gapminder.csv\",error_bad_lines=False, sep=\";\")",
"_____no_output_____"
],
[
"#Visualizando as 5 primeiras linhas\ndf.head()",
"_____no_output_____"
],
[
"df = df.rename(columns={\"country\":\"Pais\", \"continent\": \"continente\", \"year\":\"Ano\", \"lifeExp\":\"Expectativa de vida\", \"pop\":\"Pop Total\", \"gdpPercap\": \"PIB\"})",
"_____no_output_____"
],
[
"df.head(10)",
"_____no_output_____"
],
[
"#Total de linhas e colunas\ndf.shape",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"df.dtypes",
"_____no_output_____"
],
[
"df.tail(15)",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
],
[
"df[\"continente\"].unique()",
"_____no_output_____"
],
[
"Oceania = df.loc[df[\"continente\"] == \"Oceania\"]\nOceania.head()",
"_____no_output_____"
],
[
"Oceania[\"continente\"].unique()",
"_____no_output_____"
],
[
"df.groupby(\"continente\")[\"Pais\"].nunique()",
"_____no_output_____"
],
[
"df.groupby(\"Ano\")[\"Expectativa de vida\"].mean()",
"_____no_output_____"
],
[
"df[\"PIB\"].mean()",
"_____no_output_____"
],
[
"df[\"PIB\"].sum()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8fc80cd090fbc513232bafdd39fcb74b898a45 | 324,588 | ipynb | Jupyter Notebook | tcga-lihc/1.1-hacknight-tcga-eda.ipynb | sohilsshah91/hacknights | 8f474311799d3466f15fe3564431029ed98ed2e6 | [
"MIT"
] | 9 | 2019-06-20T20:22:50.000Z | 2022-01-18T11:45:09.000Z | tcga-lihc/1.1-hacknight-tcga-eda.ipynb | sohilsshah91/hacknights | 8f474311799d3466f15fe3564431029ed98ed2e6 | [
"MIT"
] | 6 | 2019-07-07T23:25:14.000Z | 2021-08-23T20:34:56.000Z | tcga-lihc/1.1-hacknight-tcga-eda.ipynb | sohilsshah91/hacknights | 8f474311799d3466f15fe3564431029ed98ed2e6 | [
"MIT"
] | 5 | 2019-10-15T23:05:11.000Z | 2021-09-30T14:24:06.000Z | 41.898541 | 9,764 | 0.500906 | [
[
[
"# Healthcare Hack Nights: Part I",
"_____no_output_____"
]
],
[
[
"## RNA-Seq\nQuestions that can be answered by RNA-seq:\n - What genes are differentially expressed between group samples?\n - How does gene expression change across time or conditions? (eg, in benign vs malignant tumors)\n - What pathways or processes are enriched under a condition?",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"lihc = pd.read_csv('../lihc_rnaseq.csv')\nlihc.set_index('bcr_patient_barcode', inplace=True)\nlihc.shape",
"_____no_output_____"
]
],
[
[
"In this count matrix, each column represents an Ensembl gene transcript, each row a patient sequenced RNA library, and the values give the raw numbers of fragments that were uniquely assigned to the respective gene in each library. We also have additional information on each of the patients samples (the rows of the count matrix) and on each of the genes (the columns of the matrix).\n \nWe now have all the ingredients to prepare our data object in a form that is suitable for analysis, namely:\n\n - countdata: a table with the fragment counts\n\n - rowdata: a table with information about the patient samples\n",
"_____no_output_____"
]
],
[
[
"lihc.head()",
"_____no_output_____"
]
],
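[
[
"# Illustrative extra cell (not part of the original analysis): quick sanity checks on the\n# count matrix described above. Library size = total fragments per patient (row sums);\n# all-zero columns are genes that were never observed in any sample.\nnumeric_counts = lihc.select_dtypes(include='number')\nprint('library size per patient (first 5):')\nprint(numeric_counts.sum(axis=1).head())\nprint('genes with zero counts in every sample:', (numeric_counts.sum(axis=0) == 0).sum())",
"_____no_output_____"
]
],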
[
[
"The dataset contains the number of counts for ~20k genes defined by their Entrez transcript ID x 423 deidentified patients. \n\nEntrez (https://www.ncbi.nlm.nih.gov/Web/Search/entrezfs.html) is a data retrieval system that provides users access to NCBI’s databases such as PubMed, GenBank, GEO, and many others. You can access Entrez from a web browser to manually enter queries, or you can use Biopython’s Bio.Entrez module for programmatic access to Entrez. \n\nEntrez gene IDs are unique gene identifiers that can be used to trace a particular gene or transcript to the genome.",
"_____no_output_____"
]
],
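[
[
"# Hedged illustration of the Bio.Entrez module mentioned above (not needed for the rest\n# of this notebook). It assumes biopython is installed, network access is available, and\n# a real email address is supplied; gene id 7157 (TP53) is used purely as an example.\nfrom Bio import Entrez\n\nEntrez.email = '[email protected]'  # placeholder - replace with your own address\nhandle = Entrez.esummary(db='gene', id='7157')\nrecord = Entrez.read(handle)\nhandle.close()\nprint(record)",
"_____no_output_____"
]
],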
[
[
"# Get Entrez transcript IDs\n\nids = pd.Series(lihc.columns.values[1:]).apply(lambda x: x.split('|')[1]).values\nids[:5]",
"_____no_output_____"
]
],
[
[
"# BioMart\n",
"_____no_output_____"
]
],
[
[
"from pybiomart import Server, Dataset",
"_____no_output_____"
],
[
"# Retrieving a dataset directly with known dataset name\n\ndataset = Dataset(name='hsapiens_gene_ensembl',\n host='http://www.ensembl.org')\n\ndataset.query(\n filters={'chromosome_name': ['1','2']})",
"_____no_output_____"
]
],
[
[
"The `attributes` attribute can be used to pull up a list of additional fields available from the dataset",
"_____no_output_____"
]
],
[
[
"list(dataset.attributes)",
"_____no_output_____"
]
],
[
[
"We can map the gene stable ID with the mappings from the to get a sense of which pathways are linked to a particular gene:",
"_____no_output_____"
]
],
[
[
"dataset.filters",
"_____no_output_____"
],
[
"ensmbl_entrez_gene_ids = dataset.query(attributes=['ensembl_transcript_id', 'entrezgene_id'])\nensmbl_entrez_gene_ids.tail(10)",
"_____no_output_____"
],
[
"ensmbl_entrez_gene_ids.dropna(inplace=True)\nensmbl_entrez_gene_ids['NCBI gene ID'] = ensmbl_entrez_gene_ids['NCBI gene ID'].astype(int)\nensmbl_entrez_gene_ids = ensmbl_entrez_gene_ids.set_index('NCBI gene ID').to_dict()['Transcript stable ID']\nensmbl_entrez_gene_ids",
"_____no_output_____"
],
[
"ensmbl_ids = pd.Series(lihc.columns.values[1:]).apply(lambda x: x.split('|')[1]).astype(int).map(ensmbl_entrez_gene_ids).dropna()\n\n# Had to drop ~2k that didn't align, is there a better way?",
"_____no_output_____"
],
[
"dataset.filters",
"_____no_output_____"
],
[
"# Find a faster way to do this\n\nattributes = [\n# 'gene_id',\n 'entrezgene_id',\n 'ensembl_gene_id',\n 'ensembl_transcript_id',\n 'go_id',\n 'name_1006',\n 'definition_1006',\n 'go_linkage_type',\n 'hgnc_id',\n 'hgnc_symbol',\n# 'hgnc_trans_name',\n]\ngo_mappings = dataset.query(attributes=attributes)",
"_____no_output_____"
],
[
"go_mappings.to_csv('data/go_mappings.csv', index=False)",
"_____no_output_____"
],
[
"# Figure out better way to do this\nattributes = [\n# 'gene_id',\n 'entrezgene_id',\n 'ensembl_gene_id',\n 'ensembl_transcript_id',\n 'go_id',\n 'name_1006',\n 'definition_1006',\n 'go_linkage_type',\n 'hgnc_id',\n 'hgnc_symbol',\n# 'hgnc_trans_name',\n]\ndataset.query(attributes=attributes,\n filters={'transcript_id': ensmbl_ids.values}\n )",
"_____no_output_____"
]
],
[
[
"## Examine Gene Counts\n\n",
"_____no_output_____"
]
],
[
[
"go_mappings = pd.read_csv('data/go_mappings.csv')\n\ngo_mappings.shape",
"/Users/mattheweng/Desktop/Galvanize/Projects/hacknights/env-tcga/lib/python3.6/site-packages/IPython/core/interactiveshell.py:3058: DtypeWarning: Columns (0) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
],
[
"go_mappings.head()",
"_____no_output_____"
],
[
"go_mappings.loc[~go_mappings['NCBI gene ID'].isnull()]",
"_____no_output_____"
],
[
"# Create mapping of NCBI gene id to gene name/symbol\n\nmap_geneid_to_hfnc_symbol = go_mappings[['NCBI gene ID', 'HGNC symbol']] \\\n .drop_duplicates() \\\n .dropna()\n# map_geneid_to_hfnc_symbol['NCBI gene ID'] = map_geneid_to_hfnc_symbol['NCBI gene ID'].astype(int)\nmap_geneid_to_hfnc_symbol = map_geneid_to_hfnc_symbol.set_index('NCBI gene ID') \\\n .to_dict()['HGNC symbol']\nmap_geneid_to_hfnc_symbol",
"_____no_output_____"
],
[
"# Map NCBI gene id to gene name/symbol\nlihc_processed = lihc.copy(deep=True)\n\nlihc_processed.columns = list(map(lambda x: x.split('|')[1], lihc_processed.columns))",
"_____no_output_____"
],
[
"gene_ids = list(map(lambda x: str(int(x)) if type(x) is float else x, list(map_geneid_to_hfnc_symbol.keys())))\ngene_ids = [gene for gene in gene_ids if gene in lihc_processed.columns] # get intersection of gene ids\nlihc_processed = lihc_processed[gene_ids]\nlihc_processed",
"_____no_output_____"
],
[
"lihc_processed.columns = pd.Series(lihc_processed.T.index).map(map_geneid_to_hfnc_symbol).values\nlihc_processed.head()",
"_____no_output_____"
]
],
[
[
"## Pre-filtering RNA-seq data\n\nOur count matrix contains many rows with only zeros, and additionally many rows with only a few fragments total. In order to reduce the size of the object, and to increase the speed of our functions, we can remove the rows that have no or nearly no information about the amount of gene expression. Here we remove columns of the count matrix that have no counts, or only a single count across all samples:",
"_____no_output_____"
]
],
[
[
"lihc_processed = lihc_processed.T[lihc_processed.T.sum(axis=1) > len(lihc_processed)].T\nlihc_processed.shape",
"_____no_output_____"
]
],
[
[
"## Survey of clinical characteristics",
"_____no_output_____"
]
],
[
[
"## TODO: Conduct survey of clinical characteristics",
"_____no_output_____"
],
[
"clinpat = pd.read_csv('/Users/mattheweng/Downloads/TCGA-LIHC Clinical Data 1516/nationwidechildrens.org_clinical_patient_lihc.txt', sep='\\t')\nclinpat.columns",
"_____no_output_____"
]
],
[
[
"Some of these clinical features will be more relevant than others. Try exploring some of these to see which might be worth further delving into.\n\nWe can get a better idea of our patient population by viewing demographic characteristics and features representative of the health of the patients.",
"_____no_output_____"
]
],
[
[
"clinpat.loc[2:, ['bcr_patient_barcode', 'tumor_grade']]",
"_____no_output_____"
],
[
"# TODO: plot histogram",
"_____no_output_____"
]
],
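[
[
"# A possible way to fill in the TODO above (illustrative sketch, not from the original\n# notebook): bar plot of the tumor_grade distribution. The first two rows of the TCGA\n# clinical file are header metadata, so we skip them as the rest of this notebook does.\nimport matplotlib.pyplot as plt\n\ntumor_grade_counts = clinpat.loc[2:, 'tumor_grade'].value_counts()\ntumor_grade_counts.plot(kind='bar', figsize=(8, 4), title='Tumor grade distribution')\nplt.show()",
"_____no_output_____"
]
],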
[
[
"The patient barcodes need to be matched in order to make comparisons:",
"_____no_output_____"
]
],
[
[
"lihc_processed.head(2)",
"_____no_output_____"
],
[
"clinpat.head(2)",
"_____no_output_____"
]
],
[
[
"More information about how patient barcodes are defined can be found [here](https://docs.gdc.cancer.gov/Encyclopedia/pages/TCGA_Barcode/). For simplification, the alphanumeric combination to the right of the 2nd hyphen is the code that we're interested in. We'll need to transform these indices for both dataframes to combine these together.",
"_____no_output_____"
]
],
[
[
"# Transform clinical dataset\nclinpat.loc[2:, 'bcr_patient_barcode'] = clinpat.loc[2:, 'bcr_patient_barcode'].str.split('-').apply(lambda x: x[2])",
"_____no_output_____"
],
[
"# Transform LIHC RNA-seq dataset\nlihc_processed.index = list(map(lambda x: x[2], lihc.index.str.split('-')))",
"_____no_output_____"
],
[
"clinpat.head()",
"_____no_output_____"
],
[
"# Merge data for clincal and RNA-seq dataset\n\nlihc_processed = lihc_processed.join(clinpat[['bcr_patient_barcode', 'tumor_grade']].set_index('bcr_patient_barcode')).dropna()\nlihc_processed.head()",
"_____no_output_____"
],
[
"## Label and Split data based on recurrence? drug?",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Days to death",
"_____no_output_____"
]
],
[
[
"clinpat['death_days_to'].loc[clinpat['death_days_to'] == '[Not Applicable]'] = '-1'\nclinpat.loc[2:,'death_days_to'] = clinpat.loc[2:,'death_days_to'].astype(int)",
"_____no_output_____"
],
[
"f = plt.figure(figsize=(12,6))\nplt.hist(clinpat.loc[2:,'death_days_to'], bins=10);\nplt.title('Histogram of days to death');",
"_____no_output_____"
],
[
"# Add new deceased variables\nmortality = clinpat.loc[2:,['death_days_to','bcr_patient_barcode']]\n\nmortality['deceased'] = mortality['death_days_to'].apply(lambda x: 1 if x > 0 else 0)\n\nmortality['deceased_1yr'] = mortality['death_days_to'].apply(lambda x: 1 if x <= 365 else 0)\nmortality['deceased_3yr'] = mortality['death_days_to'].apply(lambda x: 1 if x <= 1095 else 0)\nmortality['deceased_5yr'] = mortality['death_days_to'].apply(lambda x: 1 if x <= 1825 else 0)",
"_____no_output_____"
]
],
[
[
"# Looking for differentially expressed gene between clinical conditions",
"_____no_output_____"
],
[
"## R-log transformation\n\nhttps://bioconductor.org/help/course-materials/2017/CSAMA/labs/2-tuesday/lab-03-rnaseq/rnaseqGene_CSAMA2017.html\n\nMany common statistical methods for exploratory analysis of multidimensional data, for example clustering and principal components analysis (PCA), work best for data that generally has the same range of variance at different ranges of the mean values. When the expected amount of variance is approximately the same across different mean values, the data is said to be homoskedastic. For RNA-seq raw counts, however, the variance grows with the mean. For example, if one performs PCA directly on a matrix of size-factor-normalized read counts, the result typically depends only on the few most strongly expressed genes because they show the largest absolute differences between samples. A simple and often used strategy to avoid this is to take the logarithm of the normalized count values plus a small pseudocount; however, now the genes with the very lowest counts will tend to dominate the results because, due to the strong Poisson noise inherent to small count values, and the fact that the logarithm amplifies differences for the smallest values, these low count genes will show the strongest relative differences between samples.\n\nAs a solution, DESeq2 offers transformations for count data that stabilize the variance across the mean. One such transformation is the regularized-logarithm transformation or rlog 2. For genes with high counts, the rlog transformation will give similar result to the ordinary log2 transformation of normalized counts. For genes with lower counts, however, the values are shrunken towards the genes’ averages across all samples. Using an empirical Bayesian prior on inter-sample differences in the form of a ridge penalty, the rlog-transformed data then becomes approximately homoskedastic, and can be used directly for computing distances between samples and making PCA plots. Another transformation, the variance stabilizing transformation 17, is discussed alongside the rlog in the DESeq2 vignette.",
"_____no_output_____"
],
[
"## PCA Plots\nNow we have everything setup, the first thing to do is to generate PCA plots to observe whether the samples cluster as expected: controls with controls, and treatments with treatments.",
"_____no_output_____"
],
[
"Another way to visualize sample-to-sample distances is a principal components analysis (PCA). In this ordination method, the data points (here, the samples) are projected onto the 2D plane such that they spread out in the two directions that explain most of the differences (figure below). The x-axis is the direction that separates the data points the most. The values of the samples in this direction are written PC1. The y-axis is a direction (it must be orthogonal to the first direction) that separates the data the second most. The values of the samples in this direction are written PC2. The percent of the total variance that is contained in the direction is printed in the axis label. Note that these percentages do not add to 100%, because there are more dimensions that contain the remaining variance (although each of these remaining dimensions will explain less than the two that we see).",
"_____no_output_____"
]
],
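[
[
"# Illustrative sketch (added for this section; assumes scikit-learn is installed):\n# a quick first-look PCA using a simple log2(count + 1) transform of the counts in\n# `lihc_processed`. The DESeq2 rlog transform attempted below is the more principled\n# choice, as discussed above.\nimport numpy as np\nfrom sklearn.decomposition import PCA\n\ncounts = lihc_processed.drop('tumor_grade', axis=1).astype(float)\nlog_counts = np.log2(counts + 1)\n\npca = PCA(n_components=2)\npcs = pca.fit_transform(log_counts.values)\n\nplt.figure(figsize=(8, 6))\nfor grade in lihc_processed['tumor_grade'].unique():\n    mask = (lihc_processed['tumor_grade'] == grade).values\n    plt.scatter(pcs[mask, 0], pcs[mask, 1], label=str(grade), alpha=0.6)\nplt.xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.1%} of variance)')\nplt.ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.1%} of variance)')\nplt.legend(title='tumor_grade')\nplt.title('PCA of log2(count + 1) expression values');",
"_____no_output_____"
]
],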
[
[
"# Use DESEQ2 R library to rlog transform\n\nfrom rpy2.robjects.packages import importr\ndeseq = importr('DESeq2')",
"_____no_output_____"
],
[
"from rpy2.robjects import pandas2ri, Formula\npandas2ri.activate()",
"_____no_output_____"
],
[
"pd.DataFrame({'id': ['geneA', 'geneB', 'geneC'], 'sampleA1': [5,4,1], 'sampleA2': [5,4,1], 'sampleB1': [1,5,2], 'sampleB2': [1,5,2]})",
"_____no_output_____"
],
[
"pd.DataFrame({'samplename': ['sampleA1', 'sampleA2', 'sampleB1', 'sampleB2'], 'treatment': ['A','A','B','B']})",
"_____no_output_____"
],
[
"# binary matrix of group that the row belongs to\ndesign_formula",
"_____no_output_____"
]
],
[
[
"## NICK to create wrapper in python for DEseq2",
"_____no_output_____"
]
],
[
[
"# Create DESeq dataset\n\n'''\nDESeq2 object through rpy2\n input:\n count_matrix: should be a pandas dataframe with each column as count, and a id column for gene id\n example:\n id sampleA sampleB\n geneA 5 1\n geneB 4 5\n geneC 1 2\n design_matrix: an design matrix in the form of pandas dataframe, see DESeq2 manual, samplenames as rownames\n treatment\n sampleA1 A\n sampleA2 A\n sampleB1 B\n sampleB2 B\n design_formula: see DESeq2 manual, example: \"~ treatment\"\"\n gene_column: column name of gene id columns, example \"id\"\n\n'''\n\ncount_matrix = pd.DataFrame({'id': ['geneA', 'geneB', 'geneC'], 'sampleA1': [5,4,1], 'sampleA2': [5,4,1], 'sampleB1': [1,5,2], 'sampleB2': [1,5,2]})\ndesign_matrix = pd.DataFrame({'samplename': ['sampleA1', 'sampleA2', 'sampleB1', 'sampleB2'], 'treatment': ['A','A','B','B']})\n\nwith localconverter(ro.default_converter + pandas2ri.converter):\n count_matrix = ro.conversion.py2rpy(count_matrix.set_index('id'))\n design_matrix = ro.conversion.py2rpy(design_matrix.set_index('samplename'))\n\n# count_matrix = pandas2ri.py2ri(count_matrix)\n# design_matrix = pandas2ri.py2ri(lihc_processed['tumor_grade'])\ndesign_formula = Formula(\"~ treatment\")\n \ndds = deseq.DESeqDataSetFromMatrix(countData=count_matrix,\n colData=design_matrix,\n design=design_formula)\n\ndeseq.rlog(dds,blind=False)",
"R[write to console]: Error in estimateDispersionsFit(object, fitType, quiet = TRUE) : \n all gene-wise dispersion estimates are within 2 orders of magnitude\n from the minimum value, and so the standard curve fitting techniques will not work.\n One can instead use the gene-wise estimates as final estimates:\n dds <- estimateDispersionsGeneEst(dds)\n dispersions(dds) <- mcols(dds)$dispGeneEst\n ...then continue with testing using nbinomWaldTest or nbinomLRT\nCalls: <Anonymous> -> estimateDispersionsFit\n\nR[write to console]: In addition: \nR[write to console]: Warning message:\n\nR[write to console]: In DESeqDataSet(se, design = design, ignoreRank) :\nR[write to console]: \n \nR[write to console]: some variables in design formula are characters, converting to factors\n\n"
],
[
"dds",
"_____no_output_____"
],
[
"# Create DESeq dataset\n\n'''\nDESeq2 object through rpy2\n input:\n count_matrix: should be a pandas dataframe with each column as count, and a id column for gene id\n example:\n id sampleA sampleB\n geneA 5 1\n geneB 4 5\n geneC 1 2\n design_matrix: an design matrix in the form of pandas dataframe, see DESeq2 manual, samplenames as rownames\n treatment\n sampleA1 A\n sampleA2 A\n sampleB1 B\n sampleB2 B\n design_formula: see DESeq2 manual, example: \"~ treatment\"\"\n gene_column: column name of gene id columns, example \"id\"\n\n'''\n\nwith localconverter(ro.default_converter + pandas2ri.converter):\n count_matrix = lihc_processed.reset_index().drop(['tumor_grade'], axis=1).T\n count_matrix = ro.conversion.py2rpy(count_matrix.reset_index(drop=True))\n design_matrix = ro.conversion.py2rpy(lihc_processed['tumor_grade'].reset_index(drop=True))\n\n# count_matrix = pandas2ri.py2ri(count_matrix)\n# design_matrix = pandas2ri.py2ri(lihc_processed['tumor_grade'])\ndesign_formula = Formula(\"~ treatment\")\n \ndds = deseq.DESeqDataSetFromMatrix(countData=count_matrix,\n colData=design_matrix,\n design=design_formula)\n\ndeseq.rlog(dds,blind=False)",
"/Users/mattheweng/Desktop/Galvanize/Projects/hacknights/env-tcga/lib/python3.6/site-packages/rpy2/robjects/pandas2ri.py:60: UserWarning: Error while trying to convert the column \"0\". Fall back to string conversion. The error is: Series can only be of one type, or None.\n % (name, str(e)))\n"
],
[
"lihc_processed['tumor_grade'].reset_index(drop=True)",
"_____no_output_____"
],
[
"count_matrix = lihc_processed.reset_index().drop(['tumor_grade'], axis=1).T\ncount_matrix.reset_index(drop=True)",
"_____no_output_____"
],
[
"import rpy2.robjects as ro\nfrom rpy2.robjects.conversion import localconverter",
"_____no_output_____"
],
[
"with localconverter(ro.default_converter + pandas2ri.converter):\n r_from_pd_df = ro.conversion.py2rpy(lihc.head())\nr_from_pd_df",
"_____no_output_____"
],
[
"lihc_processed.drop('tumor_grade', axis=1).T",
"_____no_output_____"
],
[
"lihc_processed['tumor_grade']",
"_____no_output_____"
]
],
[
[
"## Differential Gene Expression\nNow we are ready to identify the differentially expressed genes between the two sets of samples: control vs. treatment. We will achieve this using the Characteristic Direction method[6](#ref6) that we developed and published in BMC Bioinformatics in 2014.\n\nAn implementation in Python of the Characteristic Direction method can be downloaded and installed from here: https://github.com/wangz10/geode.\n \n",
"_____no_output_____"
]
],
[
[
"import src.geode",
"_____no_output_____"
],
[
"import geode\nd_platform_cd = {} # to top up/down genes\ncd_results = pd.DataFrame(index=expr_df.index)\n\nsample_classes = {}\nfor layout in meta_df['LibraryLayout_s'].unique():\n ## make sample_class \n sample_class = np.zeros(expr_df.shape[1], dtype=np.int32)\n sample_class[meta_df['LibraryLayout_s'].values == layout] = 1\n sample_class[(meta_df['LibraryLayout_s'].values == layout) & \n (meta_df['infection_status_s'].values == 'Zika infected')] = 2\n platform = d_layout_platform[layout]\n sample_classes[platform] = sample_class\n\nsample_classes['combined'] = sample_classes['MiSeq'] + sample_classes['NextSeq 500']\nprint sample_classes\n\nfor platform, sample_class in sample_classes.items():\n cd_res = geode.chdir(expr_df.values, sample_class, expr_df.index, \n gamma=.5, sort=False, calculate_sig=False)\n cd_coefs = np.array(map(lambda x: x[0], cd_res))\n cd_results[platform] = cd_coefs\n \n # sort CD in by absolute values in descending order\n srt_idx = np.abs(cd_coefs).argsort()[::-1]\n cd_coefs = cd_coefs[srt_idx][:600]\n sorted_DEGs = expr_df.index[srt_idx][:600]\n # split up and down\n up_genes = dict(zip(sorted_DEGs[cd_coefs > 0], cd_coefs[cd_coefs > 0]))\n dn_genes = dict(zip(sorted_DEGs[cd_coefs < 0], cd_coefs[cd_coefs < 0]))\n d_platform_cd[platform+'-up'] = up_genes\n d_platform_cd[platform+'-dn'] = dn_genes\n\nprint cd_results.head()",
"_____no_output_____"
],
[
"## Check the cosine distance between the two signatures\nfrom scipy.spatial.distance import cosine\nfrom itertools import combinations\nfor col1, col2 in combinations(cd_results.columns, 2):\n print col1, col2, cosine(cd_results[col1], cd_results[col2])",
"_____no_output_____"
],
[
"# EXTRA: DELETE later",
"_____no_output_____"
]
],
[
[
"## Prepare count matrices\nexpect input data as obtained, e.g., from RNA-seq or another high-throughput sequencing experiment, in the form of a matrix of integer values. The value in the i-th row and the j-th column of the matrix tells how many reads (or fragments, for paired-end RNA-seq) have been assigned to gene i in sample j. Analogously, for other types of assays, the rows of the matrix might correspond e.g., to binding regions (with ChIP-Seq), species of bacteria (with metagenomic datasets), or peptide sequences (with quantitative mass spectrometry).\n\nThe values in the matrix should be counts of sequencing reads/fragments. This is important for DESeq2’s statistical model to hold, as only counts allow assessing the measurement precision correctly. It is important to never provide counts that were pre-normalized for sequencing depth/library size, as the statistical model is most powerful when applied to un-normalized counts, and is designed to account for library size differences internally.\n\n## Align Reads to reference genome\nThe computational analysis of an RNA-seq experiment begins from the FASTQ files that contain the nucleotide sequence of each read and a quality score at each position. These reads must first be aligned to a reference genome or transcriptome, or the abundances and estimated counts per transcript can be estimated without alignment, as described above. In either case, it is important to know if the sequencing experiment was single-end or paired-end, as the alignment software will require the user to specify both FASTQ files for a paired-end experiment. The output of this alignment step is commonly stored in a file format called SAM/BAM.\n\n\n## Define gene models\n\n## Plot counts\n\n## PCA Plot \n\n## Differential Expression Analysis\n## Gene Clustering\n\n\n\n",
"_____no_output_____"
]
],
[
[
"from diffexp.py_deseq import py_DESeq2\n\ndds = py_DESeq2(count_matrix = df,\n design_matrix = sample_df,\n design_formula = '~ sample',\n gene_column = 'id') # <- telling DESeq2 this should be the gene ID column\n \ndds.run_deseq() \ndds.get_deseq_result()\nres = dds.deseq_result \nres.head()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nplt.scatter(res.log2FoldChange, -np.log2(res.padj))",
"_____no_output_____"
],
[
"from dgeclust import CountData, SimulationManager",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ec8fd890306bda1886666194d932cf3663d7d611 | 49,889 | ipynb | Jupyter Notebook | C1_classification_vec_spaces/Week3/C1_W3_Assignment.ipynb | htrismicristo/NLP_specialization | 8089d450a241a18f070f42d81d0098ff500a7a55 | [
"MIT"
] | null | null | null | C1_classification_vec_spaces/Week3/C1_W3_Assignment.ipynb | htrismicristo/NLP_specialization | 8089d450a241a18f070f42d81d0098ff500a7a55 | [
"MIT"
] | null | null | null | C1_classification_vec_spaces/Week3/C1_W3_Assignment.ipynb | htrismicristo/NLP_specialization | 8089d450a241a18f070f42d81d0098ff500a7a55 | [
"MIT"
] | null | null | null | 49.640796 | 13,668 | 0.673996 | [
[
[
"# Assignment 3: Hello Vectors\n\nWelcome to this week's programming assignment on exploring word vectors.\nIn natural language processing, we represent each word as a vector consisting of numbers.\nThe vector encodes the meaning of the word. These numbers (or weights) for each word are learned using various machine\nlearning models, which we will explore in more detail later in this specialization. Rather than make you code the\nmachine learning models from scratch, we will show you how to use them. In the real world, you can always load the\ntrained word vectors, and you will almost never have to train them from scratch. In this assignment, you will:\n\n- Predict analogies between words.\n- Use PCA to reduce the dimensionality of the word embeddings and plot them in two dimensions.\n- Compare word embeddings by using a similarity measure (the cosine similarity).\n- Understand how these vector space models work.\n\n\n\n## 1.0 Predict the Countries from Capitals\n\nIn the lectures, we have illustrated the word analogies\nby finding the capital of a country from the country. \nWe have changed the problem a bit in this part of the assignment. You are asked to predict the **countries** \nthat corresponds to some **capitals**.\nYou are playing trivia against some second grader who just took their geography test and knows all the capitals by heart.\nThanks to NLP, you will be able to answer the questions properly. In other words, you will write a program that can give\nyou the country by its capital. That way you are pretty sure you will win the trivia game. We will start by exploring the data set.\n\n<img src = 'map.jpg' width=\"width\" height=\"height\" style=\"width:467px;height:300px;\"/>\n\n### 1.1 Importing the data\n\nAs usual, you start by importing some essential Python libraries and then load the dataset.\nThe dataset will be loaded as a [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/getting_started/dsintro.html),\nwhich is very a common method in data science.\nThis may take a few minutes because of the large size of the data.",
"_____no_output_____"
]
],
[
[
"# Run this cell to import packages.\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom utils import get_vectors",
"_____no_output_____"
],
[
"data = pd.read_csv('capitals.txt', delimiter=' ')\ndata.columns = ['city1', 'country1', 'city2', 'country2']\n\n# print first five elements in the DataFrame\ndata.head(5)",
"_____no_output_____"
]
],
[
[
"***\n\n### To Run This Code On Your Own Machine:\nNote that because the original google news word embedding dataset is about 3.64 gigabytes,\nthe workspace is not able to handle the full file set. So we've downloaded the full dataset,\nextracted a sample of the words that we're going to analyze in this assignment, and saved\nit in a pickle file called `word_embeddings_capitals.p`\n\nIf you want to download the full dataset on your own and choose your own set of word embeddings,\nplease see the instructions and some helper code.\n\n- Download the dataset from this [page](https://code.google.com/archive/p/word2vec/).\n- Search in the page for 'GoogleNews-vectors-negative300.bin.gz' and click the link to download.",
"_____no_output_____"
],
[
"Copy-paste the code below and run it on your local machine after downloading\nthe dataset to the same directory as the notebook.\n\n```python\nimport nltk\nfrom gensim.models import KeyedVectors\n\n\nembeddings = KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary = True)\nf = open('capitals.txt', 'r').read()\nset_words = set(nltk.word_tokenize(f))\nselect_words = words = ['king', 'queen', 'oil', 'gas', 'happy', 'sad', 'city', 'town', 'village', 'country', 'continent', 'petroleum', 'joyful']\nfor w in select_words:\n set_words.add(w)\n\ndef get_word_embeddings(embeddings):\n\n word_embeddings = {}\n for word in embeddings.vocab:\n if word in set_words:\n word_embeddings[word] = embeddings[word]\n return word_embeddings\n\n\n# Testing your function\nword_embeddings = get_word_embeddings(embeddings)\nprint(len(word_embeddings))\npickle.dump( word_embeddings, open( \"word_embeddings_subset.p\", \"wb\" ) )\n```\n\n***",
"_____no_output_____"
],
[
"Now we will load the word embeddings as a [Python dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries).\nAs stated, these have already been obtained through a machine learning algorithm. ",
"_____no_output_____"
]
],
[
[
"word_embeddings = pickle.load(open(\"word_embeddings_subset.p\", \"rb\"))\nlen(word_embeddings) # there should be 243 words that will be used in this assignment",
"_____no_output_____"
]
],
[
[
"Each of the word embedding is a 300-dimensional vector.",
"_____no_output_____"
]
],
[
[
"print(\"dimension: {}\".format(word_embeddings['Spain'].shape[0]))",
"dimension: 300\n"
]
],
[
[
"### Predict relationships among words\n\nNow you will write a function that will use the word embeddings to predict relationships among words.\n* The function will take as input three words.\n* The first two are related to each other.\n* It will predict a 4th word which is related to the third word in a similar manner as the two first words are related to each other.\n* As an example, \"Athens is to Greece as Bangkok is to ______\"?\n* You will write a program that is capable of finding the fourth word.\n* We will give you a hint to show you how to compute this.\n\nA similar analogy would be the following:\n\n<img src = 'vectors.jpg' width=\"width\" height=\"height\" style=\"width:467px;height:200px;\"/>\n\nYou will implement a function that can tell you the capital of a country.\nYou should use the same methodology shown in the figure above. To do this,\ncompute you'll first compute cosine similarity metric or the Euclidean distance.",
"_____no_output_____"
],
[
"### 1.2 Cosine Similarity\n\nThe cosine similarity function is:\n\n$$\\cos (\\theta)=\\frac{\\mathbf{A} \\cdot \\mathbf{B}}{\\|\\mathbf{A}\\|\\|\\mathbf{B}\\|}=\\frac{\\sum_{i=1}^{n} A_{i} B_{i}}{\\sqrt{\\sum_{i=1}^{n} A_{i}^{2}} \\sqrt{\\sum_{i=1}^{n} B_{i}^{2}}}\\tag{1}$$\n\n$A$ and $B$ represent the word vectors and $A_i$ or $B_i$ represent index i of that vector.\n& Note that if A and B are identical, you will get $cos(\\theta) = 1$.\n* Otherwise, if they are the total opposite, meaning, $A= -B$, then you would get $cos(\\theta) = -1$.\n* If you get $cos(\\theta) =0$, that means that they are orthogonal (or perpendicular).\n* Numbers between 0 and 1 indicate a similarity score.\n* Numbers between -1-0 indicate a dissimilarity score.\n\n**Instructions**: Implement a function that takes in two word vectors and computes the cosine distance.",
"_____no_output_____"
],
[
"<details>\n<summary>\n <font size=\"3\" color=\"darkgreen\"><b>Hints</b></font>\n</summary>\n<p>\n<ul>\n <li> Python's<a href=\"https://docs.scipy.org/doc/numpy/reference/\" > NumPy library </a> adds support for linear algebra operations (e.g., dot product, vector norm ...).</li>\n <li>Use <a href=\"https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html\" > numpy.dot </a>.</li>\n <li>Use <a href=\"https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html\">numpy.linalg.norm </a>.</li>\n</ul>\n</p>",
"_____no_output_____"
]
],
[
[
"# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef cosine_similarity(A, B):\n '''\n Input:\n A: a numpy array which corresponds to a word vector\n B: A numpy array which corresponds to a word vector\n Output:\n cos: numerical number representing the cosine similarity between A and B.\n '''\n\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n \n dot = np.dot(A, B)\n norma = np.linalg.norm(A)\n normb = np.linalg.norm(B)\n cos = dot/np.multiply(norma, normb)\n\n ### END CODE HERE ###\n return cos",
"_____no_output_____"
],
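[
"# Extra illustrative check (not part of the graded exercise): verify the properties listed\n# above with tiny hand-made vectors.\nA = np.array([1.0, 0.0])\nB = np.array([0.0, 1.0])\nprint(cosine_similarity(A, A))   # identical vectors  -> expect about  1.0\nprint(cosine_similarity(A, -A))  # opposite vectors   -> expect about -1.0\nprint(cosine_similarity(A, B))   # orthogonal vectors -> expect about  0.0",
"_____no_output_____"
],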
[
"# feel free to try different words\nking = word_embeddings['king']\nqueen = word_embeddings['queen']\n\ncosine_similarity(king, queen)",
"_____no_output_____"
]
],
[
[
"**Expected Output**:\n\n$\\approx$ 0.6510956",
"_____no_output_____"
],
[
"### 1.3 Euclidean distance\n\nYou will now implement a function that computes the similarity between two vectors using the Euclidean distance.\nEuclidean distance is defined as:\n\n$$ \\begin{aligned} d(\\mathbf{A}, \\mathbf{B})=d(\\mathbf{B}, \\mathbf{A}) &=\\sqrt{\\left(A_{1}-B_{1}\\right)^{2}+\\left(A_{2}-B_{2}\\right)^{2}+\\cdots+\\left(A_{n}-B_{n}\\right)^{2}} \\\\ &=\\sqrt{\\sum_{i=1}^{n}\\left(A_{i}-B_{i}\\right)^{2}} \\end{aligned}$$\n\n* $n$ is the number of elements in the vector\n* $A$ and $B$ are the corresponding word vectors. \n* The more similar the words, the more likely the Euclidean distance will be close to 0. \n\n**Instructions**: Write a function that computes the Euclidean distance between two vectors.",
"_____no_output_____"
],
[
"<details> \n<summary>\n <font size=\"3\" color=\"darkgreen\"><b>Hints</b></font>\n</summary>\n<p>\n<ul>\n <li>Use <a href=\"https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html\" > numpy.linalg.norm </a>.</li>\n</ul>\n</p>",
"_____no_output_____"
]
],
[
[
"# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef euclidean(A, B):\n \"\"\"\n Input:\n A: a numpy array which corresponds to a word vector\n B: A numpy array which corresponds to a word vector\n Output:\n d: numerical number representing the Euclidean distance between A and B.\n \"\"\"\n\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n\n # euclidean distance\n\n d = np.linalg.norm(A-B)\n\n ### END CODE HERE ###\n\n return d\n",
"_____no_output_____"
],
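[
"# Extra illustrative check (not part of the graded exercise): a 3-4-5 right triangle,\n# so the expected Euclidean distance is exactly 5.0.\neuclidean(np.array([0.0, 0.0]), np.array([3.0, 4.0]))",
"_____no_output_____"
],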
[
"# Test your function\neuclidean(king, queen)",
"_____no_output_____"
]
],
[
[
"**Expected Output:**\n\n2.4796925",
"_____no_output_____"
],
[
"### 1.4 Finding the country of each capital\n\nNow, you will use the previous functions to compute similarities between vectors,\nand use these to find the capital cities of countries. You will write a function that\ntakes in three words, and the embeddings dictionary. Your task is to find the\ncapital cities. For example, given the following words: \n\n- 1: Athens 2: Greece 3: Baghdad,\n\nyour task is to predict the country 4: Iraq.\n\n**Instructions**: \n\n1. To predict the capital you might want to look at the *King - Man + Woman = Queen* example above, and implement that scheme into a mathematical function, using the word embeddings and a similarity function.\n\n2. Iterate over the embeddings dictionary and compute the cosine similarity score between your vector and the current word embedding.\n\n3. You should add a check to make sure that the word you return is not any of the words that you fed into your function. Return the one with the highest score.",
"_____no_output_____"
]
],
[
[
"# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef get_country(city1, country1, city2, embeddings):\n \"\"\"\n Input:\n city1: a string (the capital city of country1)\n country1: a string (the country of capital1)\n city2: a string (the capital city of country2)\n embeddings: a dictionary where the keys are words and values are their embeddings\n Output:\n countries: a dictionary with the most likely country and its similarity score\n \"\"\"\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n\n # store the city1, country 1, and city 2 in a set called group\n group = set((city1, country1, city2))\n\n # get embeddings of city 1\n city1_emb = embeddings[city1]\n\n # get embedding of country 1\n country1_emb = embeddings[country1]\n\n # get embedding of city 2\n city2_emb = embeddings[city2]\n\n # get embedding of country 2 (it's a combination of the embeddings of country 1, city 1 and city 2)\n # Remember: King - Man + Woman = Queen\n vec = country1_emb - city1_emb + city2_emb\n\n # Initialize the similarity to -1 (it will be replaced by a similarities that are closer to +1)\n similarity = -1\n\n # initialize country to an empty string\n country = ''\n\n # loop through all words in the embeddings dictionary\n for word in embeddings.keys():\n\n # first check that the word is not already in the 'group'\n if word not in group:\n\n # get the word embedding\n word_emb = embeddings[word]\n\n # calculate cosine similarity between embedding of country 2 and the word in the embeddings dictionary\n cur_similarity = cosine_similarity(vec, word_emb)\n\n # if the cosine similarity is more similar than the previously best similarity...\n if cur_similarity > similarity:\n\n # update the similarity to the new, better similarity\n similarity = cur_similarity\n\n # store the country as a tuple, which contains the word and the similarity\n country = (word, similarity)\n\n ### END CODE HERE ###\n\n return country",
"_____no_output_____"
],
[
"# Testing your function, note to make it more robust you can return the 5 most similar words.\nget_country('Athens', 'Greece', 'Cairo', word_embeddings)",
"_____no_output_____"
]
],
[
[
"**Expected Output:**\n\n('Egypt', 0.7626821)",
"_____no_output_____"
],
[
"### 1.5 Model Accuracy\n\nNow you will test your new function on the dataset and check the accuracy of the model:\n\n$$\\text{Accuracy}=\\frac{\\text{Correct # of predictions}}{\\text{Total # of predictions}}$$\n\n**Instructions**: Write a program that can compute the accuracy on the dataset provided for you. You have to iterate over every row to get the corresponding words and feed them into you `get_country` function above. ",
"_____no_output_____"
],
[
"<details>\n<summary>\n <font size=\"3\" color=\"darkgreen\"><b>Hints</b></font>\n</summary>\n<p>\n<ul>\n <li>Use <a href=\"https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.iterrows.html\" > pandas.DataFrame.iterrows </a>.</li>\n</ul>\n</p>",
"_____no_output_____"
]
],
[
[
"# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef get_accuracy(word_embeddings, data):\n '''\n Input:\n word_embeddings: a dictionary where the key is a word and the value is its embedding\n data: a pandas dataframe containing all the country and capital city pairs\n \n Output:\n accuracy: the accuracy of the model\n '''\n\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n # initialize num correct to zero\n num_correct = 0\n\n # loop through the rows of the dataframe\n for i, row in data.iterrows():\n\n # get city1\n city1 = row[0]\n\n # get country1\n country1 = row[1]\n\n # get city2\n city2 = row[2]\n\n # get country2\n country2 = row[3]\n\n # use get_country to find the predicted country2\n predicted_country2, _ = get_country(city1, country1, city2, word_embeddings)\n\n # if the predicted country2 is the same as the actual country2...\n if predicted_country2 == country2:\n # increment the number of correct by 1\n num_correct += 1\n\n # get the number of rows in the data dataframe (length of dataframe)\n m = len(data)\n\n # calculate the accuracy by dividing the number correct by m\n accuracy = num_correct/m\n\n ### END CODE HERE ###\n return accuracy\n",
"_____no_output_____"
]
],
[
[
"**NOTE: The cell below takes about 30 SECONDS to run.**",
"_____no_output_____"
]
],
[
[
"accuracy = get_accuracy(word_embeddings, data)\nprint(f\"Accuracy is {accuracy:.2f}\")",
"Accuracy is 0.92\n"
]
],
[
[
"**Expected Output:**\n\n$\\approx$ 0.92",
"_____no_output_____"
],
[
"# 3.0 Plotting the vectors using PCA\n\nNow you will explore the distance between word vectors after reducing their dimension.\nThe technique we will employ is known as\n[*principal component analysis* (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis).\nAs we saw, we are working in a 300-dimensional space in this case.\nAlthough from a computational perspective we were able to perform a good job,\nit is impossible to visualize results in such high dimensional spaces.\n\nYou can think of PCA as a method that projects our vectors in a space of reduced\ndimension, while keeping the maximum information about the original vectors in\ntheir reduced counterparts. In this case, by *maximum infomation* we mean that the\nEuclidean distance between the original vectors and their projected siblings is\nminimal. Hence vectors that were originally close in the embeddings dictionary,\nwill produce lower dimensional vectors that are still close to each other.\n\nYou will see that when you map out the words, similar words will be clustered\nnext to each other. For example, the words 'sad', 'happy', 'joyful' all describe\nemotion and are supposed to be near each other when plotted.\nThe words: 'oil', 'gas', and 'petroleum' all describe natural resources.\nWords like 'city', 'village', 'town' could be seen as synonyms and describe a\nsimilar thing.\n\nBefore plotting the words, you need to first be able to reduce each word vector\nwith PCA into 2 dimensions and then plot it. The steps to compute PCA are as follows:\n\n1. Mean normalize the data\n2. Compute the covariance matrix of your data ($\\Sigma$). \n3. Compute the eigenvectors and the eigenvalues of your covariance matrix\n4. Multiply the first K eigenvectors by your normalized data. The transformation should look something as follows:\n\n<img src = 'word_embf.jpg' width=\"width\" height=\"height\" style=\"width:800px;height:200px;\"/>",
"_____no_output_____"
],
[
"**Instructions**: \n\nYou will write a program that takes in a data set where each row corresponds to a word vector. \n* The word vectors are of dimension 300. \n* Use PCA to change the 300 dimensions to `n_components` dimensions. \n* The new matrix should be of dimension `m, n_componentns`. \n\n* First de-mean the data\n* Get the eigenvalues using `linalg.eigh`. Use `eigh` rather than `eig` since R is symmetric. The performance gain when using `eigh` instead of `eig` is substantial.\n* Sort the eigenvectors and eigenvalues by decreasing order of the eigenvalues.\n* Get a subset of the eigenvectors (choose how many principle components you want to use using `n_components`).\n* Return the new transformation of the data by multiplying the eigenvectors with the original data.",
"_____no_output_____"
],
[
"<details>\n<summary>\n <font size=\"3\" color=\"darkgreen\"><b>Hints</b></font>\n</summary>\n<p>\n<ul>\n <li>Use <a href=\"https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html\" > numpy.mean(a,axis=None) </a> : If you set <code>axis = 0</code>, you take the mean for each column. If you set <code>axis = 1</code>, you take the mean for each row. Remember that each row is a word vector, and the number of columns are the number of dimensions in a word vector. </li>\n <li>Use <a href=\"https://docs.scipy.org/doc/numpy/reference/generated/numpy.cov.html\" > numpy.cov(m, rowvar=True) </a>. This calculates the covariance matrix. By default <code>rowvar</code> is <code>True</code>. From the documentation: \"If rowvar is True (default), then each row represents a variable, with observations in the columns.\" In our case, each row is a word vector observation, and each column is a feature (variable). </li>\n <li>Use <a href=\"https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigh.html\" > numpy.linalg.eigh(a, UPLO='L') </a> </li>\n <li>Use <a href=\"https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html\" > numpy.argsort </a> sorts the values in an array from smallest to largest, then returns the indices from this sort. </li>\n <li>In order to reverse the order of a list, you can use: <code>x[::-1]</code>.</li>\n <li>To apply the sorted indices to eigenvalues, you can use this format <code>x[indices_sorted]</code>.</li>\n <li>When applying the sorted indices to eigen vectors, note that each column represents an eigenvector. In order to preserve the rows but sort on the columns, you can use this format <code>x[:,indices_sorted]</code></li>\n <li>To transform the data using a subset of the most relevant principle components, take the matrix multiplication of the eigenvectors with the original data. </li>\n <li>The data is of shape <code>(n_observations, n_features)</code>. </li>\n <li>The subset of eigenvectors are in a matrix of shape <code>(n_features, n_components)</code>.</li>\n <li>To multiply these together, take the transposes of both the eigenvectors <code>(n_components, n_features)</code> and the data (n_features, n_observations).</li>\n <li>The product of these two has dimensions <code>(n_components,n_observations)</code>. Take its transpose to get the shape <code>(n_observations, n_components)</code>.</li>\n</ul>\n</p>",
"_____no_output_____"
]
],
[
[
"# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef compute_pca(X, n_components=2):\n \"\"\"\n Input:\n X: of dimension (m,n) where each row corresponds to a word vector\n n_components: Number of components you want to keep.\n Output:\n X_reduced: data transformed in 2 dims/columns + regenerated original data\n \"\"\"\n\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n # mean center the data\n X_demeaned = X - np.mean(X, axis = 0)\n\n # calculate the covariance matrix\n covariance_matrix = np.cov(X_demeaned, rowvar = False)\n\n # calculate eigenvectors & eigenvalues of the covariance matrix\n eigen_vals, eigen_vecs = np.linalg.eigh(covariance_matrix, UPLO = 'L')\n\n # sort eigenvalue in increasing order (get the indices from the sort)\n idx_sorted = np.argsort(eigen_vals)\n \n # reverse the order so that it's from highest to lowest.\n idx_sorted_decreasing = idx_sorted[::-1]\n\n # sort the eigen values by idx_sorted_decreasing\n eigen_vals_sorted = eigen_vals[idx_sorted_decreasing]\n\n # sort eigenvectors using the idx_sorted_decreasing indices\n eigen_vecs_sorted = eigen_vecs[:, idx_sorted_decreasing]\n\n # select the first n eigenvectors (n is desired dimension\n # of rescaled data array, or dims_rescaled_data)\n eigen_vecs_subset = eigen_vecs_sorted[:, 0:n_components]\n\n # transform the data by multiplying the transpose of the eigenvectors \n # with the transpose of the de-meaned data\n # Then take the transpose of that product.\n X_reduced = np.transpose(np.dot(eigen_vecs_subset.T, X_demeaned.T))\n\n ### END CODE HERE ###\n\n return X_reduced\n",
"_____no_output_____"
],
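[
"# Optional cross-check (added; assumes scikit-learn is available): compare compute_pca\n# with sklearn's PCA on a small random matrix. Principal components are only defined up\n# to sign, so the comparison uses absolute values.\nfrom sklearn.decomposition import PCA as SkPCA\nnp.random.seed(1)\nX_check = np.random.rand(3, 10)\nours = compute_pca(X_check, n_components=2)\ntheirs = SkPCA(n_components=2).fit_transform(X_check)\nprint(np.allclose(np.abs(ours), np.abs(theirs)))",
"_____no_output_____"
],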
[
"# Testing your function\nnp.random.seed(1)\nX = np.random.rand(3, 10)\nX_reduced = compute_pca(X, n_components=2)\nprint(\"Your original matrix was \" + str(X.shape) + \" and it became:\")\nprint(X_reduced)",
"Your original matrix was (3, 10) and it became:\n[[ 0.43437323 0.49820384]\n [ 0.42077249 -0.50351448]\n [-0.85514571 0.00531064]]\n"
]
],
[
[
"**Expected Output:**\n\nYour original matrix was: (3,10) and it became:\n\n<table>\n <tr>\n <td>\n 0.43437323\n </td>\n <td>\n 0.49820384\n </td>\n </tr>\n <tr>\n <td>\n 0.42077249\n </td>\n <td>\n -0.50351448\n </td>\n </tr>\n <tr>\n <td>\n -0.85514571\n </td>\n <td>\n 0.00531064\n </td>\n </tr>\n</table>\n\nNow you will use your pca function to plot a few words we have chosen for you.\nYou will see that similar words tend to be clustered near each other.\nSometimes, even antonyms tend to be clustered near each other. Antonyms\ndescribe the same thing but just tend to be on the other end of the scale\nThey are usually found in the same location of a sentence,\nhave the same parts of speech, and thus when\nlearning the word vectors, you end up getting similar weights. In the next week\nwe will go over how you learn them, but for now let's just enjoy using them.\n\n**Instructions:** Run the cell below.",
"_____no_output_____"
]
],
[
[
"words = ['oil', 'gas', 'happy', 'sad', 'city', 'town',\n 'village', 'country', 'continent', 'petroleum', 'joyful']\n\n# given a list of words and the embeddings, it returns a matrix with all the embeddings\nX = get_vectors(word_embeddings, words)\n\nprint('You have 11 words each of 300 dimensions thus X.shape is:', X.shape)",
"You have 11 words each of 300 dimensions thus X.shape is: (11, 300)\n"
],
[
"# We have done the plotting for you. Just run this cell.\nresult = compute_pca(X, 2)\nplt.scatter(result[:, 0], result[:, 1])\nfor i, word in enumerate(words):\n plt.annotate(word, xy=(result[i, 0] - 0.05, result[i, 1] + 0.1))\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"**What do you notice?**\n\nThe word vectors for 'gas', 'oil' and 'petroleum' appear related to each other,\nbecause their vectors are close to each other. Similarly, 'sad', 'joyful'\nand 'happy' all express emotions, and are also near each other.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
ec8fdfb5519bb1de1be350acfb7ce738f5d9215b | 14,940 | ipynb | Jupyter Notebook | legacy/2_GAIN-interpolation-halted.ipynb | ataraxno/weighing_dev | d4ea91645435bef2656d6ed3235888ae90d0ad59 | [
"Apache-2.0"
] | null | null | null | legacy/2_GAIN-interpolation-halted.ipynb | ataraxno/weighing_dev | d4ea91645435bef2656d6ed3235888ae90d0ad59 | [
"Apache-2.0"
] | null | null | null | legacy/2_GAIN-interpolation-halted.ipynb | ataraxno/weighing_dev | d4ea91645435bef2656d6ed3235888ae90d0ad59 | [
"Apache-2.0"
] | null | null | null | 33.126386 | 86 | 0.494779 | [
[
[
"## Package loading",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nfrom tensorflow.keras import *\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom GAIN.utils import normalization, renormalization, rounding\nfrom GAIN.utils import xavier_init\nfrom GAIN.utils import binary_sampler, uniform_sampler, sample_batch_index",
"_____no_output_____"
],
[
"# def gain (data_x, gain_parameters):\n# '''Impute missing values in data_x\n\n# Args:\n# - data_x: original data with missing values\n# - gain_parameters: GAIN network parameters:\n# - batch_size: Batch size\n# - hint_rate: Hint rate\n# - alpha: Hyperparameter\n# - iterations: Iterations\n\n# Returns:\n# - imputed_data: imputed data\n# '''\n# # Define mask matrix\n# data_m = 1-np.isnan(data_x)\n\n# # System parameters\n# batch_size = 128\n# hint_rate = 0.9\n# alpha = 100\n# iterations = 10000\n\n# # Other parameters\n# no, dim = data_x.shape\n\n# # Hidden state dimensions\n# h_dim = int(dim)\n\n# # Normalization\n# norm_data, norm_parameters = normalization(data_x)\n# norm_data_x = np.nan_to_num(norm_data, 0)\n\n# ## GAIN architecture \n# # Input placeholders\n# # Data vector\n# X = tf.placeholder(tf.float32, shape = [None, dim])\n# # Mask vector \n# M = tf.placeholder(tf.float32, shape = [None, dim])\n# # Hint vector\n# H = tf.placeholder(tf.float32, shape = [None, dim])\n\n# # Discriminator variables\n# D_W1 = tf.Variable(xavier_init([dim*2, h_dim])) # Data + Hint as inputs\n# D_b1 = tf.Variable(tf.zeros(shape = [h_dim]))\n\n# D_W2 = tf.Variable(xavier_init([h_dim, h_dim]))\n# D_b2 = tf.Variable(tf.zeros(shape = [h_dim]))\n\n# D_W3 = tf.Variable(xavier_init([h_dim, dim]))\n# D_b3 = tf.Variable(tf.zeros(shape = [dim])) # Multi-variate outputs\n\n# theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]\n\n# #Generator variables\n# # Data + Mask as inputs (Random noise is in missing components)\n# G_W1 = tf.Variable(xavier_init([dim*2, h_dim])) \n# G_b1 = tf.Variable(tf.zeros(shape = [h_dim]))\n\n# G_W2 = tf.Variable(xavier_init([h_dim, h_dim]))\n# G_b2 = tf.Variable(tf.zeros(shape = [h_dim]))\n\n# G_W3 = tf.Variable(xavier_init([h_dim, dim]))\n# G_b3 = tf.Variable(tf.zeros(shape = [dim]))\n\n# theta_G = [G_W1, G_W2, G_W3, G_b1, G_b2, G_b3]\n \n# ## GAIN functions\n# # Generator\n# def generator(x,m):\n# # Concatenate Mask and Data\n# inputs = tf.concat(values = [x, m], axis = 1) \n# G_h1 = tf.nn.relu(tf.matmul(inputs, G_W1) + G_b1)\n# G_h2 = tf.nn.relu(tf.matmul(G_h1, G_W2) + G_b2) \n# # MinMax normalized output\n# G_prob = tf.nn.sigmoid(tf.matmul(G_h2, G_W3) + G_b3) \n# return G_prob\n \n# # Discriminator\n# def discriminator(x, h):\n# # Concatenate Data and Hint\n# inputs = tf.concat(values = [x, h], axis = 1) \n# D_h1 = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1) \n# D_h2 = tf.nn.relu(tf.matmul(D_h1, D_W2) + D_b2)\n# D_logit = tf.matmul(D_h2, D_W3) + D_b3\n# D_prob = tf.nn.sigmoid(D_logit)\n# return D_prob\n \n# ## GAIN structure\n# # Generator\n# G_sample = generator(X, M)\n\n# # Combine with observed data\n# Hat_X = X * M + G_sample * (1-M)\n\n# # Discriminator\n# D_prob = discriminator(Hat_X, H)\n\n# ## GAIN loss\n# D_loss_temp = -tf.reduce_mean(M * tf.log(D_prob + 1e-8) \\\n# + (1-M) * tf.log(1. 
- D_prob + 1e-8)) \n\n# G_loss_temp = -tf.reduce_mean((1-M) * tf.log(D_prob + 1e-8))\n\n# MSE_loss = \\\n# tf.reduce_mean((M * X - M * G_sample)**2) / tf.reduce_mean(M)\n\n# D_loss = D_loss_temp\n# G_loss = G_loss_temp + alpha * MSE_loss \n\n# ## GAIN solver\n# D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)\n# G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)\n\n# ## Iterations\n# sess = tf.Session()\n# sess.run(tf.global_variables_initializer())\n \n# # Start Iterations\n# for it in tqdm(range(iterations)): \n \n# # Sample batch\n# batch_idx = sample_batch_index(no, batch_size)\n# X_mb = norm_data_x[batch_idx, :] \n# M_mb = data_m[batch_idx, :] \n# # Sample random vectors \n# Z_mb = uniform_sampler(0, 0.01, batch_size, dim) \n# # Sample hint vectors\n# H_mb_temp = binary_sampler(hint_rate, batch_size, dim)\n# H_mb = M_mb * H_mb_temp\n\n# # Combine random vectors with observed vectors\n# X_mb = M_mb * X_mb + (1-M_mb) * Z_mb \n\n# _, D_loss_curr = sess.run([D_solver, D_loss_temp], \n# feed_dict = {M: M_mb, X: X_mb, H: H_mb})\n# _, G_loss_curr, MSE_loss_curr = \\\n# sess.run([G_solver, G_loss_temp, MSE_loss],\n# feed_dict = {X: X_mb, M: M_mb, H: H_mb})\n \n# ## Return imputed data \n# Z_mb = uniform_sampler(0, 0.01, no, dim) \n# M_mb = data_m\n# X_mb = norm_data_x \n# X_mb = M_mb * X_mb + (1-M_mb) * Z_mb \n\n# imputed_data = sess.run([G_sample], feed_dict = {X: X_mb, M: M_mb})[0]\n\n# imputed_data = data_m * norm_data_x + (1-data_m) * imputed_data\n\n# # Renormalization\n# imputed_data = renormalization(imputed_data, norm_parameters) \n\n# # Rounding\n# imputed_data = rounding(imputed_data, data_x) \n\n# return imputed_data",
"_____no_output_____"
]
],
[
[
"## Parameters",
"_____no_output_____"
]
],
[
[
"BATCH_SIZE = 128\nHINT_RATE = 0.9\nALPHA = 100\nITERATIONS = 10000",
"_____no_output_____"
]
],
[
[
"## Data loading",
"_____no_output_____"
]
],
[
[
"data_df = pd.read_csv('./results/SW2_greenhouse.csv', index_col='Unnamed: 0')\ndata_df = data_df.astype('float32')\ndata_x = data_df.values\ndata_m = 1-np.isnan(data_x) # mask\nnum_data, dim = data_x.shape\nhidden_dim = int(dim)",
"_____no_output_____"
]
],
[
[
"## Normalization",
"_____no_output_____"
]
],
[
[
"norm_data, norm_parameters = normalization(data_x)\nnorm_data_x = np.nan_to_num(norm_data, 0)",
"_____no_output_____"
],
[
"class Generator(Model):\n def __init__(self, num_nodes, output_size):\n super(Generator, self).__init__()\n self.n = num_nodes\n self.o = output_size\n \n self.dense1 = layers.Dense(self.n)\n self.norm1 = layers.BatchNormalization()\n self.act1 = layers.Activation(tf.nn.leaky_relu)\n\n self.dense2 = layers.Dense(self.n)\n self.norm2 = layers.BatchNormalization()\n self.act2 = layers.Activation(tf.nn.leaky_relu)\n\n self.dense3 = layers.Dense(self.o)\n self.norm3 = layers.BatchNormalization()\n self.act3 = layers.Activation(tf.nn.sigmoid)\n \n def call(self, inp, TRAINING=True):\n \n inp = self.act1(self.norm1(self.dense1(inp), training=TRAINING))\n inp = self.act2(self.norm2(self.dense2(inp), training=TRAINING))\n inp = self.act3(self.norm3(self.dense3(inp), training=TRAINING))\n \n return inp",
"_____no_output_____"
],
[
"class Discrinimator(Model):\n def __init__(self, num_nodes, logit_size):\n super(Discrinimator, self).__init__()\n self.n = num_nodes\n self.l = logit_size\n \n self.dense1 = layers.Dense(self.n)\n self.norm1 = layers.BatchNormalization()\n self.act1 = layers.Activation(tf.nn.leaky_relu)\n\n self.dense2 = layers.Dense(self.n)\n self.norm2 = layers.BatchNormalization()\n self.act2 = layers.Activation(tf.nn.leaky_relu)\n\n self.dense3 = layers.Dense(self.l)\n self.norm3 = layers.BatchNormalization()\n self.act3 = layers.Activation(tf.nn.sigmoid)\n \n def call(self, inp, TRAINING=True):\n \n inp = self.act1(self.norm1(self.dense1(inp), training=TRAINING))\n inp = self.act2(self.norm2(self.dense2(inp), training=TRAINING))\n inp = self.act3(self.norm3(self.dense3(inp), training=TRAINING))\n \n return inp",
"_____no_output_____"
],
[
"generator = Generator(256, dim)\ndiscriminator = Discrinimator(256, dim)",
"_____no_output_____"
],
[
"generator(data_x)",
"_____no_output_____"
],
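[
"# Added sketch (not from the original notebook): one GAIN training step written in\n# TF2 eager style, since the cell below was left halted in TF1 session style.\n# It reuses the Generator/Discriminator classes, the samplers from GAIN.utils, and the\n# hyperparameters defined above. Fresh model instances are built here because they take\n# the concatenated [data, mask] / [imputed data, hint] inputs (2 * dim features),\n# unlike the call in the previous cell.\ngain_gen = Generator(256, dim)\ngain_disc = Discriminator(256, dim)\ngen_opt = tf.keras.optimizers.Adam()\ndisc_opt = tf.keras.optimizers.Adam()\n\ndef gain_train_step(X_mb, M_mb, H_mb):\n    X_mb = tf.cast(X_mb, tf.float32)\n    M_mb = tf.cast(M_mb, tf.float32)\n    H_mb = tf.cast(H_mb, tf.float32)\n    with tf.GradientTape(persistent=True) as tape:\n        # generator imputes every entry from the observed data and the mask\n        G_sample = gain_gen(tf.concat([X_mb, M_mb], axis=1))\n        # keep observed entries, fill missing ones with the generator output\n        Hat_X = X_mb * M_mb + G_sample * (1. - M_mb)\n        # discriminator tries to recover the mask, helped by the hint vector\n        D_prob = gain_disc(tf.concat([Hat_X, H_mb], axis=1))\n        D_loss = -tf.reduce_mean(M_mb * tf.math.log(D_prob + 1e-8) + (1. - M_mb) * tf.math.log(1. - D_prob + 1e-8))\n        G_loss_adv = -tf.reduce_mean((1. - M_mb) * tf.math.log(D_prob + 1e-8))\n        mse = tf.reduce_mean((M_mb * X_mb - M_mb * G_sample)**2) / tf.reduce_mean(M_mb)\n        G_loss = G_loss_adv + ALPHA * mse\n    disc_opt.apply_gradients(zip(tape.gradient(D_loss, gain_disc.trainable_variables), gain_disc.trainable_variables))\n    gen_opt.apply_gradients(zip(tape.gradient(G_loss, gain_gen.trainable_variables), gain_gen.trainable_variables))\n    del tape\n    return D_loss, G_loss\n\n# one illustrative step on a random batch, mirroring the sampling in the halted code\nbatch_idx = sample_batch_index(num_data, BATCH_SIZE)\nX_mb = norm_data_x[batch_idx, :]\nM_mb = data_m[batch_idx, :]\nX_mb = M_mb * X_mb + (1 - M_mb) * uniform_sampler(0, 0.01, BATCH_SIZE, dim)\nH_mb = M_mb * binary_sampler(HINT_RATE, BATCH_SIZE, dim)\nd_loss, g_loss = gain_train_step(X_mb, M_mb, H_mb)",
"_____no_output_____"
],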
[
"def gain (data_x, gain_parameters):\n \n ## GAIN structure\n # Generator\n G_sample = generator(X, M)\n\n # Combine with observed data\n Hat_X = X * M + G_sample * (1-M)\n\n # Discriminator\n D_prob = discriminator(Hat_X, H)\n\n ## GAIN loss\n D_loss_temp = -tf.reduce_mean(M * tf.log(D_prob + 1e-8) \\\n + (1-M) * tf.log(1. - D_prob + 1e-8)) \n\n G_loss_temp = -tf.reduce_mean((1-M) * tf.log(D_prob + 1e-8))\n\n MSE_loss = \\\n tf.reduce_mean((M * X - M * G_sample)**2) / tf.reduce_mean(M)\n\n D_loss = D_loss_temp\n G_loss = G_loss_temp + alpha * MSE_loss \n\n ## GAIN solver\n D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)\n G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)\n\n ## Iterations\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n \n # Start Iterations\n for it in tqdm(range(iterations)): \n \n # Sample batch\n batch_idx = sample_batch_index(no, batch_size)\n X_mb = norm_data_x[batch_idx, :] \n M_mb = data_m[batch_idx, :] \n # Sample random vectors \n Z_mb = uniform_sampler(0, 0.01, batch_size, dim) \n # Sample hint vectors\n H_mb_temp = binary_sampler(hint_rate, batch_size, dim)\n H_mb = M_mb * H_mb_temp\n\n # Combine random vectors with observed vectors\n X_mb = M_mb * X_mb + (1-M_mb) * Z_mb \n\n _, D_loss_curr = sess.run([D_solver, D_loss_temp], \n feed_dict = {M: M_mb, X: X_mb, H: H_mb})\n _, G_loss_curr, MSE_loss_curr = \\\n sess.run([G_solver, G_loss_temp, MSE_loss],\n feed_dict = {X: X_mb, M: M_mb, H: H_mb})\n \n ## Return imputed data \n Z_mb = uniform_sampler(0, 0.01, no, dim) \n M_mb = data_m\n X_mb = norm_data_x \n X_mb = M_mb * X_mb + (1-M_mb) * Z_mb \n\n imputed_data = sess.run([G_sample], feed_dict = {X: X_mb, M: M_mb})[0]\n\n imputed_data = data_m * norm_data_x + (1-data_m) * imputed_data\n\n # Renormalization\n imputed_data = renormalization(imputed_data, norm_parameters) \n\n # Rounding\n imputed_data = rounding(imputed_data, data_x) \n\n return imputed_data",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8fe14837878861eedf753c4c1377adc8029297 | 42,273 | ipynb | Jupyter Notebook | Machine_Learning/loss function.ipynb | linzexinmasterchief/JupyterNotebooks | d33d3ed7335e75a0e8596b16a0f3669400a7c287 | [
"MIT"
] | null | null | null | Machine_Learning/loss function.ipynb | linzexinmasterchief/JupyterNotebooks | d33d3ed7335e75a0e8596b16a0f3669400a7c287 | [
"MIT"
] | null | null | null | Machine_Learning/loss function.ipynb | linzexinmasterchief/JupyterNotebooks | d33d3ed7335e75a0e8596b16a0f3669400a7c287 | [
"MIT"
] | null | null | null | 106.481108 | 11,988 | 0.878551 | [
[
[
"import numpy\nimport random\nimport math\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"def original_function(x):\n return (1/2*x + 3)",
"_____no_output_____"
],
[
"X = [i for i in range(10)]",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"data_theory = [original_function(i) for i in range(len(X))]",
"_____no_output_____"
],
[
"data = [original_function(i) + random.randint(-10, 10) / 20 for i in range(len(X))]",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"plt.scatter(X, data)\n\nplt.scatter(X, data_theory)\n\nplt.plot(X, data_theory)",
"_____no_output_____"
],
[
"def loss_calculate(y_pred, y_actual):\n loss = 0\n for i in range(len(y_pred)):\n loss += (y_pred[i] - y_actual[i])\n return loss",
"_____no_output_____"
],
[
"loss = loss_calculate(data, data_theory)\npre_loss = loss\nd_loss = 10000",
"_____no_output_____"
],
[
"# parameter to train on\na = 10",
"_____no_output_____"
],
[
"def sigmoid(x):\n return 1 / (1 + math.e**(-x)) - 0.5",
"_____no_output_____"
],
[
"loss_rec = []",
"_____no_output_____"
],
[
"while loss**2 > 0.001:\n data_pred = [a*x + 3 for x in range(len(X))]\n loss = loss_calculate(data_pred, data_theory)\n d_loss = pre_loss - loss\n pre_loss = loss\n \n loss_rec.append(loss)\n \n # ajdust parameter\n a -= loss * 0.01\n print(loss, \"|\", d_loss)\n #print(data_pred)\n #print(data)",
"427.5 | -427.85\n235.12499999999997 | 192.37500000000003\n129.31874999999997 | 105.80625\n71.1253125 | 58.19343749999996\n39.11892187500001 | 32.006390624999995\n21.51540703125 | 17.603514843750013\n11.833473867187495 | 9.681933164062505\n6.508410626953125 | 5.325063240234369\n3.5796258448242204 | 2.928784782128905\n1.9687942146533222 | 1.6108316301708983\n1.082836818059325 | 0.8859573965939971\n0.5955602499326274 | 0.48727656812669773\n0.3275581374629475 | 0.26800211246967987\n0.1801569756046204 | 0.14740116185832708\n0.09908633658254207 | 0.08107063902207834\n0.05449748512039809 | 0.044588851462143975\n0.029973616816219018 | 0.024523868304179075\n"
],
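[
"# Added comparison sketch (not in the original notebook): the same one-parameter fit\n# using mean squared error and its analytic gradient d(MSE)/da = 2/N * sum((a*x + 3 - y) * x).\n# The learning rate below is an illustrative choice.\na_mse = 10\nlr = 0.001\nmse_rec = []\nfor _ in range(200):\n    preds = [a_mse * x + 3 for x in X]\n    errors = [p - y for p, y in zip(preds, data_theory)]\n    mse = sum(e**2 for e in errors) / len(X)\n    grad = 2 * sum(e * x for e, x in zip(errors, X)) / len(X)\n    a_mse -= lr * grad\n    mse_rec.append(mse)\nprint(a_mse)\n\nplt.plot([i for i in range(len(mse_rec))], mse_rec)\nplt.xlabel('Iterations')\nplt.ylabel('MSE')",
"_____no_output_____"
],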
[
"a",
"_____no_output_____"
],
[
"data_pred = [a*x + 3 for x in range(len(X))]",
"_____no_output_____"
],
[
"data_pred",
"_____no_output_____"
],
[
"plt.plot(X, data_pred)\n#plt.plot(X, data_theory)\nplt.scatter(X, data)",
"_____no_output_____"
],
[
"plt.plot([i for i in range(len(loss_rec))], loss_rec)\nplt.xlabel(\"Iterations\")\nplt.ylabel(\"loss\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8fe4d68d6fa86f146291ae2c1479b93c935ec6 | 10,314 | ipynb | Jupyter Notebook | exp/tvm_jupyter/nnvm/deploy_model_on_mali_gpu.ipynb | gopala-kr/DL-on-Silicon | 33c38cf4814291e3937da7c26f9af014a95f313a | [
"MIT"
] | 25 | 2018-11-20T00:09:46.000Z | 2022-01-16T15:15:06.000Z | exp/tvm_jupyter/nnvm/deploy_model_on_mali_gpu.ipynb | jmuuu/DL-on-Silicon | a27b1ff5db10bd1d4588b5f41734e383f76966c6 | [
"MIT"
] | null | null | null | exp/tvm_jupyter/nnvm/deploy_model_on_mali_gpu.ipynb | jmuuu/DL-on-Silicon | a27b1ff5db10bd1d4588b5f41734e383f76966c6 | [
"MIT"
] | 6 | 2020-02-22T12:45:10.000Z | 2020-12-02T12:40:42.000Z | 51.313433 | 1,276 | 0.608687 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n\nDeploy the Pretrained Model on ARM Mali GPU\n===========================================\n**Author**: `Lianmin Zheng <https://lmzheng.net/>`_, `Ziheng Jiang <https://ziheng.org/>`_\n\nThis is an example of using NNVM to compile a ResNet model and\ndeploy it on Firefly-RK3399 with ARM Mali GPU. We will use the\nMali-T860 MP4 GPU on this board to accelerate the inference.\n\n",
"_____no_output_____"
]
],
[
[
"import tvm\nimport nnvm.compiler\nimport nnvm.testing\nfrom tvm import rpc\nfrom tvm.contrib import util, graph_runtime as runtime",
"_____no_output_____"
]
],
[
[
"Build TVM Runtime on Device\n---------------------------\n\nThe first step is to build tvm runtime on the remote device.\n\n<div class=\"alert alert-info\"><h4>Note</h4><p>All instructions in both this section and next section should be\n executed on the target device, e.g. Rk3399. And we assume it\n has Linux running.</p></div>\n\nSince we do compilation on local machine, the remote device is only used\nfor running the generated code. We only need to build tvm runtime on\nthe remote device. Make sure you have opencl driver in your board.\nYou can refer to `tutorial <https://gist.github.com/mli/585aed2cec0b5178b1a510f9f236afa2>`_\nto setup OS and opencl driver for rk3399.\n\n.. code-block:: bash\n\n git clone --recursive https://github.com/dmlc/tvm\n cd tvm\n cp cmake/config.cmake .\n sed -i \"s/USE_OPENCL OFF/USE_OPENCL ON/\" config.cmake \n make runtime -j4\n\nAfter building runtime successfully, we need to set environment varibles\nin :code:`~/.bashrc` file. We can edit :code:`~/.bashrc`\nusing :code:`vi ~/.bashrc` and add the line below (Assuming your TVM \ndirectory is in :code:`~/tvm`):\n\n.. code-block:: bash\n\n export PYTHONPATH=$PYTHONPATH:~/tvm/python\n\nTo update the environment variables, execute :code:`source ~/.bashrc`.\n\n",
"_____no_output_____"
],
[
"Set Up RPC Server on Device\n---------------------------\nTo start an RPC server, run the following command on your remote device\n(Which is RK3399 in our example).\n\n .. code-block:: bash\n\n python -m tvm.exec.rpc_server --host 0.0.0.0 --port=9090\n\nIf you see the line below, it means the RPC server started\nsuccessfully on your device.\n\n .. code-block:: bash\n\n INFO:root:RPCServer: bind to 0.0.0.0:9090\n\n\n",
"_____no_output_____"
],
[
"Prepare the Pre-trained Model\n-----------------------------\nBack to the host machine, which should have a full TVM installed (with LLVM).\n\nWe will use pre-trained model from\n`MXNet Gluon model zoo <https://mxnet.incubator.apache.org/api/python/gluon/model_zoo.html>`_.\nYou can found more details about this part at tutorial `tutorial-from-mxnet`.\n\n",
"_____no_output_____"
]
],
[
[
"from mxnet.gluon.model_zoo.vision import get_model\nfrom mxnet.gluon.utils import download\nfrom PIL import Image\nimport numpy as np\n\n# only one line to get the model\nblock = get_model('resnet18_v1', pretrained=True)",
"_____no_output_____"
]
],
[
[
"In order to test our model, here we download an image of cat and\ntransform its format.\n\n",
"_____no_output_____"
]
],
[
[
"img_name = 'cat.png'\ndownload('https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true', img_name)\nimage = Image.open(img_name).resize((224, 224))\n\ndef transform_image(image):\n image = np.array(image) - np.array([123., 117., 104.])\n image /= np.array([58.395, 57.12, 57.375])\n image = image.transpose((2, 0, 1))\n image = image[np.newaxis, :]\n return image\n\nx = transform_image(image)",
"_____no_output_____"
]
],
[
[
"synset is used to transform the label from number of ImageNet class to\nthe word human can understand.\n\n",
"_____no_output_____"
]
],
[
[
"synset_url = ''.join(['https://gist.githubusercontent.com/zhreshold/',\n '4d0b62f3d01426887599d4f7ede23ee5/raw/',\n '596b27d23537e5a1b5751d2b0481ef172f58b539/',\n 'imagenet1000_clsid_to_human.txt'])\n\nsynset_name = 'synset.txt'\ndownload(synset_url, synset_name)\nwith open(synset_name) as f:\n synset = eval(f.read())",
"_____no_output_____"
]
],
[
[
"Now we would like to port the Gluon model to a portable computational graph.\nIt's as easy as several lines.\n\n",
"_____no_output_____"
]
],
[
[
"# We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon\nnet, params = nnvm.frontend.from_mxnet(block)\n# we want a probability so add a softmax operator\nnet = nnvm.sym.softmax(net)",
"_____no_output_____"
]
],
[
[
"Here are some basic data workload configurations.\n\n",
"_____no_output_____"
]
],
[
[
"batch_size = 1\nnum_classes = 1000\nimage_shape = (3, 224, 224)\ndata_shape = (batch_size,) + image_shape",
"_____no_output_____"
]
],
[
[
"Compile The Graph\n-----------------\nTo compile the graph, we call the :any:`nnvm.compiler.build` function\nwith the graph configuration and parameters. As we use OpenCL for\nGPU computing, the tvm will generate both OpenCL kernel code and ARM\nCPU host code. The CPU host code is used for calling OpenCL kernels.\nIn order to generate correct CPU code, we need to specify the target\ntriplet for host ARM device by setting the parameter :code:`target_host`.\n\n",
"_____no_output_____"
],
[
"If we run the example on our x86 server for demonstration, we can simply\nset it as :code:`llvm`. If running it on the RK3399, we need to\nspecify its instruction set. Set :code:`local_demo` to False if you\nwant to run this tutorial with a real device.\n\n",
"_____no_output_____"
]
],
[
[
"local_demo = True\n\nif local_demo:\n target_host = \"llvm\"\n target = \"llvm\"\nelse:\n # Here is the setting for my rk3399 board\n # If you don't use rk3399, you can query your target triple by \n # execute `gcc -v` on your board.\n target_host = \"llvm -target=aarch64-linux-gnu\"\n\n # set target as `tvm.target.mali` instead of 'opencl' to enable\n # optimization for mali\n target = tvm.target.mali()\n\nwith nnvm.compiler.build_config(opt_level=3):\n graph, lib, params = nnvm.compiler.build(net, target=target,\n shape={\"data\": data_shape}, params=params, target_host=target_host)\n\n# After `nnvm.compiler.build`, you will get three return values: graph,\n# library and the new parameter, since we do some optimization that will\n# change the parameters but keep the result of model as the same.\n\n# Save the library at local temporary directory.\ntmp = util.tempdir()\nlib_fname = tmp.relpath('net.tar')\nlib.export_library(lib_fname)",
"_____no_output_____"
]
],
[
[
"Deploy the Model Remotely by RPC\n--------------------------------\nWith RPC, you can deploy the model remotely from your host machine\nto the remote device.\n\n",
"_____no_output_____"
]
],
[
[
"# obtain an RPC session from remote device.\nif local_demo:\n remote = rpc.LocalSession()\nelse:\n # The following is my environment, change this to the IP address of your target device\n host = '10.77.1.145'\n port = 9090\n remote = rpc.connect(host, port)\n\n# upload the library to remote device and load it\nremote.upload(lib_fname)\nrlib = remote.load_module('net.tar')\n\n# create the remote runtime module\nctx = remote.cl(0) if not local_demo else remote.cpu(0)\nmodule = runtime.create(graph, rlib, ctx)\n# set parameter (upload params to the remote device. This may take a while)\nmodule.set_input(**params)\n# set input data\nmodule.set_input('data', tvm.nd.array(x.astype('float32')))\n# run\nmodule.run()\n# get output\nout = module.get_output(0)\n# get top1 result\ntop1 = np.argmax(out.asnumpy())\nprint('TVM prediction top-1: {}'.format(synset[top1]))",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec8ff3e30adfc23cbf28a8b4451ce804d1beae5f | 31,064 | ipynb | Jupyter Notebook | ipython/Tools/Plot xyz.ipynb | MechGen3SO/ARC | 69b877bd2e6063bc51410de6c65129a476a97bae | [
"MIT"
] | 30 | 2019-02-02T01:45:40.000Z | 2022-03-20T13:03:27.000Z | ipython/Tools/Plot xyz.ipynb | MechGen3SO/ARC | 69b877bd2e6063bc51410de6c65129a476a97bae | [
"MIT"
] | 434 | 2018-12-24T18:00:07.000Z | 2022-03-13T15:55:26.000Z | ipython/Tools/Plot xyz.ipynb | MechGen3SO/ARC | 69b877bd2e6063bc51410de6c65129a476a97bae | [
"MIT"
] | 19 | 2019-01-04T19:05:45.000Z | 2022-03-20T13:03:28.000Z | 98.303797 | 12,940 | 0.729848 | [
[
[
"# ARC Tools\n### Plot 3D coordinates",
"_____no_output_____"
],
[
"#### input parameters:",
"_____no_output_____"
]
],
[
[
"xyz = \"\"\"\nO 1.17464110 -0.15309781 0.00000000\nN 0.06304988 0.35149648 0.00000000\nC -1.12708952 -0.11333971 0.00000000\nH -1.93800144 0.60171738 0.00000000\nH -1.29769464 -1.18742971 0.00000000\n\"\"\"",
"_____no_output_____"
],
[
"from arc.plotter import show_sticks, plot_3d_mol_as_scatter\nfrom arc.species.converter import check_xyz_dict, molecules_from_xyz\nfrom arc.species.converter import xyz_to_pybel_mol, pybel_to_inchi\nfrom IPython.display import display\n%matplotlib inline\n\nxyz = '/home/alongd/Code/ARC/ipython/Tools/ts1016.log'\n\nxyz = check_xyz_dict(xyz)",
"_____no_output_____"
],
[
"path = '/home/alongd/Code/runs/T3/35/iteration_0/ARC/calcs/Species/C2H4_0/freq_a6950/output.out'",
"_____no_output_____"
],
[
"show_sticks(xyz)",
"_____no_output_____"
],
[
"# Analyze graph\n\ns_mol, b_mol = molecules_from_xyz(xyz)\nmol = b_mol or s_mol\nprint(mol.to_smiles())\nprint('\\n')\nprint(pybel_to_inchi(xyz_to_pybel_mol(xyz)))\nprint('\\n')\nprint(mol.to_adjacency_list())\nprint('\\n')\ndisplay(mol)",
"[O]N=C\n\n\nInChI=1/CH2NO/c1-2-3/h1H2\n\n\nmultiplicity 2\n1 O u1 p2 c0 {2,S}\n2 N u0 p1 c0 {1,S} {3,D}\n3 C u0 p0 c0 {2,D} {4,S} {5,S}\n4 H u0 p0 c0 {3,S}\n5 H u0 p0 c0 {3,S}\n\n\n\n"
],
[
"show_sticks(xyz)",
"_____no_output_____"
],
[
"plot_3d_mol_as_scatter(xyz)",
"WARNING:pint.util:Could not resolve planks_constant: UndefinedUnitError()\nWARNING:pint.util:Could not resolve plank_constant: UndefinedUnitError()\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8ff42113ab0c10fff2c66f88b35fe81967e439 | 130,016 | ipynb | Jupyter Notebook | 2015/07/offline-to-online-training.ipynb | pschulam/Notebook | 3404ce01a4ebdf23216ff01512a8f84b4f7758aa | [
"MIT"
] | null | null | null | 2015/07/offline-to-online-training.ipynb | pschulam/Notebook | 3404ce01a4ebdf23216ff01512a8f84b4f7758aa | [
"MIT"
] | null | null | null | 2015/07/offline-to-online-training.ipynb | pschulam/Notebook | 3404ce01a4ebdf23216ff01512a8f84b4f7758aa | [
"MIT"
] | null | null | null | 36.429252 | 2,763 | 0.46665 | [
[
[
"# Offline-to-Online Training\n\nOur model is trained generatively---the observed data log-likelihood is maximized using the EM algorithm. However, our goal is to deploy the model in a predictive setting. We want to predict the most likely future trajectory given (1) any baseline information and (2) the noisy marker values observed so far. The focus of this notebook is to understand how we can adjust the parameters of a generative trajectory model in order to improve the performance on the trajectory prediction task.\n\n## Related Work\n\nThe paper by [Raina and Ng (2003)](http://ai.stanford.edu/~rajatr/papers/nips03-hybrid.pdf) describes a hybrid generative/discriminative model. One of the key ideas in this work is the relative importance of random variables in the generative model when applied in a predictive context (i.e. the generative model is used to derive a conditional probability through Bayes rule). On page 3 there is an interesting point: they show that the decision rule for binary classification of UseNet documents can be formulated as a comparison between the sum of log-likelihood terms. They note that if features are extracted from, say, the message title and the message body there are many more log-likelihood terms for the body than there are for the title. The title, however, may be informative for making the decision. The NBC (or more generally any generatively trained classifier) will treat them all equally, however.\n\n## Experimental Setup\n\nThe metric of interest is the mean absolute error aggregated within the usual buckets we've defined---(1,2], (2,4], (4,8], and (8,25]. We'll begin by looking at predictions made after observing one year of data (i.e. we will only train a single online-adapted model). We will compare to two baselines. The first baseline will be the predictions made using the MAP estimate of the subtype under full information (i.e. observing all of the individual's pFVC data) and the second baseline will be the MAP estimate of the subtype under one year of data (i.e. the standard conditional prediction obtained via application of Bayes rule to the generative model).\n\n## Methods\n\nFor each individual $i$, let $y_i$ denote the vector of observed measurements, $t_i$ the measurement times, and $x_i$ the vector of covariates used in the population and subpopulation models. Each individual is associated with a subtype, which we denote using $z_i \\in \\{1, \\ldots, K\\}$. Let $\\Phi_1(t_i)$ denote the population feature matrix, $\\Phi_2(t_i)$ denote the subpopulation feature matrix, and $\\Phi_3(t_i)$ denote the individual-specific long-term effects feature matrix.\n\nIn the generative model, we specify the marginal probability of subtype membership and the conditional probability of observed markers given subtype membership. The marginal probability of subtype membership is modeled using softmax multiclass regression:\n\n$$ p(z_i = k \\mid w_{1:K}) \\propto \\exp \\{ x_i^\\top w_k \\}. $$\n\nThe conditional probability of a marker sequence given subtype membership is\n\n$$ p(y_i \\mid z_i = k, \\beta_{1:K}) = \\mathcal{N} ( m_i(k), \\Sigma_i ), $$\n\nwhere\n\n$$ m_i(k) = \\Phi_1(t_i) \\Lambda x_i + \\Phi_2(t_i) \\beta_k $$\nand\n$$ \\Sigma_i = \\Phi_3(t_i) \\Sigma_b \\Phi_3^\\top(t_i) + K_{\\text{OU}}(t_i) + \\sigma^2 \\mathbf{1}. $$\n\nGiven some observed data $y_i$, the posterior over subtype membership $z_i$ is\n\n$$ p(z_i = k \\mid y_i) \\propto p(z_i = k \\mid w_{1:K}) p(y_i \\mid z_i = k, \\beta_{1:K}). $$",
"_____no_output_____"
],
[
"## Code",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n\nfrom imp import reload",
"_____no_output_____"
],
[
"import sys\nsys.path.append('/Users/pschulam/Git/mypy')",
"_____no_output_____"
],
[
"np.set_printoptions(precision=2)",
"_____no_output_____"
]
],
[
[
"### B-spline Basis\n\nThis basis is **hard-coded** to implement the exact basis functions used to fit the model in the R code.",
"_____no_output_____"
]
],
[
[
"from mypy import bsplines\n\nboundaries = (-1.0, 23.0)\ndegree = 2\nnum_features = 6\nbasis = bsplines.universal_basis(boundaries, degree, num_features)",
"_____no_output_____"
]
],
[
[
"### Kernel Function",
"_____no_output_____"
]
],
[
[
"from mypy.util import as_row, as_col\n\ndef kernel(x1, x2=None, a_const=1.0, a_ou=1.0, l_ou=1.0):\n symmetric = x2 is None\n d = differences(x1, x1) if symmetric else differences(x1, x2)\n K = a_const * np.ones_like(d)\n K += ou_kernel(d, a_ou, l_ou)\n if symmetric:\n K += np.eye(x1.size)\n return K\n\ndef ou_kernel(d, a, l):\n return a * np.exp( - np.abs(d) / l )\n\ndef differences(x1, x2):\n return as_col(x1) - as_row(x2)",
"_____no_output_____"
],
[
"x_test = np.linspace(0, 20, 41)\nX_test = basis.eval(x_test)\nK_test = kernel(x_test, a_const=16.0, a_ou=36.0, l_ou=2.0)",
"_____no_output_____"
],
[
"X_test[:5, :]",
"_____no_output_____"
],
[
"K_test[:5, :5]",
"_____no_output_____"
]
],
[
[
"### Softmax Model",
"_____no_output_____"
]
],
[
[
"import scipy.optimize as opt\n\nfrom mypy.models import softmax\nreload(softmax)",
"_____no_output_____"
]
],
[
[
"### Trajectory Model",
"_____no_output_____"
]
],
[
[
"from scipy.stats import multivariate_normal\n\na_const = 16.0\na_ou = 36.0\nl_ou = 2.0\n\ndef phi1(x):\n return np.ones((x.size, 1))\n\ndef phi2(x):\n return basis.eval(x)\n\ndef gp_posterior(tnew, t, y, kern, **kwargs):\n from numpy import dot\n from scipy.linalg import inv, solve\n \n K11 = kern(tnew, **kwargs)\n K12 = kern(tnew, t, **kwargs)\n K22 = kern(t, **kwargs)\n \n m = dot(K12, solve(K22, y))\n K = K11 - dot(K12, solve(K22, K12.T))\n \n return m, K\n\ndef trajectory_means(t, x, b, B):\n from numpy import dot\n \n P1 = phi1(t)\n P2 = phi2(t)\n \n m1 = dot(P1, dot(b, x)).ravel()\n m2 = dot(B, P2.T)\n \n return m1 + m2\n\ndef trajectory_logl(t, x, y, z, B, b):\n if t.size < 1:\n return 0.0\n \n m = trajectory_means(t, x, b, B)[z]\n S = kernel(t, a_const=a_const, a_ou=a_ou, l_ou=l_ou)\n \n return multivariate_normal.logpdf(y, m, S)",
"_____no_output_____"
]
],
[
[
"### Load Parameters",
"_____no_output_____"
]
],
[
[
"b = np.loadtxt('param/pop.dat')\nB = np.loadtxt('param/subpop.dat')\nW = np.loadtxt('param/marginal.dat')\nW = np.r_[ np.zeros((1, W.shape[1])), W ]",
"_____no_output_____"
],
[
"from scipy.misc import logsumexp\n\ndef model_prior(t, x1, x2, y, b, B, W):\n return softmax.regression_log_proba(x2, W)\n\ndef model_likelihood(t, x1, x2, y, b, B, W):\n k = B.shape[0]\n return np.array([trajectory_logl(t, x1, y, z, B, b) for z in range(k)])\n\ndef model_posterior(t, x1, x2, y, b, B, W):\n prior = model_prior(t, x1, x2, y, b, B, W)\n likel = model_likelihood(t, x1, x2, y, b, B, W)\n lp = prior + likel\n return np.exp(lp - logsumexp(lp))\n\ndef model_evidence(t, x1, x2, y, b, B, W):\n prior = model_prior(t, x1, x2, y, b, B, W)\n likel = model_likelihood(t, x1, x2, y, b, B, W)\n lp = prior + likel\n return logsumexp(lp)",
"_____no_output_____"
]
],
[
[
"### Load Data",
"_____no_output_____"
]
],
[
[
"from copy import deepcopy\n\ndef PatientData(tbl):\n pd = {}\n pd['ptid'] = int(tbl['ptid'].values[0])\n pd['t'] = tbl['years_seen_full'].values.copy()\n pd['y'] = tbl['pfvc'].values.copy()\n pd['x1'] = np.asarray(tbl.loc[:, ['female', 'afram']].drop_duplicates()).ravel()\n pd['x2'] = np.asarray(tbl.loc[:, ['female', 'afram', 'aca', 'scl']].drop_duplicates()).ravel()\n pd['x2'] = np.r_[ 1.0, pd['x2'] ]\n return pd\n\ndef truncated_data(pd, censor_time):\n obs = pd['t'] <= censor_time\n pdc = deepcopy(pd)\n pdc['t'] = pd['t'][obs]\n pdc['y'] = pd['y'][obs]\n return pdc, pd['t'][~obs]\n\ndef eval_prior(pd, b=b, B=B, W=W):\n return model_prior(pd['t'], pd['x1'], pd['x2'], pd['y'], b, B, W)\n\ndef eval_likel(pd, b=b, B=B, W=W):\n return model_likelihood(pd['t'], pd['x1'], pd['x2'], pd['y'], b, B, W)\n\ndef run_inference(pd, b=b, B=B, W=W):\n ll = model_loglik(pd['t'], pd['x1'], pd['x2'], pd['y'], b, B, W)\n posterior = model_posterior(pd['t'], pd['x1'], pd['x2'], pd['y'], b, B, W)\n return ll, posterior",
"_____no_output_____"
],
[
"pfvc = pd.read_csv('data/benchmark_pfvc.csv')\ndata = [PatientData(tbl) for _, tbl in pfvc.groupby('ptid')]",
"_____no_output_____"
],
[
"ll, pst = run_inference(data[9], b, B, W)\nnp.round(pst, 3)",
"_____no_output_____"
]
],
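[
[
"# Added illustration: compare the posterior over subtypes using only the first year\n# of data against the full-information posterior for the same patient. Patient index 9\n# matches the example above; the 1.0-year censor time is the horizon studied below.\npd_1yr, censored_times = truncated_data(data[9], 1.0)\n_, pst_1yr = run_inference(pd_1yr, b, B, W)\nnp.round(pst_1yr, 3), np.round(pst, 3)",
"_____no_output_____"
]
],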
[
[
"### Online Tuning Algorithm",
"_____no_output_____"
],
[
"We're going to tune the posterior predictions of our model at a given time point by adjusting the relative strengths of the likelihood and the prior used to determine the likelihood ratio that determines the posterior. The goal is to fit the *full information posterior* by modifying the *partial information posterior*. For any observed marker sequence $y_i$, we can express the posterior probabilities by specifying the log of the likelihood ratios of each subtype to some *pivot* subtype.\n\n$$ r_{11} = \\log \\frac{p(z = 1)}{p(z = 1)} + \\log \\frac{p(y_i \\mid z = 1)}{p(y_i \\mid z = 1)} $$\n\n$$ r_{21} = \\log \\frac{p(z = 2)}{p(z = 1)} + \\log \\frac{p(y_i \\mid z = 2)}{p(y_i \\mid z = 1)} $$\n\n$$ r_{31} = \\log \\frac{p(z = 3)}{p(z = 1)} + \\log \\frac{p(y_i \\mid z = 3)}{p(y_i \\mid z = 1)} $$\n\n$$ \\ldots $$\n\nNote that the first ratio is 0 since it is the log of a ratio that will always be 1. When making a MAP estimate of an individual's subtype, the maximum of these ratios is selected. More generally, if want to match the partial information posterior to the full information posterior as closely as possible, then we want to match these ratios as closely as possible. This suggests a simple adjustment algorithm--- fit $K - 1$ separate regressions where the features are the log ratios of each term in the joint distribution.",
"_____no_output_____"
]
],
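[
[
"# Small numeric illustration of the point above (added): with subtype 1 as the pivot,\n# the softmax of (log prior ratios + log likelihood ratios) recovers the posterior\n# computed directly from the joint terms. The numbers are arbitrary toy values.\ntoy_log_prior = np.log(np.array([0.5, 0.3, 0.2]))\ntoy_log_likel = np.array([-4.0, -2.5, -3.0])\ndirect = np.exp(toy_log_prior + toy_log_likel)\ndirect /= direct.sum()\nratios = (toy_log_prior - toy_log_prior[0]) + (toy_log_likel - toy_log_likel[0])\nvia_ratios = np.exp(ratios) / np.exp(ratios).sum()\nprint(np.allclose(direct, via_ratios))",
"_____no_output_____"
]
],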
[
[
"def log_ratio(L, pivot=0):\n R = L - L[:, pivot][:, np.newaxis]\n return R",
"_____no_output_____"
],
[
"full_log_priors = np.array([eval_prior(d) for d in data])\nfull_log_likels = np.array([eval_likel(d) for d in data])\n\nyr01_log_priors = np.array([eval_prior(truncated_data(d, 1.0)[0]) for d in data])\nyr01_log_likels = np.array([eval_likel(truncated_data(d, 1.0)[0]) for d in data])\n\nyr02_log_priors = np.array([eval_prior(truncated_data(d, 2.0)[0]) for d in data])\nyr02_log_likels = np.array([eval_likel(truncated_data(d, 2.0)[0]) for d in data])",
"_____no_output_____"
],
[
"L1 = log_ratio(yr01_log_priors)\nL2 = log_ratio(yr01_log_likels)\nY = log_ratio(full_log_priors) + log_ratio(full_log_likels)",
"_____no_output_____"
]
],
[
[
"#### Algorithm 1",
"_____no_output_____"
]
],
[
[
"def fit_adjustment(y, x1, x2):\n from scipy.linalg import lstsq\n n = y.size\n X = np.c_[ np.ones(n), x1, x2 ]\n w, _, _, _ = lstsq(X, y)\n return w\n\ndef make_adjustment(x1, x2, w):\n n = x1.size\n X = np.c_[ np.ones(n), x1, x2 ]\n return np.dot(X, w)",
"_____no_output_____"
],
[
"Yhat = np.zeros_like(Y)\nN, K = Yhat.shape\nW = np.zeros((K, 3))\nfor k in range(1, K):\n w = fit_adjustment(Y[:, k], L1[:, k], L2[:, k])\n W[k] = w\n Yhat[:, k] = make_adjustment(L1[:, k], L2[:, k], w)",
"_____no_output_____"
],
[
"full_log_ratio = Y\nyr01_log_ratio = L1 + L2",
"_____no_output_____"
],
[
"P = np.array([softmax.softmax_func(y) for y in full_log_ratio])\nQ1 = np.array([softmax.softmax_func(y) for y in yr01_log_ratio])\nQ2 = np.array([softmax.softmax_func(y) for y in Yhat])",
"_____no_output_____"
],
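[
"# Added summary statistic: average KL divergence from the full-information posterior P\n# to each approximation (lower is better). This complements the cross-entropy style\n# sums computed in the next cells. Assumes P, Q1, Q2 from the previous cell.\ndef mean_kl(P, Q):\n    return np.mean(np.sum(P * (np.log(P) - np.log(Q)), axis=1))\n\nmean_kl(P, Q1), mean_kl(P, Q2)",
"_____no_output_____"
],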
[
"np.sum(P * np.log(P))",
"_____no_output_____"
],
[
"np.sum(P * np.log(Q1))",
"_____no_output_____"
],
[
"np.sum(P * np.log(Q2))",
"_____no_output_____"
]
],
[
[
"This simple approach doesn't work very well using the multinomial regression objective as an evaluation, but this makes sense because each of the weights are learned entirely independently. Another option for evaluation is to look at whether the MAP under the adjusted distribution agrees more with the MAP under full information that the map under partial information.",
"_____no_output_____"
]
],
[
[
"np.mean(np.argmax(P, axis=1) == np.argmax(Q1, axis=1))",
"_____no_output_____"
],
[
"np.mean(np.argmax(P, axis=1) == np.argmax(Q2, axis=1))",
"_____no_output_____"
]
],
[
[
"Again, the results are not good. This isn't hopeless, though, since the way we trained the adjustment is pretty simple. For completeness, however, let's take a look at the confusion matrix to see if any key mistakes are being corrected using this approach.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import confusion_matrix\nconfusion_matrix(np.argmax(P, axis=1), np.argmax(Q1, axis=1))",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(Q2, axis=1))",
"_____no_output_____"
]
],
[
[
"#### Algorithm 2",
"_____no_output_____"
]
],
[
[
"def multinom_pred(W, P, X1, X2):\n Z = np.zeros_like(P)\n N, K = Z.shape\n for k in range(1, K):\n w = W[k]\n X = np.c_[ np.ones(N), X1[:, k], X2[:, k] ]\n Z[:, k] = np.dot(X, w)\n \n Q = np.array([softmax.softmax_func(z) for z in Z])\n return Q\n\ndef multinom_cost(W, P, X1, X2):\n Q = multinom_pred(W, P, X1, X2)\n return np.sum(P * np.log(Q))",
"_____no_output_____"
],
[
"def multinom_grad(W, P, X1, X2):\n Z = np.zeros_like(P)\n N, K = Z.shape\n for k in range(1, K):\n w = W[k]\n X = np.c_[ np.ones(N), X1[:, k], X2[:, k] ]\n Z[:, k] = np.dot(X, w)\n Q = np.array([softmax.softmax_func(z) for z in Z])\n \n D = np.zeros_like(W)\n for k in range(1, K):\n for i, z in enumerate(Z):\n g = softmax.softmax_grad(z)\n x = np.r_[ 1.0, X1[i, k], X2[i, k] ]\n for j in range(K):\n D[k] += P[i, j] / Q[i, j] * g[j, k] * x\n \n return D",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\n#X1 = StandardScaler().fit_transform(L1)\nX1 = (L1 - L1.mean()) / np.std(L1)\n#X2 = StandardScaler().fit_transform(L2)\nX2 = (L2 - L2.mean()) / np.std(L2)\nW0 = np.zeros((P.shape[1], 3))",
"_____no_output_____"
],
[
"multinom_cost(W0, P, X1, X2)",
"_____no_output_____"
],
[
"multinom_grad(W0, P, X1, X2)",
"_____no_output_____"
],
[
"def check_grad(f, x0, eps=1e-10):\n f0 = f(x0)\n n = x0.size\n g = np.zeros_like(x0)\n for i in range(n):\n dt = np.zeros_like(x0)\n dt[i] += eps\n f1 = f(x0 + dt)\n g[i] = (f1 - f0) / eps\n \n return g",
"_____no_output_____"
],
[
"wshape = W0.shape\nf = lambda w: -multinom_cost(w.reshape(wshape), P, X1, X2)\ng = lambda w: -multinom_grad(w.reshape(wshape), P, X1, X2).ravel()",
"_____no_output_____"
],
[
"check_grad(f, W0.ravel(), 1e-8).reshape(wshape)",
"_____no_output_____"
],
[
"s = opt.minimize(f, W.ravel(), jac=g, method='BFGS')",
"_____no_output_____"
],
[
"W2 = s.x.reshape(wshape)\nW2",
"_____no_output_____"
],
[
"multinom_cost(W2, P, X1, X2)",
"_____no_output_____"
]
],
[
[
"We have a slight improvement in terms of log-likelihood. Let's check the accuracy of MAP subtype estimates.",
"_____no_output_____"
]
],
[
[
"Q3 = multinom_pred(W2, P, X1, X2)",
"_____no_output_____"
],
[
"np.mean(np.argmax(P, axis=1) == np.argmax(Q1, axis=1))",
"_____no_output_____"
],
[
"np.mean(np.argmax(P, axis=1) == np.argmax(Q3, axis=1))",
"_____no_output_____"
]
],
[
[
"No improvement on MAP accuracy.",
"_____no_output_____"
]
],
[
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(Q1, axis=1))",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(Q3, axis=1))",
"_____no_output_____"
],
[
"subtypes = pd.read_csv('benchmark_pfvc_subtypes.csv')\nsubtypes['subtype'] = np.argmax(Q3, axis=1) + 1\nsubtypes.to_csv('benchmark_pfvc_1y_subtypes_multinom.csv', index=False)",
"_____no_output_____"
],
[
"!Rscript score_predictions.R benchmark_pfvc_1y_subtypes_multinom.csv",
"Loading required package: methods\nSource: local data frame [4 x 2]\n\n bin mae\n1 (1,2] 4.83\n2 (2,4] 6.78\n3 (4,8] 9.19\n4 (8,25] 11.06\nSource: local data frame [8 x 5]\n\n true_subtype (1,2] (2,4] (4,8] (8,25]\n1 1 5.68 9.47 7.15 16.67\n2 2 3.91 4.54 6.27 6.55\n3 3 4.02 5.02 6.38 6.54\n4 4 4.74 6.97 10.94 14.49\n5 5 5.04 7.06 10.39 15.04\n6 6 4.76 8.35 10.64 10.67\n7 7 6.17 9.10 12.86 19.92\n8 8 5.65 6.43 4.98 1.53\nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.60 8.80 43.35 52.91 NA NA NA NA\n2 2 8.02 3.33 8.27 16.43 11.93 31.94 NA NA\n3 3 NA 10.56 3.57 11.86 8.81 NA NA NA\n4 4 NA 18.54 7.75 3.49 14.26 NA 16.51 NA\n5 5 NA NA 5.61 11.20 4.79 10.59 26.34 32.66\n6 6 NA NA 21.57 NA 12.80 3.38 9.03 15.67\n7 7 NA 38.78 9.05 13.80 20.69 13.15 4.08 NA\n8 8 NA NA NA 29.26 25.96 11.78 NA 3.73\n(1,2] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 1.59 5.43 43.75 NA NA NA NA NA\n2 2 4.17 2.87 4.22 9.88 5.92 32.08 NA NA\n3 3 NA 6.59 3.23 5.04 14.12 NA NA NA\n4 4 NA 10.74 5.19 2.36 6.82 NA 9.11 NA\n5 5 NA NA 2.71 3.36 3.56 6.29 14.48 30.67\n6 6 NA NA 22.90 NA 8.00 2.12 3.36 13.86\n7 7 NA 38.67 7.18 10.34 11.09 5.57 2.77 NA\n8 8 NA NA NA 29.26 23.81 8.05 NA 3.14\n(2,4] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.52 9.24 43.53 38.68 NA NA NA NA\n2 2 6.90 3.21 6.57 10.34 7.56 31.80 NA NA\n3 3 NA 8.65 3.11 8.57 12.01 NA NA NA\n4 4 NA 15.06 7.84 2.93 10.60 NA 10.83 NA\n5 5 NA NA 3.79 5.52 4.67 9.15 19.33 31.52\n6 6 NA NA 23.06 NA 12.99 2.69 6.71 13.98\n7 7 NA 38.99 10.66 10.89 17.46 10.79 2.83 NA\n8 8 NA NA NA NA 27.03 12.32 NA 4.75\n(4,8] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.28 10.04 43.09 NA NA NA NA NA\n2 2 9.78 3.43 9.45 22.23 9.70 NA NA NA\n3 3 NA 10.86 3.67 13.78 6.34 NA NA NA\n4 4 NA 20.82 8.40 3.99 16.00 NA 19.89 NA\n5 5 NA NA 5.74 11.10 5.43 12.99 33.71 33.31\n6 6 NA NA 22.57 NA 16.26 3.37 10.71 15.95\n7 7 NA NA NA 14.86 23.21 18.11 5.46 NA\n8 8 NA NA NA NA NA 14.08 NA 3.68\n(8,25] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 5.03 9.60 43.10 56.47 NA NA NA NA\n2 2 8.64 3.64 12.60 NA 16.81 NA NA NA\n3 3 NA 13.20 4.03 20.09 5.25 NA NA NA\n4 4 NA 27.91 11.15 5.43 18.90 NA 19.50 NA\n5 5 NA NA 6.22 19.01 5.53 13.64 35.73 33.74\n6 6 NA NA 19.20 NA 12.00 5.53 14.88 21.48\n7 7 NA NA NA 27.70 29.10 21.28 7.65 NA\n8 8 NA NA NA NA NA NA NA 1.53\n"
]
],
[
[
"#### Future Directions\n\n1. Use a Bayesian multinomial logistic regression classifier to sidestep the inexpressive linear model.\n\n2. Change the objective function to more directly reflect the cost function used to evaluate the model (i.e. not all subtype misclassification mistakes are equal, some mistakes are more costly and perhaps we'd like to reflect that in the learning procedure).\n\n3. Incorporate additional likelihood ratios based on other longitudinally measured outcomes.",
"_____no_output_____"
],
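[
"# Rough sketch of future direction 2 above (cost-sensitive training). This is only an\n# illustration, not part of the analysis: the cost matrix C is made up, and the objective\n# simply measures the expected misclassification cost of the soft predictions instead of\n# the log-likelihood. It reuses multinom_pred, P, X1, X2 and W2 defined earlier.\ndef multinom_expected_cost(W, P, X1, X2, C):\n    \"\"\"Expected misclassification cost of predictions Q under soft targets P.\"\"\"\n    Q = multinom_pred(W, P, X1, X2)\n    # np.dot(P, C)[i, k] is the expected cost of predicting subtype k for subject i\n    return np.sum(np.dot(P, C) * Q)\n\n# Hypothetical cost matrix: confusing distant subtypes costs more than confusing neighbors.\nK = P.shape[1]\nC = np.abs(np.arange(K)[:, None] - np.arange(K)[None, :]) / (K - 1.0)\nmultinom_expected_cost(W2, P, X1, X2, C)",
"_____no_output_____"
],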
[
"### Adding Additional Markers",
"_____no_output_____"
]
],
[
[
"L3 = np.loadtxt('param/gi_5.0_ratios.dat')\nL4 = np.loadtxt('param/pdc_5.0_ratios.dat')\nL5 = np.loadtxt('param/hrt_5.0_ratios.dat')",
"_____no_output_____"
],
[
"#X3 = StandardScaler().fit_transform(L3)\nX3 = (L3 - L3.mean()) / np.std(L3)\n#X4 = StandardScaler().fit_transform(L4)\nX4 = (L4 - L4.mean()) / np.std(L4)\n#X5 = StandardScaler().fit_transform(L5)\nX5 = (L5 - L5.mean()) / np.std(L5)",
"_____no_output_____"
],
[
"def multinom_pred2(W, P, XX):\n Z = np.zeros_like(P)\n N, K = Z.shape\n for k in range(1, K):\n w = W[k]\n X = np.ones(N)\n for Xi in XX:\n X = np.c_[ X, Xi[:, k] ]\n Z[:, k] = np.dot(X, w)\n \n Q = np.array([softmax.softmax_func(z) for z in Z])\n return Q\n\ndef multinom_cost2(W, P, XX):\n Q = multinom_pred2(W, P, XX)\n return np.sum(P * np.log(Q))",
"_____no_output_____"
],
[
"def multinom_grad2(W, P, XX):\n Z = np.zeros_like(P)\n N, K = Z.shape\n for k in range(1, K):\n w = W[k]\n X = np.ones(N)\n for Xi in XX:\n X = np.c_[ X, Xi[:, k] ]\n Z[:, k] = np.dot(X, w)\n Q = np.array([softmax.softmax_func(z) for z in Z])\n \n D = np.zeros_like(W)\n for k in range(1, K):\n for i, z in enumerate(Z):\n g = softmax.softmax_grad(z)\n x = np.array([Xi[i, k] for Xi in XX])\n x = np.r_[ 1.0, x ]\n for j in range(K):\n D[k] += P[i, j] / Q[i, j] * g[j, k] * x\n \n return D",
"_____no_output_____"
],
[
"W0 = np.zeros((P.shape[1], 6))\nf2 = lambda w: -multinom_cost2(w.reshape(W0.shape), P, [X1, X2, X3, X4, X5])\ng2 = lambda w: -multinom_grad2(w.reshape(W0.shape), P, [X1, X2, X3, X4, X5]).ravel()",
"_____no_output_____"
],
[
"g2(W0.ravel()).reshape(W0.shape)",
"_____no_output_____"
],
[
"check_grad(f2, W0.ravel(), 1e-8).reshape(W0.shape)",
"_____no_output_____"
],
[
"s2 = opt.minimize(f2, np.random.normal(scale=5.0, size=W0.shape).ravel(), jac=g2, method='BFGS')",
"_____no_output_____"
],
[
"Q5 = multinom_pred2(s2.x.reshape(W0.shape), P, [X1, X2, X3, X4, X5])",
"_____no_output_____"
],
[
"np.mean(np.argmax(P, axis=1) == np.argmax(Q5, axis=1))",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(Q1, axis=1))",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(Q5, axis=1))",
"_____no_output_____"
],
[
"subtypes = pd.read_csv('benchmark_pfvc_subtypes.csv')\nsubtypes['subtype'] = np.argmax(Q5, axis=1) + 1\nsubtypes.to_csv('benchmark_pfvc_1y_subtypes_multinom2.csv', index=False)",
"_____no_output_____"
],
[
"!Rscript score_predictions.R benchmark_pfvc_1y_subtypes_multinom2.csv",
"Loading required package: methods\nSource: local data frame [4 x 2]\n\n bin mae\n1 (1,2] 4.62\n2 (2,4] 6.04\n3 (4,8] 7.67\n4 (8,25] 8.51\nSource: local data frame [8 x 5]\n\n true_subtype (1,2] (2,4] (4,8] (8,25]\n1 1 5.37 7.14 6.37 10.80\n2 2 3.38 4.40 5.19 5.44\n3 3 4.40 4.93 6.22 6.48\n4 4 4.36 6.05 7.94 9.98\n5 5 5.05 6.60 8.99 10.56\n6 6 4.91 7.68 10.19 8.81\n7 7 5.22 6.71 9.57 14.79\n8 8 5.91 6.43 4.98 1.53\nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.45 11.47 43.35 NA NA NA NA NA\n2 2 8.33 3.28 8.40 11.29 5.48 34.50 NA NA\n3 3 NA 9.32 3.72 12.68 15.16 21.62 NA NA\n4 4 NA 15.23 8.69 3.87 13.18 4.46 11.56 NA\n5 5 NA 17.31 6.91 9.75 4.37 10.71 35.43 24.56\n6 6 NA NA 21.57 NA 12.58 4.37 11.50 14.50\n7 7 NA 38.78 9.05 13.21 16.84 13.12 3.91 NA\n8 8 NA NA NA 29.26 25.96 9.20 NA 3.83\n(1,2] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 2.01 4.90 43.75 NA NA NA NA NA\n2 2 6.47 2.70 4.48 5.58 1.37 32.08 NA NA\n3 3 NA 6.22 3.24 6.47 18.21 22.80 NA NA\n4 4 NA 9.88 6.04 2.49 6.51 4.06 6.16 NA\n5 5 NA 15.11 2.46 3.43 3.36 7.14 20.73 17.33\n6 6 NA NA 22.90 NA 9.10 2.95 4.31 12.23\n7 7 NA 38.67 7.18 9.03 11.19 4.93 2.93 NA\n8 8 NA NA NA 29.26 23.81 5.65 NA 3.38\n(2,4] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.34 9.84 43.53 NA NA NA NA NA\n2 2 7.51 3.19 6.20 9.95 3.26 34.37 NA NA\n3 3 NA 7.83 3.29 9.16 13.63 21.48 NA NA\n4 4 NA 13.66 7.85 3.18 9.91 3.71 11.19 NA\n5 5 NA 17.35 1.96 5.41 4.33 10.18 28.58 19.36\n6 6 NA NA 23.06 NA 12.39 3.85 11.41 12.85\n7 7 NA 38.99 10.66 10.13 17.16 8.62 3.14 NA\n8 8 NA NA NA NA 27.03 12.32 NA 4.75\n(4,8] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.04 9.69 43.09 NA NA NA NA NA\n2 2 9.54 3.37 9.35 15.82 6.61 37.16 NA NA\n3 3 NA 9.27 3.90 14.25 NA 21.09 NA NA\n4 4 NA 18.54 8.51 4.19 14.45 2.93 16.34 NA\n5 5 NA 18.12 5.75 10.50 4.92 11.70 41.43 27.97\n6 6 NA NA 22.57 NA 15.25 4.79 15.79 15.25\n7 7 NA NA NA 14.28 21.95 14.72 4.80 NA\n8 8 NA NA NA NA NA 14.08 NA 3.68\n(8,25] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 4.88 19.21 43.10 NA NA NA NA NA\n2 2 8.64 3.65 12.64 NA 7.69 NA NA NA\n3 3 NA 12.31 4.15 19.64 NA NA NA NA\n4 4 NA 20.85 14.55 5.93 20.20 5.26 NA NA\n5 5 NA 17.36 9.44 16.96 4.59 28.51 43.50 33.28\n6 6 NA NA 19.20 NA 12.00 5.99 NA 21.48\n7 7 NA NA NA 27.70 NA 17.42 7.39 NA\n8 8 NA NA NA NA NA NA NA 1.53\n"
]
],
[
[
"#### Cross validated results with 5 years of aux",
"_____no_output_____"
]
],
[
[
"from sklearn.cross_validation import KFold",
"_____no_output_____"
],
[
"L3 = np.loadtxt('param/gi_5.0_ratios.dat')\nL4 = np.loadtxt('param/pdc_5.0_ratios.dat')\nL5 = np.loadtxt('param/hrt_5.0_ratios.dat')\n\n#X3 = StandardScaler().fit_transform(L3)\nX3 = (L3 - L3.mean()) / L3.std()\n#X4 = StandardScaler().fit_transform(L4)\nX4 = (L4 - L4.mean()) / L4.std()\n#X5 = StandardScaler().fit_transform(L5)\nX5 = (L5 - L5.mean()) / L5.std()\n\nXX = [X1, X2, X3, X4, X5]\nW0 = np.zeros((P.shape[1], len(XX) + 1))\n\nnfolds = 10\naccs = []\nWW = []\nsols = []\nQfinal = np.zeros_like(P)\n\nfor train, test in KFold(P.shape[0], nfolds, shuffle=True, random_state=0):\n print('Starting new fold.')\n f2 = lambda w: -multinom_cost2(w.reshape(W0.shape), P[train], [Xi[train] for Xi in XX])\n g2 = lambda w: -multinom_grad2(w.reshape(W0.shape), P[train], [Xi[train] for Xi in XX]).ravel()\n s2 = opt.minimize(f2, W0.ravel(), jac=g2, method='BFGS')\n sols.append(s2)\n W = s2.x.reshape(W0.shape)\n WW.append(W)\n Qfinal[test] = multinom_pred2(W, P[test], [Xi[test] for Xi in XX])\n accs.append(np.mean(np.argmax(P[test], axis=1) == np.argmax(Qfinal[test], axis=1)))",
"Starting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\n"
],
[
"subtypes = pd.read_csv('benchmark_pfvc_subtypes.csv')\nsubtypes['subtype'] = np.argmax(Qfinal, axis=1) + 1\nsubtypes.to_csv('benchmark_pfvc_1y_subtypes_multinom3.csv', index=False)",
"_____no_output_____"
],
[
"!Rscript score_predictions.R benchmark_pfvc_1y_subtypes_multinom3.csv",
"Loading required package: methods\nSource: local data frame [4 x 2]\n\n bin mae\n1 (1,2] 4.78\n2 (2,4] 6.21\n3 (4,8] 8.14\n4 (8,25] 9.12\nSource: local data frame [8 x 5]\n\n true_subtype (1,2] (2,4] (4,8] (8,25]\n1 1 5.63 8.62 6.37 10.80\n2 2 3.44 4.31 5.23 6.33\n3 3 4.28 4.97 6.81 6.99\n4 4 4.35 6.09 8.49 10.94\n5 5 5.68 6.77 9.67 10.90\n6 6 5.03 8.16 10.63 8.98\n7 7 5.33 6.80 9.68 15.97\n8 8 6.06 7.33 8.16 1.53\nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.41 11.47 32.23 NA NA NA NA NA\n2 2 8.33 3.28 9.21 11.29 6.87 31.94 NA NA\n3 3 NA 9.67 3.78 12.09 NA 21.62 NA NA\n4 4 NA 14.61 9.00 3.90 13.87 4.46 12.37 NA\n5 5 NA 18.35 7.50 9.89 4.39 10.86 34.91 24.56\n6 6 NA NA 21.57 NA 12.89 4.40 11.22 14.50\n7 7 NA 38.78 9.05 13.21 16.84 11.92 3.88 NA\n8 8 NA NA NA 29.26 25.96 10.31 NA 4.15\n(1,2] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 1.66 4.90 26.62 NA NA NA NA NA\n2 2 6.47 2.73 4.61 5.58 1.37 32.08 NA NA\n3 3 NA 6.19 3.25 5.67 NA 22.80 NA NA\n4 4 NA 9.67 6.04 2.50 6.37 4.06 6.02 NA\n5 5 NA 20.05 NA 3.44 3.28 7.14 19.79 17.33\n6 6 NA NA 22.90 NA 9.15 2.95 4.37 12.23\n7 7 NA 38.67 7.18 9.03 11.19 4.12 3.13 NA\n8 8 NA NA NA 29.26 23.81 5.10 NA 3.57\n(2,4] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.28 9.84 27.94 NA NA NA NA NA\n2 2 7.51 3.19 6.20 9.95 7.56 31.80 NA NA\n3 3 NA 7.85 3.30 8.72 NA 21.48 NA NA\n4 4 NA 12.65 7.94 3.22 9.38 3.71 11.56 NA\n5 5 NA 17.56 NA 5.38 4.31 10.48 28.58 19.36\n6 6 NA NA 23.06 NA 12.96 3.75 10.99 12.85\n7 7 NA 38.99 10.66 10.13 17.16 7.57 3.05 NA\n8 8 NA NA NA NA 27.03 9.50 NA 5.24\n(4,8] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.04 9.69 43.09 NA NA NA NA NA\n2 2 9.54 3.41 9.56 15.82 7.91 NA NA NA\n3 3 NA 9.80 4.03 13.71 NA 21.09 NA NA\n4 4 NA 17.47 9.19 4.23 14.80 2.93 17.33 NA\n5 5 NA 17.98 7.50 10.45 4.97 11.79 39.15 27.97\n6 6 NA NA 22.57 NA 15.50 4.88 15.84 15.25\n7 7 NA NA NA 14.28 21.95 14.22 4.71 NA\n8 8 NA NA NA NA NA 14.80 NA 4.17\n(8,25] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 4.88 19.21 43.10 NA NA NA NA NA\n2 2 8.64 3.60 14.20 NA 7.69 NA NA NA\n3 3 NA 12.95 4.21 18.81 NA NA NA NA\n4 4 NA 20.37 15.13 6.09 21.19 5.26 NA NA\n5 5 NA 17.36 NA 16.96 4.66 28.51 42.54 33.28\n6 6 NA NA 19.20 NA 12.00 6.05 18.84 21.48\n7 7 NA NA NA 27.70 NA 17.12 8.00 NA\n8 8 NA NA NA NA NA NA NA 1.53\n"
]
],
[
[
"#### Partial auxiliary information",
"_____no_output_____"
]
],
[
[
"L3 = np.loadtxt('param/gi_1.0_ratios.dat')\nL4 = np.loadtxt('param/pdc_1.0_ratios.dat')\nL5 = np.loadtxt('param/hrt_1.0_ratios.dat')\n\nX3 = (L3 - L3.mean()) / np.std(L3)\nX4 = (L4 - L4.mean()) / np.std(L4)\nX5 = (L5 - L5.mean()) / np.std(L5)\n\nXX = [X1, X2, X3, X4, X5]\nW0 = np.zeros((P.shape[1], len(XX) + 1))",
"_____no_output_____"
],
[
"f2 = lambda w: -multinom_cost2(w.reshape(W0.shape), P, XX)\ng2 = lambda w: -multinom_grad2(w.reshape(W0.shape), P, XX).ravel()\ns2 = opt.minimize(f2, W0.ravel(), jac=g2, method='BFGS')\nW = s2.x.reshape(W0.shape)\nQQ = multinom_pred2(W, P, XX)",
"_____no_output_____"
],
[
"s2",
"_____no_output_____"
],
[
"np.mean(np.argmax(P, axis=1) == np.argmax(QQ, axis=1))",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(QQ, axis=1))",
"_____no_output_____"
],
[
"W",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(QQ, axis=1))",
"_____no_output_____"
],
[
"subtypes = pd.read_csv('benchmark_pfvc_subtypes.csv')\nsubtypes['subtype'] = np.argmax(QQ, axis=1) + 1\nsubtypes.to_csv('benchmark_pfvc_1y_subtypes_multinom4.csv', index=False)",
"_____no_output_____"
],
[
"!Rscript score_predictions.R benchmark_pfvc_1y_subtypes_multinom4.csv",
"Loading required package: methods\nSource: local data frame [4 x 2]\n\n bin mae\n1 (1,2] 4.83\n2 (2,4] 6.84\n3 (4,8] 9.23\n4 (8,25] 10.78\nSource: local data frame [8 x 5]\n\n true_subtype (1,2] (2,4] (4,8] (8,25]\n1 1 5.68 9.47 7.15 16.67\n2 2 3.80 4.56 6.26 5.65\n3 3 3.98 4.95 6.46 6.63\n4 4 4.82 7.18 11.04 13.11\n5 5 5.26 7.38 10.38 15.44\n6 6 4.56 7.47 9.64 10.67\n7 7 6.37 9.87 14.09 20.49\n8 8 4.93 6.43 4.98 1.53\nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.6 8.80 43.35 52.91 NA NA NA NA\n2 2 8.7 3.39 7.72 17.68 9.12 31.94 NA NA\n3 3 21.3 10.37 3.62 11.56 8.25 NA NA NA\n4 4 NA 21.20 7.53 3.65 14.24 4.33 11.24 NA\n5 5 NA NA 5.97 11.87 4.30 11.03 26.55 32.66\n6 6 NA NA 21.57 NA 12.16 3.60 9.03 15.67\n7 7 NA 44.12 9.65 14.10 19.99 12.73 3.91 NA\n8 8 NA NA NA NA 25.96 9.20 13.02 3.83\n(1,2] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 1.59 5.43 43.75 NA NA NA NA NA\n2 2 NA 2.84 4.23 8.20 NA 32.08 NA NA\n3 3 12.48 6.45 3.34 5.25 NA NA NA NA\n4 4 NA 13.66 5.55 2.19 6.82 3.68 NA NA\n5 5 NA NA 2.58 3.55 3.51 7.52 14.96 30.67\n6 6 NA NA 22.90 NA 8.20 2.33 3.36 13.86\n7 7 NA 41.18 7.92 8.74 11.13 5.53 2.89 NA\n8 8 NA NA NA NA 23.81 5.65 13.02 3.38\n(2,4] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.52 9.24 43.53 38.68 NA NA NA NA\n2 2 7.38 3.23 6.31 11.42 10.44 31.80 NA NA\n3 3 17.63 8.58 3.26 8.15 14.35 NA NA NA\n4 4 NA 18.03 7.64 2.94 10.58 3.55 8.22 NA\n5 5 NA NA 3.18 6.20 4.48 9.68 19.81 31.52\n6 6 NA NA 23.06 NA 12.10 3.19 6.71 13.98\n7 7 NA 42.91 11.96 10.74 17.26 10.34 3.48 NA\n8 8 NA NA NA NA 27.03 12.32 NA 4.75\n(4,8] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.28 10.04 43.09 NA NA NA NA NA\n2 2 9.78 3.47 9.04 22.75 8.59 NA NA NA\n3 3 21.78 10.69 3.69 13.40 8.18 NA NA NA\n4 4 NA 23.76 7.76 4.42 16.00 2.93 15.78 NA\n5 5 NA NA 5.26 11.68 5.01 11.74 33.34 33.31\n6 6 NA NA 22.57 NA 15.35 3.70 10.71 15.95\n7 7 NA 47.87 NA 15.09 22.29 15.30 4.87 NA\n8 8 NA NA NA NA NA 14.08 NA 3.68\n(8,25] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 5.03 9.60 43.10 56.47 NA NA NA NA\n2 2 8.64 3.77 11.58 26.51 NA NA NA NA\n3 3 26.81 13.08 4.02 19.35 5.25 NA NA NA\n4 4 NA 28.13 11.15 5.66 18.90 5.26 NA NA\n5 5 NA NA 7.50 19.92 3.50 16.48 35.75 33.74\n6 6 NA NA 19.20 NA 12.00 5.53 14.88 21.48\n7 7 NA NA NA 27.70 28.58 18.86 7.27 NA\n8 8 NA NA NA NA NA NA NA 1.53\n"
]
],
[
[
"#### Cross validated partial information",
"_____no_output_____"
]
],
[
[
"L3 = np.loadtxt('param/gi_1.0_ratios.dat')\nL4 = np.loadtxt('param/pdc_1.0_ratios.dat')\nL5 = np.loadtxt('param/hrt_1.0_ratios.dat')\nL6 = np.loadtxt('param/pv1_1.0_ratios.dat')\nL7 = np.loadtxt('param/rp_1.0_ratios.dat')\n\nX3 = (L3 - L3.mean()) / np.std(L3)\nX4 = (L4 - L4.mean()) / np.std(L4)\nX5 = (L5 - L5.mean()) / np.std(L5)\nX6 = (L6 - L6.mean()) / np.std(L6)\nX7 = (L7 - L7.mean()) / np.std(L7)\n\nXX = [X1, X2, X3, X4, X5, X6, X7]\nW0 = np.zeros((P.shape[1], len(XX) + 1))\n\nnfolds = 10\naccs = []\nWW = []\nsols = []\nQfinal = np.zeros_like(P)\n\nfor train, test in KFold(P.shape[0], nfolds, shuffle=True, random_state=0):\n print('Starting new fold.')\n f2 = lambda w: -multinom_cost2(w.reshape(W0.shape), P[train], [Xi[train] for Xi in XX])\n g2 = lambda w: -multinom_grad2(w.reshape(W0.shape), P[train], [Xi[train] for Xi in XX]).ravel()\n s2 = opt.minimize(f2, W0.ravel(), jac=g2, method='BFGS')\n sols.append(s2)\n W = s2.x.reshape(W0.shape)\n WW.append(W)\n Qfinal[test] = multinom_pred2(W, P[test], [Xi[test] for Xi in XX])\n accs.append(np.mean(np.argmax(P[test], axis=1) == np.argmax(Qfinal[test], axis=1)))",
"Starting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\n"
],
[
"[s.success for s in sols]",
"_____no_output_____"
],
[
"np.mean(np.argmax(P, axis=1) == np.argmax(Qfinal, axis=1))",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(Q1, axis=1))",
"_____no_output_____"
],
[
"D = confusion_matrix(np.argmax(P, axis=1), np.argmax(Q1, axis=1))",
"_____no_output_____"
],
[
"A = D / D.sum(axis=1)[:, np.newaxis]\nb = np.bincount(np.argmax(P, axis=1), minlength=P.shape[1]) / P.shape[0]",
"_____no_output_____"
],
[
"c = np.dot(A, b)",
"_____no_output_____"
],
[
"Qconf = Q1 * (c / b)\nQconf /= Qconf.sum(axis=1)[:, np.newaxis]",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(Qconf, axis=1))",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(Qfinal, axis=1))",
"_____no_output_____"
],
[
"subtypes = pd.read_csv('benchmark_pfvc_subtypes.csv')\nsubtypes['subtype'] = np.argmax(Qfinal, axis=1) + 1\nsubtypes.to_csv('benchmark_pfvc_1y_subtypes_multinom5.csv', index=False)",
"_____no_output_____"
],
[
"!Rscript score_predictions.R benchmark_pfvc_1y_subtypes_multinom5.csv",
"Loading required package: methods\nSource: local data frame [4 x 2]\n\n bin mae\n1 (1,2] 4.95\n2 (2,4] 7.02\n3 (4,8] 9.41\n4 (8,25] 11.32\nSource: local data frame [8 x 5]\n\n true_subtype (1,2] (2,4] (4,8] (8,25]\n1 1 5.80 10.23 7.15 16.67\n2 2 3.80 4.41 6.05 6.55\n3 3 3.85 4.63 6.41 6.72\n4 4 5.16 7.33 11.37 13.32\n5 5 5.76 8.18 11.46 18.43\n6 6 4.55 8.36 10.64 10.67\n7 7 5.86 9.05 12.14 16.34\n8 8 6.05 7.50 6.42 1.53\nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.60 8.60 32.23 52.91 NA NA NA NA\n2 2 8.26 3.36 8.37 17.82 11.93 31.94 NA NA\n3 3 NA 10.36 3.57 11.97 NA NA NA NA\n4 4 NA 20.33 7.75 3.70 13.53 4.46 10.72 NA\n5 5 NA 9.65 8.62 13.13 4.13 12.65 26.54 29.39\n6 6 NA NA 21.57 NA 12.74 3.39 9.04 13.86\n7 7 NA 38.78 9.65 14.69 20.87 9.26 3.80 1.33\n8 8 NA NA NA 29.26 25.96 10.35 NA 3.78\n(1,2] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 1.59 4.88 26.62 NA NA NA NA NA\n2 2 4.17 2.88 4.27 11.00 5.92 32.08 NA NA\n3 3 NA 6.36 3.24 5.49 NA NA NA NA\n4 4 NA 13.31 5.31 2.21 6.03 4.06 6.21 NA\n5 5 NA NA 12.64 3.89 3.33 8.86 14.65 19.07\n6 6 NA NA 22.90 NA 7.80 2.14 3.19 8.68\n7 7 NA 38.67 7.92 9.91 11.31 4.42 2.98 1.02\n8 8 NA NA NA 29.26 23.81 6.76 NA 2.87\n(2,4] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.52 8.60 27.94 38.68 NA NA NA NA\n2 2 7.63 3.25 6.42 10.43 7.56 31.80 NA NA\n3 3 NA 8.14 3.12 8.62 NA NA NA NA\n4 4 NA 16.96 7.63 2.91 9.90 3.71 8.47 NA\n5 5 NA 8.30 1.96 6.83 4.33 10.84 20.02 24.26\n6 6 NA NA 23.06 NA 12.76 2.71 6.54 13.98\n7 7 NA 38.99 11.96 11.93 17.60 8.99 3.13 0.78\n8 8 NA NA NA NA 27.03 11.67 NA 5.00\n(4,8] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.28 10.04 43.09 NA NA NA NA NA\n2 2 9.65 3.49 9.54 24.08 9.70 NA NA NA\n3 3 NA 10.75 3.69 13.78 NA NA NA NA\n4 4 NA 23.01 8.55 4.45 15.68 2.93 15.78 NA\n5 5 NA 10.99 5.75 12.55 4.86 14.34 31.85 32.73\n6 6 NA NA 22.57 NA 16.26 3.37 10.71 15.95\n7 7 NA NA NA 15.66 23.72 16.32 4.80 1.62\n8 8 NA NA NA NA NA 14.03 NA 3.88\n(8,25] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 5.03 9.60 43.10 56.47 NA NA NA NA\n2 2 8.64 3.64 12.60 NA 16.81 NA NA NA\n3 3 NA 13.14 4.03 20.09 NA NA NA NA\n4 4 NA 28.21 11.15 5.70 18.90 5.26 NA NA\n5 5 NA NA 9.44 21.48 3.29 16.95 36.61 33.92\n6 6 NA NA 19.20 NA 12.00 5.53 14.88 21.48\n7 7 NA NA NA 27.70 28.58 23.19 4.98 NA\n8 8 NA NA NA NA NA NA NA 1.53\n"
]
],
[
[
"#### Cross-validated partial information MAP target",
"_____no_output_____"
]
],
[
[
"L3 = np.loadtxt('param/gi_1.0_ratios.dat')\nL4 = np.loadtxt('param/pdc_1.0_ratios.dat')\nL5 = np.loadtxt('param/hrt_1.0_ratios.dat')\nL6 = np.loadtxt('param/pv1_1.0_ratios.dat')\nL7 = np.loadtxt('param/rp_1.0_ratios.dat')\n\nX3 = (L3 - L3.mean()) / np.std(L3)\nX4 = (L4 - L4.mean()) / np.std(L4)\nX5 = (L5 - L5.mean()) / np.std(L5)\nX6 = (L6 - L6.mean()) / np.std(L6)\nX7 = (L7 - L7.mean()) / np.std(L7)\n\nXX = [X1, X2, X3, X4, X5, X6, X7]\nW0 = np.zeros((P.shape[1], len(XX) + 1))\n\nnfolds = 10\naccs = []\nWW = []\nsols = []\nQfinal = np.zeros_like(P)\nPmap = np.array([softmax.onehot_encode(np.argmax(p), P.shape[1]) for p in P])\nPmap += 1e-4\nPmap /= Pmap.sum(axis=1)[:, np.newaxis]\n\nfor train, test in KFold(P.shape[0], nfolds, shuffle=True, random_state=0):\n print('Starting new fold.')\n f2 = lambda w: -multinom_cost2(w.reshape(W0.shape), Pmap[train], [Xi[train] for Xi in XX])\n g2 = lambda w: -multinom_grad2(w.reshape(W0.shape), Pmap[train], [Xi[train] for Xi in XX]).ravel()\n s2 = opt.minimize(f2, W0.ravel(), jac=g2, method='BFGS')\n if not s2.success:\n print('Failed.')\n break\n sols.append(s2)\n W = s2.x.reshape(W0.shape)\n WW.append(W)\n Qfinal[test] = multinom_pred2(W, Pmap[test], [Xi[test] for Xi in XX])\n accs.append(np.mean(np.argmax(Pmap[test], axis=1) == np.argmax(Qfinal[test], axis=1)))",
"Starting new fold.\n"
],
[
"np.mean(np.argmax(P, axis=1) == np.argmax(Qfinal, axis=1))",
"_____no_output_____"
],
[
"subtypes = pd.read_csv('benchmark_pfvc_subtypes.csv')\nsubtypes['subtype'] = np.argmax(Qfinal, axis=1) + 1\nsubtypes.to_csv('benchmark_pfvc_1y_subtypes_multinom5b.csv', index=False)",
"_____no_output_____"
],
[
"!Rscript score_predictions.R benchmark_pfvc_1y_subtypes_multinom5b.csv",
"Loading required package: methods\nSource: local data frame [4 x 2]\n\n bin mae\n1 (1,2] 5.07\n2 (2,4] 7.02\n3 (4,8] 9.50\n4 (8,25] 11.35\nSource: local data frame [8 x 5]\n\n true_subtype (1,2] (2,4] (4,8] (8,25]\n1 1 5.80 10.23 7.15 16.67\n2 2 3.82 4.51 6.34 6.89\n3 3 4.91 5.48 7.51 7.46\n4 4 4.98 7.41 11.10 13.18\n5 5 5.67 7.84 11.39 17.70\n6 6 4.68 8.10 10.33 10.33\n7 7 5.77 8.78 12.16 16.34\n8 8 6.51 6.70 4.98 1.53\nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.60 8.60 32.23 52.91 NA NA NA NA\n2 2 8.02 3.35 8.18 18.00 11.93 31.94 NA NA\n3 3 NA 10.57 3.64 11.99 3.09 NA 20.81 52.81\n4 4 NA 20.03 8.57 3.74 13.31 4.46 10.72 NA\n5 5 NA 15.47 8.62 13.26 4.22 13.26 25.60 29.39\n6 6 NA NA 21.57 NA 12.68 3.73 9.13 4.05\n7 7 NA 38.78 10.08 15.65 21.14 9.26 3.91 1.33\n8 8 NA NA NA 29.26 25.96 7.71 NA 4.08\n(1,2] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 1.59 4.88 26.62 NA NA NA NA NA\n2 2 4.17 2.86 4.08 9.88 5.92 32.08 NA NA\n3 3 NA 5.91 3.21 5.89 5.22 NA 15.74 51.41\n4 4 NA 12.99 5.75 2.26 6.16 4.06 6.21 NA\n5 5 NA 17.61 12.64 3.86 3.36 10.07 14.34 19.07\n6 6 NA NA 22.90 NA 7.90 2.87 3.37 4.05\n7 7 NA 38.67 8.19 9.64 11.31 4.42 2.95 1.02\n8 8 NA NA NA 29.26 23.81 5.16 NA 4.05\n(2,4] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.52 8.60 27.94 38.68 NA NA NA NA\n2 2 6.90 3.25 6.33 10.34 7.56 31.80 NA NA\n3 3 NA 8.63 3.25 9.14 2.97 NA 30.95 NA\n4 4 NA 16.90 8.54 3.01 9.87 3.71 8.47 NA\n5 5 NA 13.25 1.96 6.84 4.33 11.44 18.97 24.26\n6 6 NA NA 23.06 NA 12.76 3.15 6.68 NA\n7 7 NA 38.99 11.96 12.66 18.05 8.99 3.21 0.78\n8 8 NA NA NA NA 27.03 10.90 NA 4.89\n(4,8] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.28 10.04 43.09 NA NA NA NA NA\n2 2 9.78 3.46 9.33 22.03 9.70 NA NA NA\n3 3 NA 11.54 3.80 13.66 2.56 NA NA 56.31\n4 4 NA 22.38 9.77 4.64 15.42 2.93 15.78 NA\n5 5 NA 16.05 5.75 12.55 4.90 14.60 31.42 32.73\n6 6 NA NA 22.57 NA 16.26 3.53 11.12 NA\n7 7 NA NA NA 17.70 23.72 16.32 5.12 1.62\n8 8 NA NA NA NA NA 14.08 NA 3.68\n(8,25] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 5.03 9.60 43.10 56.47 NA NA NA NA\n2 2 8.64 3.64 12.70 30.59 16.81 NA NA NA\n3 3 NA 12.63 3.99 20.40 0.81 NA NA 47.20\n4 4 NA 28.42 11.86 5.37 18.26 5.26 NA NA\n5 5 NA NA 9.44 21.48 3.87 16.95 33.70 33.92\n6 6 NA NA 19.20 NA 12.00 5.55 15.05 NA\n7 7 NA NA NA 27.70 28.58 23.19 4.98 NA\n8 8 NA NA NA NA NA NA NA 1.53\n"
]
],
[
[
"#### Cross-validated 2 year partial information",
"_____no_output_____"
]
],
[
[
"L3 = np.loadtxt('param/gi_2.0_ratios.dat')\nL4 = np.loadtxt('param/pdc_2.0_ratios.dat')\nL5 = np.loadtxt('param/hrt_2.0_ratios.dat')\nL6 = np.loadtxt('param/pv1_2.0_ratios.dat')\nL7 = np.loadtxt('param/rp_2.0_ratios.dat')\n\nX3 = (L3 - L3.mean()) / np.std(L3)\nX4 = (L4 - L4.mean()) / np.std(L4)\nX5 = (L5 - L5.mean()) / np.std(L5)\nX6 = (L6 - L6.mean()) / np.std(L6)\nX7 = (L7 - L7.mean()) / np.std(L7)\n\nXX = [X1, X2, X3, X4, X5, X6, X7]\nW0 = np.zeros((P.shape[1], len(XX) + 1))\n\nnfolds = 10\naccs = []\nWW = []\nsols = []\nQfinal = np.zeros_like(P)\n\nfor train, test in KFold(P.shape[0], nfolds, shuffle=True, random_state=0):\n print('Starting new fold.')\n f2 = lambda w: -multinom_cost2(w.reshape(W0.shape), P[train], [Xi[train] for Xi in XX])\n g2 = lambda w: -multinom_grad2(w.reshape(W0.shape), P[train], [Xi[train] for Xi in XX]).ravel()\n s2 = opt.minimize(f2, W0.ravel(), jac=g2, method='BFGS')\n sols.append(s2)\n W = s2.x.reshape(W0.shape)\n WW.append(W)\n Qfinal[test] = multinom_pred2(W, P[test], [Xi[test] for Xi in XX])\n accs.append(np.mean(np.argmax(P[test], axis=1) == np.argmax(Qfinal[test], axis=1)))",
"Starting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\nStarting new fold.\n"
],
[
"np.mean(np.argmax(P, axis=1) == np.argmax(Qfinal, axis=1))",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(Q1, axis=1))",
"_____no_output_____"
],
[
"confusion_matrix(np.argmax(P, axis=1), np.argmax(Qfinal, axis=1))",
"_____no_output_____"
],
[
"subtypes = pd.read_csv('benchmark_pfvc_subtypes.csv')\nsubtypes['subtype'] = np.argmax(Qfinal, axis=1) + 1\nsubtypes.to_csv('benchmark_pfvc_1y_subtypes_multinom6.csv', index=False)",
"_____no_output_____"
],
[
"!Rscript score_predictions.R benchmark_pfvc_1y_subtypes_multinom6.csv",
"Loading required package: methods\nSource: local data frame [4 x 2]\n\n bin mae\n1 (1,2] 4.55\n2 (2,4] 6.57\n3 (4,8] 8.71\n4 (8,25] 10.69\nSource: local data frame [8 x 5]\n\n true_subtype (1,2] (2,4] (4,8] (8,25]\n1 1 5.68 9.47 7.15 16.67\n2 2 3.59 4.37 5.56 6.34\n3 3 4.23 4.94 6.67 7.19\n4 4 4.22 6.71 9.89 12.47\n5 5 5.10 6.63 10.52 16.46\n6 6 4.73 8.24 10.27 9.21\n7 7 5.03 8.89 10.70 16.22\n8 8 5.38 6.38 7.98 1.53\nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.60 8.80 43.35 52.91 NA NA NA NA\n2 2 8.33 3.34 8.36 11.52 11.93 NA NA NA\n3 3 NA 10.19 3.72 11.33 3.09 NA 24.22 NA\n4 4 NA 16.59 8.22 3.15 13.02 4.46 11.46 NA\n5 5 NA 15.29 5.97 10.69 4.79 8.72 24.03 29.78\n6 6 NA NA 21.57 NA 11.88 4.45 12.24 13.86\n7 7 NA 38.78 10.08 16.73 20.64 11.00 3.72 NA\n8 8 NA NA NA NA NA 9.23 9.34 4.23\n(1,2] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 1.59 5.43 43.75 NA NA NA NA NA\n2 2 6.47 2.77 4.74 7.99 5.92 NA NA NA\n3 3 NA 5.78 3.28 4.53 5.69 NA 20.85 NA\n4 4 NA 7.04 5.99 2.38 6.13 4.06 5.37 NA\n5 5 NA 12.19 2.58 2.83 3.59 6.82 9.27 20.86\n6 6 NA NA 22.90 NA 6.84 3.15 3.76 8.68\n7 7 NA 38.67 8.19 13.87 10.41 5.40 2.73 NA\n8 8 NA NA NA NA NA 4.92 9.10 4.56\n(2,4] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.52 9.24 43.53 38.68 NA NA NA NA\n2 2 7.51 3.17 6.65 11.00 7.56 NA NA NA\n3 3 NA 8.01 3.24 8.20 2.37 NA 30.95 NA\n4 4 NA 13.20 8.05 2.97 9.68 3.71 10.27 NA\n5 5 NA 16.08 3.18 5.14 4.37 8.24 14.11 24.32\n6 6 NA NA 23.06 NA 12.16 3.86 10.40 13.98\n7 7 NA 38.99 11.96 13.46 17.51 10.68 3.27 NA\n8 8 NA NA NA NA NA 9.50 9.87 5.06\n(4,8] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.28 10.04 43.09 NA NA NA NA NA\n2 2 9.54 3.49 9.68 15.82 9.70 NA NA NA\n3 3 NA 10.60 3.91 12.18 2.49 NA NA NA\n4 4 NA 17.99 8.90 3.32 14.48 2.93 14.67 NA\n5 5 NA 15.67 5.26 10.76 5.48 10.13 25.00 33.58\n6 6 NA NA 22.57 NA 15.55 4.71 16.00 15.95\n7 7 NA NA NA 16.12 22.95 16.32 4.61 NA\n8 8 NA NA NA NA NA 14.80 8.93 3.53\n(8,25] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 5.03 9.60 43.10 56.47 NA NA NA NA\n2 2 8.64 3.73 12.00 NA 16.81 NA NA NA\n3 3 NA 13.14 4.18 18.41 NA NA NA NA\n4 4 NA 26.02 11.15 4.35 17.37 5.26 16.45 NA\n5 5 NA 16.05 7.50 18.47 6.29 13.30 32.38 34.92\n6 6 NA NA 19.20 NA 11.61 5.99 22.89 21.48\n7 7 NA NA NA 27.70 29.10 23.19 5.32 NA\n8 8 NA NA NA NA NA NA NA 1.53\n"
]
],
[
[
"### Conclusions and Next Steps\n\nOne surprising result is that the accuracy of the MAP estimate does not necessarily translate into unchanged classification performance. The MAP accuracies are not too different, but we see some reasonable improvements in prediction accuracy.\n\nThere are a few outlying issues here.\n\n1. What is the best way to train the generative models used to compute the likelihood ratios in the discriminatively trained component?\n\n2. Is matching the full information posterior the best way to learn the adjustment? Are there other objectives that we should consider when tuning? E.g. should we try to maximize the probability of future measurements instead?\n\n3. Should the learning task be broken up? Estimating a multinomial distribution seems difficult, especially when working with a relatively constrained parameterization. Is there a way that we can find a better way to leverage the information? For example, should we estimate partitions of the subtypes instead?\n\n#### Additional outstanding issues\n\n1. Should we be using different sets of weights for each likelihood ratio model? Or should they be the same? This issue does not come up in the case where we adjust the weighing of evidence to decide between two classes, so it's not clear how to proceed.\n\n2. Can the problem be thought of as one where the a priori probabilities are changing? There is a method that looks at how to adjust a classifier when the a priori probability of classes are different in the test set than in the training set.",
"_____no_output_____"
],
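[
"# Sketch of the prior-adjustment idea from point 2 above. Assumption: this follows the\n# usual recipe of rescaling posteriors by the ratio of new to training class priors and\n# renormalizing; it was not run as part of the original analysis. Q1 and P come from\n# earlier cells, and prior_new is a purely hypothetical deployment-time class distribution.\nprior_train = np.bincount(np.argmax(P, axis=1), minlength=P.shape[1]) / P.shape[0]\nprior_new = np.ones(P.shape[1]) / P.shape[1]  # hypothetical: uniform priors at test time\nQ_adj = Q1 * (prior_new / prior_train)\nQ_adj /= Q_adj.sum(axis=1)[:, np.newaxis]\nnp.mean(np.argmax(P, axis=1) == np.argmax(Q_adj, axis=1))",
"_____no_output_____"
],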
[
"### Bayesian Decision Theory Approach",
"_____no_output_____"
]
],
[
[
"C = confusion_matrix(np.argmax(P, axis=1), np.argmax(Q1, axis=1))\nC",
"_____no_output_____"
],
[
"z_true = np.argmax(P, axis=1)\nz_pred = np.argmax(Q1, axis=1)",
"_____no_output_____"
],
[
"X = np.c_[ np.ones(Q1.shape[0]), StandardScaler().fit_transform(Q1)[:, 1:] ]\nY = P.copy()\n\ndef make_opt_problem(predicted, z_pred=z_pred, X=X, Y=Y):\n i = z_pred == predicted\n Xi = X[i]\n Yi = Y[i]\n W0 = np.random.normal(size=(Yi.shape[1], Xi.shape[1]))\n f = lambda w: -sum(softmax.regression_ll(x, y, w.reshape(W0.shape)) for x, y in zip(Xi, Yi))\n g = lambda w: -sum(softmax.regression_ll_grad(x, y, w.reshape(W0.shape)) for x, y in zip(Xi, Yi)).ravel()\n return W0, f, g",
"_____no_output_____"
],
[
"solutions = []\nfor i in range(Y.shape[1]):\n print('Fitting model {}'.format(i))\n W0, f, g = make_opt_problem(i)\n s = opt.minimize(f, W0.ravel(), jac=g, method='BFGS')\n solutions.append(s)",
"Fitting model 0\nFitting model 1\nFitting model 2\nFitting model 3\nFitting model 4\nFitting model 5\nFitting model 6\nFitting model 7\n"
],
[
"[s.success for s in solutions]",
"_____no_output_____"
],
[
"z_adju = z_pred.copy()\nfor i, s in enumerate(solutions):\n if not s.success:\n continue\n \n W = s.x.reshape((len(solutions), -1))\n i = z_pred == i\n Q = np.array([softmax.regression_proba(x, W) for x in X[i]])\n z_adju[i] = np.argmax(Q, axis=1)",
"_____no_output_____"
],
[
"np.mean(z_true == z_pred)",
"_____no_output_____"
],
[
"confusion_matrix(z_true, z_pred)",
"_____no_output_____"
],
[
"np.mean(z_true == z_adju)",
"_____no_output_____"
],
[
"confusion_matrix(z_true, z_adju)",
"_____no_output_____"
],
[
"W = s.x.reshape(W0.shape)",
"_____no_output_____"
],
[
"Q = np.array([softmax.regression_proba(x, W) for x in X])",
"_____no_output_____"
],
[
"Q.shape",
"_____no_output_____"
],
[
"np.argmax(Q, axis=1)",
"_____no_output_____"
],
[
"(z_true[i] == np.argmax(Q, axis=1)).mean()",
"_____no_output_____"
],
[
"(z_true[i] == z_pred[i]).mean()",
"_____no_output_____"
],
[
"z_tmp = z_pred.copy()\nz_tmp[i] = np.argmax(Q, axis=1)\nconfusion_matrix(z_true, z_tmp)",
"_____no_output_____"
],
[
"z_pred1 = np.argsort(Q1, axis=1)[:, -1]\nz_pred2 = np.argsort(Q1, axis=1)[:, -2]",
"_____no_output_____"
],
[
"correct = z_true == z_pred1\nz_comb = z_pred1.copy()\nz_comb[~correct] = z_pred2[~correct]",
"_____no_output_____"
],
[
"(z_comb == z_true).mean()",
"_____no_output_____"
],
[
"confusion_matrix(z_true, z_comb)",
"_____no_output_____"
],
[
"subtypes = pd.read_csv('benchmark_pfvc_subtypes.csv')\nsubtypes['subtype'] = z_comb + 1\nsubtypes.to_csv('benchmark_pfvc_1y_subtypes_combined.csv', index=False)",
"_____no_output_____"
],
[
"!Rscript score_predictions.R benchmark_pfvc_1y_subtypes_combined.csv",
"Loading required package: methods\nSource: local data frame [4 x 2]\n\n bin mae\n1 (1,2] 4.41\n2 (2,4] 5.31\n3 (4,8] 7.16\n4 (8,25] 8.19\nSource: local data frame [8 x 5]\n\n true_subtype (1,2] (2,4] (4,8] (8,25]\n1 1 6.35 8.62 6.73 13.28\n2 2 3.46 3.56 4.44 5.84\n3 3 4.09 4.34 5.27 5.79\n4 4 4.09 4.91 7.11 7.72\n5 5 4.93 6.34 8.94 9.85\n6 6 4.30 5.64 7.58 10.63\n7 7 5.36 7.16 12.43 13.88\n8 8 4.30 5.54 3.48 1.53\nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.25 NA 16.09 NA 44.00 NA NA NA\n2 2 NA 3.20 8.81 12.49 14.78 36.26 NA NA\n3 3 19.12 6.00 3.60 12.95 2.36 24.89 NA NA\n4 4 26.12 15.55 10.34 3.92 7.46 4.33 18.26 NA\n5 5 NA NA 6.73 18.90 4.27 14.99 25.58 31.99\n6 6 NA NA NA 4.58 17.02 3.67 12.98 NA\n7 7 NA 38.65 14.35 9.06 19.92 10.16 3.71 NA\n8 8 NA NA NA NA NA 18.26 13.02 3.68\n(1,2] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 1.88 NA 7.87 NA 57.06 NA NA NA\n2 2 NA 2.71 3.26 9.45 16.04 32.88 NA NA\n3 3 10.76 1.51 3.27 10.32 2.36 26.40 NA NA\n4 4 16.74 10.85 6.70 2.76 8.99 3.68 7.66 NA\n5 5 NA NA 2.46 6.09 3.22 13.15 13.95 23.09\n6 6 NA NA NA 5.72 9.46 2.80 3.38 NA\n7 7 NA 19.42 11.24 8.47 12.76 5.54 2.70 NA\n8 8 NA NA NA NA NA 18.41 13.02 3.25\n(2,4] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 3.21 NA 15.36 NA 45.66 NA NA NA\n2 2 NA 3.11 7.21 13.06 9.48 36.94 NA NA\n3 3 14.43 3.96 3.08 7.36 NA 28.03 NA NA\n4 4 23.64 13.13 7.89 3.32 7.14 3.55 14.15 NA\n5 5 NA NA 1.96 11.09 4.08 13.19 19.99 27.30\n6 6 NA NA NA 5.27 16.98 3.32 9.58 NA\n7 7 NA 27.63 17.45 8.73 19.70 6.20 3.12 NA\n8 8 NA NA NA NA NA 18.18 NA 4.67\n(4,8] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 2.77 NA 22.07 NA 47.94 NA NA NA\n2 2 NA 3.27 10.24 17.43 16.80 36.33 NA NA\n3 3 20.58 7.40 3.74 11.19 NA 23.67 NA NA\n4 4 29.99 18.10 11.22 4.55 3.27 2.93 21.31 NA\n5 5 NA NA 3.13 16.58 4.72 17.00 30.14 34.01\n6 6 NA NA NA 2.64 19.83 3.78 17.29 NA\n7 7 NA 44.20 NA 10.85 24.44 10.27 4.71 NA\n8 8 NA NA NA NA NA NA NA 3.48\n(8,25] \nSource: local data frame [8 x 9]\n\n true_subtype 1 2 3 4 5 6 7 8\n1 1 4.74 NA NA NA 40.33 NA NA NA\n2 2 NA 3.57 10.84 NA NA 36.51 NA NA\n3 3 26.46 8.99 4.09 18.41 NA 21.73 NA NA\n4 4 NA 24.94 15.65 5.10 7.75 5.26 23.26 NA\n5 5 NA NA 9.44 23.60 4.88 17.38 35.68 37.11\n6 6 NA NA NA NA 18.86 5.14 22.89 NA\n7 7 NA 55.05 NA NA NA 15.49 5.23 NA\n8 8 NA NA NA NA NA NA NA 1.53\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec8ff4977c6342450ac178dac83efdf491dec32e | 63,312 | ipynb | Jupyter Notebook | DataAnalysis/relation_between_facet_angle_and_erosion_rate.ipynb | gregtucker/facetmorphology | 0bad54943b825f2742ebc3ee4e82a7d2f47ed2d7 | [
"MIT"
] | null | null | null | DataAnalysis/relation_between_facet_angle_and_erosion_rate.ipynb | gregtucker/facetmorphology | 0bad54943b825f2742ebc3ee4e82a7d2f47ed2d7 | [
"MIT"
] | null | null | null | DataAnalysis/relation_between_facet_angle_and_erosion_rate.ipynb | gregtucker/facetmorphology | 0bad54943b825f2742ebc3ee4e82a7d2f47ed2d7 | [
"MIT"
] | null | null | null | 88.301255 | 14,372 | 0.826447 | [
[
[
"# Notes on the relationship between erosion rate and facet angle\n\nBuilding on Tucker et al. (2011), consider the cross-sectional profile of a facet with a uniform dip angle $\\theta$. The dip angle of the fault is $\\alpha$ and its slip rate is $V$. We want to know what is the implied *vertical* erosion rate for a given $\\theta$, $\\alpha$, and $V$.\n\nConsider a location in the footwall at a horizontal distance $x$ from the fault trace. The time $t$ taken for rock to reach that location from the fault trace is equal to the distance, $x$, divided by the horizontal component of slip rate, $v_h$. This in turn implies that\n\n$x = v_h t$.\n\nBy simple geometry, the horizontal component of slip rate is\n\n$v_h = V \\cos \\alpha$.\n\nIf $\\alpha = 60^\\circ$, as we'll generally assume here, then $v_h = V / 2$.\n\nWe can imagine a triangle formed by the cumulative fault slip $Vt$ (the hypotenuse), the horizontal component $v_h = V\\cos\\alpha$, and the vertical component $v_v = V\\sin\\alpha$. Thus, the three sides of the triangle are $Vt$ (hypotenuse), $Vt\\cos\\alpha$ (base), and $Vt\\sin\\alpha$ (height). If $\\alpha=60^\\circ$, the sides of the triangle are $Vt$, $Vt/2$, and $\\sqrt{3}Vt/2$, respectively.\n\nNow consider the case when the facet dip is less than that of the fault dip. This implies that at horizontal position $x = v_h t$, there is an uneroded vertical column of rock of height $H$, where $\\tan\\theta = H/x$, and therefore\n\n$H = x\\tan\\theta = v_h t \\frac{\\sin\\theta}{\\cos\\theta}$\n\n$=V t \\frac{\\cos\\alpha\\sin\\theta}{\\cos\\theta}$.\n\nAs a check on this math, note that if $\\theta = 0$, then $H=0$ (the surface is flat), whereas if $\\theta = \\alpha$, then we simply have the height of original (uneroded) triangle $Vt\\sin\\alpha$.\n\nThe cumulative erosion depth $h_e$ is the difference between the total column height if there were no erosion, and the actual column height:\n\n$h_e = Vt\\sin\\alpha - H = Vt\\sin\\alpha - Vt\\frac{\\cos\\alpha\\sin\\theta}{\\cos\\theta}$\n\n$= Vt \\left( \\sin\\alpha - \\frac{\\cos\\alpha\\sin\\theta}{\\cos\\theta} \\right)$.\n\nThe cumulative vertical erosion $h_e$ at position $x$ is also equal to the vertical erosion rate $E_v$ times the elapsed time, $t$, which implies\n\n$E_v = V \\left( \\sin\\alpha - \\frac{\\cos\\alpha\\sin\\theta}{\\cos\\theta} \\right)$.\n\nWe can do a basic sanity check. If $\\theta = \\alpha$ then the erosion rate should be zero, as it is above (the quantity in parentheses is zero). If $\\theta = 0$, then the vertical erosion rate should equal the vertical component of slip. And indeed because the right-hand term in parentheses is zero when $\\theta=0$, we have $E_v = V\\sin\\alpha$, which is the vertical component of slip rate.\n\nBelow, we plot this relationship, and look at some example scenarios.",
"_____no_output_____"
]
],
[
[
"def normalized_ero_rate(theta, alpha=60.0):\n alphar = np.radians(alpha)\n thetar = np.radians(theta)\n return np.sin(alphar) - np.cos(alphar) * (np.sin(thetar)\n / np.cos(thetar))",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"alpha = 60.0\ntheta = np.arange(0, 61)\nEvV = normalized_ero_rate(theta, alpha)",
"_____no_output_____"
],
[
"plt.plot(theta, EvV)\nplt.xlabel(r'$\\theta$ (degrees)')\nplt.ylabel(r'$E_n / V$')\nplt.grid(True)",
"_____no_output_____"
]
],
[
[
"As expected, when the facet and fault dip are the same, the erosion rate is zero. When the facet angle is zero, meaning the surface is flat, erosion is vertical and its rate is equal to the vertical component of the slip rate. That vertical component is:\n\n$v_v = V\\sin\\alpha$, which for $\\alpha = 60^\\circ$ again is $(\\sqrt{3}/2) V$:",
"_____no_output_____"
]
],
[
[
"normalized_ero_rate(0.0)",
"_____no_output_____"
]
],
[
[
"And for $\\alpha = 30^\\circ$:",
"_____no_output_____"
]
],
[
[
"normalized_ero_rate(30.0)",
"_____no_output_____"
]
],
[
[
"Suppose, for example, the slip rate is $\\sqrt{3}$ mm/yr. (Because of the grid geometry in the GrainFacet model, this is rate one gets using $\\delta=1$ m and $\\tau = 1,000$ yr.) Then for a flat slope, the vertical component of slip rate, $v_v$, and the vertical erosion rate, $E_v$, should be identical. Using the result above, we can calculate $v_v$ as:",
"_____no_output_____"
]
],
[
[
"V = np.sqrt(3)\nV * np.sin(np.radians(60.0))",
"_____no_output_____"
]
],
[
[
"And we can calculate $E_v$ as:",
"_____no_output_____"
]
],
[
[
"V * normalized_ero_rate(0.0, 60.0)",
"_____no_output_____"
]
],
[
[
"Indeed we see they are the same, and the maximum erosion rate for this particular scenario is 1.5 mm/yr.\n\nConsider next the example of a $30^\\circ$ facet. Here the vertical erosion rate corresponding to the above slip rate would be:",
"_____no_output_____"
]
],
[
[
"V * normalized_ero_rate(30.0, 60.0)",
"_____no_output_____"
]
],
[
[
"So, for a $30^\\circ$ facet, the erosion rate is 2/3 of the theoretical maximum (i.e., what it would be for a flat \"facet\", where any uplifted material is immediately shaved off).\n\nJust for completeness, we can look at the cases of $15^\\circ$ ...",
"_____no_output_____"
]
],
[
[
"V * normalized_ero_rate(15.0, 60.0)",
"_____no_output_____"
]
],
[
[
"... and $45^\\circ$ ...",
"_____no_output_____"
]
],
[
[
"V * normalized_ero_rate(45.0, 60.0)",
"_____no_output_____"
]
],
[
[
"## Slope-normal erosion rate\n\nThe vertical and slope-normal erosion rates are related by:\n\n$\\frac{E_n}{E_v} = \\cos \\theta$.\n\nIf we substitute this above, we find that:\n\n$E_n = V \\left( \\sin\\alpha\\cos\\theta- \\cos\\alpha\\sin\\theta \\right)$.\n\nWe plot this below:",
"_____no_output_____"
]
],
[
[
"theta = np.arange(0, 61)\nthetar = np.radians(theta)\nalphar = np.radians(60.0)\nEnV = np.sin(alphar) * np.cos(thetar) - np.cos(alphar) * np.sin(thetar)\nplt.plot(theta, EnV)\nplt.xlabel(r'$\\theta$ (degrees)')\nplt.ylabel(r'$E_n / V$')\nplt.grid(True)",
"_____no_output_____"
]
],
[
[
"For $\\theta = \\alpha = 60^\\circ$, there is no erosion and the fault plane is unmodified. For $\\theta = 0$, $E_n = E_v = \\sqrt{3} / 2$.\n\nNote that this differs from the result in Tucker et al. (2011), which reported the case of an arc-wise erosion vector.\n\nWe can simplify the above formula by using two trigonometric identities:\n\n$\\sin \\alpha \\cos \\theta = (1/2) (\\sin (a+b) + \\sin (a-b) )$, and\n\n$\\cos \\alpha \\sin \\theta = (1/2) (\\sin (a+b) - \\sin (a-b) )$.\n\nSubtracting the two, as in the above equation, we get:\n\n$E_n / V = \\sin (\\alpha - \\theta )$.\n\nLet's just verify it by plotting:\n\n",
"_____no_output_____"
]
],
[
[
"EnV = np.sin(alphar - thetar)\nplt.plot(theta, EnV)\nplt.xlabel(r'$\\theta$ (degrees)')\nplt.ylabel(r'$E_n / V$')\nplt.grid(True)",
"_____no_output_____"
]
],
[
[
"Voila: c'est la meme truc.",
"_____no_output_____"
],
[
"## How to calculate vertical erosion rate from a facet simulation\n\nSuppose you have a snapshot from a GrainFacet simulation run that is in quasi-steady state with respect to tectonic forcing. How do you calculate the erosion rate? It is not as simple as it is for models with \"vertical uplift\" because mass can escape due to tectonic slip out the right side and possibly the top. However, we can use the basic theory above to calculate an effective erosion rate for each model column, and then average them together. Here's how it works.\n\n### Horizontal position of a column\n\nLet $c$ denote the column number in a run, starting from column zero at $x=0$. The spacing between columns is $\\sqrt{3}/2$ cell widths. Thus, the $x$ coordinate, in cell widths, is:\n\n$x(c) = \\frac{\\sqrt{3}}{2} c$.\n\n### Height of a column\n\nWe can ask: what is the effective height of the column at that location? It's easy enough to count the number of nonzero cells in the column. We'll call this count $N(c)$. But how does that translate into height? First, let's assume that the land surface lies at the *center* of the top-most cell (of course, in plots, it *looks* like the surface is at the top of the filled cell, but this assumption is more consistent with how the fault is approximated in the model).\n\nIf we happen to be in an even-numbered column, the center of the bottom-most cell is at $y=0$, and so the center of the top cell is $N-1$. But if we happen to be in an odd-numbered column, the column is shifted up by half a cell width. So for these, the center of the top cell is $N-1/2$. We can generalize as follows to get the height, $H(c)$, of any arbitrary column:\n\n$H(c) = N - \\frac{1}{1 + (c \\mod 2)}$\n\nThis expression sets $H = N - 1$ for even columns, and $H = N - 0.5$ for odd columns.\n\n### Calculating erosion rate at a column\n\nTo calculate the erosion rate at a single column, we start with the expression for $E_v$ derived above:\n\n$E_v / V = \\sin\\alpha - \\frac{\\cos\\alpha\\sin\\theta}{\\cos\\theta}$.\n\nUsing $\\alpha = 60^\\circ$, and using numerical values of $\\sin\\alpha$ and $\\cos\\alpha$, this becomes:\n\n$E_v / V = \\frac{\\sqrt{3}}{2} - \\frac{1}{2} \\frac{\\sin\\theta}{\\cos\\theta}$.\n\nNoting that $\\sin\\theta / \\cos\\theta = \\tan\\theta = H / x$, we have:\n\n$\\frac{E_v}{V} = \\frac{\\sqrt{3}}{2} - \\frac{H}{2x}$.\n\nThis is the formula we use to calculate vertical erosion rate, $E_v$, from column height, $H$, and horizontal position $x$.\n\nThe calculation is encapsulated in the following function:",
"_____no_output_____"
]
],
[
[
"def vert_ero_rate(c, N):\n \"\"\"Normalized vertical erosion rate in column c with N solid cells.\n\n Parameters\n ----------\n c : int\n Column number\n N : int\n Number of solid cells in column\n \"\"\"\n x = 0.5 * np.sqrt(3) * c\n if N <= 1: # if only one cell, assume it's totally eroded\n H = 0.0\n else:\n H = N - 1.0 / (1.0 + (c % 2))\n return 0.5 * np.sqrt(3) - 0.5 * H / x",
"_____no_output_____"
]
],
[
[
"### Examples\n\nSuppose you have only 1 cell in column 2. That's the minimum number of cells possible in the numerical model: the bottom-most cell is a boundary, and always contains rock. The erosion rate should equal the vertical component of slip rate, so $E_v/V$ should equal $\\sin 60^\\circ = \\sqrt{3}/2 \\approx 0.866$. Let's try it:",
"_____no_output_____"
]
],
[
[
"vert_ero_rate(2, 1)",
"_____no_output_____"
]
],
[
[
"The result should be the same in odd-numbered columns, say column 7:",
"_____no_output_____"
]
],
[
[
"vert_ero_rate(7, 1)",
"_____no_output_____"
]
],
[
[
"If we had a $60^\\circ$ slope, columns 3 and 4 would have 5 and 7 solid cells, respectively, and the erosion rate should be zero:",
"_____no_output_____"
]
],
[
[
"vert_ero_rate(3, 5)",
"_____no_output_____"
],
[
"vert_ero_rate(4, 7)",
"_____no_output_____"
]
],
[
[
"The latter result tells us there is a slight numerical inaccuracy: probably a subtraction-of-close-numbers error. To diagnose it, let's see how $H/x$ compares with $\\sqrt{3}$:",
"_____no_output_____"
]
],
[
[
"c = 4\nN = 7\nH = N - 1.0 / (1.0 + (c % 2))\nx = 0.5 * np.sqrt(3) * c\nprint('Square root of 3 is approxately: ' + str(np.sqrt(3)))\nprint('H is: ' + str(H))\nprint('x is: ' + str(x))\nprint('H / x is: ' + str(H/x))",
"Square root of 3 is approxately: 1.7320508075688772\nH is: 6.0\nx is: 3.4641016151377544\nH / x is: 1.7320508075688774\n"
]
],
[
[
"We see that that there is a slight numerical error in $H/x$: it is off in the 16th decimal place (hence an error of order $10^{-16}$). This seems perfectly acceptable.\n\nAs a last example, consider columns 5 and 6 on a $30^\\circ$ facet. The number of filled cells would be 3 and 4, respectively. From the analysis above, we expect a vertical erosion rate of $\\sqrt{3}/3$, which is about:",
"_____no_output_____"
]
],
[
[
"np.sqrt(3.0)/3.0",
"_____no_output_____"
],
[
"vert_ero_rate(5, 3)",
"_____no_output_____"
],
[
"vert_ero_rate(6, 4)",
"_____no_output_____"
]
],
[
[
"## Summary\n\nTo summarize, the method for calculating average vertical erosion rate in each column (from columns 2 to the right-most inner column) of a snapshop of a GrainFacet model run consists of the following steps:\n\n(1) Calculate the position of the column: $x(c) = \\sqrt{3} c / 2$.\n\n(2) Calculate the height of the column: $H(N, c) = N - \\frac{1}{1 + (c \\mod 2)}$, if $N>1$, and $H = 0$ otherwise.\n\n(3) Calculate the column's erosion rate: $E_v = \\frac{\\sqrt{3}\\delta}{\\tau} \\left( \\frac{\\sqrt{3}}{2} - \\frac{H}{2x} \\right)$.\n\n(Note that step 3 includes that $V = \\sqrt{3}\\delta / \\tau$, where $\\delta$ is cell width, $\\tau$ is the slip interval, and $\\sqrt{3}\\delta$ is the slip distance.)\n\nOnce the individual columns have been calculated, their vertical erosion rates are averaged.",
"_____no_output_____"
]
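[
"# Minimal sketch of the three-step recipe above. Assumptions: N_cells is a stand-in for\n# the number of solid cells counted in each interior column of a model snapshot (no model\n# output is loaded in this notebook), and delta and tau are placeholder values for the\n# cell width and slip interval.\ndef mean_vert_ero_rate(N_cells, delta=1.0, tau=1000.0, first_col=2):\n    \"\"\"Average vertical erosion rate over columns, using vert_ero_rate() defined above.\"\"\"\n    V = np.sqrt(3.0) * delta / tau  # slip rate implied by cell width and slip interval\n    cols = np.arange(first_col, first_col + len(N_cells))\n    rates = [V * vert_ero_rate(c, N) for c, N in zip(cols, N_cells)]\n    return np.mean(rates)\n\n# Column counts for an exact 30-degree facet (columns 2 through 9); the result should be\n# (sqrt(3)/3) * V, consistent with the 30-degree example worked out earlier.\nmean_vert_ero_rate([2, 2, 3, 3, 4, 4, 5, 5])",
"_____no_output_____"
]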
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
ec8ff9d686ab1bf6383f5e9f64edc20347fa601b | 20,777 | ipynb | Jupyter Notebook | FirstApplication_python.ipynb | hsinlun0415/FirstApplication_python | bd17d733fc0f14d8473b2520ab4147cb1779e771 | [
"Apache-2.0"
] | null | null | null | FirstApplication_python.ipynb | hsinlun0415/FirstApplication_python | bd17d733fc0f14d8473b2520ab4147cb1779e771 | [
"Apache-2.0"
] | null | null | null | FirstApplication_python.ipynb | hsinlun0415/FirstApplication_python | bd17d733fc0f14d8473b2520ab4147cb1779e771 | [
"Apache-2.0"
] | null | null | null | 96.189815 | 9,612 | 0.842999 | [
[
[
"from sklearn.linear_model import LinearRegression\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"df = pd.read_csv(\"regrex1.csv\")\nprint(df)",
" y x\n0 6.8102 9.8492\n1 9.8437 11.1450\n2 4.9767 5.1209\n3 5.0006 8.1085\n4 3.5047 3.5599\n5 3.6419 1.9883\n6 7.8725 15.5322\n7 5.1442 7.5202\n8 8.3883 14.0202\n9 6.8204 7.5226\n10 9.0185 14.3272\n11 2.6148 3.8950\n12 3.7367 1.8707\n13 2.8651 0.8347\n14 10.7469 18.3413\n15 2.3710 1.6645\n16 9.6653 17.0579\n17 7.1926 9.2375\n18 11.7833 21.0000\n19 3.9537 3.5286\n20 15.0000 25.0000\n21 7.9152 9.9008\n22 9.3368 17.5990\n23 8.7587 13.8790\n24 6.7813 10.8690\n25 7.9924 12.6238\n26 2.2806 0.9719\n27 10.4050 14.4814\n28 3.7891 2.1792\n29 10.9067 18.4052\n"
],
[
"plt.scatter(x=df.x, y=df.y)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()",
"_____no_output_____"
],
[
"linear_regressor = LinearRegression()\n\nX = df.iloc[:, 1].values.reshape(-1, 1)\nY = df.iloc[:, 0].values.reshape(-1, 1)\n\nlinear_regressor.fit(X, Y)",
"_____no_output_____"
],
[
"Y_pred = linear_regressor.predict(X)\nprint(Y_pred)",
"[[ 6.86803101]\n [ 7.47584191]\n [ 4.65016382]\n [ 6.05153429]\n [ 3.91795759]\n [ 3.18077931]\n [ 9.53371196]\n [ 5.77558495]\n [ 8.82448979]\n [ 5.7767107 ]\n [ 8.96849191]\n [ 4.07514036]\n [ 3.12561759]\n [ 2.63966906]\n [10.85135482]\n [ 3.02889694]\n [10.24936029]\n [ 6.58110561]\n [12.09845071]\n [ 3.90327594]\n [13.97469984]\n [ 6.89223462]\n [10.50316989]\n [ 8.7582582 ]\n [ 7.34638072]\n [ 8.16949122]\n [ 2.70402441]\n [ 9.04082132]\n [ 3.2703233 ]\n [10.8813279 ]]\n"
],
[
"plt.scatter(X, Y)\nplt.plot(X, Y_pred, color='red')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec90021667dbc5431c845c17a2364ebcdace2d85 | 630,348 | ipynb | Jupyter Notebook | Mixed_layer_plot.ipynb | Sumanshekhar17/Mixed-Layer-Plot-using-Xarray | abe47f8c2faf950675367af0fe5182eed851dbf8 | [
"MIT"
] | 1 | 2021-07-28T04:17:44.000Z | 2021-07-28T04:17:44.000Z | Mixed_layer_plot.ipynb | Sumanshekhar17/Mixed-Layer-Plot-using-Xarray | abe47f8c2faf950675367af0fe5182eed851dbf8 | [
"MIT"
] | null | null | null | Mixed_layer_plot.ipynb | Sumanshekhar17/Mixed-Layer-Plot-using-Xarray | abe47f8c2faf950675367af0fe5182eed851dbf8 | [
"MIT"
] | null | null | null | 79.770691 | 62,952 | 0.635424 | [
[
[
"import numpy as np",
"_____no_output_____"
],
[
"import xarray as xr",
"_____no_output_____"
],
[
"# make dask cluster\nimport dask\nimport distributed\n\ncluster = distributed.LocalCluster()",
"/apps/pangeo/2021.01/lib/python3.7/site-packages/distributed/node.py:155: UserWarning: Port 8787 is already in use.\nPerhaps you already have a cluster running?\nHosting the HTTP server on port 41175 instead\n http_address[\"port\"], self.http_server.port\n"
],
[
"client = distributed.Client(cluster)\nclient",
"_____no_output_____"
],
[
"ds1 = xr.open_dataset(\n \"convection01_128full.nc\",\n chunks={\n \"xC\": 32,\n \"xF\": 32,\n \"yC\": 32,\n \"yF\": 32,\n \"zC\": 32,\n \"zF\": 32,\n \"time\": 10,\n }, # this tells xarray to open the dataset as a dask array\n)\nds1",
"_____no_output_____"
],
[
"ds2 = xr.open_dataset(\n \"convection16_128full.nc\",\n chunks={\n \"xC\": 32,\n \"xF\": 32,\n \"yC\": 32,\n \"yF\": 32,\n \"zC\": 32,\n \"zF\": 32,\n \"time\": 10,\n }, # this tells xarray to open the dataset as a dask array\n)\nds2",
"_____no_output_____"
],
[
"ds3 = xr.open_dataset(\n \"convection18_128full.nc\",\n chunks={\n \"xC\": 32,\n \"xF\": 32,\n \"yC\": 32,\n \"yF\": 32,\n \"zC\": 32,\n \"zF\": 32,\n \"time\": 10,\n }, # this tells xarray to open the dataset as a dask array\n)\nds3",
"_____no_output_____"
],
[
"ds4 = xr.open_dataset(\n \"convection18full.nc\",\n chunks={\n \"xC\": 32,\n \"xF\": 32,\n \"yC\": 32,\n \"yF\": 32,\n \"zC\": 32,\n \"zF\": 32,\n \"time\": 10,\n }, # this tells xarray to open the dataset as a dask array\n)\nds4",
"_____no_output_____"
],
[
"ds5 = xr.open_dataset(\n \"convection25full.nc\",\n chunks={\n \"xC\": 32,\n \"xF\": 32,\n \"yC\": 32,\n \"yF\": 32,\n \"zC\": 32,\n \"zF\": 32,\n \"time\": 10,\n }, # this tells xarray to open the dataset as a dask array\n)\nds5",
"_____no_output_____"
],
[
"#number of grid spacing in south,north and vertical direction\nNx=256\nNy=256\nNz=64\n\n#Length of grid in south, north and vertical direction\nLx=4\nLy=4\nLz=0.1\n\n#gravitational acceleration\ng=300\nalpha= 2e-4\nBo=3.6e-4 #maximum surface flux\n",
"_____no_output_____"
],
[
"x = (ds1['xC']-2)**2\ny = (ds1['yC']-2)**2\nradius = x+y ",
"_____no_output_____"
],
[
"radius.plot()",
"_____no_output_____"
],
[
"radius.where(radius<=1,0).plot()",
"_____no_output_____"
]
],
[
[
"## Vertical velocity with only values inside the cooling disk",
"_____no_output_____"
]
],
[
[
"%%time\nvertical_velocity1 = ds1['w'][:,:128,:,:].where(radius<=1,0)\nvertical_velocity1",
"CPU times: user 17.9 ms, sys: 2.31 ms, total: 20.2 ms\nWall time: 19.2 ms\n"
],
[
"%%time\nvertical_velocity1.isel(time=20,zF = 127).plot()",
"CPU times: user 368 ms, sys: 19.8 ms, total: 388 ms\nWall time: 484 ms\n"
],
[
"ds1['zF'][127]",
"_____no_output_____"
],
[
"%%time\nvertical_velocity2 = ds2['w'][:,:128,:,:].where(radius<=1,0)\nvertical_velocity3 = ds3['w'][:,:128,:,:].where(radius<=1,0)\nvertical_velocity4 = ds4['w'][:,:64,:,:].where(radius<=1,0)\nvertical_velocity5 = ds5['w'][:,:64,:,:].where(radius<=1,0)\n",
"CPU times: user 30.8 ms, sys: 2.67 ms, total: 33.4 ms\nWall time: 30.4 ms\n"
]
],
[
[
"## Modified temperature",
"_____no_output_____"
]
],
[
[
"modified_temp1 = (ds1['T']-ds1['T'].isel(time=0)).where(radius<=1,0)\nmodified_temp1.isel(time=65,xC = 128).plot()",
"_____no_output_____"
],
[
"%%time\nmodified_temp2 = (ds2['T']-ds2['T'].isel(time=0)).where(radius<=1,0)\nmodified_temp3 = (ds3['T']-ds3['T'].isel(time=0)).where(radius<=1,0)\nmodified_temp4 = (ds4['T']-ds4['T'].isel(time=0)).where(radius<=1,0)\nmodified_temp5 = (ds5['T']-ds5['T'].isel(time=0)).where(radius<=1,0)",
"CPU times: user 34 ms, sys: 11.1 ms, total: 45 ms\nWall time: 42.6 ms\n"
]
],
[
[
"## Calculating Buoyancy flux at each position",
"_____no_output_____"
]
],
[
[
"%%time\nbuo1 = vertical_velocity1*modified_temp1*g*alpha\nbuo1",
"CPU times: user 32.1 ms, sys: 10.5 ms, total: 42.7 ms\nWall time: 40.8 ms\n"
],
[
"%%time\nbuo2 = vertical_velocity2*modified_temp2*g*alpha\nbuo3 = vertical_velocity3*modified_temp3*g*alpha\nbuo4 = vertical_velocity4*modified_temp4*g*alpha\nbuo5 = vertical_velocity5*modified_temp5*g*alpha",
"CPU times: user 174 ms, sys: 17 ms, total: 191 ms\nWall time: 187 ms\n"
]
],
[
[
"## Average Vertical buoyancy flux ",
"_____no_output_____"
]
],
[
[
"avg_buoyancy1 = (buo1.sum(dim = 'xC').sum(dim = 'yC'))/12851\navg_buoyancy1",
"_____no_output_____"
],
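[
"# Sketch: the constant 12851 used above is presumably the number of horizontal grid cells inside\n# the cooling disk (radius <= 1); since values outside the disk were set to zero, sum / 12851 is\n# the average over the disk. The count can be checked directly from the mask.\nn_inside = int((radius<=1).sum())\nn_inside",
"_____no_output_____"
],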
[
"%%time\navg_buoyancy2 = (buo2.mean(dim = 'xC').mean(dim = 'yC'))/12851\navg_buoyancy3 = (buo3.mean(dim = 'xC').mean(dim = 'yC'))/12851\navg_buoyancy4 = (buo4.mean(dim = 'xC').mean(dim = 'yC'))/12851\navg_buoyancy5 = (buo5.mean(dim = 'xC').mean(dim = 'yC'))/12851",
"CPU times: user 73.3 ms, sys: 2.38 ms, total: 75.7 ms\nWall time: 73.6 ms\n"
]
],
[
[
"## accessing their diagonal element",
"_____no_output_____"
]
],
[
[
"(np.diagonal(avg_buoyancy1.values, offset=0, axis1=1, axis2=2)).shape",
"_____no_output_____"
],
[
"%%time\navg_buoyancy1 = np.diagonal(avg_buoyancy1.values, offset=0, axis1=1, axis2=2)\navg_buoyancy2 = np.diagonal(avg_buoyancy2.values, offset=0, axis1=1, axis2=2)\navg_buoyancy3 = np.diagonal(avg_buoyancy3.values, offset=0, axis1=1, axis2=2)\navg_buoyancy4 = np.diagonal(avg_buoyancy4.values, offset=0, axis1=1, axis2=2)\navg_buoyancy5 = np.diagonal(avg_buoyancy5.values, offset=0, axis1=1, axis2=2)",
"CPU times: user 1min 31s, sys: 5.2 s, total: 1min 36s\nWall time: 4min 24s\n"
],
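[
"# Alternative sketch for the diagonal step (illustrative names kz, _avg1, diag1 only): xarray's\n# pointwise (vectorized) indexing can take the zF/zC diagonal while the array is still lazy,\n# instead of materialising the full (time, zF, zC) array and calling np.diagonal on it.\nkz = xr.DataArray(np.arange(128), dims='z')\n_avg1 = (buo1.sum(dim = 'xC').sum(dim = 'yC'))/12851   # dims (time, zF, zC), still lazy\ndiag1 = _avg1.isel(zF=kz, zC=kz)                       # dims (time, z)\ndiag1",
"_____no_output_____"
],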
[
"avg_buoyancy1",
"_____no_output_____"
],
[
"time = ds1['time'].values\ndepth = ds1['zF'][:128].values\nda = xr.DataArray(\n data=avg_buoyancy1,\n dims=[\"time\",\"zC\"],\n coords=dict(\n depth=([\"zC\"], depth),\n time=time,\n ),\n attrs=dict(\n description=\"horizontal average buoyancy flux\"\n ),\n)\nda",
"_____no_output_____"
],
[
"MLD1 = []\nMLD2 = []\nMLD3 = []\nMLD4 = np.zeros(45)\nMLD5 = []",
"_____no_output_____"
],
[
"a = [(ds1['zF'][4].values),(ds1['zF'][5].values)]\na",
"_____no_output_____"
],
[
"for i in np.arange(101):\n for j in np.arange(128):\n if avg_buoyancy1[i,j] >= 0.001*Bo:\n print('Hi')\n MLD1.append(ds1['zF'][j])\n ",
"_____no_output_____"
],
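[
"# Sketch of a per-time-step variant (illustrative names only): the loop above collects every\n# qualifying depth from all 101 time steps into one flat list. Grouping the depths by time step\n# keeps the time structure; a single mixed-layer depth per step could then be chosen from each\n# group (e.g. the extreme value, depending on the sign convention of zF).\ndepth1 = ds1['zF'][:128].values\nMLD1_per_time = [depth1[avg_buoyancy1[i] >= 0.001*Bo] for i in np.arange(101)]\nlen(MLD1_per_time)",
"_____no_output_____"
],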
[
"MLD1",
"_____no_output_____"
],
[
"for i in np.arange(45):\n for j in np.arange(64):\n if avg_buoyancy4[i,j] >= 0.001*Bo:\n MLD4 = ds4['zF'][j].values\n ",
"_____no_output_____"
],
[
"MLD4",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ec9050c18197c2b04b3651f921bf32b8e2595e40 | 7,634 | ipynb | Jupyter Notebook | homework/03 Search, list functions/homework/wang_zhizhou_3_1&2.ipynb | barjacks/algorithms_mine | bc248ed9ebb88aed73c6e8da3d3b9553d9173cdd | [
"MIT"
] | null | null | null | homework/03 Search, list functions/homework/wang_zhizhou_3_1&2.ipynb | barjacks/algorithms_mine | bc248ed9ebb88aed73c6e8da3d3b9553d9173cdd | [
"MIT"
] | null | null | null | homework/03 Search, list functions/homework/wang_zhizhou_3_1&2.ipynb | barjacks/algorithms_mine | bc248ed9ebb88aed73c6e8da3d3b9553d9173cdd | [
"MIT"
] | null | null | null | 23.489231 | 210 | 0.494367 | [
[
[
"## Asignment 1\n* Implement the sorting algorithm you came up with in pseudocode with Python\n* Test the sorting algorithm with a list of 10, 100, 1000 random numbers and compare the result using the %time to time your code and submit your results in code comments",
"_____no_output_____"
]
],
[
[
"## pseudocode\n# for number in a list:\n# put the first number of the list into the new list\n# for other numbers in the list\n# compare it with every number in the new list,get it's index number\n# if it's larger than any number in the new list, put it on the index+1 position",
"_____no_output_____"
],
[
"def get_sort(l):\n newlist = []\n newlist.append(l[0])\n for i in range(1,len(l)):\n added = False\n for m in range(len(newlist)):\n if l[i] > newlist[m]:\n added = False\n if added == False:\n if l[i] <= newlist[m]:\n newlist.insert(m,l[i])\n added = True \n if added == False:\n newlist.append(l[i])\n return newlist",
"_____no_output_____"
],
[
"test10 = [4,5,8,1,3,5,22,14,50,23]\n%time get_sort(test10)\n# CPU times: user 25 µs, sys: 0 ns, total: 25 µs\n# Wall time: 28.1 µs\n# Out[79]:\n# [1, 3, 4, 5, 5, 8, 14, 22, 23, 50]",
"CPU times: user 25 µs, sys: 0 ns, total: 25 µs\nWall time: 28.1 µs\n"
],
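[
"# Sanity check (sketch): the hand-rolled sort should agree with Python's built-in sorted().\nassert get_sort(test10) == sorted(test10)\nprint('get_sort matches sorted() on test10')",
"_____no_output_____"
],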
[
"import random",
"_____no_output_____"
],
[
"# test100 = random.sample(range(1, 1000), 100)\n# %time get_sort(test100)\n# CPU times: user 997 µs, sys: 20 µs, total: 1.02 ms\n# Wall time: 1.05 ms",
"_____no_output_____"
],
[
"# test1000 = random.sample(range(1, 10000), 1000)\n# %time get_sort(test1000)\n# CPU times: user 128 ms, sys: 3.59 ms, total: 131 ms\n# Wall time: 170 ms",
"_____no_output_____"
],
[
"sorted10 = get_sort(test10)\nsorted100 = get_sort(test100)\nsorted1000 = get_sort(test1000)",
"_____no_output_____"
]
],
[
[
"## Assignment 2\n* Implement the search algorithm you came up with in pseudocode with Python\n* Test the search algorithm with a list of 10,100,1000 random numbers (sorted with your sorting algorithm) and compare the result using the %time to time your code and submit your results in code comments",
"_____no_output_____"
]
],
[
[
"## pseudocode\n# input a sorted list and a search term\n# get the length of the list\n# for each number in the range of length\n# if the index number of the list equals to the search term\n# print index\n# something = True\n# else \n# something = False\n# if False\n# print the item is not found",
"_____no_output_____"
],
[
"def check_num(l,n):\n for i in range(len(l)):\n# print(l[i])\n found = False\n if l[i] == n:\n found = True\n return \"The index of the number in the list is \" + str(i)\n if not found:\n return \"The number is not in the list.\"",
"_____no_output_____"
],
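[
"# For comparison only (sketch, not the submitted algorithm): since the lists are already sorted,\n# a binary search can find the index in O(log n) steps instead of the linear scan above.\ndef binary_search(l, n):\n    lo, hi = 0, len(l) - 1\n    while lo <= hi:\n        mid = (lo + hi) // 2\n        if l[mid] == n:\n            return 'The index of the number in the list is ' + str(mid)\n        elif l[mid] < n:\n            lo = mid + 1\n        else:\n            hi = mid - 1\n    return 'The number is not in the list.'",
"_____no_output_____"
],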
[
"%time check_num(sorted10,4)\n# CPU times: user 7 µs, sys: 0 ns, total: 7 µs\n# Wall time: 11.2 µs\n# Out[122]:\n# 'The index of the number in the list is 2'",
"CPU times: user 7 µs, sys: 0 ns, total: 7 µs\nWall time: 11.2 µs\n"
],
[
"%time check_num(sorted100,4)\n# CPU times: user 20 µs, sys: 0 ns, total: 20 µs\n# Wall time: 23.1 µs\n# Out[123]:\n# 'The number is not in the list.'",
"CPU times: user 20 µs, sys: 0 ns, total: 20 µs\nWall time: 23.1 µs\n"
],
[
"%time check_num(sorted1000,9987)\n# CPU times: user 120 µs, sys: 0 ns, total: 120 µs\n# Wall time: 123 µs\n# Out[127]:\n# 'The index of the number in the list is 998'",
"CPU times: user 120 µs, sys: 0 ns, total: 120 µs\nWall time: 123 µs\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
ec90576e457b3b235a618ed6d97e9d32cf62c8fb | 105,107 | ipynb | Jupyter Notebook | ieee_fraud/lgb_bayesian_notebook.ipynb | mihamerstan/kaggle_experiments | 606de40d62e2a5d075e1a62908b14cb5e9e8cc61 | [
"MIT"
] | null | null | null | ieee_fraud/lgb_bayesian_notebook.ipynb | mihamerstan/kaggle_experiments | 606de40d62e2a5d075e1a62908b14cb5e9e8cc61 | [
"MIT"
] | null | null | null | ieee_fraud/lgb_bayesian_notebook.ipynb | mihamerstan/kaggle_experiments | 606de40d62e2a5d075e1a62908b14cb5e9e8cc61 | [
"MIT"
] | null | null | null | 105,107 | 105,107 | 0.667101 | [
[
[
"----------\n**IEEE Fraud Detection - Bayesian optimization - LGB**\n=====================================\n\n***Vincent Lugat***\n\n*July 2019*\n\n----------",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"- <a href='#1'>1. Libraries and Data</a> \n- <a href='#2'>2. Bayesian Optimisation </a> \n- <a href='#3'>3. LGB + best hyperparameters</a>\n- <a href='#4'>4. Features importance</a>\n- <a href='#4'>5. Submission</a>",
"_____no_output_____"
],
[
"# <a id='1'>1. Librairies and data</a> ",
"_____no_output_____"
]
],
[
[
"# Libraries\nimport numpy as np \nimport pandas as pd \n# Data processing, metrics and modeling\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split, StratifiedKFold,KFold\nfrom bayes_opt import BayesianOptimization\nfrom datetime import datetime\nfrom sklearn.metrics import precision_score, recall_score, confusion_matrix, accuracy_score, roc_auc_score, f1_score, roc_curve, auc,precision_recall_curve\nfrom sklearn import metrics\nfrom sklearn import preprocessing\n# Lgbm\nimport lightgbm as lgb\n# Suppr warning\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport itertools\nfrom scipy import interp\n\n# Plots\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom matplotlib import rcParams",
"_____no_output_____"
]
],
[
[
"## DATASETS",
"_____no_output_____"
]
],
[
[
"%%time\ntrain_transaction = pd.read_csv('../input/train_transaction.csv', index_col='TransactionID')\ntest_transaction = pd.read_csv('../input/test_transaction.csv', index_col='TransactionID')\ntrain_identity = pd.read_csv('../input/train_identity.csv', index_col='TransactionID')\ntest_identity = pd.read_csv('../input/test_identity.csv', index_col='TransactionID')\nsample_submission = pd.read_csv('../input/sample_submission.csv', index_col='TransactionID')",
"_____no_output_____"
]
],
[
[
"## MERGE, MISSING VALUE, FILL NA",
"_____no_output_____"
]
],
[
[
"# merge \ntrain_df = train_transaction.merge(train_identity, how='left', left_index=True, right_index=True)\ntest_df = test_transaction.merge(test_identity, how='left', left_index=True, right_index=True)\n\nprint(\"Train shape : \"+str(train_df.shape))\nprint(\"Test shape : \"+str(test_df.shape))",
"_____no_output_____"
],
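[
"# Sanity check (sketch): with a left merge on the TransactionID index, each merged frame should\n# keep exactly one row per transaction, assuming TransactionID is unique in the identity tables.\nprint(len(train_df) == len(train_transaction), len(test_df) == len(test_transaction))",
"_____no_output_____"
],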
[
"pd.set_option('display.max_columns', 500)",
"_____no_output_____"
],
[
"# GPreda, missing data\ndef missing_data(data):\n total = data.isnull().sum()\n percent = (data.isnull().sum()/data.isnull().count()*100)\n tt = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])\n types = []\n for col in data.columns:\n dtype = str(data[col].dtype)\n types.append(dtype)\n tt['Types'] = types\n return(np.transpose(tt))",
"_____no_output_____"
],
[
"display(missing_data(train_df), missing_data(test_df))",
"_____no_output_____"
],
[
"del train_transaction, train_identity, test_transaction, test_identity",
"_____no_output_____"
]
],
[
[
"Source : https://www.kaggle.com/vaishvik25/refine-ieee-data",
"_____no_output_____"
]
],
[
[
"train_df['nulls1'] = train_df.isna().sum(axis=1)\ntest_df['nulls1'] = test_df.isna().sum(axis=1)",
"_____no_output_____"
],
[
"emails = {'gmail': 'google', 'att.net': 'att', 'twc.com': 'spectrum', 'scranton.edu': 'other', 'optonline.net': 'other', 'hotmail.co.uk': 'microsoft', 'comcast.net': 'other', 'yahoo.com.mx': 'yahoo', 'yahoo.fr': 'yahoo', 'yahoo.es': 'yahoo', 'charter.net': 'spectrum', 'live.com': 'microsoft', 'aim.com': 'aol', 'hotmail.de': 'microsoft', 'centurylink.net': 'centurylink', 'gmail.com': 'google', 'me.com': 'apple', 'earthlink.net': 'other', 'gmx.de': 'other', 'web.de': 'other', 'cfl.rr.com': 'other', 'hotmail.com': 'microsoft', 'protonmail.com': 'other', 'hotmail.fr': 'microsoft', 'windstream.net': 'other', 'outlook.es': 'microsoft', 'yahoo.co.jp': 'yahoo', 'yahoo.de': 'yahoo', 'servicios-ta.com': 'other', 'netzero.net': 'other', 'suddenlink.net': 'other', 'roadrunner.com': 'other', 'sc.rr.com': 'other', 'live.fr': 'microsoft', 'verizon.net': 'yahoo', 'msn.com': 'microsoft', 'q.com': 'centurylink', 'prodigy.net.mx': 'att', 'frontier.com': 'yahoo', 'anonymous.com': 'other', 'rocketmail.com': 'yahoo', 'sbcglobal.net': 'att', 'frontiernet.net': 'yahoo', 'ymail.com': 'yahoo', 'outlook.com': 'microsoft', 'mail.com': 'other', 'bellsouth.net': 'other', 'embarqmail.com': 'centurylink', 'cableone.net': 'other', 'hotmail.es': 'microsoft', 'mac.com': 'apple', 'yahoo.co.uk': 'yahoo', 'netzero.com': 'other', 'yahoo.com': 'yahoo', 'live.com.mx': 'microsoft', 'ptd.net': 'other', 'cox.net': 'other', 'aol.com': 'aol', 'juno.com': 'other', 'icloud.com': 'apple'}\nus_emails = ['gmail', 'net', 'edu']",
"_____no_output_____"
],
[
"#https://www.kaggle.com/c/ieee-fraud-detection/discussion/100499#latest_df-579654\nfor c in ['P_emaildomain', 'R_emaildomain']:\n train_df[c + '_bin'] = train_df[c].map(emails)\n test_df[c + '_bin'] = test_df[c].map(emails)\n \n train_df[c + '_suffix'] = train_df[c].map(lambda x: str(x).split('.')[-1])\n test_df[c + '_suffix'] = test_df[c].map(lambda x: str(x).split('.')[-1])\n \n train_df[c + '_suffix'] = train_df[c + '_suffix'].map(lambda x: x if str(x) not in us_emails else 'us')\n test_df[c + '_suffix'] = test_df[c + '_suffix'].map(lambda x: x if str(x) not in us_emails else 'us')",
"_____no_output_____"
],
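[
"# Quick look at one of the engineered e-mail features (sketch): value_counts shows how the raw\n# domains were grouped into providers, including how many rows remain NaN.\ntrain_df['P_emaildomain_bin'].value_counts(dropna=False).head(10)",
"_____no_output_____"
],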
[
"labels = {np.nan: np.nan, 'nan': np.nan, 't': 1, 'f': 2, 'm2': 3, 'm0': 4, 'm1': 5, 'gmail.com': 6, 'outlook.com': 7, 'yahoo.com': 8, 'mail.com': 9, 'anonymous.com': 10, 'hotmail.com': 11, 'verizon.net': 12, 'aol.com': 13, 'me.com': 14, 'comcast.net': 15, 'optonline.net': 16, 'cox.net': 17, 'charter.net': 18, 'rocketmail.com': 19, 'prodigy.net.mx': 20, 'embarqmail.com': 21, 'icloud.com': 22, 'live.com.mx': 23, 'gmail': 24, 'live.com': 25, 'att.net': 26, 'juno.com': 27, 'ymail.com': 28, 'sbcglobal.net': 29, 'bellsouth.net': 30, 'msn.com': 31, 'q.com': 32, 'yahoo.com.mx': 33, 'centurylink.net': 34, 'servicios-ta.com': 35, 'earthlink.net': 36, 'hotmail.es': 37, 'cfl.rr.com': 38, 'roadrunner.com': 39, 'netzero.net': 40, 'gmx.de': 41, 'suddenlink.net': 42, 'frontiernet.net': 43, 'windstream.net': 44, 'frontier.com': 45, 'outlook.es': 46, 'mac.com': 47, 'netzero.com': 48, 'aim.com': 49, 'web.de': 50, 'twc.com': 51, 'cableone.net': 52, 'yahoo.fr': 53, 'yahoo.de': 54, 'yahoo.es': 55, 'sc.rr.com': 56, 'ptd.net': 57, 'live.fr': 58, 'yahoo.co.uk': 59, 'hotmail.fr': 60, 'hotmail.de': 61, 'hotmail.co.uk': 62, 'protonmail.com': 63, 'yahoo.co.jp': 64, 'scranton.edu': 65, 'w': 66, 'h': 67, 'c': 68, 's': 69, 'r': 70, 'discover': 71, 'mastercard': 72, 'visa': 73, 'american express': 74, 'credit': 75, 'debit': 76, 'debit or credit': 77, 'charge card': 78, 'notfound': 81, 'found': 82, 'new': 83, 'unknown': 84, 'ip_proxy:transparent': 85, 'ip_proxy:anonymous': 86, 'ip_proxy:hidden': 87, 'android 7.0': 88, 'ios 11.1.2': 89, 'mac os x 10_11_6': 90, 'windows 10': 91, 'android': 284, 'linux': 93, 'ios 11.0.3': 94, 'mac os x 10_7_5': 95, 'mac os x 10_12_6': 96, 'mac os x 10_13_1': 97, 'ios 11.1.0': 98, 'mac os x 10_9_5': 99, 'windows 7': 100, 'windows 8.1': 101, 'mac': 102, 'ios 10.3.3': 103, 'mac os x 10.12': 104, 'mac os x 10_10_5': 105, 'mac os x 10_11_5': 106, 'ios 9.3.5': 107, 'android 5.1.1': 108, 'android 7.1.1': 109, 'android 6.0': 110, 'ios 10.3.1': 111, 'mac os x 10.9': 112, 'ios 11.1.1': 113, 'windows vista': 114, 'ios 10.3.2': 115, 'ios 11.0.2': 116, 'mac os x 10.11': 117, 'android 8.0.0': 118, 'ios 10.2.0': 119, 'ios 10.2.1': 120, 'ios 11.0.0': 121, 'mac os x 10.10': 122, 'mac os x 10_12_3': 123, 'mac os x 10_12': 124, 'android 6.0.1': 125, 'ios': 126, 'mac os x 10.13': 127, 'mac os x 10_12_5': 128, 'mac os x 10_8_5': 129, 'ios 11.0.1': 130, 'ios 10.0.2': 131, 'android 5.0.2': 132, 'windows xp': 133, 'ios 11.2.0': 134, 'mac os x 10.6': 135, 'windows 8': 136, 'mac os x 10_6_8': 137, 'mac os x 10_11_4': 138, 'mac os x 10_12_1': 139, 'ios 10.1.1': 140, 'mac os x 10_11_3': 141, 'mac os x 10_12_4': 142, 'mac os x 10_13_2': 143, 'android 4.4.2': 144, 'mac os x 10_12_2': 145, 'android 5.0': 146, 'func': 147, 'android 7.1.2': 148, 'android 8.1.0': 149, 'other': 150, 'mac os x 10_13_3': 151, 'ios 11.2.1': 152, 'ios 11.2.5': 153, 'windows': 154, 'ios 11.2.2': 155, 'ios 11.3.0': 156, 'ios 11.2.6': 157, 'mac os x 10_13_4': 158, 'mac os x 10_13_5': 159, 'ios 11.4.0': 160, 'ios 11.3.1': 161, 'ios 11.4.1': 162, 'android 9': 163, 'ios 12.0.0': 164, 'mac os x 10_13_6': 165, 'mac os x 10.14': 166, 'mac os x 10_14_0': 167, 'mac os x 10_14': 168, 'ios 12.1.0': 169, 'mac os x 10_14_1': 170, 'ios 12.0.1': 171, 'ios 12.1.1': 172, 'mac os x 10_14_2': 173, 'ios 12.1.2': 174, 'samsung browser 6.2': 175, 'mobile safari 11.0': 176, 'chrome 62.0': 177, 'chrome 62.0 for android': 178, 'edge 15.0': 179, 'mobile safari generic': 180, 'chrome 49.0': 181, 'chrome 61.0': 182, 'edge 16.0': 183, 'safari generic': 184, 'edge 14.0': 185, 
'chrome 56.0 for android': 186, 'firefox 57.0': 187, 'chrome 54.0 for android': 188, 'mobile safari uiwebview': 189, 'chrome': 190, 'chrome 62.0 for ios': 191, 'firefox': 192, 'chrome 60.0 for android': 193, 'mobile safari 10.0': 194, 'chrome 61.0 for android': 195, 'ie 11.0 for desktop': 196, 'ie 11.0 for tablet': 197, 'mobile safari 9.0': 198, 'chrome generic': 199, 'chrome 59.0 for android': 200, 'firefox 56.0': 201, 'android webview 4.0': 202, 'chrome 55.0': 203, 'opera 49.0': 204, 'ie': 205, 'chrome 55.0 for android': 206, 'firefox 52.0': 207, 'chrome 57.0 for android': 208, 'chrome 56.0': 209, 'chrome 46.0 for android': 210, 'chrome 58.0': 211, 'firefox 48.0': 212, 'chrome 59.0': 213, 'samsung browser 4.0': 214, 'edge 13.0': 215, 'chrome 53.0 for android': 216, 'chrome 58.0 for android': 217, 'chrome 60.0': 218, 'mobile safari 8.0': 219, 'firefox generic': 220, 'generic/android 7.0': 221, 'mobile': 222, 'samsung/sm-g532m': 223, 'chrome 50.0 for android': 224, 'chrome 51.0 for android': 225, 'chrome 63.0': 226, 'chrome 52.0 for android': 227, 'chrome 51.0': 228, 'firefox 55.0': 229, 'edge': 230, 'opera': 231, 'chrome generic for android': 232, 'aol': 233, 'samsung browser 5.4': 234, 'samsung/sch': 235, 'silk': 236, 'chrome 57.0': 237, 'firefox 47.0': 238, 'chrome 63.0 for android': 239, 'samsung/sm-g531h': 240, 'chrome 43.0 for android': 241, 'waterfox': 242, 'nokia/lumia': 243, 'chrome 63.0 for ios': 244, 'puffin': 245, 'microsoft/windows': 246, 'cyberfox': 247, 'generic/android': 248, 'samsung': 849, 'opera generic': 250, 'chrome 49.0 for android': 251, 'zte/blade': 252, 'safari': 253, 'android browser 4.0': 254, 'samsung browser 5.2': 255, 'palemoon': 256, 'maxthon': 257, 'line': 258, 'lg/k-200': 259, 'iron': 260, 'blu/dash': 261, 'seamonkey': 262, 'firefox 58.0': 263, 'chrome 64.0 for android': 264, 'chrome 64.0': 265, 'firefox 59.0': 266, 'chrome 64.0 for ios': 267, 'm4tel/m4': 268, 'comodo': 269, 'lanix/ilium': 270, 'samsung browser generic': 271, 'chromium': 272, 'opera 51.0': 273, 'inco/minion': 274, 'samsung browser 7.0': 275, 'mozilla/firefox': 276, 'samsung browser 4.2': 277, 'samsung browser 6.4': 278, 'chrome 65.0': 279, 'chrome 65.0 for android': 280, 'chrome 65.0 for ios': 281, 'cherry': 282, 'icedragon': 283, 'edge 17.0': 285, 'chrome 66.0': 286, 'chrome 66.0 for android': 287, 'safari 11.0': 288, 'safari 9.0': 289, 'safari 10.0': 290, 'google': 291, 'chrome 66.0 for ios': 292, 'google search application 48.0': 293, 'opera 52.0': 294, 'firefox 60.0': 295, 'opera 53.0': 296, 'samsung browser 3.3': 297, 'google search application 49.0': 298, 'facebook': 299, 'firefox mobile 61.0': 300, 'chrome 67.0': 301, 'chrome 69.0': 302, 'chrome 67.0 for android': 303, 'firefox 61.0': 304, 'samsung browser 7.2': 305, 'chrome 67.0 for ios': 306, 'google search application 52.0': 307, 'firefox 62.0': 308, 'mobile safari 12.0': 309, 'chrome 68.0': 310, 'opera 54.0': 311, 'firefox mobile 62.0': 312, 'google search application 54.0': 313, 'safari 12.0': 314, 'chrome 68.0 for android': 315, 'chrome 68.0 for ios': 316, 'chrome 39.0 for android': 317, 'chrome 70.0': 318, 'rim': 2752, 'chrome 69.0 for android': 320, 'google search application 56.0': 321, 'samsung browser 7.4': 322, 'opera 55.0': 323, 'firefox 63.0': 324, 'google search application 58.0': 325, 'chrome 69.0 for ios': 326, 'chrome 70.0 for android': 327, 'uc': 328, 'google search application 59.0': 329, 'opera 56.0': 330, 'chrome 71.0': 331, 'google search application 60.0': 332, 'edge 18.0': 333, 'chrome 70.0 for ios': 334, 
'firefox mobile 63.0': 335, 'google search application 61.0': 336, 'firefox 64.0': 337, 'google search application 62.0': 338, 'chrome 71.0 for android': 339, 'google search application 63.0': 340, 'chrome 71.0 for ios': 341, 'google search application 64.0': 342, 'samsung browser 8.2': 343, 'google search application 65.0': 344, 'blackberry': 345, '2220x1080': 346, '1334x750': 347, '1280x800': 348, '1366x768': 349, '1920x1080': 350, '1680x1050': 351, '1136x640': 352, '5120x2880': 353, '2880x1800': 354, '1920x1200': 355, '2560x1600': 356, '2048x1536': 357, '1024x768': 358, '1280x720': 359, '2560x1440': 360, '2208x1242': 361, '2001x1125': 362, '1440x900': 363, '1600x900': 364, '2672x1440': 365, '1280x1024': 366, '960x540': 367, '2732x2048': 368, '2436x1125': 369, '2048x1152': 370, '2960x1440': 371, '1024x600': 372, '855x480': 373, '4096x2304': 374, '2160x1440': 375, '2562x1442': 376, '801x480': 377, '2736x1824': 378, '3441x1440': 379, '2880x1620': 380, '3840x2160': 381, '1638x922': 382, '1280x768': 383, '1360x768': 384, '1280x960': 385, '3440x1440': 386, '1152x720': 387, '1280x1025': 388, '3360x2100': 389, '2304x1296': 390, '1152x864': 391, '3200x1800': 392, '2112x1188': 393, '2224x1668': 394, '2400x1350': 395, '2000x1125': 396, '1600x1000': 397, '2560x1080': 398, '1728x972': 399, '3000x2000': 400, '1024x640': 401, '3840x2400': 402, '2304x1440': 403, '1280x600': 404, '1400x1050': 405, '1600x1200': 406, '3201x1800': 407, '1356x900': 408, '1344x756': 409, '1624x1080': 410, '1536x864': 411, '1800x1125': 412, '1920x1281': 413, '2961x1442': 414, '1366x1024': 415, '1344x840': 416, '3360x1890': 417, '1536x1152': 418, '1200x675': 419, '1480x720': 420, '2400x1600': 421, '3200x2000': 422, '1281x801': 423, '960x640': 424, '1776x1000': 425, '2048x1280': 426, '2049x1152': 427, '1138x640': 428, '2160x1215': 429, '2880x1440': 430, '0x0': 431, '2520x1575': 432, '5760x3240': 433, '3843x2163': 434, '1184x720': 435, '1440x810': 436, '2076x1080': 437, '1600x837': 438, '1093x615': 439, '1281x721': 440, '1152x648': 441, '2392x1440': 442, '2048x1080': 443, '2735x1825': 444, '1680x945': 445, '1805x1015': 446, '5760x1080': 447, '2816x1584': 448, '4500x3000': 449, '1684x947': 450, '1440x960': 451, '1364x768': 452, '3072x1728': 453, '5040x3150': 454, '7500x5000': 455, '768x576': 456, '1768x992': 457, '1658x946': 458, '1200x720': 459, '1239x697': 460, '1188x720': 461, '1232x800': 462, '1920x1280': 463, '1264x924': 464, '1400x900': 465, '3240x2160': 466, '2961x1440': 467, '1422x889': 468, '1848x1155': 469, '3360x1050': 470, '3840x1080': 471, '2010x1080': 472, '2160x1350': 473, '1440x720': 474, '1280x712': 475, '1512x945': 476, '1296x774': 477, '1368x768': 478, '3520x1980': 479, '800x600': 480, '1700x960': 481, '2560x1800': 482, '6400x3600': 483, '2368x1440': 484, '1824x1026': 485, '1912x1025': 486, '600x450': 487, '3840x1600': 488, '1760x990': 489, '2700x1800': 490, '1371x857': 491, '1776x1080': 492, '2552x1337': 493, '3600x2250': 494, '2560x1700': 495, '2816x1760': 496, '1440x800': 497, '1440x803': 498, '1920x1018': 499, '6016x3384': 500, '1280x620': 501, '1281x720': 502, '1720x1440': 503, '1408x880': 504, '640x360': 505, '1920x975': 506, '976x600': 507, '1062x630': 508, '2800x1575': 509, '6720x3780': 510, '1440x759': 511, '1120x700': 512, '1921x1081': 513, '1280x1023': 514, '1279x1023': 515, '1441x901': 516, '1679x1049': 517, '1680x1051': 518, '2220x1081': 519, '1920x1079': 520, '1919x1199': 521, '1680x1049': 522, '1365x768': 523, '1919x1079': 524, '1919x1200': 525, '1919x1080': 526, '1366x767': 527, '1584x990': 528, 
'2880x1442': 529, '1281x800': 530, '1229x691': 531, '1600x1024': 532, '1600x899': 533, '1536x960': 534, '1502x844': 535, '1920x1201': 536, '1439x809': 537, '1408x792': 538, '1279x1024': 539, '1599x900': 540, '1920x1081': 541, '921x691': 542, '3841x2161': 543, '1921x1080': 544, '480x320': 545, '1888x941': 546, '2049x1536': 547, '2160x1439': 548, '1707x960': 549, '1024x767': 550, '1365x767': 551, '3001x2000': 552, '3839x2160': 553, '1916x901': 554, '3838x2158': 555, '1599x899': 556, '3199x1800': 557, '1511x944': 558, '2737x1825': 559, '2736x1823': 560, '2735x1823': 561, '2559x1439': 562, '2400x1500': 563, '2882x1442': 564, '1729x973': 565, '1727x971': 566, '1023x767': 567, '1918x1080': 568, '1439x900': 569, '4499x2999': 570, '1280x740': 571, '2999x2000': 572, '1024x552': 573, '1440x899': 574, '2255x1503': 575, '1025x768': 576, '1280x732': 577, '3839x2159': 578, '3840x2162': 579, '3696x2310': 580, '2159x1439': 581, '2256x1504': 582, '1439x899': 583, '2159x1440': 584, '1359x768': 585, '1092x614': 586, '2048x1278': 587, '2591x1619': 588, '4200x2625': 589, '2710x1440': 590, '1272x960': 591, '1023x768': 592, '3838x2160': 593, '2100x1312': 594, '1360x767': 595, '1024x819': 596, '1502x845': 597, '2561x1442': 598, '2559x1440': 599, '2160x1081': 600, '1920x1279': 601, '2160x1080': 602, '1596x710': 603, '1496x844': 604, '1280x900': 605, '2047x1152': 606, '2094x1080': 607, '1800x1440': 608, '993x664': 609, '1622x1081': 610, '1360x765': 611, '1280x799': 612, '1279x800': 613, '1920x983': 614, '888x540': 615, '1722x1440': 616, '1679x1050': 617, '2791x1440': 618, '1921x1200': 619, '6144x3456': 620, '2699x1799': 621, '4096x3072': 622, '4608x2592': 623, '910x512': 624, '2257x1505': 625, '1536x1024': 626, '1208x720': 627, '1842x1047': 628, '2462x1641': 629, '1920x1199': 630, '1601x900': 631, '1368x912': 632, '2112x1320': 633, '1265x948': 634, '1359x767': 635, '1920x1017': 636, '1681x1050': 637, '3241x2161': 638, '3011x2007': 639, '2188x1459': 640, '1599x1200': 641, '1500x1000': 642, '2560x1313': 643, '1920x1440': 644, '2703x1441': 645, '1151x921': 646, '3000x1687': 647, '1151x863': 648, '1480x721': 649, '792x480': 650, '3521x1980': 651, '1279x767': 652, '3200x1801': 653, '2560x1439': 654, '1272x952': 655, '1600x901': 656, '2255x1504': 657, '1862x1048': 658, '3840x2159': 659, '3168x1980': 660, '3439x1440': 661, '2560x1441': 662, '800x480': 663, '3200x1799': 664, '3840x2161': 665, '1364x767': 666, '3000x1999': 667, '2688x1242': 668, '1080x675': 669, '2735x1824': 670, '2303x1295': 671, '1592x828': 672, '3122x1442': 673, '2560x1599': 674, '3239x2159': 675, '1281x1024': 676, '1466x403': 677, '4096x2160': 678, '1727x1079': 679, '1231x358': 680, '3584x2016': 681, '2390x1344': 682, '3839x2158': 683, '2281x1081': 684, '1152x672': 685, '2401x1350': 686, '2560x1079': 687, '2161x1080': 688, '1366x766': 689, '683x384': 690, '1720x947': 691, '1024x576': 692, '4320x2700': 693, '2341x1081': 694, '1537x865': 695, '1792x828': 696, '960x480': 697, '3280x2048': 698, '1408x844': 699, '1021x669': 700, '1800x1200': 701, '3120x1440': 702, '1919x1279': 703, '1279x799': 704, '2556x1248': 705, '1000x798': 706, '1600x1280': 707, '1296x810': 708, '3199x1799': 709, '2562x1314': 710, '1624x750': 711, '1416x805': 712, '1920x1128': 713, '2399x1598': 714, '3840x1599': 715, '1918x1078': 716, '2388x1668': 717, '2592x1728': 718, '2562x1441': 719, '1792x768': 720, '3201x1799': 721, '1679x987': 722, '3841x2401': 723, '1794x1080': 724, '1279x720': 725, '2960x1442': 726, '1501x844': 727, '1918x1347': 728, '2950x1440': 729, '1024x820': 730, 
'1366x769': 731, '2401x1351': 732, '1600x868': 733, '1280x801': 734, '1264x945': 735, '1279x719': 736, '4800x2700': 737, '1151x864': 738, '2880x1920': 739, '960x600': 740, '2696x1440': 741, '5960x1080': 742, '2241x1081': 743, '7680x4320': 744, '2303x1439': 745, '1025x640': 746, '2399x1348': 747, '8640x1620': 748, '2048x1151': 749, '1399x1050': 750, '1920x974': 751, '4225x2377': 752, '2680x1440': 753, '1223x691': 754, '2047x1279': 755, '1279x768': 756, '1621x1081': 757, '1152x922': 758, '1260x787': 759, '1669x942': 760, '3123x1440': 761, '2463x1642': 762, '1361x768': 763, '2048x1537': 764, '8960x5040': 765, '1500x840': 766, '2559x1080': 767, '1408x1127': 768, '1416x758': 769, '2560x1418': 770, '1926x921': 771, '1268x862': 772, '3784x1584': 773, '4500x3001': 774, '2398x1599': 775, '1344x720': 776, '2559x1599': 777, '2133x1080': 778, '1919x1280': 779, '2340x1080': 780, '2999x1998': 781, '1151x719': 782, '2160x1920': 783, '2052x1368': 784, '4300x1800': 785, '1501x843': 786, '2559x1079': 787, '1281x769': 788, '1408x1126': 789, '1680x1018': 790, '2162x1081': 791, '455x256': 792, '2646x1440': 793, '2562x1440': 794, '2789x1442': 795, '2241x1080': 796, '3839x1599': 797, '1280x767': 798, '2561x1312': 799, '2162x1082': 800, '2768x1440': 801, '3239x2160': 802, '2341x1080': 803, '2928x1440': 804, '1767x991': 805, '2734x1824': 806, 'match_status:2': 807, 'match_status:1': 808, 'match_status:0': 809, 'match_status:-1': 810, 'desktop': 811, 'samsung sm-g892a build/nrd90m': 812, 'ios device': 813, 'macos': 814, 'sm-g930v build/nrd90m': 815, 'blade a602 build/mra58k': 816, 'xt1635-02 build/npn26.118-22-2': 817, 'z970': 818, 'sm-n920v build/nrd90m': 819, 'redmi note 4 build/mmb29m': 820, 'lenovo pb1-750m build/s100': 821, 'lt22i build/6.2.a.1.100': 822, 'rv:52.0': 823, 'sm-g950u build/nrd90m': 824, 'lg-h872 build/nrd90u': 825, 'lg-k500 build/mmb29m': 826, 'sm-p550 build/mmb29m': 827, 'sm-j700m build/mmb29k': 828, 'trident/7.0': 829, 'rv:57.0': 830, 'samsung sm-g930t build/nrd90m': 831, 'blade v6 plus build/mra58k': 832, 'bll-l23 build/huaweibll-l23': 833, 'kyocera-c6742a build/lmy47v': 834, 'f3113 build/33.2.a.4.70': 835, 'd5306 build/19.4.a.0.182': 836, 'm4 ss4457 build/mra58k': 837, 'sm-g955u build/nrd90m': 838, 'sm-g610m build/mmb29k': 839, 'samsung sm-g935f build/nrd90m': 840, 'xt1635-01': 841, 'rv:56.0': 842, 'vs500': 843, 'cam-l03 build/huaweicam-l03': 844, 'rct6303w87m7 build/mra58k': 845, 'm4 ss4451 build/lmy47d': 846, 'kffowi build/lvy48f': 847, 'moto e (4) build/nma26.42-19': 848, 'e2306 build/26.3.a.1.33': 850, 'ilium l910 build/mra58k': 851, 'gt-i9300': 852, 'lg-h420 build/lrx21y': 853, 'windows nt 6.2': 854, 'xt1032 build/lpbs23.13-56-2': 855, 'gt-i9060m build/ktu84p': 856, 'rv:38.0': 857, 'moto g (4) build/npjs25.93-14-10': 858, 'sm-g530t': 859, 'lgmp260 build/nrd90u': 860, 'sm-e500m build/ktu84p': 861, 'samsung sm-g950u build/nrd90m': 862, 'rv:11.0': 863, 'sm-g920i build/nrd90m': 864, 'samsung sm-j327t build/nrd90m': 865, 'sm-g900h build/mmb29k': 866, 'trt-l53 build/huaweitrt-l53': 867, 'moto g play build/mpis24.241-15.3-26': 868, 'nexus 6p build/opr5.170623.011': 869, 'sgp521': 870, 'vs988 build/nrd90u': 871, 'sm-g531h build/lmy48b': 872, 'moto c build/nrd90m.063': 873, 'lg-k410 build/lrx22g': 874, 'sm-g935f build/mmb29k': 875, 'moto c plus build/nrd90m.05.022': 876, 'lg-x180g build/lmy47i': 877, 'sm-g925p build/nrd90m': 878, 'es-us': 879, 'sm-g920v build/nrd90m': 880, 'sm-g935t build/nrd90m': 881, 'moto e (4) plus build/nma26.42-69': 882, 'sm-g925w8': 883, 'rv:48.0': 884, 'samsung-sm-g930a 
build/nrd90m': 885, 'htc one m9 build/nrd90m': 886, '7055a build/kvt49l': 887, 'moto g play build/mpis24.241-15.3-7': 888, 'sm-t810 build/nrd90m': 889, 'rv:51.0': 890, 'sm-n910v build/mmb29m': 891, 'sm-g930t build/nrd90m': 892, 'lg-h870 build/nrd90u': 893, 'samsung sm-g950u1 build/nrd90m': 894, 'samsung sm-s727vl build/mmb29m': 895, 'sm-n920g build/nrd90m': 896, 'moto g (5) plus build/npn25.137-72': 897, 'moto g (5) build/npp25.137-72': 898, 'sm-g935f build/nrd90m': 899, 'lg-d373 build/kot49i.v10a': 900, 'ale-l23 build/huaweiale-l23': 901, 'z836bl': 902, 'ilium x710 build/mra58k': 903, 'lg-d680 build/kot49i': 904, 'm4 ss4450 build/mra58k': 905, 'lg-k580 build/mra58k': 906, 'sm-j700m build/lmy48b': 907, 'sm-a710m': 908, 'moto z2 play build/npss26.118-19-6': 909, 'moto g (4) build/npj25.93-14.5': 910, 'z981 build/mmb29m': 911, 'sm-t710 build/nrd90m': 912, 'sm-t700 build/mmb29k': 913, 'one touch 4033a build/jdq39': 914, 'f5121 build/34.3.a.0.228': 915, 'was-lx3 build/huaweiwas-lx3': 916, 'r8106': 917, 'motog3 build/mpis24.65-33.1-2-16': 918, 'sm-g925i build/mmb29k': 919, 'huawei vns-l53 build/huaweivns-l53': 920, 'blade v6 build/lrx22g': 921, 'sm-t113nu build/ktu84p': 922, 'samsung sm-g530h build/lrx22g': 923, 'sm-j510mn build/mmb29m': 924, 'sm-g950f build/nrd90m': 925, 'samsung sm-j700m build/mmb29k': 926, 'sm-g570m build/mmb29k': 927, 'samsung-sm-g891a build/nrd90m': 928, 'sm-j500m build/lmy48b': 929, 'sm-j727u build/nrd90m': 930, 'lgls676 build/mxb48t': 931, 'ta-1003': 932, 'sm-g900v build/mmb29m': 933, 'vk810': 934, 'kfaswi build/lvy48f': 935, 'sm-j320m build/lmy47v': 936, 'sm-j120h build/lmy47v': 937, 'sm-n950u build/nmf26x': 938, 'lg-d693n build/lrx22g': 939, 'huawei rio-l03 build/huaweirio-l03': 940, 'samsung sm-g930a build/nrd90m': 941, '5012g build/mra58k': 942, 'sm-p350 build/mmb29m': 943, 'sm-a300h build/lrx22g': 944, 'moto': 1166, 'sm-g925v build/nrd90m': 946, 'samsung sm-n910a build/mmb29m': 947, 'sm-g532m build/mmb29t': 948, '5080a build/mra58k': 949, 'xt1254 build/mcg24.251-5-5': 950, 'kfgiwi build/lvy48f': 951, 'lg-d331 build/lrx22g': 952, 'sm-g950u1 build/nrd90m': 953, 'pixel': 954, 'sm-g900t build/mmb29m': 955, 'e5606 build/30.2.a.1.21': 956, 'moto z2 play build/nps26.74-16-1': 957, 'z410': 958, 'lg-h500 build/lrx21y': 959, 'huawei tag-l13 build/huaweitag-l13': 960, 'p4526a build/nrd90m': 961, 'sm-g900f build/mmb29m': 962, 'moto g (4) build/npj25.93-14.7': 963, 'sm-a510m build/nrd90m': 964, 'xt1565': 965, 'blade v6 max build/mra58k': 966, 'f3213 build/36.1.a.1.86': 967, 'rex': 968, 'pixel build/opr3.170623.013': 969, 'xt1585 build/nck25.118-10.5': 970, 'motog3-te': 971, 'ilium lt510 build/mra58k': 972, 'samsung-sm-g870a build/mmb29m': 973, 'sm-a500m build/ktu84p': 974, 'htc': 975, 'samsung sm-g950f build/nrd90m': 976, 'lg-x240 build/mra58k': 977, 'kfauwi build/lvy48f': 978, 'lg-k428 build/mmb29m': 979, 'samsung sm-t580 build/nrd90m': 980, 'edison': 981, 'hisense f20 build/mmb29m': 982, 'sm-j327v build/nrd90m': 983, 'lgls775 build/nrd90u': 984, 'sm-a710m build/lmy47x': 985, 'lg-ls777 build/nrd90u': 986, 'lg-h320 build/lrx21y': 987, 'qtasun1 build/nrd90m': 988, 'sm-j730g build/nrd90m': 989, 'rv:58.0': 990, 'htc 10 build/nrd90m': 991, 'xt1063': 992, 'vs995 build/nrd90m': 993, 'sm-g550t build/mmb29k': 994, 'samsung sm-g930p build/nrd90m': 995, 'lg-k530 build/mmb29m': 996, 'samsung sm-j701m build/nrd90m': 997, 'samsung sm-t587p build/nrd90m': 998, 'sm-t530nu build/lrx22g': 999, 'sm-s920l': 1000, 'sm-g530h build/lrx22g': 1001, 'lg-v410': 1002, 'samsung sm-g891a build/nrd90m': 
1003, 'gt-p5113': 1004, 'lg-h840 build/nrd90u': 1005, 'bac-l03 build/huaweibac-l03': 1006, 'samsung sm-t810 build/nrd90m': 1007, 'vs835': 1008, 'lg-tp450 build/nrd90u': 1009, 'sm-j727v build/nrd90m': 1010, 'sm-g955f build/nrd90m': 1011, 'kfdowi build/lvy48f': 1012, 'sm-j200m build/lmy47x': 1013, 'sm-j320h': 1014, 'blade v8 se build/nrd90m': 1015, 'sm-n920t build/nrd90m': 1016, 'lg-h542 build/mra58k': 1017, 'sm-t330nu build/lmy47x': 1018, 'samsung sm-j500m build/lmy48b': 1019, 'lg-m322': 1020, 'chc-u03 build/huaweichc-u03': 1021, 'htc desire 650 build/mmb29m': 1022, 'samsung-sm-g935a build/nrd90m': 1023, 'sm-g925i build/nrd90m': 1024, 'lg-k200 build/mxb48t': 1025, 'aquaris': 1026, 'sm-j710mn build/mmb29k': 1027, 'alcatel_4060a': 1028, 'lgms631 build/mra58k': 1029, 'mot-a6020l37 build/lmy47v': 1030, '5011a build/nrd90m': 1031, 'sm-t550 build/nmf26x': 1032, 'moto g (5) plus build/npns25.137-15-11': 1033, 'e6603 build/32.4.a.1.54': 1034, 'redmi 4a build/mmb29m': 1035, 'rv:55.0': 1036, 'sm-t520': 1037, 'htc desire 10 lifestyle build/mmb29m': 1038, 'sm-a720f build/mmb29k': 1039, 'moto g (5s': 1040, 'sm-g925i build/lmy47x': 1041, 'sm-g930r4 build/nrd90m': 1042, 'samsung sm-g610m build/mmb29k': 1043, 'lenovo k33b36 build/nrd90n': 1044, 'sm-g930u build/nrd90m': 1045, 'moto z2 play build/nps26.118-19': 1046, 'samsung sm-n950u build/nmf26x': 1047, 'sm-a320fl build/mmb29k': 1048, 'lg-m250 build/nrd90u': 1049, 'kfsawi': 1050, 'samsung sm-g930f build/nrd90m': 1051, 'lenovo k33b36 build/mmb29m': 1052, 'samsung sm-n920v build/nrd90m': 1053, 'windows nt 6.1': 1054, 'rv:45.0': 1055, 'samsung-sm-j327a build/nrd90m': 1056, 'sm-g935v build/nrd90m': 1057, 'mddrjs': 1058, 'sm-g935p build/nrd90m': 1059, 'sm-g930p build/nrd90m': 1060, 'sm-g928v build/nrd90m': 1061, 'lg-m210 build/nrd90u': 1062, 'blade v580 build/lmy47d': 1063, 'ilium lt500 build/lmy47o': 1064, 'y635-l03 build/huaweiy635-l03': 1065, 'xt1635-01 build/ndns26.118-23-12-3': 1066, 'rv:42.0': 1067, 'samsung sm-g925t build/nrd90m': 1068, 'samsung-sm-g900a build/lrx21t': 1069, 'moto z (2': 1070, 'sm-g360t1 build/lmy47x': 1071, 'sm-s903vl': 1072, 'samsung sm-g920a build/nrd90m': 1073, 'huawei vns-l23 build/huaweivns-l23': 1074, 'lg-h811 build/mra58k': 1075, 'm4 ss4456 build/lmy47v': 1076, 'samsung-sm-t337a build/lmy47x': 1077, 'samsung sm-g955u build/nrd90m': 1078, 'sm-g900p build/mmb29m': 1079, 'sm-g800f build/kot49h': 1080, 'bntv400': 1081, 'huawei vns-l21 build/huaweivns-l21': 1082, 'oneplus a5000 build/nmf26x': 1083, 'htc desire 626s build/lmy47o': 1084, 'xt1710-02 build/ndss26.118-23-15': 1085, 'rv:49.0': 1086, 'eva-l09 build/huaweieva-l09': 1087, 'lenovo': 1088, 'lg-k540 build/mmb29m': 1089, 'frd-l04 build/huaweifrd-l04': 1090, 'linux x86_64': 1091, 'samsung sm-g935t build/nrd90m': 1092, 'sm-g900m build/lrx21t': 1093, 'lg-h820 build/nrd90u': 1094, 'z983 build/nmf26f': 1095, 'lgls991': 1096, 'sm-s120vl': 1097, 'le': 1098, 'ilium l1120 build/nrd90m': 1099, 'xt1650 build/ncls26.118-23-13-3': 1100, 'sm-t377p build/nmf26x': 1101, 'qtaqz3 build/lmy47v': 1102, 'sm-a310m': 1103, 'f5321': 1104, 'kftbwi build/lvy48f': 1105, 'm4 ss4453 build/mmb29m': 1106, 'xt1064 build/mpb24.65-34-3': 1107, 'vs987 build/nrd90u': 1108, 'sm-g386t': 1109, 'samsung-sm-n920a build/nrd90m': 1110, 'samsung sm-j727t1 build/nrd90m': 1111, 'samsung sm-g532m build/mmb29t': 1112, 'sm-g925t build/nrd90m': 1113, 'nethelper70': 1114, 'samsung sm-g890a build/nrd90m': 1115, 'd5803 build/23.5.a.1.291': 1116, 'gt-i8190l build/jzo54k': 1117, 'sm-a510m build/lmy47x': 1118, 'nexus': 1119, 'sm-t580 
build/nrd90m': 1120, 'sm-g900v': 1121, 'zeia8': 1122, 'sm-n920p build/nrd90m': 1123, 'sm-t550': 1124, 'samsung sm-j320f build/lmy47v': 1125, 'vs501 build/nrd90u': 1126, 'linux i686': 1127, 'sm-g900p build/lrx21t': 1128, 'sm-n910h': 1129, 'sm-g920f build/nrd90m': 1130, 'sm-t560 build/ktu84p': 1131, 'motoe2(4g-lte': 1132, 'lg-h542 build/lrx22g': 1133, 'touch': 1134, 'moto g (5) build/npp25.137-82': 1135, 'lg-x230 build/mra58k': 1136, 'lg-k371': 1137, 'f3113 build/33.3.a.1.97': 1138, 'sm-t560nu build/mmb29m': 1139, 'samsung sm-a520f build/nrd90m': 1140, 'sm-p600 build/lmy47x': 1141, 'htc u11 build/nmf26x': 1142, 'kfapwi build/ktu84m': 1143, 'g3313': 1144, 'ax920': 1145, 'sm-j327vpp build/nrd90m': 1146, 'lg-m320 build/nrd90u': 1147, 'sm-g610f': 1148, 'rv:54.0': 1149, 'qmv7a': 1150, 'sm-t817v build/nrd90m': 1151, 'htc desire 530 build/mmb29m': 1152, 'lg-ls998': 1153, 'rv:41.0': 1154, 'gt-p5210 build/jdq39': 1155, 'sm-j730f': 1156, 'kfjwi': 1157, 'moto g (5) plus build/npn25.137-83': 1158, 'htc desire 510 build/kot49h': 1159, 'lg-h810': 1160, 'sm-g920p build/nrd90m': 1161, 'd6603 build/23.5.a.1.291': 1162, 'xt1650': 1163, 'sm-g930f build/nrd90m': 1164, 'lgms550 build/nrd90u': 1165, 'sm-a520f build/nrd90m': 1167, 'sm-g920t build/nrd90m': 1168, 'fractal': 1169, 'moto e (4) build/nma26.42-69': 1170, 'huawei': 1171, 'sm-t350 build/nmf26x': 1172, 'samsung sm-j730gm build/nrd90m': 1173, 'ta-1039 build/n2g47h': 1174, 'xt1003': 1175, 'motog3 build/mpis24.65-25.1-19': 1176, 'bv6000': 1177, 'sm-t230 build/kot49h': 1178, 'samsung sm-g920p build/nrd90m': 1179, 'htc one a9 build/nrd90m': 1180, 'nx785qc8g': 1181, 'rv:47.0': 1182, 'e5506 build/29.1.a.0.101': 1183, 'lgl33l/v100': 1184, 'sm-g920v build/mmb29k': 1185, 'sm-j510fn': 1186, 'lgms330 build/lmy47v': 1187, 'microsoft': 1188, 'sm-t560nu build/nmf26x': 1189, 'samsung sm-g955f build/nrd90m': 1190, 'xt1053 build/lpas23.12-21.7-1': 1191, '5025g build/lmy47i': 1192, 'lg-h918 build/nrd90m': 1193, 'sm-g900w8': 1194, 'sm-j320v build/nmf26x': 1195, 'sm-j105b build/lmy47v': 1196, 'samsung sm-g550t1 build/mmb29k': 1197, '5010s build/mra58k': 1198, 'lg-h910 build/nrd90m': 1199, 'motoe2 build/lpcs23.13-56-5': 1200, 'lgms210 build/nrd90u': 1201, 'lg-m700 build/nmf26x': 1202, 'sm-g930v build/mmb29m': 1203, 'sm-a510m build/mmb29k': 1204, '5054n': 1205, 'samsung sm-g900a build/mmb29m': 1206, 'sm-g935v': 1207, 'samsung-sm-t677a': 1208, 'sm-j500fn': 1209, 'lgmp450 build/nrd90u': 1210, 'pixel 2 build/opm1.171019.011': 1211, 'sm-j701m build/nrd90m': 1212, 'z798bl build/mmb29m': 1213, 'huawei cun-l03 build/huaweicun-l03': 1214, 'lenovo-a6020l36 build/lmy47v': 1215, 'vs820': 1216, 'kfthwi build/ktu84m': 1217, 'lg-k530 build/nrd90u': 1218, 'sm-t827v build/nrd90m': 1219, 'samsung sm-g935f build/mmb29k': 1220, 'lg-h650 build/mra58k': 1221, 'samsung-sgh-i337 build/kot49h': 1222, 'lg-k220 build/mxb48t': 1223, 'lg-h810/h81021z': 1224, 'ale-l21 build/huaweiale-l21': 1225, 'redmi 4x build/n2g47h': 1226, 'samsung sm-g935a build/nrd90m': 1227, 'sm-t713 build/nrd90m': 1228, '5051a build/mmb29m': 1229, 'lg-p714': 1230, 'samsung sm-g531h build/lmy48b': 1231, 'sm-g928g build/nrd90m': 1232, 'x10': 1233, 'gt-p5210 build/kot49h': 1234, 'gt-i9500': 1235, 'huawei y625-u13 build/huaweiy625-u13': 1236, 'hisense l675 build/mra58k': 1237, 'p5526a build/nrd90m': 1238, 'sm-t818v': 1239, 'es-mx': 1240, 'ta-1039 build/nmf26f': 1241, 'sm-a310m build/lmy47x': 1242, 'lg-d851': 1243, 'sm-j320v build/mmb29m': 1244, 'lg-h990 build/nrd90m': 1245, 'lg-v495/v49520l': 1246, 'sm-t350 build/mmb29m': 1247, 'pra-lx3 
build/huaweipra-lx3': 1248, 'xt1080 build/su6-7.7': 1249, 'huawei y520-u03 build/huaweiy520-u03': 1250, 'lenovo a2016b30 build/mra58k': 1251, 'mya-l23': 1252, 'k88 build/mmb29m': 1253, 'motog3 build/mpis24.107-55-2-17': 1254, 'lgls770': 1255, 'lg-m153 build/mxb48t': 1256, 'qtair7 build/lmy47d': 1257, '2pq93': 1258, 'sm-j111m build/lmy47v': 1259, 'sm-s320vl': 1260, 'lg-h901': 1261, 'ilium': 1831, 'sm-t377v build/nmf26x': 1263, 'one a2005 build/mmb29m': 1264, 'sm-n910t build/mmb29m': 1265, 'android 5.1': 1266, 'xt1058 build/lpas23.12-21.7-1': 1267, 'pixel build/opm1.171019.011': 1268, 'pixel xl build/opm1.171019.011': 1269, 'lg-tp260 build/nrd90u': 1270, 'sm-n900v build/lrx21v': 1271, 'moto x play build/npd26.48-24-1': 1272, 'sky_5.0lm': 1273, 'sm-a320y build/nrd90m': 1274, 'sm-t230nu build/kot49h': 1275, 'kftt build/iml74k': 1276, '2ps64 build/nrd90m': 1277, 'samsung-sm-g930a': 1278, 'stv100-2 build/mmb29m': 1279, 'g3123 build/40.0.a.6.135': 1280, 'samsung sm-a300h build/lrx22g': 1281, 'z971': 1282, 'sm-g935w8': 1283, 'sm-p580 build/nrd90m': 1284, 'samsung-sm-j320a build/mmb29k': 1285, 'samsung-sm-j727a build/nrd90m': 1286, 'verykools5030': 1287, 'sm-j320p build/lmy47x': 1288, 'lgls751': 1289, 'f8331 build/41.2.a.7.76': 1290, 'sm-a520f build/mmb29k': 1291, 'blade l5 build/lmy47i': 1292, 'tommy2': 1293, 'rv:50.0': 1294, 'sla-l03 build/huaweisla-l03': 1295, 'moto e (4) build/nma26.42-11-3': 1296, 'sm-t280 build/lmy47v': 1297, 'f8331': 1298, 'z837vl': 1299, 'sm-a510f build/nrd90m': 1300, 'xt1580': 1301, 'sm-t820 build/nrd90m': 1302, 'sm-g550t1 build/mmb29k': 1303, 'lg-h871': 1304, 'lg-ls993 build/nrd90u': 1305, 'sm-t377w': 1306, 'samsung sm-j700m build/lmy48b': 1307, 'sm-g900f build/lrx21t': 1308, 'samsung-sm-g900a build/mmb29m': 1309, 'xt1032 build/kxb21.14-l1.40': 1310, 'xt1563 build/mpds24.107-52-5': 1311, 'blade': 3252, 'verykools5019': 1313, 'sm-g360v build/lmy48b': 1314, 'lg-x210 build/lmy47i': 1315, 'hi6210sft build/mra58k': 1316, 'samsung sm-t530nu build/lrx22g': 1317, 'c6743 build/lmy47v': 1318, 'bln-l24 build/honorbln-l24': 1319, 'sm-g935t': 1320, 'redmi note 3 build/mmb29m': 1321, 'samsung-sm-g900a build/lmy47x': 1322, 'e6810 build/5.320vz.03.r': 1323, 'e6553': 1324, 'pixel 2 xl build/opm1.171019.011': 1325, 'z835 build/nmf26v': 1326, 'lg-d725': 1327, 'xt1008 build/lpbs23.13-56-2': 1328, 'xt1040': 1329, 'en-us': 1330, 'sm-t807v': 1331, 'vs996': 1332, 'e6853': 1333, 'lg-k550 build/nrd90u': 1334, 'xt1030 build/su6-7.7': 1335, 'lg-m154': 1336, 'sm-j700t build/nmf26x': 1337, 'sm-g920f build/mmb29k': 1338, '8050g build/lmy47i': 1339, 'sm-a500fu build/mmb29m': 1340, 'sm-t337v': 1341, 'samsung-sm-t377a build/mmb29k': 1342, 'helio': 1343, 'blade v7 build/mra58k': 1344, 'motog3 build/mpi24.65-33.1-2': 1345, 'sm-n950f build/nmf26x': 1346, 'lgms550 build/mxb48t': 1347, 'xt1031': 1348, 'samsung sm-t710 build/nrd90m': 1349, 'lenovo tb-7703x build/s100': 1350, 'moto g (5) build/npps25.137-15-11': 1351, 'lt30p': 1352, 'moto g (4) build/npj25.93-14': 1353, 'mha-l09 build/huaweimha-l09': 1354, 'huawei vns-l31 build/huaweivns-l31': 1355, 'hisense l675 pro build/nrd90m': 1356, 'samsung-sm-g925a build/nrd90m': 1357, 'mi a1 build/n2g47h': 1358, 'sm-n900t': 1359, 'vs425pp build/lmy47v': 1360, 'sm-j727t build/nrd90m': 1361, 'lg-x220 build/lmy47i': 1362, 'att-ie11': 1363, 'samsung sm-g900f build/mmb29m': 1364, 'blade v8 build/nrd90m': 1365, 'sm-j700t1 build/nmf26x': 1366, 'sm-g930r7': 1367, 'lgus215 build/nrd90u': 1368, 'sm-t237p': 1369, 'ta-1038': 1370, 'sm-g930w8 build/nrd90m': 1371, 'sm-g950w': 1372, 
'sm-j100vpp build/lmy48b': 1373, 'lgls990': 1374, 'sm-a500h': 1375, 'f3313 build/37.0.a.2.108': 1376, 'sm-g900i': 1377, 'sm-t813 build/nrd90m': 1378, 'rv:31.0': 1379, 'sm-j730gm build/nrd90m': 1380, 'h1711 build/huaweih1711': 1381, 'build/opr6.170623.013': 1382, 'lg-v930': 1383, 'lg-k240 build/mxb48t': 1384, 'z982 build/nmf26v': 1385, 'samsung sm-j700t build/nmf26x': 1386, 'rv:44.0': 1387, 'xt1055': 1388, 'moto e (4) build/ncq26.69-56': 1389, 'moto e (4) build/ndqs26.69-23-2-3': 1390, 'xt1575': 1391, 'sm-j327p build/mmb29m': 1392, 'sm-t377v': 1393, 'huawei g7-l03 build/huaweig7-l03': 1394, 'lg-d693n build/kot49i.v10a': 1395, 'sm-p900': 1396, 'sm-j500m build/mmb29m': 1397, 'samsung sm-n920t build/nrd90m': 1398, 'blade a510 build/mra58k': 1399, 'lgl62vl': 1400, 'bbb100-3': 1401, 'sm-n910c build/mmb29k': 1402, 'lg-ls997 build/nrd90m': 1403, 'sm-n950w': 1404, '4027a build/kot49h': 1405, 'rv:53.0': 1406, 'e2306 build/26.1.a.3.111': 1407, 'zte': 1408, 'lg-m150': 1409, 'htc one build/lrx22g': 1410, 'samsung sm-g925p build/nrd90m': 1411, 'samsung sm-g570m build/mmb29k': 1412, 'lg-h830 build/nrd90u': 1413, 'samsung sm-n920p build/nrd90m': 1414, 'samsung-sm-g890a build/mmb29k': 1415, 'samsung sm-g920t build/nrd90m': 1416, 'samsung sm-n920a build/nrd90m': 1417, 'rv:46.0': 1418, 'sm-t800 build/mmb29k': 1419, 'hp2015': 1420, 'moto g (5) build/npp25.137-38': 1421, 'samsung sm-g903f build/mmb29k': 1422, 'sm-j100mu': 1423, 'e6833': 1424, 'sm-g920w8': 1425, '5010g build/mra58k': 1426, 'a3_mini': 1427, 'xt1528': 1428, 'rv:39.0': 1429, 'sm-n900w8 build/lrx21v': 1430, 'sm-g920t1': 1431, 'hp': 1432, 'sm-s820l': 1433, 'xt1032 build/lpbs23.13-57-2': 1434, 'sm-g920v': 1435, 'samsung sm-g850f/g850fxxs2cqd9 build/lrx22g': 1436, 'lg-h850': 1437, 'redmi': 1438, 'd6708': 1439, 'lg-h900/h90022b': 1440, 'samsung-sm-g920a build/nrd90m': 1441, 'huawei can-l01 build/huaweican-l01': 1442, 'samsung sm-g935p build/nrd90m': 1443, 'wow64': 1444, 'samsung sm-j327t1 build/nrd90m': 1445, 'xt1563 build/mpd24.107-52': 1446, 'cro-l03 build/huaweicro-l03': 1447, 'hisense f23 build/nrd90m': 1448, 'moto c plus build/nrd90m.05.034': 1449, 'redmi note 4 build/nrd90m': 1450, 'lg-k373': 1451, 'sm-g530p': 1452, 'sm-j320fn build/lmy47v': 1453, 'moto x4 build/npw26.83-18-2-0-4': 1454, 'f5121 build/34.3.a.0.238': 1455, 'ilium x210 build/lmy47i': 1456, 'htc6545lvw': 1457, 'sm-s902l': 1458, 'c2104': 1459, 'samsung-sm-t537a': 1460, 'vs990 build/mra58k': 1461, 'sm-n920v': 1462, '5056a build/mmb29m': 1463, 'lenovo a6020l37 build/lmy47v': 1464, 'samsung-sm-g928a build/nrd90m': 1465, 'ph-1': 1466, 'g3423': 1467, 'e5803 build/32.4.a.1.54': 1468, 'hisense u963 build/mra58k': 1469, 'moto x4 build/npw26.83-42': 1470, 'f3113 build/33.2.a.3.81': 1471, 'htc one_m8 build/mra58k': 1472, 'samsung sm-g530t build/lmy47x': 1473, 'sm-g530h build/ktu84p': 1474, 'sm-p355m build/mmb29m': 1475, 'one touch 4016a build/jdq39': 1476, 'ilium x510 build/mra58k': 1477, 'blade a520 build/nrd90m': 1478, 'vs986 build/mra58k': 1479, 'xt1609 build/mpis24.241-2.35-1-17': 1480, 'samsung-sm-g890a': 1481, 'lg-d855 build/lrx21r': 1482, 'lg-h631': 1483, 'fp2': 1484, 'gt-n5110': 1485, 'm3': 1486, 'g3313 build/43.0.a.5.79': 1487, 'sm-g955u1 build/nrd90m': 1488, 'sm-j530gm build/nrd90m': 1489, 'xt1563': 1490, 'kfsuwi build/lvy48f': 1491, 'r1': 1492, 'blade l2 plus build/kot49h': 1493, 'samsung sm-g925f build/nrd90m': 1494, 'xt1635-02': 1495, 'lg-v521': 1496, 'ta-1038 build/nmf26o': 1497, 'sch-i435': 1498, 'sm-g928t build/nrd90m': 1499, 'p027': 1500, '5054s build/lmy47v': 1501, 'motog3-te 
build/mpds24.65-33-1-30': 1502, 'xt1064': 1503, 'sm-j327t1 build/nrd90m': 1504, 'p00c': 1505, 'samsung sm-g920f build/nrd90m': 1506, 'lg-h840 build/mmb29m': 1507, 'lenovo tb-x103f build/lenovotb-x103f': 1508, 'revvlplus': 1509, 'lg-m430 build/nrd90u': 1510, 'xt1060': 1511, 'huawei nxt-l09 build/huaweinxt-l09': 1512, 'lenovoa3300-gv build/jdq39': 1513, 'rv:35.0': 1514, 'kfmewi': 1515, 'le x520 build/iexcnfn5902303111s': 1516, 'nexus 6p build/opr5.170623.014': 1517, 'lg-m327 build/nrd90u': 1518, 'samsung-sm-g890a build/nrd90m': 1519, 'pspc550 build/lmy47d': 1520, 'sm-g930vl build/nrd90m': 1521, 'stv100-3': 1522, 'samsung sm-n950u1 build/nmf26x': 1523, 'sm-a320y': 1524, 'samsung sm-a720f build/nrd90m': 1525, 'sm-g355m build/kot49h': 1526, 'me173x': 1527, 'gt-s7580l build/jdq39': 1528, 'alcatel': 2442, 'nxa116qc164': 1530, 'zte blade l5 build/lmy47i': 1531, 'sm-g360v': 1532, 'sm-g925f build/nrd90m': 1533, 'ax820 build/mra58k': 1534, 'sm-g360m': 1535, 'v.40r': 1536, 'blu': 1537, 'malc': 1538, 'sm-g928v build/mmb29k': 1539, 'asus_z00ed': 1540, 'sh-04f': 1541, 'trekker-x3 build/mmb29m': 1542, 'blade l7 build/mra58k': 1543, 'nexus 5 build/m4b30z': 1544, 'venue': 1545, 'samsung sm-j327a build/nrd90m': 1546, 'sm-t310 build/kot49h': 1547, 'sm-a310f build/mmb29k': 1548, 'moto g (4) build/npjs25.93-14-8': 1549, 'moto e (4) plus build/nma26.42-11-3': 1550, 'vs880pp': 1551, 's.n.o.w.4': 1552, 'sm-j727t1 build/nrd90m': 1553, 'samsung-sm-n900a': 1554, 'samsung sm-g900t build/mmb29m': 1555, 'sgh-i337m build/lrx22c': 1556, 'sm-n900t build/lrx21v': 1557, 'mami': 1558, 'm4': 1559, 'sm-n9005 build/lrx21v': 1560, 'trekker-m1': 1561, 'sch-i535': 1562, 'lg-v496': 1563, 'nokia': 2779, 'sgh-i317m': 1565, 'turbo c5 build/lmy47i': 1566, 'e5506 build/29.2.a.0.166': 1567, 'lg-h540': 1568, 'lg-d855': 1569, 'z963vl': 1570, 'samsung sm-j327p build/mmb29m': 1571, 'vfd': 1572, 'sm-s906l': 1573, 'huawei y560-l03 build/huaweiy560-l03': 1574, 'pra-lx1 build/huaweipra-lx1': 1575, 'lg-h443/h44312g': 1576, 'lenovo tb2-x30f build/lenovotb2-x30f': 1577, '2pyb2': 1578, 'htc one a9s build/mra58k': 1579, 'lg-k450 build/mxb48t': 1580, 'sm-n950u1 build/nmf26x': 1581, 'zte-z835': 1582, 'xt1033': 1583, 'sm-g900r4': 1584, 'lg-k430 build/mra58k': 1585, 'g8141 build/47.1.a.5.51': 1586, 'z956 build/mmb29m': 1587, 'vs880': 1588, '5049w build/nrd90m': 1589, 'bla-l29 build/huaweibla-l29': 1590, '5057m': 1591, 'k88': 1592, 'blade a460 build/lmy47o': 1593, 'blu life xl build/l050u': 1594, 'coolpad': 1595, 'vivo': 2391, 'sm-g930t1': 1597, '2pzc5': 1598, 'sm-t113 build/ktu84p': 1599, 'att': 1600, 'studio': 2940, 'z959 build/lmy47v': 1602, 'zte blade a321 build/nmf26f': 1603, 'htc6535lvw': 1604, 'motog3 build/mpi24.65-25': 1605, 'lg-x165g build/lrx21m': 1606, 'sm-j327t build/nrd90m': 1607, 'slay': 1608, 'orbis': 1609, 'samsung sm-g925i build/nrd90m': 1610, 's57 build/ktu84p': 1611, 'samsung sm-a510m build/nrd90m': 1612, 'sm-a500m build/lrx22g': 1613, 'samsung sm-j111m build/lmy47v': 1614, 'sch-i545 build/lrx22c': 1615, 'lg-h345': 1616, 'sm-s907vl': 1617, 'samsung-sm-g870a': 1618, 'sm-j727vpp build/nrd90m': 1619, 'lgl58vl': 1620, 'samsung sm-p580 build/nrd90m': 1621, '9003a build/mra58k': 1622, 'moto g (5) plus build/npn25.137-82': 1623, 'lg-m400 build/nrd90u': 1624, 'xt1058': 1625, 'htc6525lvw': 1626, 'frd-l14 build/huaweifrd-l14': 1627, 'xt1096': 1628, 'qwestie8': 1629, 'samsung-sm-t817a': 1630, 'asus': 1631, 'lg-d331': 1632, 'lg-m255': 1633, 'sm-g928f': 1634, 'verykools5525': 1635, 'kfsowi': 1636, 'sm-n920r4': 1637, 'sm-p550 build/nmf26x': 1638, 
'sm-s550tl': 1639, 'a621r': 1640, 'samsung sm-n950f build/nmf26x': 1641, 'lg-h810/h81022f': 1642, 'f5121 build/34.3.a.0.252': 1643, 'sgh-m919n': 1644, 'h1611': 1645, 'xt1063 build/mpb24.65-34-3': 1646, 'lgl52vl build/lmy47v': 1647, 'sm-n920a build/mmb29k': 1648, 'g3223 build/42.0.a.4.101': 1649, 'lg-lk460': 1650, 'xt1094': 1651, 'p5026a': 1652, 'samsung-sm-g530az build/lmy48b': 1653, 'lgls665 build/lmy47v': 1654, 'samsung sm-n900t build/lrx21v': 1655, 'sm-g610m build/nrd90m': 1656, 'z833': 1657, 'lg-d850': 1658, 'd6503': 1659, 'sm-a720f build/nrd90m': 1660, 'samsung sm-j530gm build/nrd90m': 1661, 'moto c build/nrd90m.046': 1662, 'zte a2017u build/nrd90m': 1663, 'gt-n7100': 1664, 'samsung sm-j710mn build/mmb29k': 1665, 'xt1063 build/mpb24.65-34': 1666, 'sm-p605v': 1667, 'rv:37.0': 1668, 'samsung sm-g928g build/nrd90m': 1669, 'lenovo tab 2 a7-30gc build/kot49h': 1670, 'lg-k428 build/nrd90u': 1671, 'blu studio c 5+5 build/lrx21m': 1672, 'f3113': 1673, 'sm-s975l': 1674, 'xt1097': 1675, 'motog3-te build/mpds24.65-33-1-3': 1676, '0pm92': 1677, 'd2306': 1678, 'mya-l13 build/huaweimya-l13': 1679, 'lg-h700 build/nrd90u': 1680, 'a50c+': 1681, 'samsung sm-g900m build/lrx21t': 1682, '5095i build/mra58k': 1683, 'lg-v495': 1684, 'f3213 build/36.0.a.2.146': 1685, 'a1601 build/lmy47i': 1686, 'lgus991': 1687, 'samsung-sm-t807a': 1688, 's6000': 1689, 'sm-j320vpp': 1690, 'rv:52.9': 1691, 'samsung sm-t800 build/mmb29k': 1692, 'intel': 1693, 'blade a475 build/lmy47d': 1694, 'lg-k550': 1695, 'e5506': 1696, 'samsung sm-g900i build/mmb29m': 1697, 'win64': 1698, '0pja2': 1699, 'blu energy x plus build/lrx21m': 1700, 'sm-a710m build/nrd90m': 1701, '5015a build/lmy47i': 1702, 'samsung sm-j530f build/nrd90m': 1703, 'sm-g900l': 1704, 'asus_p00j': 1705, 'ta-1025': 1706, 'sm-s327vl': 1707, 'a0001 build/mhc19q': 1708, 'gt-i9195l': 1709, '6039a build/lrx22g': 1710, 'sm-a800i': 1711, 'lg-d959': 1712, 'sm-g900v build/lrx21t': 1713, 'samsung sm-g930r4 build/nrd90m': 1714, 'lgl83bl': 1715, 'sm-g928p': 1716, 'c6906 build/14.6.a.1.236': 1717, 'k92': 1718, 'k90u': 1719, 'samsung-sm-g935a': 1720, 'xt1580 build/npks25.200-12-9': 1721, 'sm-a310f build/nrd90m': 1722, 'boie9': 1723, 'sm-t377p': 1724, 'lg-ls995': 1725, 'lgus990': 1726, 'sm-g935u': 1727, 'xt1021 build/kxc21.5-40': 1728, 'kiw-l24': 1729, 'sm-t377t': 1730, 'rv:43.0': 1731, 'lg-d320 build/kot49i.v10a': 1732, 'alcatel_5044r build/nrd90m': 1733, 'samsung-sm-n910a': 1734, 'htc6500lvw': 1735, 'xt1575 build/nphs25.200-23-1': 1736, 'rs988': 1737, 'lg-h931': 1738, 'rct6s03w12': 1739, 'samsung-sm-g920a build/mmb29k': 1740, 'sm-t807t': 1741, 'cam-l23': 1742, 'sm-t817t': 1743, 'sm-j727p': 1744, 'sm-g550t2': 1745, 'vs985': 1746, 'lg-h815 build/mra58k': 1747, 'sm-g900p': 1748, 'samsung sm-j320m build/lmy47v': 1749, 'bba100-1': 1750, '831c': 1751, 'lg-h830': 1752, 'alcatel one touch 7047a build/jdq39': 1753, 'sm-g800f build/mmb29k': 1754, 'sm-e700m': 1755, 'samsung sm-j710mn build/nrd90m': 1756, 'z831': 1757, 'gt-p3100': 1758, 'e5606': 1759, 'samsung-sm-g900a': 1760, 'asus_z00ud': 1761, 'sm-n900p build/lrx21v': 1762, 'lg-v700': 1763, 'samsung sm-j700t1 build/nmf26x': 1764, 'p008 build/nrd90m': 1765, 'oneplus': 1766, 'samsung-sm-j320a build/nmf26x': 1767, 'sm-g800m build/kot49h': 1768, 'bbb100-2': 1769, 'lg-d680': 1770, 'sm-s727vl build/mmb29m': 1771, 'f3313': 1772, 'one': 1773, 'd5106': 1774, 'samsung-sgh-i337': 1775, 'lgl84vl build/nrd90u': 1776, 'ta-1028 build/nmf26o': 1777, 'yoga': 1778, 'd5316': 1779, 'sph-l720': 1780, 'sm-n915v': 1781, '0paj5': 1782, 'rct6513w87 build/mra58k': 
1783, 'e6853 build/32.4.a.1.54': 1784, 'sm-g720ax': 1785, 'sm-g900m build/mmb29m': 1786, '6045i build/lrx22g': 1787, 'e2006': 1788, 'bbb100-1': 1789, 'gt-i9060l build/jdq39': 1790, 'sm-t530': 1791, 'infinit': 1792, 'sm-j110m build/lmy48b': 1793, 'lg-d213': 1794, 'lava_a3': 1795, 'gt-s7390': 1796, 'alcatel_5054o': 1797, 'lenovo yt3-x50f build/mmb29m': 1798, 'samsung-sm-g925a': 1799, 'sm-a300h': 1800, 'huawei build/mmb28b': 1801, 'ilium l620 build/nrd90m': 1802, 'sm-n910p': 1803, 'lenovo pb2-650y build/mra58k': 1804, 'sm-g9250': 1805, 'sm-n920c build/nrd90m': 1806, 'sm-j710gn': 1807, 'sm-a500w': 1808, 'nexus 6 build/n6f27m': 1809, 'moto z2 play build/npss26.118-19-11': 1810, 'qtaqz3 build/kot49h': 1811, 'm4 ss4452 build/lmy47v': 1812, 'sm-t110 build/jdq39': 1813, 'neffos c5 build/lmy47d': 1814, 'ta-1028 build/nrd90m': 1815, 'ta-1044 build/nmf26f': 1816, 'astro': 2846, 'samsung-sm-j320az': 1818, 'bg2-w09': 1819, 'sm-j700h build/mmb29k': 1820, 'gt-p5110': 1821, 'asus_z01kd': 1822, 'lg-k330': 1823, 'sm-j530f build/nrd90m': 1824, 'lg-d693n': 1825, 'lg-h525n build/mra58k': 1826, 'p5006a': 1827, 'samsung sm-j510mn build/mmb29m': 1828, 'iris 870 build/mra58k': 1829, 'xt1032 build/kxb20.9-1.10-1.24-1.1': 1830, 'lg-h932': 1832, 'sm-t705': 1833, 'sm-g920i': 1834, 'moto g (4) build/npjs25.93-14-13': 1835, 'samsung sm-j120h build/lmy47v': 1836, 'moto g (5) build/npp25.137-15': 1837, '7048a build/lrx22g': 1838, '4013m build/kot49h': 1839, 'stv100-1': 1840, 'moto c build/nrd90m.054': 1841, 'lg-e450f': 1842, 'lg-h960': 1843, 'gxq6580_weg_l build/lmy47i': 1844, 'sm-g930t': 1845, 'beat': 1846, 'sm-g928g build/mmb29k': 1847, 'vk700 build/lrx22g': 1848, 'eva-l19 build/huaweieva-l19': 1849, 'lgl57bl': 1850, 'xt1023': 1851, 'xt1008': 1852, 'sm-g900t1': 1853, 'neffos x1 max build/nrd90m': 1854, 'm4 ss4458 build/mmb29m': 1855, 'e6603': 1856, 'a96 build/lmy47i': 1857, 'lenovo tb3-710i build/lmy47i': 1858, 'sm-a520w build/nrd90m': 1859, 'samsung sm-j500m build/mmb29m': 1860, 'e5803': 1861, 'e2104': 1862, 'samsung sm-g800f build/mmb29k': 1863, 'sm-t800': 1864, 'was-lx1a build/huaweiwas-lx1a': 1865, 'alcatel_5098o build/mmb29m': 1866, 'blade a465 build/lmy47d': 1867, 'ilium x520 build/nrd90m': 1868, 'sla-l02': 1869, 'f5121': 1870, 'rv:29.0': 1871, 'lg-h735 build/lmy47v': 1872, 'max': 1873, 'g620s-l03': 1874, 'sm-g318ml': 1875, 'samsung sm-a320y build/nrd90m': 1876, 'sm-t320 build/kot49h': 1877, 'motog3 build/mpi24.65-25.1': 1878, 'f1f build/lmy47v': 1879, 'moto g play build/njh47f': 1880, 'huawei lua-u23 build/huaweilua-u23': 1881, 'lg-h343': 1882, 'mi a1 build/opr1.170623.026': 1883, 'lgms323': 1884, 'verykools5004': 1885, 'sm-j710mn build/nrd90m': 1886, 'lg-h650 build/lmy47v': 1887, 'd2306 build/18.6.a.0.182': 1888, 'samsung sm-g925p build/mmb29k': 1889, 'f3213': 1890, 'sm-g928p build/nrd90m': 1891, 'u': 1892, 'htc_d10i': 1893, 'ta-1027 build/n2g47h': 1894, 'vtr-l29': 1895, 'ta-1025 build/nmf26f': 1896, 'iris 820 build/mra58k': 1897, 'wileyfox': 1898, 'lg-h340ar': 1899, 'ilium l610 build/mra58k': 1900, 'ideatab': 1901, 'a9': 1902, 'iphone': 1903, 'redmi 4a build/n2g47h': 1904, 'moto z2 play build/npss26.118-19-14': 1905, 'blade l3 build/kot49h': 1906, 'zte blade a512 build/mmb29m': 1907, '4047a build/nrd90m': 1908, 'n9136': 1909, 'sm-g610f build/nrd90m': 1910, 'dt0704k08': 1911, 'hisense': 1912, 'sm-g920r4': 1913, 'a466bg': 1914, 'nem-l51 build/honornem-l51': 1915, '4047g build/nrd90m': 1916, 'motog3': 1917, 'mi': 2154, 'alcatel one touch 5036a build/jdq39': 1919, 'd5316 build/19.4.a.0.182': 1920, 'lg-k420': 1921, 
'sm-a300fu build/mmb29m': 1922, '1016s': 1923, 'lenovo tb3-710f build/lrx21m': 1924, 'sgp621 build/23.5.a.1.291': 1925, 'lg-v520': 1926, 'rs988 build/mmb29m': 1927, 'asus_x008dc build/nrd90m': 1928, 'asus_z01bda': 1929, 'alcatel one touch 7042a build/jdq39': 1930, 'archos': 1931, 'rct6773w22b': 1932, 'xt1032': 1933, 'f80 piabella build/mra58k': 1934, 'xt1650 build/npls26.118-20-5-11': 1935, 'samsung sm-a510f build/nrd90m': 1936, 'sm-j320w8': 1937, 'sm-g955f': 1938, 'samsung sm-g610m build/nrd90m': 1939, 'grand2c': 1940, 'lg-d625': 1941, 'ftj152d': 1942, 'sm-g928v': 1943, 'qtasun1': 1944, 'z799vl': 1945, 'bba100-2': 1946, 'sm-j710f build/nrd90m': 1947, 'ht0701a16': 1948, 'ego': 1949, 'samsung sm-g920i build/nrd90m': 1950, 'sm-g903f build/mmb29k': 1951, 'asus_x00dd': 1952, 'aquaris u plus build/nmf26f': 1953, 'robin': 1954, 'verykools5035': 1955, 'icon': 1956, 'xt1680': 1957, 'one a2003 build/mmb29m': 1958, 'moto g play build/npi26.48-36': 1959, 'verykools4009': 1960, 'z839': 1961, '5085b build/mra58k': 1962, 's60 build/mmb29m': 1963, 'asus_z00ad build/lrx21v': 1964, 'huawei gra-l09 build/huaweigra-l09': 1965, 'sm-g930f build/mmb29k': 1966, 'samsung-sm-j320az build/mmb29k': 1967, 'xt1092': 1968, 'moto c build/nrd90m.050': 1969, 'sm-g570m build/nrd90m': 1970, 'huawei lyo-l21 build/huaweilyo-l21': 1971, 'playtab': 1972, 'tab2a7-10f': 1973, 'vky-l09 build/huaweivky-l09': 1974, 'lg-h815': 1975, 'u feel lite build/mra58k': 1976, 'e5823 build/32.4.a.1.54': 1977, 'pixel 2 xl build/opm1.171019.013': 1978, 'sm-a320fl build/nrd90m': 1979, 'n817': 1980, 'joy': 1981, 'c6906': 1982, 'e6810': 1983, 'sm-j710gn build/mmb29k': 1984, 'sm-t530 build/lrx22g': 1985, 'pixel build/opm1.171019.012': 1986, 'was-l03t build/huaweiwas-l03t': 1987, 'tornado': 1988, 'sm-g950u build/r16nw': 1989, 'sm-j500fn build/mmb29m': 1990, 'sm-t825 build/nrd90m': 1991, 'lgls775 build/mmb29m': 1992, 'moto g (5) plus build/npn25.137-92': 1993, 'sm-g390w': 1994, 'samsung sm-j500fn build/mmb29m': 1995, 'sm-a510f build/mmb29k': 1996, 'gt-i9195i': 1997, 'asus_x015d build/nrd90m': 1998, 'ideataba1000-f': 1999, 'lm-x210(g': 2000, 'alter': 2001, 'sm-g390f': 2002, 'e5823': 2003, 'a37f': 2004, 'f5321 build/34.3.a.0.238': 2005, 'tab7': 2006, 'lgls675 build/lmy47v': 2007, 'ilium l950 build/ktu84p': 2008, 'rv:59.0': 2009, 'e6633': 2010, 'lg-h831 build/nrd90u': 2011, 'sm-t555': 2012, 'trt-lx3': 2013, 'highway': 2014, 'verykools5034': 2015, 'rct6203w46 build/kot49h': 2016, 'samsung sm-j320fn build/lmy47v': 2017, 'moto z2 play build/npss26.118-19-4': 2018, 'gt-n8000': 2019, 'samsung sm-n900w8 build/lrx21v': 2020, 'mobiistar_lai_yuna_x': 2021, 'iris80': 2022, 'gt-i9505 build/lrx22c': 2023, 'vtr-l09 build/huaweivtr-l09': 2024, 'bnd-l21 build/honorbnd-l21': 2025, 'nexus 5x build/opm3.171019.013': 2026, 'samsung sm-a320fl build/nrd90m': 2027, 'lg-d802': 2028, 'htc u11 build/opr6.170623.013': 2029, 'lg-d722': 2030, 'sla-l22': 2031, 'moto g (5) build/npp25.137-93': 2032, 'pixel 2 build/opm2.171019.016': 2033, 'xt1021 build/lpcs23.13-34.8-3': 2034, 'lglk430': 2035, 't1': 2036, 'tr10cs1 build/jdq39': 2037, 'redmi 3s build/mmb29m': 2038, 'e2303': 2039, 'sm-g531f': 2040, 'e8qp': 2041, 'samsung-sm-n920a': 2042, 'sm-a310m build/nrd90m': 2043, 'lgl41c': 2044, 'grand': 2045, 'sm-g903m': 2046, '9022x': 2047, 'alp-l09 build/huaweialp-l09': 2048, 'sm-g850m build/lrx22g': 2049, 'lg-f400k': 2050, 'rct6223w87': 2051, 'e2104 build/24.0.a.5.14': 2052, '9002a': 2053, 'covet build/nrd90m': 2054, 'xt1080': 2055, 'sm-j120fn build/lmy47x': 2056, 'rne-l03 build/huaweirne-l03': 
2057, 'me301t': 2058, 'sm-g900f build/kot49h': 2059, 'ta-1027': 2060, 'sgh-m919 build/ktu84p': 2061, 'p5006a build/nrd90m': 2062, 'g8141': 2063, 'sm-g530w': 2064, 'ax921 build/mra58k': 2065, 'sm-n910f build/mmb29m': 2066, 'zte blade a511 build/mra58k': 2067, 'ilium l1000 build/lrx22g': 2068, 'kyy22': 2069, 'asus_z017d': 2070, 'lg-h870ds build/nrd90u': 2071, 'lgls755': 2072, 'stellar': 2323, 'gt-i9515': 2074, 'frd-l09 build/huaweifrd-l09': 2075, 'ax1070': 2076, 'samsung sm-t550 build/nmf26x': 2077, 'sens': 2078, 'vt0701a08': 2079, 'duk-al20': 2080, 'lenovo pb2-670y build/mra58k': 2081, 'sm-j510f': 2082, 'sm-j330fn build/nrd90m': 2083, 'htc desire 526g build/kot49h': 2084, 'sm-j330f': 2085, '4003a': 2086, 'rv:60.0': 2087, 'htc_one_m8s/2.12.111.1': 2088, 'z832 build/mmb29m': 2089, 'samsung sm-a510m build/mmb29k': 2090, 'cph1701': 2091, 'a952': 2092, 'e5306 build/27.3.a.0.165': 2093, 'z9 plus build/nrd90m': 2094, 'dash': 2431, 'lg-d320': 2096, 'nx521j': 2097, '916': 2098, '4034g': 2099, 'gravity build/nrd90m': 2100, 'plus': 2101, 'pixel build/opm1.171019.016': 2102, 'sm-n910t3': 2103, '9203a build/mra58k': 2104, 'mha-l29 build/huaweimha-l29': 2105, 'sgh-i337m': 2106, 'pe-tl10': 2107, 'z2': 2108, 'sgh-m919v': 2109, 'tmovi build/vision': 2110, 'xt1068': 2111, 'advance': 2112, 'sm-g360f': 2113, 'plk-l01 build/honorplk-l01': 2114, 'za409': 2115, 'energy x 2 build/e050l': 2116, 'lg-h850 build/mmb29m': 2117, 'gt-i8200n': 2118, 'hisense f102 build/nrd90m': 2119, '5017a': 2120, 'moto x4 build/opw27.57-40': 2121, 'b1-810': 2122, 'moto g play build/npis26.48-36-2': 2123, 'stv100-4 build/lmy47v': 2124, 'ta-1004': 2125, 'sm-g313ml': 2126, 'sm-g530m': 2127, 'lg-e980h': 2128, 'bah-l09 build/huaweibah-l09': 2129, 'dli-l22 build/honordli-l22': 2130, 'sm-g388f': 2131, 'htcd160lvwpp': 2132, 'htc one mini build/kot49h': 2133, 'sm-n9500': 2134, 'samsung-sgh-i537': 2135, '5042a': 2136, 'sm-g903w': 2137, 'bln-l21 build/honorbln-l21': 2138, 'sm-g730v': 2139, 'alumini': 2140, '9008a build/nrd90m': 2141, 'ideataba2109a': 2142, 'g3223 build/42.0.a.4.167': 2143, 'lg-m710 build/nrd90u': 2144, 'xt1254': 2145, 'sm-j510fn build/nmf26x': 2146, 'lg-h520': 2147, 'aquaris_a4.5': 2148, 'sm-j500g': 2149, 'hisense t963 build/mra58k': 2150, 'sm-j200h': 2151, '8080 build/lrx21m': 2152, 'sm-g530fz': 2153, 'logic': 2155, 'xt1650 build/npls26.118-20-5-3': 2156, 'pixel 2 build/opm1.171019.019': 2157, 'asus_x00dda': 2158, 'huawei y360-u23 build/huaweiy360-u23': 2159, 'e6653 build/32.4.a.1.54': 2160, 'nxa8qc116': 2161, 'aerial': 2162, 'xs-z47b7vqtmxs': 2163, 's70v': 2164, '8062 build/mra58k': 2165, 'lg-d400': 2166, 'samsung-sm-g930az': 2167, 'xt1572 build/nphs25.200-15-8': 2168, 'd6603': 2169, 'zte blade v8 mini build/nrd90m': 2170, 'ta-1044': 2171, 'moto g (5) plus build/npns25.137-92-4': 2172, 'vtr-al00 build/huaweivtr-al00': 2173, 'mya-l03 build/huaweimya-l03': 2174, 'hisense f32 build/nmf26f': 2175, 'sm-t217s build/kot49h': 2176, 'b3-a40': 2177, 'zuum_zen_i build/lrx21m': 2178, 'b3-a20': 2179, 'en-gb': 2180, 'gt-s5310l': 2181, 'vs5012 build/nrd90m': 2182, 'c1904': 2183, 'sm-t585': 2184, 'sm-t580': 2185, 'f3111 build/33.3.a.1.97': 2186, 'lgls992': 2187, 'bv7000': 2188, 'k10': 2189, 'lg-p708g': 2190, 'sm-g925i': 2191, 'sm-g386w': 2192, 'sm-a300m': 2193, 'huawei m2-801w build/huaweim2-801w': 2194, 'samsung sm-g920f build/lrx22g': 2195, 'lenovo yt3-850f build/mmb29m': 2196, 'ple-701l': 2197, 'limit': 2198, 'sm-t285m': 2199, 'iris702': 2200, 'sm-j530g': 2201, 'lg-h812': 2202, 'lgl164vl build/nrd90u': 2203, 'huawei can-l11 
build/huaweican-l11': 2204, 'nx16a8116kp': 2205, 'sm-e500m': 2206, 'a574bl build/nmf26f': 2207, 'samsung sm-a730f build/nmf26x': 2208, 'sm-t827r4': 2209, 'lenovo yt3-850m build/mmb29m': 2210, 'f3313 build/37.0.a.2.248': 2211, 'ta-1044 build/opr1.170623.026': 2212, 'sm-g950f build/r16nw': 2213, 'f5321 build/34.2.a.2.47': 2214, 'sm-g7105': 2215, 'gt-s7275b': 2216, 'minion_tab': 2217, 'x900+': 2218, 'samsung-sm-j327az': 2219, 'lgms428': 2220, 'samsung sm-t813 build/nrd90m': 2221, 'ilium lt520 build/nrd90m': 2222, 'samsung-sm-n900a build/lrx21v': 2223, 'sm-a530f build/nmf26x': 2224, 'sm-p607t': 2225, 'redmi 5 plus build/n2g47h': 2226, 'sm-t355y build/nmf26x': 2227, 'sm-j700p': 2228, 'ilium x220 build/mra58k': 2229, 'lgl163bl': 2230, 'ta-1052': 2231, 'pixel xl build/opm1.171019.016': 2232, 'o1': 2233, 'origins': 2234, 'asus_z017da': 2235, 'moto g (5) build/npps25.137-93-4': 2236, 'lg-m257': 2237, 'aquaris x5 plus build/nmf26f': 2238, 'xt1072': 2239, 'verykool': 2240, 'x3402': 2241, 'lgms345': 2242, 'lg-v500': 2243, 'a1-850': 2244, 'a97': 2245, 'sm-n920p': 2246, 'fig-lx3 build/huaweifig-lx3': 2247, '5045i': 2248, 'stf-l09 build/huaweistf-l09': 2249, 'alcatel_4060o build/mmb29m': 2250, 'lgus992': 2251, 'xt1609': 2252, 'sm-t537v': 2253, 'moto x4 build/opws27.57-40-6': 2254, 'sc-02h': 2255, 'sm-a500fu': 2256, 'ilium pad t7x build/lmy47i': 2257, 'xt1095': 2258, 'samsung sm-a310f build/nrd90m': 2259, 'gt-n5110 build/jdq39': 2260, '5056n': 2261, 'samsung-sgh-i497': 2262, 'moto e (4) plus build/nma26.42-142': 2263, 'alcatel one touch 7040a build/jdq39': 2264, 'zte-z956': 2265, 'e5306 build/27.3.a.0.129': 2266, 'c6903': 2267, 'bla-l09 build/huaweibla-l09': 2268, 'alp-l09 build/huaweialp-l09s': 2269, 'magno': 2270, 'alcatel_5056o': 2271, 'lg-sp200': 2272, 'xt1650 build/ncls26.118-23-13-6-5': 2273, 'vk815': 2274, 'samsung sm-g950f build/r16nw': 2275, 'moto g (4) build/npjs25.93-14-8.1-4': 2276, 'sm-c900f': 2277, 'p5046a': 2278, 'stk_sync_5e': 2279, 'vs996 build/opr1.170623.026': 2280, 'sm-g955u build/r16nw': 2281, 'was-tl10 build/huaweiwas-tl10': 2282, 'g25524k': 2283, 'sgp511': 2284, 'samsung-sm-j120a': 2285, 'sov33 build/35.0.d.2.25': 2286, 'z813 build/lmy47o': 2287, 'g3223': 2288, 'sm-g730w8 build/kot49h': 2289, '4034e': 2290, 'mya-l11': 2291, 'sm-a520w': 2292, 'rct6k03w13': 2293, 'd5306': 2294, 'rv:14.0': 2295, 'samsung-sm-g925a build/mmb29k': 2296, 'a5002': 2297, 'pixel 2 xl build/opm1.171019.021': 2298, 'sm-t210': 2299, 'nyx_a1': 2300, 'f3311': 2301, 'pixel 2 build/opm1.171019.021': 2302, 'mi max 2 build/nmf26f': 2303, 'sph-l720t': 2304, 'xt1021': 2305, 'nexus 6 build/mob30m': 2306, 'sm-n920t': 2307, 'b1-790': 2308, 'g527-u081': 2309, 'lg-d681': 2310, 'samsung sm-g570m build/nrd90m': 2311, 'helix': 2312, 'xt890 build/9.8.2i-50_sml-25': 2313, 'samsung sm-g9600 build/r16nw': 2314, 'sm-g925t build/lmy47x': 2315, 'sm-g900m build/kot49h': 2316, 'pixel xl build/opm1.171019.021': 2317, 'moto g (5) plus build/npn25.137-15': 2318, 'samsung sm-g950u build/r16nw': 2319, 'g3123': 2320, 'xt1710-02': 2321, 'sm-g9600 build/r16nw': 2322, 'zur70016': 2324, 'sm-g928g': 2325, 'samsung sm-a310m build/lmy47x': 2326, 'star': 2327, 'g8341': 2328, 'xt1580 build/npks25.200-17-8': 2329, 'lex829': 2330, 'ta-1027 build/opr1.170623.026': 2331, 'ta-1020': 2332, 'ht0703k16': 2333, 'aquaris v build/n2g47h': 2334, 'sm-g350m': 2335, 'infinix': 2336, 'h550': 2337, 'm431': 2338, 'aoc_u706g': 2339, 'gigaset': 2340, 'sla-l23': 2341, 'samsung sm-g955u build/r16nw': 2342, 'lg-h221': 2343, 'g3123 build/40.0.a.6.189': 2344, 'grace': 2345, 
'pmid7102dc': 2346, 'sm-j500h': 2347, 'moto g (4) build/npjs25.93-14-15': 2348, 'lg-sp320': 2349, 'build/opr1.170623.032': 2350, 'v502015': 2351, 'zte blade a6 build/nmf26f': 2352, 'aerial build/nrd90m': 2353, 'vs980 4g build/lrx22g': 2354, 'grant': 2355, 'moto c build/nrd90m.057': 2356, 'sm-g965u build/r16nw': 2357, 'samsung sm-a310f/a310fxxu4crb1 build/nrd90m': 2358, 'xt1225': 2359, 'm4 ss4457-r build/nrd90m': 2360, 'samsung sm-n950u build/r16nw': 2361, 'r831l': 2362, 'swift': 2363, 'pra507': 2364, 'f8131': 2365, 'iris50': 2366, 'm10/q1010': 2367, 'ax705': 2368, 'samsung sm-g965u1 build/r16nw': 2369, 'htcd100lvwpp': 2370, 'p01m': 2371, 'p00a': 2372, 'lg-v522': 2373, 'samsung-sm-g900a build/kot49h': 2374, 'sm-t380': 2375, 'rne-l21 build/huaweirne-l21': 2376, 'htcd200lvw': 2377, 'sm-n950u build/r16nw': 2378, 'moto e (4) build/ndqs26.69-64-2': 2379, 'sm-p600': 2380, 'b1-750': 2381, 'gt-s7582l': 2382, 'lg-h820': 2383, 'samsung-sm-t377a': 2384, 'sm-t587p': 2385, 'z557bl': 2386, 'f3111 build/33.3.a.1.115': 2387, 'a577vl': 2388, 'sm-g960u build/r16nw': 2389, 'htc_one': 2390, 'sm-g955f build/r16nw': 2392, 'sm-g925r4': 2393, 'za990': 2394, '4009f': 2395, 'was-lx2j': 2396, 'samsung sm-j250m build/nmf26x': 2397, 'z': 2398, 'was-lx1 build/huaweiwas-lx1': 2399, 'pixel build/opm2.171019.029': 2400, 'pixel 2 xl build/opm2.171019.029': 2401, 'sm-g901f': 2402, 'samsung-sm-g920az': 2403, 'samsung sm-g960u build/r16nw': 2404, 'sm-g9650 build/r16nw': 2405, 'dream': 2406, 'lgms395': 2407, 'moto c build/nrd90m.070': 2408, 'sm-g965f build/r16nw': 2409, 'sm-g930p': 2410, 'sm-a730f build/nmf26x': 2411, 'a463bg': 2412, 'gt-i8190n': 2413, \"f80's+\": 2414, '47418': 2415, 'moto g play build/npis26.48-36-5': 2416, 'e6683': 2417, 'rv:61.0': 2418, '6055b': 2419, 'c6603': 2420, 'redmi note 5a build/n2g47h': 2421, 'smart': 2422, 'kylin': 2423, 'sm-j510mn build/nmf26x': 2424, 'am508': 2425, 'rv:33.0': 2426, 'samsung-sm-j727az': 2427, 'samsung sm-g935w8 build/nrd90m': 2428, 'sm-g361f': 2429, 'sm-p355m': 2430, 'ta-1038 build/o00623': 2432, 'moto g (5) plus build/npns25.137-92-8': 2433, 'sm-g960f build/r16nw': 2434, 'g3313 build/43.0.a.7.25': 2435, 'asus_x00id': 2436, 'lg-lg870': 2437, 'lg-m200 build/nrd90u': 2438, 'sp7731g': 2439, 'alumini3 build/mra58k': 2440, 'samsung sm-g892a build/r16nw': 2441, 'sm-n910t build/lmy47x': 2443, 'sm-n950u1 build/r16nw': 2444, 'bnd-l34': 2445, 'samsung sm-n950u1 build/r16nw': 2446, 'htc_one_m8/4.28.502.2': 2447, 'sm-g570f': 2448, 'sm-j120m': 2449, 'hisense e51 build/lmy47v': 2450, 't08': 2451, 'sgh-i747m': 2452, 'sm-j700t1': 2453, 'samsung sm-g9650 build/r16nw': 2454, 'bv8000pro': 2455, 'asus_z01bdc': 2456, 'sm-t810': 2457, 'e5306': 2458, 'moto g (5) plus build/npns25.137-93-8': 2459, 'g3123 build/40.0.a.6.175': 2460, 'sm-g615f': 2461, 'asus_x018d': 2462, 'lg-h933': 2463, 'mtt': 2464, 'samsung sm-g965u build/r16nw': 2465, 'f5122': 2466, 'samsung sm-g965f build/r16nw': 2467, 'techpad': 2468, 'fusion5_u7': 2469, 'vs425': 2470, 'r2': 2471, 'sm-g955w': 2472, 'pulp 4g build/lmy47v': 2473, 'x78': 2474, 'asus_x00hd build/nmf26f': 2475, 'ax821 build/mra58k': 2476, 'aquaris x build/nmf26f': 2477, 'sm-j700f': 2478, 'lg-v410/v41020c': 2479, 'blade v8q build/n2g47h': 2480, 'd5503': 2481, 'moto x4 build/opws27.57-40-14': 2482, 'build/opm1.171019.011': 2483, 'domos': 2484, 'moto g (5) build/npps25.137-93-8': 2485, 'ul40': 2486, 'lgms631': 2487, 'sm-a300m build/ktu84p': 2488, 'f8332': 2489, 'eml-l29 build/huaweieml-l29': 2490, 'sm-g892a': 2491, 'northwell': 2492, 'hisense f24 build/nrd90m': 2493, 'moto 
g(6) play build/opp27.61-14-4': 2494, 'ta-1032': 2495, 'f5121 build/34.4.a.2.19': 2496, 'sm-g950u1 build/r16nw': 2497, 'motog3-te build/mpd24.65-33': 2498, 'htc_desire_820': 2499, 'asus_a001': 2500, 'ta-1028 build/o00623': 2501, 'ax1060': 2502, 'clt-l09': 2503, 'sm-p550': 2504, 'build/kot49h': 2505, 'a0001': 2506, 'xt1710-02 build/ndss26.118-23-19-6': 2507, 'samsung sm-a710m build/nrd90m': 2508, 'samsung sm-g950u1 build/r16nw': 2509, 'akus': 2510, 'n9560 build/nmf26f': 2511, 'shift build/lmy47i': 2512, 'sm-t670': 2513, 'verykools5530 build/lmy47i': 2514, 'verykools5005': 2515, 'a3-a20': 2516, 'sm-g850m': 2517, 'e501': 2518, 'verykools5524': 2519, '6037b': 2520, 'pixel build/opm4.171019.016.b1': 2521, 'sm-g920p': 2522, 'e6790tm': 2523, 'cph1607': 2524, 'sm-g920t': 2525, 'moto g (5) plus build/npns25.137-92-10': 2526, 'lg-k350': 2527, 'bolt': 2528, 'lg-d373': 2529, '5044a': 2530, 'sm-j700t build/mmb29k': 2531, 'pixel 2 xl build/opm2.171019.029.b1': 2532, 'sm-g530t1': 2533, 'ta-1025 build/opr1.170623.026': 2534, 's471': 2535, 'oneplus a5010 build/opm1.171019.011': 2536, 'moto g (4) build/npjs25.93-14-18': 2537, 'gt-s7582': 2538, 'sm-g900fd': 2539, 'xt1609 build/npis26.48-38-3': 2540, 'za509': 2541, 'fever': 2542, 'xt1563 build/mpd24.65-25': 2543, 'sm-t330': 2544, 'sm-j250m build/nmf26x': 2545, 'alumini3plus': 2546, 'sm-n950f build/r16nw': 2547, 'samsung sm-g930f/xxu2drc1 build/nrd90m': 2548, 'redmi note 4x build/mra58k': 2549, 'hisense f8 mini build/nrd90m': 2550, 'h3321': 2551, 'studio_g_hd': 2552, 'azumi_kinzo_a5_ql': 2553, 'y550-l02': 2554, 'ls5': 2555, 'sm-j120w': 2556, '7_plus': 2557, 'moto g (5) build/npp25.137-33': 2558, 'sm-j7108': 2559, 'samsung sm-n910v build/mmb29m': 2560, 'ane-lx3 build/huaweiane-lx3': 2561, 'samsung sm-g955f build/r16nw': 2562, 'yolo': 2563, 'z812': 2564, 'rne-l23 build/huaweirne-l23': 2565, 'gt-n8010': 2566, 'sgp611': 2567, 'gt-i9506': 2568, 'z965 build/nmf26v': 2569, 'd2406': 2570, 'samsung sm-j327az build/nrd90m': 2571, 'moto e (4) plus build/nma26.42-152': 2572, 'pixel xl build/opm4.171019.016.b1': 2573, 'orion': 2574, 'nexus 5x build/opr4.170623.006': 2575, 'cph1723': 2576, 'sm-g955u1 build/r16nw': 2577, 'rne-l22 build/huaweirne-l22': 2578, 'io': 2579, 'asus_x008d build/nrd90m': 2580, 'xt1635-02 build/opn27.76-12-22': 2581, 'sm-a520f build/r16nw': 2582, 'g8142': 2583, 'g630-u251': 2584, 'lg-k212': 2585, 'ldn-lx3 build/huaweildn-lx3': 2586, 'z955a': 2587, 'lg-e975': 2588, 'power_2': 2589, 'z957': 2590, 'samsung sm-a520f build/r16nw': 2591, 'samsung sm-g390f build/nrd90m': 2592, 'a7': 2593, 'rct6873w42m_f7': 2594, 'g3121 build/48.1.a.2.21': 2595, 'gt-i9301i': 2596, 'i50f build/nrd90m': 2597, 'sm-g935v build/r16nw': 2598, 'sm-g930v build/r16nw': 2599, 'sm-g930p build/r16nw': 2600, 'moto g (4) build/npjs25.93-14-8.1-9': 2601, 'lm-q710(fgn': 2602, 'sm-t820': 2603, 'moto e5 build/opp27.91-25': 2604, 'bnd-l24': 2605, 'samsung-sm-g930a build/r16nw': 2606, 'tu20402-58': 2607, 'sm-g9650': 2608, 'sm-g935p': 2609, 'zei403': 2610, 'moto g (5) build/npps25.137-93-12': 2611, 'pixel 2 build/opm2.171026.006.c1': 2612, 'pra-lx2': 2613, 'sm-g935f build/r16nw': 2614, 'sm-g950': 2615, 'eml-l09 build/huaweieml-l09': 2616, 'nexus 5x build/n2g48c': 2617, 'bla-l09': 2618, 'samsung sm-g930f build/r16nw': 2619, 'sm-t360': 2620, 't09': 2621, '5026a build/nrd90m': 2622, 'aquaris_m4.5': 2623, 'iris': 2624, 'col-l29': 2625, 'samsung sm-a530f build/nmf26x': 2626, 'lg-m703': 2627, 'moto g (4) build/npjs25.93-14.7-8': 2628, 'sm-g930f build/r16nw': 2629, 'xt1635-02 build/opns27.76-12-22-3': 
2630, 'r8006': 2631, 'pixel 2 build/opm2.171026.006.g1': 2632, 'glam': 2633, 'atu-lx3 build/huaweiatu-lx3': 2634, 'd2206': 2635, 'a37fw': 2636, 'sm-g386f': 2637, 'mi 4w build/mmb29m': 2638, 'lg-h931 build/opr1.170623.026': 2639, 'samsung sm-g930a build/r16nw': 2640, 'sm-j400m build/r16nw': 2641, 'moto g(6) plus build/opws27.113-25-4': 2642, 'rv:62.0': 2643, 'sm-g930t build/r16nw': 2644, 'sm-j106b': 2645, 'pixel build/opm4.171019.021.p1': 2646, 't1-a21w': 2647, 'sm-n915t': 2648, 'sm-a320fl build/r16nw': 2649, 'pixel xl build/opm4.171019.021.p1': 2650, 'sm-g892u build/r16nw': 2651, 'pixel 2 xl build/opm2.171026.006.h1': 2652, 'samsung sm-g935t build/r16nw': 2653, 'sm-j510h': 2654, 'ax681': 2655, 'zte blade a0620 build/nmf26f': 2656, 'lg-k210': 2657, 'samsung-sm-g891a build/r16nw': 2658, 'lm-x210 build/n2g47h': 2659, 'moto g (5) plus build/npns25.137-92-14': 2660, 'moto x4 build/opws27.57-40-17': 2661, 'd5503 build/14.6.a.0.368': 2662, 'weimei_we': 2663, 'lgl59bl': 2664, 'samsung sm-g960u1 build/r16nw': 2665, 'blade v9 build/opr1.170623.032': 2666, 'ta-1039 build/opr1.170623.026': 2667, 'sm-a605gn build/r16nw': 2668, 'samsung sm-n950n build/r16nw': 2669, 'energy': 3440, 'vie-l09': 2671, 'moto g play build/npis26.48-43-2': 2672, 'sm-t805 build/mmb29k': 2673, 'lld-l31 build/honorlld-l31': 2674, 'tank': 2675, 'moto g(6) build/ops27.82-72': 2676, 'lm-x210cm': 2677, 'asus_x00ad': 2678, 'lm-x410(fg': 2679, '5085a': 2680, 'g3123 build/48.1.a.2.21': 2681, 'd5803': 2682, 'orange_rise_33': 2683, 'n9519': 2684, 'h3311': 2685, 'es-es': 2686, 's8': 2687, 'mi a1 build/opm1.171019.026': 2688, 'lg-h872': 2689, 'clt-l29 build/huaweiclt-l29': 2690, 'vorago': 2691, 'sm-t217t': 2692, 'bkl-l09': 2693, 'lg-h860': 2694, 'sm-j737p build/r16nw': 2695, '6036a build/jls36c': 2696, 'lg-k557 build/mxb48t': 2697, 'sm-s737tl': 2698, 'sm-g935s': 2699, 'moto g(6) play build/opp27.91-87': 2700, 'g3313 build/43.0.a.7.55': 2701, 'sm-g316m': 2702, '5049z': 2703, 'cubot_x18_plus': 2704, 'gt-n5120': 2705, 'sm-j327w': 2706, 'samsung sm-a520w build/nrd90m': 2707, 'lg-h930': 2708, 'lg-v400': 2709, 'rv:26.0': 2710, 'f3216': 2711, 'sm-j600g build/r16nw': 2712, 'moto g (5) plus build/npn25.137-35': 2713, 'samsung sm-n950f build/r16nw': 2714, 'samsung-sm-j321az': 2715, 'moto z3 play build/opw28.70-22': 2716, 'rainbow': 2717, 'sm-g930u build/r16nw': 2718, 'moto z2 play build/npss26.118-19-22': 2719, 'sm-g965u1': 2720, 'nativo build/nrd90m': 2721, 'bla-l29 build/huaweibla-l29s': 2722, 'miracle': 2723, 'fla-lx3 build/huaweifla-lx3': 2724, 'sm-g928t': 2725, 'samsung sm-j330fn build/nrd90m': 2726, 'lg-h870': 2727, 'sm-g935t build/r16nw': 2728, 'h3223 build/50.1.a.10.40': 2729, 'oneplus a3003 build/opr1.170623.032': 2730, 'g3223 build/48.1.a.2.21': 2731, 'gini s5pro build/nrd90m': 2732, 'sm-g920r7': 2733, 'lm-g710': 2734, 'samsung sm-g935a build/r16nw': 2735, 'ev05': 2736, 'sm-n910t': 2737, 'sm-g800f': 2738, 'samsung sm-g960f build/r16nw': 2739, 'fig-lx1 build/huaweifig-lx1': 2740, 'nokia 6.1 build/opr1.170623.026': 2741, 'htc 10 build/opr1.170623.027': 2742, 'fp1u': 2743, 'sm-a530f build/r16nw': 2744, 'sm-g960u1': 2745, 'a10 pro build/lmy47i': 2746, 'lg-d852': 2747, 'samsung sm-g532f build/mmb29t': 2748, 'xt1650 build/ocls27.76-69-6': 2749, 'moto g (5) build/npps25.137-93-14': 2750, 'g8232': 2751, 'g3112': 2753, 'moto e5 build/opp27.91-72': 2754, 'redmi note 3 build/nmf26q': 2755, 'samsung sm-j600g build/r16nw': 2756, 'p00i': 2757, 'sm-j701mt': 2758, 'lg-ls997 build/opr1.170623.032': 2759, 'moto g (5) plus build/npns25.137-93-14': 2760, 
'r7plusf': 2761, 'sm-g357fz': 2762, 'f3112': 2763, 'duo': 2764, 'sm-a520w build/r16nw': 2765, 'sm-p555m': 2766, 'z730': 2767, 'moto z2 play build/ops27.76-12-25': 2768, 'samsung sm-a320fl build/r16nw': 2769, 'sm-a510f': 2770, 'oneplus a5000 build/opm1.171019.011': 2771, 'redmi note 5 build/opm1.171019.011': 2772, 'dra-lx3 build/huaweidra-lx3': 2773, 'vky-l29': 2774, 'moto z2 play build/opss27.76-12-25-3': 2775, 'note8': 2776, 'xt1562': 2777, 'cam-l21': 2778, 'rv:60.1.0': 2780, 'samsung sm-g930t build/r16nw': 2781, 'ane-lx1 build/huaweiane-lx1': 2782, 'sh-01k': 2783, 'rct6973w43md': 2784, 'lg-v410/v41020e': 2785, 'vs988 build/opr1.170623.032': 2786, 'lm-x212(g': 2787, 'lg-h955': 2788, 'sm-g935f': 2789, 'life': 2790, 'moto g(6) build/ops27.82-87': 2791, 'samsung sm-j400m build/r16nw': 2792, 'bla-a09 build/huaweibla-a09': 2793, 'pixel 2 build/ppr1.180610.009': 2794, 'u2': 2795, 'n9137': 2796, 'sm-n960u build/m1ajq': 2797, 'sm-a320f': 2798, 'samsung sm-j737t build/r16nw': 2799, 'sm-j500f': 2800, 'samsung sm-j510mn build/nmf26x': 2801, 'g8141 build/47.1.a.12.270': 2802, 'infinit mx build/nrd90m': 2803, 'kempler_tv': 2804, 'breeze': 2805, 'nx16a11264': 2806, 'lg-h873 build/nrd90u': 2807, 'g3223 build/48.1.a.2.50': 2808, 'asus_z01hd': 2809, 'ta-1021 build/opr1.170623.026': 2810, 'polaroid': 2811, 'moto z2 play build/opss27.76-12-25-7': 2812, 'sm-g930w8 build/r16nw': 2813, 'vf-895n': 2814, 'lg-h870ds build/opr1.170623.032': 2815, 'lumia': 2816, 'aum-l29': 2817, 'redmi 5a build/n2g47h': 2818, 'samsung-sm-j727a': 2819, 'funwebproducts': 2820, 'zte-k88': 2821, 'moto g(6) build/ops27.82-41': 2822, 'samsung-sm-g891a': 2823, 'samsung sm-g930p build/r16nw': 2824, 'g8441': 2825, 'lg-h631mx': 2826, 'neffos': 2827, 'e5306 build/27.3.a.0.173': 2828, 'motog3 build/mpis24.65-33.1-2-10': 2829, 'sm-j337a': 2830, 'xt1635-02 build/opns27.76-12-22-9': 2831, '9024w': 2832, 'lex722': 2833, 'moto e (4) build/nma26.42-157': 2834, 'bbf100-2': 2835, 'bll-l22': 2836, 'verykool_s5516': 2837, 'moto g(6) plus build/opw27.113-89': 2838, 'bv6000s': 2839, 'samsung sm-a605gn build/r16nw': 2840, 'sm-n920f': 2841, 'samsung sm-g935f build/r16nw': 2842, 'rv:28.0': 2843, 'pixel 2 build/ppr2.180905.005': 2844, 'samsung sm-n960u build/m1ajq': 2845, 'rct6873w42m': 2847, 'g3123 build/48.1.a.2.50': 2848, 'carbon_harpia': 2849, 'sm-a730f': 2850, 'bah-w09': 2851, 'k011 build/kot49h': 2852, 'sm-j701f': 2853, 'sm-g930vc': 2854, 'u972': 2855, 'samsung sm-g900m build/mmb29m': 2856, 'lg-uk495': 2857, 'moto e (4) plus build/nma26.42-162': 2858, 'sm-g357m': 2859, 'lg-d805': 2860, 'samsung sm-j510fn build/nmf26x': 2861, 'sl729': 2862, 'sm-g900h': 2863, 'g3313 build/43.0.a.7.70': 2864, 'pixel 2 xl build/ppr2.180905.005': 2865, 'moto x4 build/opws27.57-40-22': 2866, 'samsung sm-j737p build/r16nw': 2867, 'arc': 2868, 'k014': 2869, 'sm-g960w': 2870, 'sgp512': 2871, 'asus_z00xs': 2872, '7048x': 2873, 'nx551j': 2874, 'e5603': 2875, 'lgus110': 2876, 'rv:36.0': 2877, 'samsung-sm-n915a build/ktu84p': 2878, 'a50lt': 2879, 'xt1053': 2880, 'asus_z012dc': 2881, 'sm-j810m build/r16nw': 2882, 'n1': 2883, '5099a build/o00623': 2884, 'sm-n9600 build/m1ajq': 2885, '7040n': 2886, 'moto g(6) play build/opp27.91-140': 2887, 'bbb100-1 build/opm1.171019.026': 2888, 'sm-j330fn': 2889, 'ine-lx2 build/huaweiine-lx2': 2890, 'sm-j701m': 2891, 'f5121 build/34.4.a.2.107': 2892, 'e5306 build/27.1.a.1.81': 2893, 'infinit_x_cam': 2894, 'trt-lx2': 2895, 'moto g (5) build/opp28.85-13': 2896, 'a500': 2897, 'sm-n900v': 2898, 'e5663': 2899, 'sm-g900t': 2900, 'galaxy': 2901, 'ags-w09': 
2902, 'cph1835 build/o11019': 2903, 'gt-s6810m': 2904, 'hi6250 build/mra58k': 2905, 'lm-x410.f': 2906, 'pixel 2 xl build/ppr2.181005.003': 2907, 'redmi 5 plus build/opm1.171019.019': 2908, 'moto g (5) plus build/ops28.85-13': 2909, 'g3312': 2910, 'moto g(6) plus build/opws27.113-89-2': 2911, 'bgo-dl09': 2912, 'par-lx9': 2913, '5058a': 2914, 'oneplus a6003 build/pkq1.180716.001': 2915, 'sm-n920i': 2916, 'xt1562 build/mpds24.107-52-11': 2917, 'lgl82vl': 2918, 'mix': 2919, 'pixel xl build/ppr2.181005.003': 2920, 'pspcm20a0': 2921, 'a1_pro': 2922, 'moto e5 build/opp27.91-146': 2923, 'lg-g710': 2924, 'moto e5 plus build/opp27.91-122': 2925, 'alcatelonetouch': 2926, 'xt1572': 2927, 'sm-t677v': 2928, 'sm-a5100': 2929, 'moto e (4) build/ndq26.69-64-9': 2930, 'clt-l04 build/huaweiclt-l04': 2931, 'shock': 2932, 'samsung-sm-g870a build/lrx21t': 2933, 'l-03k': 2934, 'h8314': 2935, 'sm-g532mt': 2936, 'z9': 2937, 'samsung sm-j810m build/r16nw': 2938, 'samsung-sm-g530a': 2939, 'rct6103w46': 2941, 'frd-l19': 2942, 'nokia 2 build/nmf26f': 2943, 'sm-j727t': 2944, 'sm-j250f': 2945, 'pixel 2 build/ppr2.181005.003': 2946, 'sm-g900f': 2947, 'gt-i9195 build/kot49h': 2948, 'ane-lx3': 2949, 'g8231': 2950, 'ilium m3 build/nrd90m': 2951, 's60 lite build/nrd90m': 2952, 'mix build/opr1.170623.032': 2953, 'lg-m700': 2954, 'sm-t350': 2955, 'nx591j': 2956, 'lg-v530': 2957, 'pra-lx3': 2958, 'moto x play': 2959, 'redmi note 4': 2960, 'sm-j700m': 2961, 'trt-l53': 2962, 'lgmp260': 2963, 'samsung sm-t377w build/nmf26x': 2964, 'sm-g950f': 2965, 'moto g(6) play': 2966, 'moto z3 play': 2967, 'sm-j600fn': 2968, 'rv:63.0': 2969, 'sm-j730gm': 2970, 'moto g (4)': 2971, 'huawei vns-l23': 2972, 'c6506 build/10.7.a.0.222': 2973, 'blade v8 se': 2974, 'fig-lx3': 2975, 'rv:21.0': 2976, 'moto z2 play': 2977, 's9+': 2978, '5056a': 2979, 'g24027k': 2980, 'sm-n9600': 2981, 'm4_b2': 2982, 'sm-g610m': 2983, 'bla-a09': 2984, 'sm-a510m': 2985, 'blade v6 plus': 2986, 'rne-l03': 2987, 'h8216': 2988, 'ultra': 2989, 'ldn-lx3': 2990, 'htc one': 2991, 'samsung sm-a530f build/r16nw': 2992, 'moto g(6) plus': 2993, 'sm-g930f': 2994, '5026a': 2995, 'moto e (4)': 2996, 'r505': 2997, 'moto g (5)': 2998, 'lm-x210': 2999, 'sm-a605gn': 3000, 'uniq': 3001, 'c6506': 3002, 'atu-lx3': 3003, 'huawei rio-l03': 3004, 'bac-l03': 3005, 'sm-n960u': 3006, 'lg-h840': 3007, 'cam-l03': 3008, 'sla-l03': 3009, 'blade v8q': 3010, 'lg-m400': 3011, '5052y': 3012, 'sm-g570m build/r16nw': 3013, 'sm-a720f': 3014, 'qmv7b': 3015, 'huawei tag-l13': 3016, 'sm-g930r4 build/r16nw': 3017, '5080a': 3018, 'sm-t113nu': 3019, 'redmi 6a': 3020, 'sm-a600fn': 3021, 'was-lx3': 3022, 'eva-l09': 3023, 'moto g(6)': 3024, 'moto g (5) plus': 3025, 'asus_a007': 3026, 'p008': 3027, 'nokia_x': 3028, 'm4 ss4453': 3029, 'sm-j400m': 3030, 'blade a602': 3031, 'd5303': 3032, 'bll-l23': 3033, 'sgh-t999l': 3034, 'moto e (4) plus': 3035, 'moto g play': 3036, 'sm-g570m': 3037, 'ags-l03': 3038, 'alumini_3_plus': 3039, 'sm-g965f': 3040, 'fig-lx1': 3041, 'sm-j120h': 3042, '4047a': 3043, 'motorola': 3044, 'sm-j320f': 3045, 'hisense f23': 3046, 'huawei vns-l53': 3047, 'htc desire 10 lifestyle': 3048, 'mi a1': 3049, 'sm-j727v': 3050, 'sm-t560': 3051, 'lg-x180g': 3052, 'mya-l13': 3053, 'moto e5': 3054, 'pixel 2': 3055, 'sc-03k': 3056, 'mha-l09': 3057, 'sm-g960u': 3058, 'sm-j530gm': 3059, 'hisense f24': 3060, 'asus_x00hd': 3061, 'infinit_1': 3062, 'sm-a530f': 3063, 'sm-g925v': 3064, 'sm-j600g': 3065, 'sm-j730g': 3066, 'dra-lx3': 3067, 'lg-k530': 3068, 'fedora': 3069, 'vky-l09': 3070, 'lg-k410': 3071, 'f3111': 3072, 
'lenovo a2016b30': 3073, 'pixel xl': 3074, 'lg-k200': 3075, 'sm-n950u': 3076, 'blade v6': 3077, 'clt-l04': 3078, 'redmi note 5a': 3079, 'lg-k120': 3080, 'moto c': 3081, 'sm-g800m': 3082, 'rv:64.0': 3083, 'sm-a320fl': 3084, 'gxq6580_weg_l': 3085, 'nexus 5x build/opm7.181005.003': 3086, 'ale-l23': 3087, '5080x': 3088, 'lg-k520': 3089, 'sm-g950u': 3090, 'sm-t813': 3091, 'lg-k580': 3092, 'asus_z01md': 3093, 'mya-l03': 3094, 'shv39': 3095, 'm4 ss4456': 3096, 'samsung sm-n910f/n910fxxu1dql2 build/mmb29m': 3097, 'vs987': 3098, 'sm-g955u': 3099, '5011a': 3100, 'pixel 2 xl': 3101, 'huawei vns-l31': 3102, 'sm-g960n': 3103, 'htc desire 626s': 3104, 'redmi note 5': 3105, 'lg-ls993': 3106, 'sm-g532m': 3107, 'sm-p580': 3108, 'sm-a530w': 3109, 'lg-h873': 3110, 'sm-j500m': 3111, 'sm-j727u': 3112, 'bln-l24': 3113, 'blade v580': 3114, 'sm-a520f': 3115, 'sm-g965u': 3116, 'rne-l23': 3117, 'sm-g960f': 3118, 'gt-i9505': 3119, 'lg-m700 build/opm1.171019.026': 3120, 'ale-l21': 3121, 'sm-g9600': 3122, 'b1-850': 3123, 'redmi 4x': 3124, 'fla-lx3': 3125, 'oneplus a3000': 3126, 'hisense f8 mini': 3127, 'sm-g950u1': 3128, 'rne-l21': 3129, 'sm-j810m': 3130, 'alcatel_5044r': 3131, 'huawei cun-l03': 3132, 'eml-l09': 3133, 'moto g(6) play build/opp27.91-143': 3134, '4047g': 3135, 'sm-g531h': 3136, 'gt-i9060m': 3137, 'sm-j250m': 3138, 'ilium x520': 3139, 'was-lx1a': 3140, 'sm-n920g': 3141, 'sm-g900m': 3142, 'mi max 3': 3143, 'aw790': 3144, 'redmi note 3': 3145, 'sm-n900': 3146, 'sm-g850f': 3147, 'bnd-l21': 3148, 'kfdowi': 3149, 'cmr-w09': 3150, 'sm-t280': 3151, 'kfauwi': 3152, 'lg-x240': 3153, 'ilium l1120': 3154, 'neffos x1 max': 3155, 'ilium x710': 3156, 'moto x4': 3157, 'hisense u963': 3158, 'pra-lx1': 3159, 'nem-l51': 3160, 'sm-g930w8': 3161, 'gt-i9300i': 3162, 'lld-l31': 3163, 'cmr-w19': 3164, 'h3223': 3165, 'sm-j530f': 3166, 'lm-q610.fg': 3167, 'dra-l21': 3168, 'lg-k220': 3169, 'lg-k500': 3170, 'htc one a9s': 3171, 'nexus 5x': 3172, 'pixel 3': 3173, 'cph1725': 3174, 'kfgiwi': 3175, 'sm-j727t1': 3176, 'htc desire 530': 3177, 'lg-tp450': 3178, 'sm-n920c': 3179, 'oneplus a5000': 3180, 'moto c plus': 3181, 'sm-g930v': 3182, 'samsung sm-p900 build/lrx22g': 3183, 'oneplus a6003': 3184, 'p5047a': 3185, 'sm-a310f': 3186, 'sm-g955u1': 3187, 'lg-h831': 3188, 'oneplus a3003': 3189, 'lg-m320': 3190, 'motorola one': 3191, 'lml413dl': 3192, 'sm-j327t1': 3193, 'sm-n950f': 3194, 'eml-l29': 3195, 'oneplus a5010': 3196, 'ane-lx1': 3197, 'sm-j510mn': 3198, 'sm-s727vl': 3199, 'sm-n910v': 3200, 'sm-j320m': 3201, 'oneplus a3010': 3202, 'sm-g955n': 3203, 'nexus 5': 3204, '9003a': 3205, 'sm-t560nu': 3206, 'ane-lx2': 3207, 'kfsuwi': 3208, 'vtr-l09': 3209, 'sm-t713': 3210, 'redmi 5 plus': 3211, 'sm-g920f': 3212, 'lg-m250': 3213, 'blade l7': 3214, 'tr10rs1': 3215, 'sm-j700t': 3216, 'sm-g900f build/njh47f': 3217, 'sm-t385m': 3218, '6045o': 3219, 'hisense t963': 3220, 'redmi 6': 3221, 'm4_b3': 3222, 'clt-l29': 3223, 'zte blade v8 mini': 3224, 'lenovo p2a42': 3225, 'sm-n960f': 3226, 'alp-l09': 3227, 'atu-l11': 3228, 'asus_a009': 3229, 'mi a2': 3230, 'lg-h650': 3231, 'm5': 3232, 'sm-j200g build/lmy47x': 3233, '5012g': 3234, 'htc desire 650': 3235, '5059a': 3236, 'rne-l22': 3237, 'alcatel_6060c': 3238, 'hisense hi 3 build/nrd90m': 3239, 'kfaswi': 3240, 'sm-t700': 3241, 'sm-g550t1': 3242, 'g3121': 3243, 'sm-j610g': 3244, 'lenovo k33b36': 3245, 'frd-l04': 3246, 'sm-n910c': 3247, 'moto e5 plus': 3248, 'zte blade a6': 3249, 'blade l2 plus': 3250, 'lg-x210': 3251, 'sm-g925f': 3253, 'dli-l22': 3254, 'lenovoa3300-gv': 3255, 'sm-n900w8': 3256, 'lgl84vl': 
3257, '5033a': 3258, 'lg-k240': 3259, 'lg-h634': 3260, 'lg-us998': 3261, 'moto e5 play': 3262, 'z982': 3263, 'sm-t710': 3264, 'iphone 6plus': 3265, 'lg-x150': 3266, 'm4 ss4458': 3267, 'lg-h870ds': 3268, 'd6653': 3269, 'ta-1039': 3270, 'ta-1033': 3271, 'lm-g710vm': 3272, 'y635-l03': 3273, 'z981': 3274, 'moto e5 play build/opg28.54-19': 3275, 'samsung-sm-j320a': 3276, 'cro-l03': 3277, 'sne-lx3': 3278, 'sm-g900t build/lmy47x': 3279, 'eva-l19': 3280, 'vs990': 3281, 'lgms330': 3282, 'ta-1028': 3283, 'blade a520': 3284, 'lgls775': 3285, 'sm-n7505': 3286, 'shift4.2': 3287, 'e6883': 3288, '5049w': 3289, 'alpha 950': 3290, 'sm-j400f': 3291, 'ilium x510': 3292, 'g3412': 3293, 'lg-m430': 3294, 'max10 build/lmy47i': 3295, '5054s': 3296, 'chc-u03': 3297, 'lenovo pb2-650y': 3298, '5015a': 3299, 'hi6210sft': 3300, '5025g': 3301, 'sm-g530h': 3302, 's60': 3303, 'htc 10': 3304, 'lg-h700': 3305, 'lm-q610(fgn': 3306, 'sm-j700h': 3307, 'lg-x230': 3308, 'sm-j320fn': 3309, 'samsung sm-t580 build/m1ajq': 3310, 'blade a6 max build/n2g47h': 3311, 'sm-t530nu': 3312, 'ldn-l21': 3313, 'lg-d280': 3314, 'sm-a500m': 3315, 'sm-j337t': 3316, 'm4 ss4457': 3317, 'sm-a300fu': 3318, 'blade v9': 3319, 'lg-h918': 3320, 'hisense f20': 3321, 'sm-j111m': 3322, 'huawei g7-l03': 3323, 'ridge': 3324, 'hs20502-16': 3325, 'lenovo pb2-670y': 3326, 'lg-x165g': 3327, 'kffowi': 3328, 'asus_x00lda': 3329, 'cmr-al09': 3330, 'bluboo': 3331, 'lg-d852g': 3332, '2ps64': 3333, 'lm-q710.fg': 3334, 'samsung-sm-j327a': 3335, 'xt1585': 3336, 'mi a2 lite': 3337, 'lg-m151': 3338, '5010g': 3339, 'sm-j327v': 3340, 'sm-j710mn': 3341, 'vs988': 3342, 'lgls676': 3343, 'z988': 3344, 'lgl158vl': 3345, 'moto e5 cruise': 3346, 'sm-g892u': 3347, 'lg-h500': 3348, 'rv:27.0': 3349, 'lm-v350': 3350, 'sm-j200m': 3351, 'samsung-sm-g920a': 3352, '8_plus': 3353, 'kfarwi': 3354, '5044t': 3355, 'lld-l21': 3356, 'sm-s337tl': 3357, 'sm-j727r4': 3358, 'onix': 3359, 'sm-t900': 3360, 'redmi s2': 3361, 'a502dl': 3362, 'lg-m153': 3363, '4009a': 3364, 'vs995': 3365, 'lg-k425': 3366, 'moto g(6) build/opss27.82-87-3': 3367, 'bln-l21': 3368, 'h3123': 3369, 'p5525a': 3370, '5041c': 3371, 'sm-g9500': 3372, 'gt-i9060i': 3373, 'gt-n8013': 3374, 'lg-h910': 3375, 'gravity': 3376, 'sm-t357w': 3377, 'lg-x220': 3378, 'shift': 3379, 'bla-l29': 3380, 'moto g (5) plus build/opss28.85-13-3': 3381, '5099a': 3382, 'pocophone': 3383, 'sm-c5010': 3384, 'lya-l09': 3385, '8050g': 3386, 'rex build/mra58k': 3387, 'lg-ls777': 3388, 'sm-g930u': 3389, 'pixel 3 xl': 3390, 'p5526a': 3391, 'lg-d120 build/kot49i.v10a': 3392, 'frd-l09': 3393, 'hisense l675': 3394, 'sm-t110': 3395, 'kfapwi': 3396, 'gt-i8190l': 3397, 'moto e (4) plus build/nma26.42-167': 3398, 'sm-t378v': 3399, 'a10': 3400, 'sm-g925t': 3401, 'sm-j710f': 3402, 'n9517': 3403, 'ax921': 3404, 'mxg401': 3405, '6016a': 3406, 'lg-tp260': 3407, 'sm-g355m': 3408, 'alp-l29': 3409, 'lgms550': 3410, 'lg-d415': 3411, 'e6782': 3412, 'k81': 3413, 'sm-n900p': 3414, 'lg-h542': 3415, 'sm-j327u': 3416, 'lg-h811': 3417, '2014819': 3418, 'ghia_axis7': 3419, 'm4 ss4458-r': 3420, 'samsung-sm-g928a': 3421, 'z916bl': 3422, 'lg-ls997': 3423, 'lgmp450': 3424, 'sm-g950n': 3425, 'c5': 3426, 'lgms210': 3427, 'rocket': 3428, 'sm-j720f': 3429, 'sm-g930r4': 3430, 'sch-i545': 3431, 'samsung sm-j610g build/m1ajq': 3432, 'lg-m710': 3433, 'sm-n960u1': 3434, 'sgp311': 3435, 'vk410': 3436, 'hi6250': 3437, 'vs986': 3438, 'sm-j327p': 3439, 'nexus 6p': 3441, 'kfthwi': 3442, 'sm-j337v': 3443, 'htc one m9plus': 3444, 'kftbwi': 3445, 'sm-j737t': 3446, 'sm-t337t': 3447, 'sm-n960w': 3448, 
'im6': 3449, 'sm-j737v': 3450, 'kfkawi': 3451, 'huawei y560-l03': 3452, 'sm-j415g': 3453, 'asus_a006': 3454, 'vie-l29': 3455, 'sm-g610m build/m1ajq': 3456, 'moto e5 play build/opgs28.54-19-2': 3457, 'lg-d690': 3458, 'moto e5 play build/ocps27.91-51-3-3': 3459, 'lg-d631': 3460, 'redmi note 6 pro': 3461, 'kyocera-c6742': 3462, 'gt-i9060l': 3463, 'moto z3': 3464, 'vs501': 3465, 'g3311': 3466, 'htl23': 3467, 'q01a': 3468, 'samsung sm-t820 build/r16nw': 3469, 'sm-g930vl': 3470, 'sm-a750g': 3471, '6062w': 3472, 'redmi 4a': 3473, 'p10': 3474, 'infinit_lite_2': 3475, 'sm-t827v': 3476, 'sm-j737p': 3477, 'moto e (4) build/nma26.42-167': 3478, 'a622gl': 3479, 'sm-j327vpp': 3480, 'sm-a600t1': 3481, 'e2306': 3482, 'lm-q710.fgn': 3483, 'jkm-lx3': 3484, 'sm-a700k': 3485, 'slate': 3486, 'sm-j327t': 3487, 'moto g (5) build/opps28.85-13-2': 3488, 'hma-l09': 3489, 'ale-l02': 3490, 'rv:65.0': 3491, 'rct6513w87': 3492, 'cph1835': 3493, '5085b': 3494, 'lgl164vl': 3495, 'asus_x00pd': 3496, 'rct6303w87m7': 3497, 'ghia_axis7p': 3498, 'samsung-sm-t337a': 3499, 'mha-l29': 3500, 'sm-g532g': 3501, 'samsung-sm-g530az': 3502, 'mb520': 3503, 'lg-h990': 3504, 'lg-m327': 3505, 'lifesize': 3506, 'pspc550': 3507, '8080': 3508, 'oneplus a6013': 3509, '7048a': 3510, 'sm-n950u1': 3511, 'tab8': 3512, 'sm-n9200': 3513, 'c2105': 3514, 'sm-j320v': 3515, 'kftt': 3516, 'samsung sm-g610m build/m1ajq': 3517, 'verykools5702': 3518, 'sm-j105b': 3519, 'max10': 3520, 'sm-g600s': 3521, 'cpu': 3522, 'lm-v405': 3523, '4027a': 3524, 'sm-g360p': 3525, 'n9560': 3526, 'z956': 3527, 'c6902': 3528, 'z818l': 3529, 'sm-p350': 3530, 'a462c': 3531, 'sm-j600gt': 3532, 'sm-t837v': 3533, 'e6910': 3534, 'z851m': 3535, 'sm-a920f': 3536, 'asus_a002a': 3537, 'pic-lx9': 3538, 'bla-al00': 3539, 'neffos_c9a': 3540, 'lenovo tb-7104f build/o11019': 3541, 'sgp621': 3542, 'sm-g903f': 3543, '5086a': 3544, 'sm-t387v': 3545, 'sm-g611f': 3546, 'lg-d325': 3547, 'sm-t597v': 3548, 'neffos_c7': 3549, 'sm-g925p': 3550, 'lg-m210': 3551, 'zte-z999': 3552, 'p002': 3553, 'lm-x410pm': 3554, 'che2-l23': 3555, 'q05a': 3556, 'azumi_doshi_a55_ql': 3557, 'lg-k430': 3558, 'hk-914581': 3559, 'sm-t817v': 3560, 'verykools5036': 3561, 'rct6b03w13': 3562, 'sm-j810y': 3563, 'z899vl': 3564, 'sm-s367vl': 3565, 'sm-p905v': 3566, 'infinity': 3567, 'sm-g935r4': 3568, '6043a': 3569, 'atu-l21': 3570, 'd2303': 3571, 'lgl157bl': 3572, 'phone': 3573, 'lg-q710al': 3574, 'n5702l': 3575, 'so-02h': 3576, 'verykools5027': 3577, 'bah-l09': 3578, 'sm-j120fn': 3579, 'asus_x008dc': 3580, 'ags-l09': 3581, 'techpad_10y': 3582, 'dig-l03': 3583, 'sm-j530l': 3584, 'asus_z017dc': 3585, 'motoe2': 3586, 'f3116': 3587, 'lava_r1': 3588, 'sm-j260m': 3589, '8082': 3590, 'lg-k540': 3591, 'zebra': 3592, 'armor': 3593, 'sm-t320': 3594, 'lm-q850': 3595, 'pspcl20a0': 3596, 'aura': 3597, 'azumi_iro_a5_q': 3598, 'kyocera-c6742a': 3599, 'alcatel_5098o': 3600, 'centurylink': 3601, 'spectrum': 3602, 'us': 3603, 'mx': 3604, 'yahoo': 3605, 'de': 3606, 'jp': 3607, 'uk': 3608, 'fr': 3609, 'com': 3610, 'es': 3611, 'apple': 3612}\n",
"_____no_output_____"
],
[
"for c1, c2 in train_df.dtypes.reset_index().values:\n if c2=='O':\n train_df[c1] = train_df[c1].map(lambda x: labels[str(x).lower()])\n test_df[c1] = test_df[c1].map(lambda x: labels[str(x).lower()])",
"_____no_output_____"
],
[
"def nan2mean(df):\n for x in list(df.columns.values):\n #print(\"___________________\"+x)\n #print(df[x].isna().sum())\n df[x] = df[x].fillna(df[x].mean())\n #print(\"Mean-\"+str(df[x].mean()))\n return df",
"_____no_output_____"
],
[
"train_df=nan2mean(train_df)",
"_____no_output_____"
],
[
"test_df=nan2mean(test_df)",
"_____no_output_____"
]
],
[
[
"## ENCODING",
"_____no_output_____"
]
],
[
[
"# Label Encoding\nfor f in train_df.columns:\n if train_df[f].dtype=='object': \n lbl = preprocessing.LabelEncoder()\n lbl.fit(list(train_df[f].values) + list(test_df[f].values))\n train_df[f] = lbl.transform(list(train_df[f].values))\n test_df[f] = lbl.transform(list(test_df[f].values)) \ntrain_df = train_df.reset_index()\ntest_df = test_df.reset_index()",
"_____no_output_____"
],
[
"features = list(train_df)\nfeatures.remove('isFraud')\ntarget = 'isFraud'",
"_____no_output_____"
]
],
[
[
"# <a id='2'>2. Bayesian Optimisation</a> ",
"_____no_output_____"
]
],
[
[
"#cut tr and val\nbayesian_tr_idx, bayesian_val_idx = train_test_split(train_df, test_size = 0.3, random_state = 42, stratify = train_df[target])\nbayesian_tr_idx = bayesian_tr_idx.index\nbayesian_val_idx = bayesian_val_idx.index",
"_____no_output_____"
],
[
"#black box LGBM \ndef LGB_bayesian(\n #learning_rate,\n num_leaves, \n bagging_fraction,\n feature_fraction,\n min_child_weight, \n min_data_in_leaf,\n max_depth,\n reg_alpha,\n reg_lambda\n ):\n \n # LightGBM expects next three parameters need to be integer. \n num_leaves = int(num_leaves)\n min_data_in_leaf = int(min_data_in_leaf)\n max_depth = int(max_depth)\n\n assert type(num_leaves) == int\n assert type(min_data_in_leaf) == int\n assert type(max_depth) == int\n \n\n param = {\n 'num_leaves': num_leaves, \n 'min_data_in_leaf': min_data_in_leaf,\n 'min_child_weight': min_child_weight,\n 'bagging_fraction' : bagging_fraction,\n 'feature_fraction' : feature_fraction,\n #'learning_rate' : learning_rate,\n 'max_depth': max_depth,\n 'reg_alpha': reg_alpha,\n 'reg_lambda': reg_lambda,\n 'objective': 'binary',\n 'save_binary': True,\n 'seed': 1337,\n 'feature_fraction_seed': 1337,\n 'bagging_seed': 1337,\n 'drop_seed': 1337,\n 'data_random_seed': 1337,\n 'boosting_type': 'gbdt',\n 'verbose': 1,\n 'is_unbalance': False,\n 'boost_from_average': True,\n 'metric':'auc'} \n \n oof = np.zeros(len(train_df))\n trn_data= lgb.Dataset(train_df.iloc[bayesian_tr_idx][features].values, label=train_df.iloc[bayesian_tr_idx][target].values)\n val_data= lgb.Dataset(train_df.iloc[bayesian_val_idx][features].values, label=train_df.iloc[bayesian_val_idx][target].values)\n\n clf = lgb.train(param, trn_data, num_boost_round=50, valid_sets = [trn_data, val_data], verbose_eval=0, early_stopping_rounds = 50)\n \n oof[bayesian_val_idx] = clf.predict(train_df.iloc[bayesian_val_idx][features].values, num_iteration=clf.best_iteration) \n \n score = roc_auc_score(train_df.iloc[bayesian_val_idx][target].values, oof[bayesian_val_idx])\n\n return score",
"_____no_output_____"
],
[
"# Bounded region of parameter space\nbounds_LGB = {\n 'num_leaves': (31, 500), \n 'min_data_in_leaf': (20, 200),\n 'bagging_fraction' : (0.1, 0.9),\n 'feature_fraction' : (0.1, 0.9),\n #'learning_rate': (0.01, 0.3),\n 'min_child_weight': (0.00001, 0.01), \n 'reg_alpha': (1, 2), \n 'reg_lambda': (1, 2),\n 'max_depth':(-1,50),\n}",
"_____no_output_____"
],
[
"LGB_BO = BayesianOptimization(LGB_bayesian, bounds_LGB, random_state=42)",
"_____no_output_____"
],
[
"print(LGB_BO.space.keys)",
"_____no_output_____"
],
[
"init_points = 10\nn_iter = 15",
"_____no_output_____"
],
[
"print('-' * 130)\n\nwith warnings.catch_warnings():\n warnings.filterwarnings('ignore')\n LGB_BO.maximize(init_points=init_points, n_iter=n_iter, acq='ucb', xi=0.0, alpha=1e-6)",
"_____no_output_____"
],
[
"LGB_BO.max['target']",
"_____no_output_____"
],
[
"LGB_BO.max['params']",
"_____no_output_____"
]
],
[
[
"## CONFUSION MATRIX",
"_____no_output_____"
]
],
[
[
"# Confusion matrix \ndef plot_confusion_matrix(cm, classes,\n normalize = False,\n title = 'Confusion matrix\"',\n cmap = plt.cm.Blues) :\n plt.imshow(cm, interpolation = 'nearest', cmap = cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation = 0)\n plt.yticks(tick_marks, classes)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])) :\n plt.text(j, i, cm[i, j],\n horizontalalignment = 'center',\n color = 'white' if cm[i, j] > thresh else 'black')\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')",
"_____no_output_____"
]
],
[
[
"# <a id='3'>3. LGB + best hyperparameters</a> ",
"_____no_output_____"
]
],
[
[
"param_lgb = {\n 'min_data_in_leaf': int(LGB_BO.max['params']['min_data_in_leaf']), \n 'num_leaves': int(LGB_BO.max['params']['num_leaves']), \n #'learning_rate': LGB_BO.max['params']['learning_rate'],\n 'min_child_weight': LGB_BO.max['params']['min_child_weight'],\n 'bagging_fraction': LGB_BO.max['params']['bagging_fraction'], \n 'feature_fraction': LGB_BO.max['params']['feature_fraction'],\n 'reg_lambda': LGB_BO.max['params']['reg_lambda'],\n 'reg_alpha': LGB_BO.max['params']['reg_alpha'],\n 'max_depth': int(LGB_BO.max['params']['max_depth']), \n 'objective': 'binary',\n 'save_binary': True,\n 'seed': 1337,\n 'feature_fraction_seed': 1337,\n 'bagging_seed': 1337,\n 'drop_seed': 1337,\n 'data_random_seed': 1337,\n 'boosting_type': 'gbdt',\n 'verbose': 1,\n 'is_unbalance': False,\n 'boost_from_average': True,\n 'metric':'auc'\n }",
"_____no_output_____"
],
[
"plt.rcParams[\"axes.grid\"] = True\n\nnfold = 5\nskf = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=42)\n\noof = np.zeros(len(train_df))\nmean_fpr = np.linspace(0,1,100)\ncms= []\ntprs = []\naucs = []\ny_real = []\ny_proba = []\nrecalls = []\nroc_aucs = []\nf1_scores = []\naccuracies = []\nprecisions = []\npredictions = np.zeros(len(test_df))\nfeature_importance_df = pd.DataFrame()\n\ni = 1\nfor train_idx, valid_idx in skf.split(train_df, train_df.isFraud.values):\n print(\"\\nfold {}\".format(i))\n trn_data = lgb.Dataset(train_df.iloc[train_idx][features].values,\n label=train_df.iloc[train_idx][target].values\n )\n val_data = lgb.Dataset(train_df.iloc[valid_idx][features].values,\n label=train_df.iloc[valid_idx][target].values\n ) \n \n clf = lgb.train(param_lgb, trn_data, num_boost_round = 500, valid_sets = [trn_data, val_data], verbose_eval = 100, early_stopping_rounds = 100)\n oof[valid_idx] = clf.predict(train_df.iloc[valid_idx][features].values) \n \n predictions += clf.predict(test_df[features]) / nfold\n \n # Scores \n roc_aucs.append(roc_auc_score(train_df.iloc[valid_idx][target].values, oof[valid_idx]))\n accuracies.append(accuracy_score(train_df.iloc[valid_idx][target].values, oof[valid_idx].round()))\n recalls.append(recall_score(train_df.iloc[valid_idx][target].values, oof[valid_idx].round()))\n precisions.append(precision_score(train_df.iloc[valid_idx][target].values ,oof[valid_idx].round()))\n f1_scores.append(f1_score(train_df.iloc[valid_idx][target].values, oof[valid_idx].round()))\n \n # Roc curve by folds\n f = plt.figure(1)\n fpr, tpr, t = roc_curve(train_df.iloc[valid_idx][target].values, oof[valid_idx])\n tprs.append(interp(mean_fpr, fpr, tpr))\n roc_auc = auc(fpr, tpr)\n aucs.append(roc_auc)\n plt.plot(fpr, tpr, lw=2, alpha=0.3, label='ROC fold %d (AUC = %0.4f)' % (i,roc_auc))\n \n # Precion recall by folds\n g = plt.figure(2)\n precision, recall, _ = precision_recall_curve(train_df.iloc[valid_idx][target].values, oof[valid_idx])\n y_real.append(train_df.iloc[valid_idx][target].values)\n y_proba.append(oof[valid_idx])\n plt.plot(recall, precision, lw=2, alpha=0.3, label='P|R fold %d' % (i)) \n \n i= i+1\n \n # Confusion matrix by folds\n cms.append(confusion_matrix(train_df.iloc[valid_idx][target].values, oof[valid_idx].round()))\n \n # Features imp\n fold_importance_df = pd.DataFrame()\n fold_importance_df[\"Feature\"] = features\n fold_importance_df[\"importance\"] = clf.feature_importance()\n fold_importance_df[\"fold\"] = nfold + 1\n feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)\n\n# Metrics\nprint(\n '\\nCV roc score : {0:.4f}, std: {1:.4f}.'.format(np.mean(roc_aucs), np.std(roc_aucs)),\n '\\nCV accuracy score : {0:.4f}, std: {1:.4f}.'.format(np.mean(accuracies), np.std(accuracies)),\n '\\nCV recall score : {0:.4f}, std: {1:.4f}.'.format(np.mean(recalls), np.std(recalls)),\n '\\nCV precision score : {0:.4f}, std: {1:.4f}.'.format(np.mean(precisions), np.std(precisions)),\n '\\nCV f1 score : {0:.4f}, std: {1:.4f}.'.format(np.mean(f1_scores), np.std(f1_scores))\n)\n\n#ROC \nf = plt.figure(1)\nplt.plot([0,1],[0,1],linestyle = '--',lw = 2,color = 'grey')\nmean_tpr = np.mean(tprs, axis=0)\nmean_auc = auc(mean_fpr, mean_tpr)\nplt.plot(mean_fpr, mean_tpr, color='blue',\n label=r'Mean ROC (AUC = %0.4f)' % (np.mean(roc_aucs)),lw=2, alpha=1)\n\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('LGB ROC curve by folds')\nplt.legend(loc=\"lower right\")\n\n# PR plt\ng = 
plt.figure(2)\nplt.plot([0,1],[1,0],linestyle = '--',lw = 2,color = 'grey')\ny_real = np.concatenate(y_real)\ny_proba = np.concatenate(y_proba)\nprecision, recall, _ = precision_recall_curve(y_real, y_proba)\nplt.plot(recall, precision, color='blue',\n         label=r'Mean P|R')\nplt.xlabel('Recall')\nplt.ylabel('Precision')\nplt.title('P|R curve by folds')\nplt.legend(loc=\"lower left\")\n\n# Confusion matrix & metrics\nplt.rcParams[\"axes.grid\"] = False\ncm = np.average(cms, axis=0)\nclass_names = [0,1]\nplt.figure()\nplot_confusion_matrix(cm, \n                      classes=class_names, \n                      title= 'LGB Confusion matrix [averaged/folds]')\nplt.show()",
"_____no_output_____"
]
],
[
[
"# <a id='4'>4. Features importance</a> ",
"_____no_output_____"
]
],
[
[
"plt.style.use('dark_background')\ncols = (feature_importance_df[[\"Feature\", \"importance\"]]\n .groupby(\"Feature\")\n .mean()\n .sort_values(by=\"importance\", ascending=False)[:30].index)\nbest_features = feature_importance_df.loc[feature_importance_df.Feature.isin(cols)]\n\nplt.figure(figsize=(10,10))\nsns.barplot(x=\"importance\", y=\"Feature\", data=best_features.sort_values(by=\"importance\",ascending=False),\n edgecolor=('white'), linewidth=2, palette=\"rocket\")\nplt.title('LGB Features importance (averaged/folds)', fontsize=18)\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"# <a id='5'>5. Submission</a> ",
"_____no_output_____"
]
],
[
[
"sample_submission['isFraud'] = predictions\nsample_submission.to_csv('submission_IEEE.csv')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ec905800f71cdfe60e8a8d2120a3f922fd598125 | 174,573 | ipynb | Jupyter Notebook | Data Visualization/Matplotlib/2. Bar Charts.ipynb | shreejitverma/Data-Scientist | 03c06936e957f93182bb18362b01383e5775ffb1 | [
"MIT"
] | 2 | 2022-03-12T04:53:03.000Z | 2022-03-27T12:39:21.000Z | Data Visualization/Matplotlib/2. Bar Charts.ipynb | shreejitverma/Data-Scientist | 03c06936e957f93182bb18362b01383e5775ffb1 | [
"MIT"
] | null | null | null | Data Visualization/Matplotlib/2. Bar Charts.ipynb | shreejitverma/Data-Scientist | 03c06936e957f93182bb18362b01383e5775ffb1 | [
"MIT"
] | 2 | 2022-03-12T04:52:21.000Z | 2022-03-27T12:45:32.000Z | 284.784666 | 21,202 | 0.924501 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n%matplotlib inline ",
"_____no_output_____"
],
[
"#Graph Styling\n# https://tonysyu.github.io/raw_content/matplotlib-style-gallery/gallery.html\nplt.style.use('seaborn-darkgrid')",
"_____no_output_____"
]
],
[
[
"# Bar Graphs",
"_____no_output_____"
]
],
[
[
"#Simple Bar Chart\nid1 = np.arange(1,10)\nscore = np.arange(20,110,10)\nplt.bar(id1,score)\nplt.xlabel('Student ID')\nplt.ylabel('Score')\nplt.show()",
"_____no_output_____"
],
[
"# Changing color of the bar chart\nid1 = np.arange(1,10)\nscore = np.arange(20,110,10)\nplt.figure(figsize=(8,5)) # Setting the figure size\nax = plt.axes()\nax.set_facecolor(\"#ECF0F1\") # Setting the background color by specifying the HEX Code\nplt.bar(id1,score,color = '#FFA726')\nplt.xlabel(r'$Student $ $ ID$')\nplt.ylabel(r'$Score$')\nplt.show()",
"_____no_output_____"
],
[
"#Plotting multiple sets of data\nx1= [1,3,5,7] \nx2=[2,4,6,8]\ny1 = [7,7,7,7]\ny2= [17,18,29,40]\nplt.figure(figsize=(8,6))\nax = plt.axes()\nax.set_facecolor(\"white\")\nplt.bar(x1,y1,label = \"First\",color = '#42B300') # First set of data\nplt.bar(x2,y2,label = \"Second\",color = '#94E413') # Second set of data\nplt.xlabel('$X$')\nplt.ylabel('$Y$')\nplt.title ('$Bar $ $ Chart$')\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"# Horizontal Bar Chart\nAge = [28,33,43,45,57]\nName = [\"Asif\", \"Steve\", 'John', \"Ravi\", \"Basit\"]\nplt.barh(Name,Age, color =\"yellowgreen\")\nplt.show()",
"_____no_output_____"
],
[
"# Changing the width of Bars\nnum1 = np.array([1,3,5,7,9])\nnum2 = np.array([2,4,6,8,10])\nplt.figure(figsize=(8,4))\nplt.bar(num1, num1**2, width=0.2 , color = '#FF6F00')\nplt.bar(num2, num2**2, width=0.2 , color = '#FFB300')\nplt.plot()",
"_____no_output_____"
],
[
"# Displaying values at the top of vertical bars\nnum1 = np.array([1,3,5,7,9])\nnum2 = np.array([2,4,6,8,10])\nplt.figure(figsize=(10,6))\nplt.bar(num1, num1**2, width=0.3 , color = '#FF6F00')\nplt.bar(num2, num2**2, width=0.3 , color = '#FFB300')\nfor x,y in zip(num1,num1**2):\n plt.text(x, y+0.05, '%d' % y, ha='center' , va= 'bottom')\nfor x,y in zip(num2,num2**2):\n plt.text(x, y+0.05, '%d' % y, ha='center' , va= 'bottom')\nplt.plot()",
"_____no_output_____"
],
[
"x = np.arange(1,21)\nplt.figure(figsize=(16,8))\ny1 = np.random.uniform(0.1,0.7,20)\ny2 = np.random.uniform(0.1,0.7,20)\n\n\nplt.bar(x, +y1, facecolor='#C0CA33', edgecolor='white') #specify edgecolor by name\nplt.bar(x, -y2, facecolor='#FF9800', edgecolor='white')\n\nfor x,y in zip(x,y1):\n plt.text(x, y+0.05, '%.2f' % y, ha='center' , va= 'bottom', fontsize = 10)\n \nplt.xlim(0,21)\nplt.ylim(-1.25,+1.25)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Stacked Vertical Bar",
"_____no_output_____"
]
],
[
[
"plt.style.use('seaborn-darkgrid')\nx1= ['Asif','Basit','Ravi','Minil'] \ny1= [17,18,29,40]\ny2 = [20,21,22,23]\nplt.figure(figsize=(5,7))\nplt.bar(x1,y1,label = \"Open Tickets\",width = 0.5,color = '#FF6F00')\nplt.bar(x1,y2,label = \"Closed Tickets\",width = 0.5 ,bottom = y1 , color = '#FFB300')\nplt.xlabel('$X$')\nplt.ylabel('$Y$')\nplt.title ('$Bar $ $ Chart$')\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"plt.style.use('seaborn-darkgrid')\nx1= ['Asif','Basit','Ravi','Minil'] \ny1= np.array([17,18,29,40])\ny2 =np.array([20,21,22,23])\ny3 =np.array([5,9,11,12])\nplt.figure(figsize=(5,7))\nplt.bar(x1,y1,label = \"Open Tickets\",width = 0.5,color = '#FF6F00')\nplt.bar(x1,y2,label = \"Closed Tickets\",width = 0.5 ,bottom = y1 , color = '#FFB300')\nplt.bar(x1,y3,label = \"Cancelled Tickets\",width = 0.5 ,bottom = y1+y2 , color = '#F7DC6F')\nplt.xlabel('$X$')\nplt.ylabel('$Y$')\nplt.title ('$Bar $ $ Chart$')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Grouped Bar Chart",
"_____no_output_____"
]
],
[
[
"# Grouped Bar Chart\n\nplt.figure(figsize=(7,9))\n\n# set width of bar\nbarWidth = 0.25\n \n# set height of bar\ny1= np.array([17,18,29,40])\ny2 =np.array([20,21,22,23])\ny3 =np.array([5,9,11,12])\n \n# Set position of bar on X axis\npos1 = np.arange(len(y1))\npos2 = [x + barWidth for x in pos1]\npos3 = [x + barWidth for x in pos2]\n\n# Make the plot\nplt.bar(pos1, y1, color='#FBC02D', width=barWidth, label='Open')\nplt.bar(pos2, y2, color='#F57F17', width=barWidth, label='Closed')\nplt.bar(pos3, y3, color='#E65100', width=barWidth, label='Cancelled')\n\n# Add xticks on the middle of the group bars\nplt.xlabel('Assignee', fontweight='bold')\nplt.ylabel('Number of Tickets', fontweight='bold')\nplt.xticks([i + barWidth for i in range(len(y1))], ['Asif', 'Basit', 'Ravi', 'Minil'])\n\n# Create legend & Show graphic\nplt.legend()\nplt.show()\nnp.arange(len(y1))",
"_____no_output_____"
]
],
[
[
"### Stacked Vertical Bar",
"_____no_output_____"
]
],
[
[
"plt.style.use('seaborn-darkgrid')\nx1= ['Asif','Basit','Ravi','Minil'] \ny1= [17,18,29,40]\ny2 = [20,21,22,23]\nplt.figure(figsize=(8,5))\nplt.barh(x1,y1,label = \"Open Tickets\",color = '#FF6F00')\nplt.barh(x1,y2,label = \"Closed Tickets\", left = y1 , color = '#FFB300')\nplt.xlabel('$X$')\nplt.ylabel('$Y$')\nplt.title ('$Bar $ $ Chart$')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Displaying values in Bar Charts",
"_____no_output_____"
]
],
[
[
"# Displaying values in the stacked vertical bars using plt.text()\nplt.style.use('seaborn-darkgrid')\nx1= ['Asif','Basit','Ravi','Minil'] \ny1= [17,18,29,40]\ny2 = [20,21,22,23]\nplt.figure(figsize=(5,7))\nplt.bar(x1,y1,label = \"Open Tickets\",width = 0.5,color = '#FF6F00')\nplt.bar(x1,y2,label = \"Closed Tickets\",width = 0.5 ,bottom = y1 , color = '#FFB300')\nplt.xlabel('$X$')\nplt.ylabel('$Y$')\nplt.title ('$Bar $ $ Chart$')\nfor x,y in zip(x1,y1):\n plt.text(x, y-10, '%d' % y, ha='center' , va= 'bottom')\n\nfor x,y,z in zip(x1,y2,y1):\n plt.text(x, y+z-10, '%d' % y, ha='center' , va= 'bottom')\n \nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"# Displaying values in the stacked horizontal bars using plt.text()\nplt.style.use('seaborn-darkgrid')\nx1= ['Asif','Basit','Ravi','Minil'] \ny1= [17,18,29,40]\ny2 = [20,21,22,23]\nplt.figure(figsize=(8,5))\nplt.barh(x1,y1,label = \"Open Tickets\",color = '#FF6F00')\nplt.barh(x1,y2,label = \"Closed Tickets\", left = y1 , color = '#FFB300')\nplt.xlabel('$X$')\nplt.ylabel('$Y$')\n\nfor x,y in zip(x1,y1):\n plt.text(y-10, x, '%d' % y, ha='center' , va= 'bottom')\n \nfor x,y,z in zip(x1,y2,y1):\n plt.text(y+z-10, x, '%d' % y, ha='center' , va= 'bottom')\n \nplt.title ('$Bar $ $ Chart$')\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"# Displaying values at the top of the Grouped Bar Chart using plt.text()\nplt.figure(figsize=(7,9))\n\n# set width of bar\nbarWidth = 0.25\n \n# set height of bar\ny1= np.array([17,18,29,40])\ny2 =np.array([20,21,22,23])\ny3 =np.array([5,9,11,12])\n \n# Set position of bar on X axis\npos1 = np.arange(len(y1))\npos2 = [x + barWidth for x in pos1]\npos3 = [x + barWidth for x in pos2]\n\n# Make the plot\nplt.bar(pos1, y1, color='#FBC02D', width=barWidth, label='Open')\nplt.bar(pos2, y2, color='#F57F17', width=barWidth, label='Closed')\nplt.bar(pos3, y3, color='#E65100', width=barWidth, label='Cancelled')\n\n# Add xticks on the middle of the group bars\nplt.xlabel('Assignee', fontweight='bold')\nplt.ylabel('Number of Tickets', fontweight='bold')\nplt.xticks([i + barWidth for i in range(len(y1))], ['Asif', 'Basit', 'Ravi', 'Minil'])\n\nfor x,y in zip(pos1,y1):\n plt.text(x, y, '%d' % y, ha='center' , va= 'bottom')\n \nfor x,y in zip(pos2,y2):\n plt.text(x, y, '%d' % y, ha='center' , va= 'bottom')\n\nfor x,y in zip(pos3,y3):\n plt.text(x, y, '%d' % y, ha='center' , va= 'bottom')\n\nplt.title ('$Grouped $ $ Bar $ $ Chart$')\n\n# Create legend & Show graphic\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ec90625ca1e16331136ccb8b41f3307df1c2400c | 791,825 | ipynb | Jupyter Notebook | project01_Facial_Keypoint_Detection/3. Facial Keypoint Detection, Complete Pipeline.ipynb | GabrielPila/udacity-computer-vision-nanodegree | 16d0d60d76633e4c6137cd658fcf41d46b0a8806 | [
"MIT"
] | 1 | 2021-05-29T21:23:43.000Z | 2021-05-29T21:23:43.000Z | project01_Facial_Keypoint_Detection/3. Facial Keypoint Detection, Complete Pipeline.ipynb | GabrielPila/udacity-computer-vision-nanodegree | 16d0d60d76633e4c6137cd658fcf41d46b0a8806 | [
"MIT"
] | null | null | null | project01_Facial_Keypoint_Detection/3. Facial Keypoint Detection, Complete Pipeline.ipynb | GabrielPila/udacity-computer-vision-nanodegree | 16d0d60d76633e4c6137cd658fcf41d46b0a8806 | [
"MIT"
] | null | null | null | 2,199.513889 | 323,020 | 0.960379 | [
[
[
"## Face and Facial Keypoint detection\n\nAfter you've trained a neural network to detect facial keypoints, you can then apply this network to *any* image that includes faces. The neural network expects a Tensor of a certain size as input and, so, to detect any face, you'll first have to do some pre-processing.\n\n1. Detect all the faces in an image using a face detector (we'll be using a Haar Cascade detector in this notebook).\n2. Pre-process those face images so that they are grayscale, and transformed to a Tensor of the input size that your net expects. This step will be similar to the `data_transform` you created and applied in Notebook 2, whose job was tp rescale, normalize, and turn any iimage into a Tensor to be accepted as input to your CNN.\n3. Use your trained model to detect facial keypoints on the image.\n\n---",
"_____no_output_____"
],
[
"In the next python cell we load in required libraries for this section of the project.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"#### Select an image \n\nSelect an image to perform facial keypoint detection on; you can select any image of faces in the `images/` directory.",
"_____no_output_____"
]
],
[
[
"import cv2\n# load in color image for face detection\nimage = cv2.imread('images/obamas.jpg')\n\n# switch red and blue color channels \n# --> by default OpenCV assumes BLUE comes first, not RED as in many images\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# plot the image\nfig = plt.figure(figsize=(9,9))\nplt.imshow(image)",
"_____no_output_____"
]
],
[
[
"## Detect all faces in an image\n\nNext, you'll use one of OpenCV's pre-trained Haar Cascade classifiers, all of which can be found in the `detector_architectures/` directory, to find any faces in your selected image.\n\nIn the code below, we loop over each face in the original image and draw a red square on each face (in a copy of the original image, so as not to modify the original). You can even [add eye detections](https://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html) as an *optional* exercise in using Haar detectors.\n\nAn example of face detection on a variety of images is shown below.\n\n<img src='images/haar_cascade_ex.png' width=80% height=80%/>\n",
"_____no_output_____"
]
],
[
[
"# load in a haar cascade classifier for detecting frontal faces\nface_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')\n\n# run the detector\n# the output here is an array of detections; the corners of each detection box\n# if necessary, modify these parameters until you successfully identify every face in a given image\nfaces = face_cascade.detectMultiScale(image, 1.2, 2)\n\n# make a copy of the original image to plot detections on\nimage_with_detections = image.copy()\n\n# loop over the detected faces, mark the image where each face is found\nfor (x,y,w,h) in faces:\n # draw a rectangle around each detected face\n # you may also need to change the width of the rectangle drawn depending on image resolution\n cv2.rectangle(image_with_detections,(x,y),(x+w,y+h),(255,0,0),3) \n\nfig = plt.figure(figsize=(9,9))\n\nplt.imshow(image_with_detections)",
"_____no_output_____"
]
],
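[
[
"As an *optional* extension of the exercise mentioned above, the same Haar Cascade approach can be used to detect eyes inside each detected face. The cell below is only a sketch: it assumes an eye cascade XML (such as OpenCV's `haarcascade_eye.xml`) has been copied into the `detector_architectures/` directory, and the `detectMultiScale` parameters may need tuning for your image.",
"_____no_output_____"
]
],
[
[
"# OPTIONAL sketch: detect eyes inside each detected face region\n# ASSUMPTION: an eye cascade file (e.g. OpenCV's haarcascade_eye.xml) is available at this path\neye_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_eye.xml')\n\n# detect on a grayscale copy, draw on a color copy\ngray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\nimage_with_eyes = image_with_detections.copy()\n\nfor (x,y,w,h) in faces:\n    # restrict the eye search to the detected face region\n    roi_gray = gray[y:y+h, x:x+w]\n    eyes = eye_cascade.detectMultiScale(roi_gray)\n    for (ex,ey,ew,eh) in eyes:\n        # eye coordinates are relative to the face ROI, so offset them by (x, y)\n        cv2.rectangle(image_with_eyes, (x+ex, y+ey), (x+ex+ew, y+ey+eh), (0,255,0), 2)\n\nfig = plt.figure(figsize=(9,9))\nplt.imshow(image_with_eyes)",
"_____no_output_____"
]
],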
[
[
"## Loading in a trained model\n\nOnce you have an image to work with (and, again, you can select any image of faces in the `images/` directory), the next step is to pre-process that image and feed it into your CNN facial keypoint detector.\n\nFirst, load your best model by its filename.",
"_____no_output_____"
]
],
[
[
"import torch\nfrom models import Net\n\nnet = Net()\n\n## TODO: load the best saved model parameters (by your path name)\n## You'll need to un-comment the line below and add the correct name for *your* saved model\nnet.load_state_dict(torch.load('saved_models/keypoints_model_2.pt'))\n\n## print out your net and prepare it for testing (uncomment the line below)\nnet.eval()",
"_____no_output_____"
],
[
"print(net.conv1.weight.shape)\nprint(net.conv2.weight.shape)\nprint(net.conv3.weight.shape)\nprint(net.fc1.weight.shape)\nprint(net.fc2.weight.shape)",
"torch.Size([32, 1, 5, 5])\ntorch.Size([64, 32, 5, 5])\ntorch.Size([128, 64, 5, 5])\ntorch.Size([1000, 73728])\ntorch.Size([136, 1000])\n"
],
[
"!ls saved_models/keypoints_model_1.pt",
"saved_models/keypoints_model_1.pt\r\n"
]
],
[
[
"## Keypoint detection\n\nNow, we'll loop over each detected face in an image (again!) only this time, you'll transform those faces in Tensors that your CNN can accept as input images.\n\n### TODO: Transform each detected face into an input Tensor\n\nYou'll need to perform the following steps for each detected face:\n1. Convert the face from RGB to grayscale\n2. Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]\n3. Rescale the detected face to be the expected square size for your CNN (224x224, suggested)\n4. Reshape the numpy image into a torch image.\n\n**Hint**: The sizes of faces detected by a Haar detector and the faces your network has been trained on are of different sizes. If you find that your model is generating keypoints that are too small for a given face, try adding some padding to the detected `roi` before giving it as input to your model.\n\nYou may find it useful to consult to transformation code in `data_load.py` to help you perform these processing steps.\n\n\n### TODO: Detect and display the predicted keypoints\n\nAfter each face has been appropriately converted into an input Tensor for your network to see as input, you can apply your `net` to each face. The ouput should be the predicted the facial keypoints. These keypoints will need to be \"un-normalized\" for display, and you may find it helpful to write a helper function like `show_keypoints`. You should end up with an image like the following with facial keypoints that closely match the facial features on each individual face:\n\n<img src='images/michelle_detected.png' width=30% height=30%/>\n\n\n",
"_____no_output_____"
]
],
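[
[
"One way to keep the display code tidy is to write the `show_keypoints` helper suggested above. The version below is only a minimal sketch: it assumes the keypoints passed in have already been \"un-normalized\" into pixel coordinates of the face image.",
"_____no_output_____"
]
],
[
[
"# Minimal sketch of the show_keypoints helper suggested above.\n# ASSUMPTION: key_pts is an (N, 2) array already in pixel coordinates of face_img.\ndef show_keypoints(face_img, key_pts):\n    # show the (grayscale) face image\n    plt.imshow(face_img, cmap='gray')\n    # overlay the predicted keypoints\n    plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')",
"_____no_output_____"
]
],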
[
[
"image_copy = np.copy(image)\nnew_size = 224\nfig = plt.figure(figsize=(10,9))\n\n# loop over the detected faces from your haar cascade\nfor i, (x,y,w,h) in enumerate(faces):\n \n # Select the region of interest that is the face in the image \n margin = int(w*0.35)\n roi = image_copy[max(y-margin+15,0):min(y+h+margin+10,image.shape[0]), \n max(x-margin,0):min(x+w+margin,image.shape[1])]\n \n ## TODO: Convert the face region from RGB to grayscale\n roi_gray = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)\n \n ## TODO: Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]\n roi_gray = roi_gray/255\n \n ## TODO: Rescale the detected face to be the expected square size for your CNN (224x224, suggested)\n roi_gray = cv2.resize(roi_gray, (new_size,new_size))\n \n ## TODO: Reshape the numpy image shape (H x W x C) into a torch image shape (C x H x W)\n roi_gray = roi_gray.reshape(1, 1, new_size,new_size)\n roi_gray = torch.tensor(roi_gray, dtype=torch.float32)\n \n ## TODO: Make facial keypoint predictions using your loaded, trained network \n predicted_key_pts = net(roi_gray)\n predicted_key_pts = predicted_key_pts.reshape(-1,2).detach().numpy() *50+100 \n\n ## TODO: Display each detected face and the corresponding keypoints \n roi_gray_img = np.squeeze(roi_gray.numpy())\n fig.add_subplot(2, 2, i+1, xticks=[], yticks=[])\n plt.imshow(roi_gray_img, cmap='gray')\n plt.scatter(predicted_key_pts[:, 0], predicted_key_pts[:, 1], s=20, marker='.', c='m')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
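[
"markdown"
],
[
"code"
],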
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
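[
"markdown"
],
[
"code"
],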
[
"code"
]
] |
ec906af9b1692c0cbcb107c6af54c8b30caff526 | 1,270 | ipynb | Jupyter Notebook | lecture/Lesson 01/Lesson 01 - Exceptions - TryCatch.ipynb | shaheen19/Adv_Py_Scripting_for_GIS_Course | d5e3109c47b55d10a7b8c90e5eac837f659af200 | [
"Apache-2.0"
] | 7 | 2020-01-22T14:22:57.000Z | 2021-12-22T11:33:40.000Z | lecture/Lesson 01/Lesson 01 - Exceptions - TryCatch.ipynb | achapkowski/Adv_Py_Scripting_for_GIS_Course | d5e3109c47b55d10a7b8c90e5eac837f659af200 | [
"Apache-2.0"
] | null | null | null | lecture/Lesson 01/Lesson 01 - Exceptions - TryCatch.ipynb | achapkowski/Adv_Py_Scripting_for_GIS_Course | d5e3109c47b55d10a7b8c90e5eac837f659af200 | [
"Apache-2.0"
] | 2 | 2020-04-22T11:33:01.000Z | 2021-01-04T21:16:04.000Z | 21.896552 | 162 | 0.534646 | [
[
[
"# Exception Handling in Python",
"_____no_output_____"
]
],
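[
[
"The lecture video below walks through Python's `try`/`except` handling. As a quick reference, the next cell adds a small, self-contained example of the full pattern (`try`/`except`/`else`/`finally`); the function name and values are illustrative only and are not taken from the lecture.",
"_____no_output_____"
]
],
[
[
"# A minimal try / except / else / finally example (illustrative values only)\ndef safe_divide(a, b):\n    try:\n        result = a / b                     # code that may raise an exception\n    except ZeroDivisionError as err:\n        print('Cannot divide by zero:', err)\n        result = None\n    except TypeError as err:\n        print('Both arguments must be numbers:', err)\n        result = None\n    else:\n        print('Division succeeded')        # runs only when no exception was raised\n    finally:\n        print('safe_divide finished')      # always runs\n    return result\n\nprint(safe_divide(10, 2))\nprint(safe_divide(10, 0))",
"_____no_output_____"
]
],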
[
[
"from IPython.display import HTML\n\nHTML('<html><iframe allowfullscreen=\"\" frameborder=\"0\" height=\"270\" src=\"https://www.youtube.com/embed/nqGhjLUhyDc\" width=\"480\"></iframe></html>')",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
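[
"markdown"
],
[
"code"
],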
[
"code"
]
] |