# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/mpfoster/Biochem6765/blob/master/example_simple_stats_6765.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="NZiEF32pio3r" # # Limits of statistical metrics # We will examine four distinct datasets using common statistical metrics, and ask how well those metrics distinguish the datasets and how representative of the data the statistics are. # # For each dataset we will compute these metrics: # * _mean_ of $x$ and $y$ values: # # $$\bar{x} = \frac{1}{N}\sum_{i=1}^N x_i$$ # # * _sample variance_ of $x$ and $y$: # # $$s^2 = \frac{1}{N-1}\sum_i (x_i-\bar{x})^2$$ # # * _sample standard deviation_ of $x$ and $y$: # # $$s = \sqrt{\frac{1}{N-1}\sum_i (x_i-\bar{x})^2}$$ # # * correlation coefficient # # $$\rho = \frac{\overline{xy}-\bar{x}\bar{y}}{\sigma_x\sigma_y}$$ # * Linear regression line # $$y = mx+b$$ # # If you prefer to use another tool to perform this analysis (e.g., MS Excel, Google Sheets), the data file, in _csv_ format, can be downloaded from [this link](https://github.com/mpfoster/Biochem6765/raw/master/data/a-data.csv). # # What follows are tips for performing the analysis using Python. # + id="cgNHPKfBio31" outputId="60f98f14-4c6f-4a86-d9f2-3e15a9138c1f" # we'll use pandas to read data from the web and to facilitate analysis import pandas as pd df = pd.read_csv('https://github.com/mpfoster/Biochem6765/raw/master/data/a-data.csv') df # + [markdown] id="8YMnTrlLio36" # Note that there are 44 rows (starting with the 0th row); that's 11 $xy$ pairs for each of datasets I-IV. We could split the data into four separate dataframes by issuing commands like: # + id="5rbQQ7ZPio36" outputId="496b3013-2d96-42db-a06f-2863d51aa4a1" df1=df.query("dataset == 'I'") # all entries with "dataset == 'I'" will be placed in the new df1 df1 # + id="xeDH6FZhio37" outputId="eee3b7fe-4298-4aaf-c97d-b542c64162e5" df1.describe() # the command prints out basic statistics on the df # + id="VqLOMRU2io39" outputId="65c61cfd-fc12-4bc1-b754-0521365257ef" # df.std() by default computes the sample standard dev, as per ddof=1 # the variance is the square of the stdev print('V(x,y) = %.03f, %.03f' % (df1.x.std()**2, df1.y.std()**2)) # + id="ipOvjL7rio3_" outputId="0d0d6c24-180d-4018-c333-944e74b8ca68" # covariance matrix for x and y df1.cov() # + id="s1Udiu7Uio4A" outputId="cc7688e1-7653-4e5f-d2cb-099be314beba" # (Pearson) correlation between x and y df1.corr() # + [markdown] id="7btrXHD1io4C" # There are many Python tools for linear regression; here we'll just use the polynomial fit function in numpy (a line is a first-order polynomial).
NumPy's `polyfit` function takes as input a paired list of x and y values, and the polynomial order, and returns the coefficients of the polynomial; in this case, the slope and y-intercept: # + id="FfGCap58io4D" outputId="972f0882-6898-4aae-f1f3-e078951920e8" import numpy as np m,b = np.polyfit(df1.x,df1.y,1) print('slope: %.3g, intercept: %.3g' % (m,b)) fitline = m*df1.x+b df1.loc[:,('fit')]= fitline # add a column with the best-fit values # + id="gfMYF_mIio4E" outputId="f7f6772c-b147-4809-d0a6-9d766b691c78" import matplotlib.pyplot as plt plt.plot(df1.x, df1.y,'o') plt.plot(df1.x, df1.fit,'-') plt.title('slope: %.3g, intercept: %.3g' % (m,b)) # + [markdown] id="LsbuJ-Seio4F" # We can then repeat the above analysis for each of the 4 datasets. # # (If interested, the process could be streamlined by making use of the Pandas `groupby` function, as sketched below.)
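# + [markdown]
# The cell below is an editor's addition, not part of the original notebook: a minimal
# sketch of the `groupby`-based streamlining suggested above. It reuses the same CSV and
# its `dataset`, `x`, `y` columns; the helper name `summarize` is arbitrary.
# +
import numpy as np
import pandas as pd

df = pd.read_csv('https://github.com/mpfoster/Biochem6765/raw/master/data/a-data.csv')

def summarize(g):
    # slope and intercept of the least-squares line, plus the basic statistics computed above
    m, b = np.polyfit(g.x, g.y, 1)
    return pd.Series({'x_mean': g.x.mean(), 'y_mean': g.y.mean(),
                      'x_var': g.x.var(), 'y_var': g.y.var(),   # ddof=1 by default
                      'r': g.x.corr(g.y), 'slope': m, 'intercept': b})

print(df.groupby('dataset').apply(summarize))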
example_simple_stats_6765.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import chess board = chess.Board() board dir(list(board.legal_moves)[0]) list(board.legal_moves)[0].promotion [x.from_square for x in list(board.legal_moves)] list(set([board.piece_at(x.from_square).piece_type for x in list(board.legal_moves)]))[0] list(set([board.piece_at(x.from_square).piece_type for x in list(board.legal_moves)])) chess.PAWN dir(board) list(board.generate_castling_moves()) dir(chess.Move) board.push(list(board.legal_moves)[0]) board board.pop() board import copy board2 = copy.deepcopy(board) board.push(list(board.legal_moves)[0]) board board2 import chess chess.PAWN chess.ROOK chess.BISHOP chess.QUEEN chess.KING import random random.random() < 0.5 chess.BLACK dir(chess.Move) import time time.sleep(1.8) def is_eat(board, move): to_square = move.to_square return board.piece_at(to_square) is not None list(board.legal_moves)[0].to_square is_eat(board, list(board.legal_moves)[0])
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Logistic regression in 2D # # Predict the color of a wine from its components. # %matplotlib inline from papierstat.datasets import load_wines_dataset data = load_wines_dataset() X = data.drop(['quality', 'color'], axis=1) y = data['color'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y) from statsmodels.discrete.discrete_model import Logit model = Logit(y_train == "white", X_train) res = model.fit() res.summary2() # We keep only the first two features. X_train2 = X_train.iloc[:, :2] # + import pandas df = pandas.DataFrame(X_train2.copy()) df['y'] = y_train import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 1, figsize=(4, 4)) df[df.y == "white"].plot(x="fixed_acidity", y="volatile_acidity", ax=ax, kind='scatter', label="white") df[df.y == "red"].plot(x="fixed_acidity", y="volatile_acidity", ax=ax, kind='scatter', label="red", color="red", s=2) ax.set_title("Red and white wines by two components"); # - from sklearn.linear_model import LogisticRegression model = LogisticRegression() model.fit(X_train2, y_train == "white") model.coef_, model.intercept_ # We draw the decision boundary on the plot: the line where coef[0]*x + coef[1]*y + intercept = 0. x0 = 3 y0 = -(model.coef_[0,0] * x0 + model.intercept_) / model.coef_[0,1] x1 = 14 y1 = -(model.coef_[0,0] * x1 + model.intercept_) / model.coef_[0,1] x0, y0, x1, y1 import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 1, figsize=(4, 4)) df[df.y == "white"].plot(x="fixed_acidity", y="volatile_acidity", ax=ax, kind='scatter', label="white") df[df.y == "red"].plot(x="fixed_acidity", y="volatile_acidity", ax=ax, kind='scatter', label="red", color="red", s=2) ax.plot([x0, x1], [y0, y1], 'y--', lw=4, label='boundary found\nby the logistic\nregression') ax.legend() ax.set_title("Red and white wines\nby two components");
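# + [markdown]
# Editor's addition (not in the original notebook): a quick, hedged check of how well the
# two-feature model separates the classes. It reuses the train/test split created above and
# restricts `X_test` to the same first two columns used for training.
# +
X_test2 = X_test.iloc[:, :2]
accuracy = model.score(X_test2, y_test == "white")   # mean accuracy on the held-out split
print("Test accuracy with two features: %.3f" % accuracy)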
_doc/notebooks/lectures/wines_color_line.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ADN # Implemente un programa que identifique a una persona en función de su ADN, según se indica a continuación. # <code>$ python dna.py databases/large.csv sequences/5.txt # Lavender</code> # ## Empezando # - Dentro de la carpeta data/adn se encuentra la información necesaria para resolver este ejercicio la cual incluye un archivo de base de datos y archivos txt con las cadenas adn # ## Antecedentes # El ADN, el portador de información genética en los seres vivos, se ha utilizado en la justicia penal durante décadas. Pero, ¿cómo funciona exactamente el perfil de ADN? Dada una secuencia de ADN, ¿cómo pueden los investigadores forenses identificar a quién pertenece? # # Bueno, el ADN es en realidad solo una secuencia de moléculas llamadas nucleótidos, dispuestas en una forma particular (una doble hélice). Cada nucleótido de ADN contiene una de cuatro bases diferentes: adenina (A), citosina (C), guanina (G) o timina (T). Cada célula humana tiene miles de millones de estos nucleótidos ordenados en secuencia. Algunas porciones de esta secuencia (es decir, el genoma) son iguales, o al menos muy similares, en casi todos los seres humanos, pero otras porciones de la secuencia tienen una mayor diversidad genética y, por tanto, varían más en la población. # # Un lugar donde el ADN tiende a tener una alta diversidad genética es en las repeticiones cortas en tándem (STR). Un STR es una secuencia corta de bases de ADN que tiende a repetirse consecutivamente numerosas veces en lugares específicos dentro del ADN de una persona. El número de veces que se repite un STR en particular varía mucho entre los individuos. En las siguientes muestras de ADN, por ejemplo, Alice tiene el STR <code>AGAT</code> repetido cuatro veces en su ADN, mientras que Bob tiene el mismo STR repetido cinco veces. # <img src="./img/adn.PNG"> # El uso de varios STR, en lugar de solo uno, puede mejorar la precisión del perfil de ADN. Si la probabilidad de que dos personas tengan el mismo número de repeticiones para un solo STR es del 5%, y el analista observa 10 STR diferentes, entonces la probabilidad de que dos muestras de ADN coincidan puramente por casualidad es de aproximadamente 1 en 1 billón (asumiendo que todos los STR son independientes entre sí). Entonces, si dos muestras de ADN coinciden en el número de repeticiones para cada uno de los STR, el analista puede estar bastante seguro de que provienen de la misma persona. CODIS, la base de datos de ADN del FBI , utiliza 20 STR diferentes como parte de su proceso de elaboración de perfiles de ADN. # # ¿Cómo sería una base de datos de ADN de este tipo? Bueno, en su forma más simple, podría imaginarse formateando una base de datos de ADN como un archivo CSV, donde cada fila corresponde a un individuo y cada columna corresponde a un STR particular. # <code>name,AGAT,AATG,TATC # Alice,28,42,14 # Bob,17,22,19 # Charlie,36,18,25</code> # Los datos del archivo anterior sugerirían que Alice tiene la secuencia <code>AGAT</code> repetida 28 veces consecutivamente en algún lugar de su ADN, la secuencia <code>AATG</code> repetida 42 veces y <code>TATC</code> repetida 14 veces. Bob, mientras tanto, tiene esos mismos tres STR repetidos 17, 22 y 19 veces, respectivamente. Y Charlie tiene esos mismos tres STR repetidos 36, 18 y 25 veces, respectivamente. 
# # Entonces, dada una secuencia de ADN, ¿cómo podría identificar a quién pertenece? Bueno, imagina que buscas en la secuencia de ADN la secuencia consecutiva más larga de <code>AGAT</code>s repetidos y descubres que la secuencia más larga tiene 17 repeticiones. Si luego encontrara que la secuencia más larga de <code>AATG</code> tiene 22 repeticiones y la secuencia más larga de <code>TATC</code> 19 repeticiones, eso proporcionaría una evidencia bastante buena de que el ADN era de Bob. Por supuesto, también es posible que una vez que tome los recuentos de cada uno de los STR, no coincida con nadie en su base de datos de ADN, en cuyo caso no tendrá ninguna coincidencia. # # En la práctica, dado que los analistas saben en qué cromosoma y en qué lugar del ADN se encontrará un STR, pueden localizar su búsqueda en una sección limitada del ADN. Pero ignoraremos ese detalle para este problema. # # Su tarea es escribir un programa que tomará una secuencia de ADN y un archivo CSV que contiene recuentos de STR para una lista de individuos y luego generará a quién pertenece el ADN (lo más probable). # ## Especificaciones # En un archivo llamado <code>dna.py</code>, implementar un programa que identifica a la que pertenece una secuencia de ADN. # # - El programa debe requerir como primer argumento de línea de comando el nombre de un archivo CSV que contiene los recuentos de STR para una lista de individuos y debe requerir como segundo argumento de línea de comando el nombre de un archivo de texto que contiene la secuencia de ADN para identificar. # # - Si su programa se ejecuta con el número incorrecto de argumentos en la línea de comandos, su programa debería imprimir un mensaje de error de su elección (con <code>print</code>). Si se proporciona el número correcto de argumentos, puede suponer que el primer argumento es de hecho el nombre de archivo de un archivo CSV válido y que el segundo argumento es el nombre de archivo de un archivo de texto válido. # # - Su programa debería abrir el archivo CSV y leer su contenido en la memoria. # - Puede suponer que la primera fila del archivo CSV serán los nombres de las columnas. La primera columna será la palabra <code>name</code> y las columnas restantes serán las propias secuencias STR. # # - Su programa debería abrir la secuencia de ADN y leer su contenido en la memoria. # - Para cada uno de los STR (de la primera línea del archivo CSV), su programa debe calcular la ejecución más larga de repeticiones consecutivas del STR en la secuencia de ADN para identificar. # - Si los conteos de STR coinciden exactamente con cualquiera de las personas en el archivo CSV, su programa debe imprimir el nombre de la persona que coincide. # - Puede suponer que los recuentos de STR no coincidirán con más de un individuo. # - Si los recuentos de STR no coinciden exactamente con ninguno de los individuos en el archivo CSV, su programa debería imprimir <code>"No match"</code>. # ## Uso # Su programa debería comportarse según los siguientes ejemplos. # <code>$ python dna.py databases/large.csv sequences/5.txt # Lavender</code> # # # <code>$ python dna.py # Usage: python dna.py data.csv sequence.txt </code> # # <code>$ python dna.py data.csv # Usage: python dna.py data.csv sequence.txt</code> # ## Sugerencia # - Puede encontrar <a href='https://docs.python.org/3/library/csv.html'><code>csv</code></a> útil el módulo de Python para leer archivos CSV en la memoria. 
Es posible que desee aprovechar <a href='https://docs.python.org/3/library/csv.html#csv.reader'><code>csv.reader</code></a> o <a href='https://docs.python.org/3/library/csv.html#csv.DictReader'><code>csv.DictReader</code></a>. # # - Las funciones <a href='https://docs.python.org/3.3/tutorial/inputoutput.html#reading-and-writing-files'><code>open</code></a> y <a href='https://docs.python.org/3.3/tutorial/inputoutput.html#methods-of-file-objects'><code>read</code></a> pueden resultar útiles para leer archivos de texto en la memoria. # - Considere qué estructuras de datos podrían ser útiles para realizar un seguimiento de la información en su programa. A <code>list</code> o a <code>dict</code> pueden resultar útiles. # ## Pruebas # Asegúrese de probar su código para cada uno de los siguientes. # # - Ejecute su programa como <code>python dna.py databases/small.csv sequences/1.txt.</code> Su programa debería generar <code>Bob</code>. # - Ejecute su programa como <code>python dna.py databases/small.csv sequences/2.txt.</code> Su programa debería generar <code>No</code> match. # - Ejecute su programa como <code>python dna.py databases/small.csv sequences/3.txt.</code> Su programa debería generar <code>No</code> match. # - Ejecute su programa como <code>python dna.py databases/small.csv sequences/4.txt.</code> Su programa debería generar <code>Alice</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/5.txt.</code> Su programa debería generar <code>Lavender</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/6.txt.</code> Su programa debería generar <code>Luna</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/7.txt.</code> Su programa debería generar <code>Ron</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/8.txt.</code> Su programa debería generar <code>Ginny</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/9.txt.</code> Su programa debería generar <code>Draco</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/10.txt.</code> Su programa debería generar <code>Albus</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/11.txt.</code> Su programa debería generar <code>Hermione</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/12.txt.</code> Su programa debería generar <code>Lily</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/13.txt.</code> Su programa debería generar <code>No</code> match. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/14.txt.</code> Su programa debería generar <code>Severus</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/15.txt.</code> Su programa debería generar <code>Sirius</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/16.txt.</code> Su programa debería generar <code>No</code> match. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/17.txt.</code> Su programa debería generar <code>Harry</code>. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/18.txt.</code> Su programa debería generar <code>No</code> match. # - Ejecute su programa como <code>python dna.py databases/large.csv sequences/19.txt.</code> Su programa debería generar <code>Fred</code>. 
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/20.txt.</code> Su programa debería generar <code>No</code> match.
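# + [markdown]
# Editor's addition: a minimal sketch of the `dna.py` program specified above (one possible
# solution, not the official one). It assumes the `databases/` and `sequences/` layout from
# the problem statement: read the CSV of STR counts, compute the longest consecutive run of
# each STR in the sequence, and print the matching name or "No match".
# +
import csv
import sys

def longest_run(sequence, pattern):
    # length of the longest run of consecutive repeats of `pattern` in `sequence`
    best = 0
    for i in range(len(sequence)):
        count = 0
        while sequence[i + count * len(pattern): i + (count + 1) * len(pattern)] == pattern:
            count += 1
        best = max(best, count)
    return best

def main():
    if len(sys.argv) != 3:
        print("Usage: python dna.py data.csv sequence.txt")
        sys.exit(1)

    with open(sys.argv[1]) as f:
        people = list(csv.DictReader(f))   # first column is "name", the rest are STRs
    with open(sys.argv[2]) as f:
        sequence = f.read().strip()

    strs = [key for key in people[0] if key != "name"]
    counts = {s: longest_run(sequence, s) for s in strs}

    for person in people:
        if all(int(person[s]) == counts[s] for s in strs):
            print(person["name"])
            return
    print("No match")

if __name__ == "__main__":
    main()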
Modulo4/Ejercicios/.ipynb_checkpoints/Problema1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:anaconda] # language: python # name: conda-env-anaconda-py # --- # + [markdown] slideshow={"slide_type": "slide"} # # *** # *** # # Graphlab 安装与使用 # *** # *** # # 王成军 # # <EMAIL> # # 计算传播网 http://computational-communication.com # + [markdown] slideshow={"slide_type": "slide"} # # Problem # # 只有低版本的anaconda才可以安装,强行安装还会破坏掉anaconda的jupyter notebook中的kernel,排除使用anaconda运行graphlab的方案。 # # + [markdown] slideshow={"slide_type": "slide"} # # Register for Academic Use of GraphLab Create # # https://turi.com/download/academic.html # + [markdown] slideshow={"slide_type": "slide"} # ## 查邮件 # # https://turi.com/download/install-graphlab-create.html?email=wangchengjun%40nju.edu.cn&key=4972-65DF-8E02-816C-AB15-021C-EC1B-0367&utm_medium=email&utm_source=transactional&utm_campaign=beta_registration_confirmation # + [markdown] slideshow={"slide_type": "slide"} # # Renew Academic License for GraphLab Create # # https://turi.com/download/renew.html # + [markdown] slideshow={"slide_type": "subslide"} # ### License Renewal Confirmation # Your academic license for GraphLab Create has been renewed. Please restart GraphLab Create while connected to the internet. # # # Email: <EMAIL>ch<EMAIL> # # Expiration Date: 03-13-2019 # + [markdown] slideshow={"slide_type": "subslide"} # # Python 2.7.x # # GraphLab Create installation requires a Python 2.7.x environment and pip version >= 7 and Anaconda2 v4.0.0 (64-bit). IPython Notebook is recommended. # - # # # To install a different version of Python without overwriting the current version # # https://conda.io/docs/user-guide/tasks/manage-python.html # # Creating a new environment and install the second Python version into it. To create the new environment for Python 2.7, in your Terminal window or an Anaconda Prompt, run: # > conda create -n py27 python=2.7 anaconda # # + [markdown] slideshow={"slide_type": "subslide"} # # ## Activate the new environment ** 切换到新环境** # # - linux/Mac下需要使用: `source activate py27` # - windows需要使用: `activate py27` # # **退出环境: `source deactivate py27` # 也可以使用** `activate root`切回root环境 # # 3. [Verify that the new environment is your current environment.](https://conda.io/docs/user-guide/tasks/manage-environments.html#determine-current-env) # 4. To verify that the current environment uses the new Python version, in your Terminal window or an Anaconda Prompt, run: `python --version` # + slideshow={"slide_type": "subslide"} # ! 
python --version # + [markdown] slideshow={"slide_type": "slide"} # # Install your licensed copy of GraphLab Create # pip install --upgrade --no-cache-dir https://get.graphlab.com/GraphLab-Create/2.1/your registered email address here/your product key here/GraphLab-Create-License.tar.gz # + [markdown] slideshow={"slide_type": "fragment"} # # Open a terminal and input: # # pip install --upgrade --no-cache-dir https://get.graphlab.com/GraphLab-Create/2.1/[email protected]/4972-65DF-8E02-816C-AB15-021C-EC1B-0367/GraphLab-Create-License.tar.gz # # + [markdown] slideshow={"slide_type": "subslide"} # # Error # Could not find a version that satisfies the requirement graphlab-create>=2.1 (from GraphLab-Create-License==2.1) (from versions: ) # No matching distribution found for graphlab-create>=2.1 (from GraphLab-Create-License==2.1) # + [markdown] slideshow={"slide_type": "slide"} # # # 使用方法 # # https://turi.com/learn/userguide/ # # GraphLab Create is a Python package that allows programmers to perform end-to-end large-scale data analysis and data product development. # # - Data ingestion and cleaning with SFrames. SFrame is an efficient disk-based tabular data structure that is not limited by RAM. This lets you scale your analysis and data processing to handle terabytes of data, even on your laptop. # # - Data exploration and visualization with GraphLab Canvas. GraphLab Canvas is a browser-based interactive GUI that allows you to explore tabular data, summary plots and statistics. # # - Network analysis with SGraph. SGraph is a disk-based graph data structure that stores vertices and edges in SFrames. # # - Predictive model development with machine learning toolkits. GraphLab Create includes several toolkits for quick prototyping with fast, scalable algorithms. # # - Production automation with data pipelines. Data pipelines allow you to assemble reusable code tasks into jobs and automatically run them on common execution environments (e.g. Amazon Web Services, Hadoop). # + slideshow={"slide_type": "slide"} from graphlab import SGraph, Vertex, Edge g = SGraph() verts = [Vertex(0, attr={'breed': 'labrador'}), Vertex(1, attr={'breed': 'labrador'}), Vertex(2, attr={'breed': 'vizsla'})] g = g.add_vertices(verts) g = g.add_edges(Edge(1, 2)) print(g) # + slideshow={"slide_type": "slide"} from graphlab import SGraph, Vertex, Edge g = SGraph() verts = [Vertex(0, attr={'breed': 'labrador'}), Vertex(1, attr={'breed': 'labrador'}), Vertex(2, attr={'breed': 'vizsla'})] g = g.add_vertices(verts) g = g.add_edges(Edge(1, 2)) print g # - g.show() # + slideshow={"slide_type": "slide"} from graphlab import SFrame,SGraph edge_data = SFrame.read_csv('../data/bond_edges.csv') #'https://static.turi.com/datasets/bond/bond_edges.csv') g = SGraph() g = g.add_edges(edge_data, src_field='src', dst_field='dst') print(g) # + slideshow={"slide_type": "subslide"} vertex_data = SFrame.read_csv('https://static.turi.com/datasets/bond/bond_vertices.csv') g = SGraph(vertices=vertex_data, edges=edge_data, vid_field='name', src_field='src', dst_field='dst') # + slideshow={"slide_type": "subslide"} g.show(vlabel='id', highlight=['<NAME>', 'Moneypenny'], \ arrows=True) # + [markdown] slideshow={"slide_type": "slide"} # # 阅读材料 # - https://turi.com/learn/userguide/ #
code/03.graphlab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="hZIlzfZLFTKJ" from sentence_transformers import SentenceTransformer import scipy # + id="3BEHRaggO_u3" colab={"base_uri": "https://localhost:8080/", "height": 136, "referenced_widgets": ["c3b087ef6c2c4836b116136b3a8aed96", "<KEY>", "<KEY>", "6c8a4e42f72f4f1191439035e3d68c14", "<KEY>", "<KEY>", "abb13724282b4cc1aed1581a711ce53d", "c443e3a2a66b42feb12d3fe59c2feddd"]} outputId="d0830089-e569-4285-9398-c726b69addaa" model = SentenceTransformer('bert-base-nli-mean-tokens') # + id="G3SGmmkLRTx4" sentences = [] # + id="wGi0HzQHFeLJ" cellView="form" input_data = "<NAME> was born in 1951 and has been making independent feminist films since the late 1970s.[3] Much of her filmmaking is collaborative. She began her career in the context of a larger feminist movement in Italy of the 1970s.[3] For her 1983 film Scuola senza fine (School without End), she put together a group of amateur women to make the film.[3] The group of former housewives had completed a 150-hour secondary school diploma course in 1976 and did not want to stop learning after it ended.[4] With the help of their teacher, they formed a study and research group. Monti shot the film about them from 1979\u20131981, with the first half of it being made collectively by the group. It was completed in 1983.[4] In 1986, Monti made a documentary called Filo a catena about the conditions of female textile workers" sentences.append(input_data) # + id="4XWOtzjiUgwQ" colab={"base_uri": "https://localhost:8080/"} outputId="9d14cd81-6694-4f94-9c02-b0d95024fa56" sentences # + id="Xumlsr0jRP26" sentence_embeddings = model.encode(sentences) # + id="Vo0_UmAMF21-" colab={"base_uri": "https://localhost:8080/"} cellView="form" outputId="68fa048d-9ccb-42c0-aa67-503a0b0e0ee8" query = "The austere stone facade, rusticated inferiorly, has bronze doors; the lunettes above have a relief depicting the Immaculate Conception and Jesus by the sculptor <NAME>. On the roof of the main entrance is a much restored fresco of St Francis of Assisi receiving the Stigmata (1696) by an unknown painter. The church also has a painting of the Immaculate Conception with the Holy Spirit and St Joseph and Saints (1685) by <NAME>." #@param {type: 'string'} queries = [query] query_embeddings = model.encode(queries) number_top_matches = len(sentences) print("Semantic Search Results") for query, query_embedding in zip(queries, query_embeddings): distances = scipy.spatial.distance.cdist([query_embedding], sentence_embeddings, "cosine")[0] results = zip(range(len(distances)), distances) results = sorted(results, key=lambda x: x[1]) print("\n\n======================\n\n") print("Query:", query) print(f"\nSimilar {number_top_matches} sentence(s) in corpus:") cos_scores = [] for idx, distance in results[0:number_top_matches]: print(sentences[idx].strip(), "(Cosine Score: %.4f)" % (1-distance)) cos_scores.append(1-distance) # + id="ZcFZi-8wIqOU" colab={"base_uri": "https://localhost:8080/"} outputId="e5c2dc77-663a-4d61-c558-94e4485f0ead" b = 0.5 max_marks = 5 max_score = max(cos_scores) marks_obtained = max([((max_score-b+0.05)/(1-b))*max_marks if max_score < 0.85 else max_marks, 0]) print("Marks: ", "%.2f" % marks_obtained, "/", "%.2f" % max_marks)
notebooks/TextSimilarity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np from pyecharts.charts import * from pyecharts.components import Table from pyecharts import options as opts from pyecharts.faker import Faker import itertools import warnings warnings.filterwarnings("ignore") # ### 变量 # # 数据表格命名即为symbol变量名字,其中首行字段分别应为**Date, Open, High, Low, Close, Volume, Market Cap**, 其中Date与Close是必要的,区分大小写 symbol = '铁矿石' # ### 读取与输出 df = pd.read_excel('%s.xlsx' % symbol) statistic_analysis(df, symbol) # ### 批量分析 symbol_list = ['btc', '铁矿石'] for symbol in symbol_list: print('现在处理%s的数据' % symbol) df = pd.read_excel('%s.xlsx' % symbol) statistic_analysis(df, symbol) # ### 具体分析代码 def statistic_analysis(df, symbol): print('以下是%s的基础性统计分析:' % symbol) # ===清洗数据 df.sort_values(by=['Date'], inplace=True) df.drop_duplicates(subset=['Date'], inplace=True) df.reset_index(inplace=True, drop=True) df['Date'] = pd.to_datetime(df['Date']) # 转换时间格式 df = df.resample(rule='1D', on='Date', label='left', closed='left').agg( {'Close': 'last', 'Open': 'first', 'High': 'max', 'Low': 'min', 'Volume': 'sum', 'Market Cap': 'last', }) df.reset_index(inplace=True) df['day_return'] = df['Close'].pct_change() # 计算日收益率 df = df.dropna() print('前十行数据:', '\n', df.head(10)) # === 简单统计分析 print('\n', '------------------------', '\n') print('日收益率最大值: ', df['day_return'].max(), '\n', '日收益率最小值: ', df['day_return'].min()) print('\n', '------------------------', '\n') print('日单向波动大于10%的时间与幅度: ', '\n', df.loc[abs(df['day_return']) > 0.1, ['Date', 'day_return']]) print('\n', '------------------------', '\n') print('阳线数量:', len(df[df['day_return']>0]), '阴线数量', len(df[df['day_return']<0])) print('\n', '------------------------', '\n') # === 连续盈亏统计 print('\n', '------------------------', '\n') print('最大连续上涨笔数: ', max( [len(list(v)) for k, v in itertools.groupby(np.where(df['day_return'] > 0, 1, np.nan))]), '\n') # 最大连续上涨笔数 print('最大连续下跌笔数: ', max( [len(list(v)) for k, v in itertools.groupby(np.where(df['day_return'] < 0, 1, np.nan))])) # 最大连续下跌笔数 print('\n', '------------------------', '\n') # 连续盈亏天数画图 df.loc[df['day_return'] > 0, 'larger_than_0'] = 1 df.loc[df['day_return'] < 0, 'larger_than_0'] = -1 df.dropna(subset=['larger_than_0'], inplace=True) condition = df['larger_than_0'] != df['larger_than_0'].shift() df.loc[condition, 'start_time'] = df['Date'] df['start_time'].fillna(method='ffill', inplace=True) df_list = [] for k, g in df.groupby('start_time'): g = g.copy() g.reset_index(drop=True, inplace=True) if g.loc[0, 'larger_than_0'] == -1: continue g['up_days'] = g.index + 1 g.set_index('Date', inplace=True) df_list.append(g) df_all = pd.concat(df_list, sort=False) for k, g in df.groupby('start_time'): g = g.copy() g.reset_index(drop=True, inplace=True) if g.loc[0, 'larger_than_0'] == 1: continue g['down_days'] = g.index + 1 g.set_index('Date', inplace=True) df_list.append(g) df_all = pd.concat(df_list, sort=False) long_group = pd.DataFrame(df_all['up_days'].groupby(df_all['up_days']).count()) short_group = pd.DataFrame(df_all['down_days'].groupby(df_all['down_days']).count()) long_group['up_days'] = (long_group['up_days'] - long_group['up_days'].shift(-1)).fillna(1) short_group['down_days'] = (short_group['down_days'] - short_group['down_days'].shift(-1)).fillna(1) long_and_short = Bar() if len(long_group.index) > len(short_group.index): indexOfbar = 
long_group.index else: indexOfbar = short_group.index long_and_short.add_xaxis(indexOfbar.tolist()) long_and_short.add_yaxis('连续上涨', long_group['up_days'].tolist()) long_and_short.add_yaxis('连续下跌', short_group['down_days'].tolist()) long_and_short.set_global_opts(title_opts=opts.TitleOpts(title="连续上涨下跌天数统计")) long_and_short.render('%s_连续上涨下跌天数统计.html' % symbol) print('连续盈亏天数柱状图已绘制完毕') # === 收益率画图 return_all = (Line() .add_xaxis(df['Date'].tolist()) .add_yaxis('day_return', df['day_return'].tolist(), label_opts=opts.LabelOpts(is_show=False)) .set_global_opts( datazoom_opts=[opts.DataZoomOpts(type_="inside")],) ) return_all.set_global_opts(title_opts=opts.TitleOpts(title="日收益率波动")) return_all.render('%s_日收益率波动图.html' % symbol) print('日收益率波动图已绘制完毕') # === 分布画图 # 收益率的分布情况 bins1 = np.arange(round(df['day_return'].min(), 2), -0.05, 0.02) bins2 = np.arange(-0.05, 0.05, 0.01) bins3 = np.arange(0.05, round(df['day_return'].max(), 2), 0.02) bins = np.append(bins1, bins2) bins = np.append(bins, bins3) df['day_return_layer'] = pd.cut(df['day_return'], bins) aggResult = df.groupby(by=['day_return_layer'])['day_return'].agg({'day_return':np.size}) pAggResute = pd.DataFrame(round(aggResult/aggResult.sum(), 4, )) distribution = Bar(init_opts=opts.InitOpts(width='1200px', height='600px')) distribution.add_xaxis(pAggResute.index.astype(str).tolist()) distribution.add_yaxis('', pAggResute['day_return'].tolist(), category_gap=0, color=Faker.rand_color()) distribution.set_global_opts(title_opts=opts.TitleOpts(title="日收益率分布")) distribution.set_global_opts(yaxis_opts=opts.AxisOpts(axistick_opts=opts.AxisTickOpts(is_show=False))) distribution.render('%s_日收益率分布图.html' % symbol) print('日收益率分布图已绘制完毕') # 收益率大于0.05和0.1的时间分布情况 df.loc[abs(df['day_return']) >= 0.05, 'Year1'] = df['Date'].dt.year df_ = df.loc[df['Year1'] != None, ] aggResult1 = df_.groupby(by=['Year1']).count() df.loc[abs(df['day_return']) >= 0.1, 'Year2'] = df['Date'].dt.year df_ = df.loc[df['Year2'] != None, ] aggResult2 = df_.groupby(by=['Year2']).count() distribution_year = Bar() distribution_year.add_xaxis(aggResult1.index.tolist()) distribution_year.add_yaxis('>0.05', aggResult1['Date'].tolist(), color=Faker.rand_color()) distribution_year.add_yaxis('>0.1', aggResult2['Date'].tolist(), color=Faker.rand_color()) distribution_year.set_global_opts(title_opts=opts.TitleOpts(title="大收益率时间分布")) distribution_year.render('%s_大收益率时间分布图.html' % symbol) print('大收益率时间分布图已绘制完毕') # === 周期性画图 df['year'] = df['Date'].dt.year year_bins_no = list(df['year'].drop_duplicates()) year_bins = list(df['year'].drop_duplicates()) year_bins.append(df['year'].max() + 1) # year_bins_ = pd.to_datetime(year_bins, format='%Y') # df['date_layer'] = pd.cut(df['Date'], year_bins_) circle_line = Line() circle_line.add_xaxis(list(range(len(df[df['year']==(df['year'].min()+1)]['year'])))) for _ in year_bins_no: circle_line.add_yaxis('%s' % _, df.loc[df['year']==int('%d' % _), 'Close'], label_opts=opts.LabelOpts(is_show=False)) circle_line.set_global_opts(title_opts=opts.TitleOpts(title="每年情况")) circle_line.render('%s_年周期图.html' % symbol) print('年周期图已绘制完毕') # ===++++月收益率系列 print('\n', '------------------------', '\n') print('接下来是以月收益率计算得到的结果') df_month = df.resample(rule='1M', on='Date', label='left', closed='left').agg( {'Close': 'last', 'Open': 'first', 'High': 'max', 'Low': 'min', 'Volume': 'sum', 'Market Cap': 'last', }) df_month.reset_index(inplace=True) df_month['month_return'] = df_month['Close'].pct_change() # 计算月收益率 df_month = df_month.dropna() print('前十行数据:', '\n', 
df_month.head(10)) month_return = (Line() .add_xaxis(df_month['Date'].tolist()) .add_yaxis('月收益率', df_month['month_return'].tolist(), label_opts=opts.LabelOpts(is_show=False))) month_return.set_global_opts(title_opts=opts.TitleOpts(title="月收益率波动")) month_return.render('%s_月收益率波动图.html' % symbol) print('月收益率波动图已绘制完毕') # === 月均收益率与胜率 month_return_groupby_mean = (df_month['month_return'].groupby(df_month['Date'].dt.month)).mean().round(2) month_return_groupby_median = (df_month['month_return'].groupby(df_month['Date'].dt.month)).median().round(2) month_return_groupby1 = df_month[df_month['month_return']>0].groupby(df_month['Date'].dt.month)[['month_return']].size() month_return_groupby2 = df_month.groupby(df_month['Date'].dt.month)['month_return'].size() month_return_groupby_win_rate = (month_return_groupby1 / month_return_groupby2).round(2) month_return_groupby_plt1 = (Bar() .add_xaxis(month_return_groupby_mean.index.tolist()) .add_yaxis('月均收益率', month_return_groupby_mean.tolist(), stack="stack1", yaxis_index=0) .add_yaxis('月收益率中位数', month_return_groupby_median.tolist(), stack="stack1", yaxis_index=0) .extend_axis(yaxis=opts.AxisOpts()) .set_global_opts(yaxis_opts=opts.AxisOpts(type_='value')) .set_series_opts( label_opts=opts.LabelOpts( position="inside", )) ) month_return_groupby_plt2 = (Line() .add_xaxis(['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']) .add_yaxis('胜率', month_return_groupby_win_rate.tolist(), yaxis_index=1, linestyle_opts=opts.LineStyleOpts(width=3)) .extend_axis(yaxis=opts.AxisOpts()) ) month_return_plt = month_return_groupby_plt2.overlap(month_return_groupby_plt1) month_return_plt.render('%s_月均收益率与胜算图.html' % symbol) print('月均收益率与胜算图已绘制完毕') # === 月累计收益率 df_month['month'] = df_month['Date'].dt.month df_month1 = df_month[df_month['month'] == 1] df_month2 = df_month[df_month['month'] == 2] df_month3 = df_month[df_month['month'] == 3] df_month4 = df_month[df_month['month'] == 4] df_month5 = df_month[df_month['month'] == 5] df_month6 = df_month[df_month['month'] == 6] df_month7 = df_month[df_month['month'] == 7] df_month8 = df_month[df_month['month'] == 8] df_month9 = df_month[df_month['month'] == 9] df_month10 = df_month[df_month['month'] == 10] df_month11 = df_month[df_month['month'] == 11] df_month12 = df_month[df_month['month'] == 12] month_cumprod = Line() month_cumprod.add_xaxis(list(range(df['year'].max() - df['year'].min() + 1))) for i in [df_month1, df_month2, df_month3, df_month4, df_month5, df_month6, df_month7, df_month8, df_month9, df_month10, df_month11, df_month12]: i['equity'] = (i['month_return'] + 1).cumprod() month_cumprod.add_yaxis(str(i['month'].tolist()[1]), i['equity'].tolist(), label_opts=opts.LabelOpts(is_show=False)) month_cumprod.set_global_opts(xaxis_opts=opts.AxisOpts(is_show=False)) month_cumprod.render('%s_月累计收益率图.html' % symbol) print('月累计收益率图已绘制完毕')
auto_statistic_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 - AzureML # language: python # name: python3-azureml # --- # # Deploying a web service to Azure Container Instance (ACI) # # This notebook shows the steps for deploying a model as service to ACI. The workflow is similar no matter where you deploy your model: # # 1. Register the model. # 2. Prepare to deploy. (Specify assets, usage, compute target.) # 3. Deploy the model to the compute target. # 4. Test the deployed model, also called a web service. # 5. Consume the model using Power BI from azureml.core import Workspace from azureml.core.compute import AksCompute, ComputeTarget from azureml.core.webservice import Webservice, AksWebservice from azureml.core.model import Model import azureml.core print(azureml.core.VERSION) # # Get workspace # Load existing workspace from the config file info. # + from azureml.core.workspace import Workspace ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n') # - # # Get or Register the model # If not already done, register an existing trained model, add description and tags. # # This is the model you've already trained using manual training or using [Automated Machine Learning](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments). # # In the code snippet below we're using the already trained model original_model.pkl that is saved in the folder that contains this notebook. We're registering this model with the name "IBM-attrition-model". Later on we will use the same name in the scoring script. # + from azureml.core.model import Model # if the model is already registered as part of training then uncomment the line below. Make sure model is registered with the name "IBM_attrition_model" model = Model(ws, 'aml-wrkshp-classif-empl-attrition') #Register the model # # if the model is not already registered as part of training register the original_model.pkl file provided in the same folder as this notebook # model = Model.register(model_path = "original_model.pkl", # this points to a local file # model_name = "IBM_attrition_model", # this is the name the model is registered as # tags = {'area': "HR", 'type': "attrition"}, # description = "Attrition model to understand attrition risk", # workspace = ws) print('Model name: ', model.name, '\n', 'Model description: ', model.description, '\n', 'Model version: ', model.version, sep='') # - # Name of the saved model as artifact model.get_sas_urls() # # Prepare to deploy # # To deploy the model, you need the following items: # # - **An entry script**, this script accepts requests, scores the requests by using the model, and returns the results. # - **Dependencies**, like helper scripts or Python/Conda packages required to run the entry script or model. # - **The deployment configuration** for the compute target that hosts the deployed model. This configuration describes things like memory and CPU requirements needed to run the model. # ## 1. Define your entry script and dependencies # # ### Entry script # # We will first write the entry script as shown below. Note a few points in the entry script. # # The script contains two functions that load and run the model: # # **init()**: Typically, this function loads the model into a global object. This function is run only once, when the Docker container for your web service is started. 
# # When you register a model, you provide a model name that's used for managing the model in the registry. You use this name with the Model.get_model_path() method to retrieve the path of the model file or files on the local file system. If you register a folder or a collection of files, this API returns the path of the directory that contains those files. # # **run(input_data)**: This function uses the model to predict a value based on the input data. Inputs and outputs of the run typically use JSON for serialization and deserialization. You can also work with raw binary data. You can transform the data before sending it to the model or before returning it to the client. # + # %%writefile score.py import os import json import numpy as np import pandas as pd from sklearn.linear_model import LogisticRegression #sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. #Please import this functionality directly from joblib, which can be installed with: pip install joblib. #from sklearn.externals import joblib import joblib from inference_schema.schema_decorators import input_schema, output_schema from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType input_sample = pd.DataFrame(data=[{'Age': 41, 'BusinessTravel': 'Travel_Rarely', 'DailyRate': 1102, 'Department': 'Sales', 'DistanceFromHome': 1, 'Education': 2, 'EducationField': 'Life Sciences', 'EnvironmentSatisfaction': 2, 'Gender': 'Female', 'HourlyRate': 94, 'JobInvolvement': 3, 'JobLevel': 2, 'JobRole': 'Sales Executive', 'JobSatisfaction': 4, 'MaritalStatus': 'Single', 'MonthlyIncome': 5993, 'MonthlyRate': 19479, 'NumCompaniesWorked': 8, 'OverTime': 0, 'PercentSalaryHike': 11, 'PerformanceRating': 3, 'RelationshipSatisfaction': 1, 'StockOptionLevel': 0, 'TotalWorkingYears': 8, 'TrainingTimesLastYear': 0, 'WorkLifeBalance': 1, 'YearsAtCompany': 6, 'YearsInCurrentRole': 4, 'YearsSinceLastPromotion': 0, 'YearsWithCurrManager': 5}]) output_sample = np.array([0]) def init(): # AZUREML_MODEL_DIR is an environment variable created during deployment. Join this path with the filename of the model file. # It holds the path to the directory that contains the deployed model (./azureml-models/$MODEL_NAME/$VERSION). # If there are multiple models, this value is the path to the directory containing all deployed models (./azureml-models). global model model_path = os.getenv('AZUREML_MODEL_DIR') if (model_path is None): model_path = '.' model_path = os.path.join(model_path, 'classif-empl-attrition.pkl') print(model_path) # Deserialize the model file back into a sklearn model model = joblib.load(model_path) @input_schema('data', PandasParameterType(input_sample)) @output_schema(NumpyParameterType(output_sample)) def run(data): try: result = model.predict(data) return json.dumps({"result": result.tolist()}) except Exception as e: result = str(e) return json.dumps({"error": result}) # Test the functions if run locally if __name__ == "__main__": init() prediction = run(input_sample) print(prediction) # - # ### Automatic schema generation # To automatically generate a schema for your web service, provide a sample of the input and/or output in the constructor for one of the defined type objects. The type and sample are used to automatically create the schema. Azure Machine Learning then creates an OpenAPI (Swagger) specification for the web service during deployment. 
# To use schema generation, include the _inference-schema_ package in your Conda environment file. # ### Define dependencies # # The following YAML is the Conda dependencies file we will use for inference. If you want to use automatic schema generation, your entry script must import the inference-schema packages. # + # %%writefile myenv.yml name: project_environment dependencies: - python=3.6.2 - pip: - azureml-core==1.17.0 - azureml-defaults==1.17.0 - scikit-learn==0.22.2.post1 - sklearn-pandas - inference-schema[numpy-support] - pandas - numpy # + from azureml.core import Environment # Instantiate environment myenv = Environment.from_conda_specification(name = "myenv", file_path = "myenv.yml") # - # ## 2. Define your inference configuration # # The inference configuration describes how to configure the model to make predictions. This configuration isn't part of your entry script. It references your entry script and is used to locate all the resources required by the deployment. It's used later, when you deploy the model. # + from azureml.core.model import InferenceConfig inference_config = InferenceConfig(entry_script='score.py', environment=myenv) # - # ## 3. Define your deployment configuration # # Before deploying your model, you must define the deployment configuration. The deployment configuration is specific to the compute target that will host the web service. The deployment configuration isn't part of your entry script. It's used to define the characteristics of the compute target that will host the model and entry script. # + from azureml.core.webservice import AciWebservice aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1, tags = {'area': "HR", 'type': "attrition"}, description='Explain predictions on employee attrition') # - # # ## 4. Deploy Model as Webservice on Azure Container Instance # # Deployment uses the inference configuration deployment configuration to deploy the models. The deployment process is similar regardless of the compute target. # # In summary, a deployed service is created from a model, script, and associated files. The resulting web service is a load-balanced, HTTP endpoint with a REST API. You can send data to this API and receive the prediction returned by the model. # + # Delete web service if already exists aci_webservice_name = 'predict-attrition-aci' try: service = Webservice(name=aci_webservice_name, workspace=ws) service.delete() print("The web service '", aci_webservice_name, "' has been deleted.", sep='') except Exception as e: if (e.args[0].split(':', 1)[0] == 'WebserviceNotFound'): print("The web service '", aci_webservice_name, "' doesn't exist.", sep='') # + aci_service = Model.deploy(ws, name=aci_webservice_name, models=[model], inference_config=inference_config, deployment_config=aci_config) aci_service.wait_for_deployment(True) # - print(aci_service.state) # In case of deploying error, debug using the logs print(service.get_logs()) # ## 5. Web service schema # # If you used automatic schema generation with your deployment, you can get the address of the OpenAPI specification for the service by using the swagger_uri property. (For example, print(service.swagger_uri).) Use a GET request or open the URI in a browser to retrieve the specification. 
print(service.swagger_uri) # ## Deploy Web Service on AKS # + from azureml.core.webservice import AksWebservice aks_target = AksCompute(ws,"inf-cluster-2") # + # Delete web service if already exists aks_webservice_name = 'predict-attrition-aks' try: service = Webservice(name=aks_webservice_name, workspace=ws) service.delete() print("The web service '", aks_webservice_name, "' has been deleted.", sep='') except Exception as e: if (e.args[0].split(':', 1)[0] == 'WebserviceNotFound'): print("The web service '", aks_webservice_name, "' doesn't exist.", sep='') # + # If deploying to a cluster configured for dev/test, ensure that it was created with enough # cores and memory to handle this deployment configuration. Note that memory is also used by # things such as dependencies and AML components. aks_config = AksWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1) aks_service = Model.deploy(ws, name=aks_webservice_name, models=[model], inference_config=inference_config, deployment_config=aks_config, deployment_target=aks_target) aks_service.wait_for_deployment(show_output = True) # - print(aks_service.state) # In case of deploying error, debug using the logs print(aks_service.get_logs()) # # Test the deployed model # # Every deployed web service provides a REST API, so you can create client applications in a variety of programming languages. If you've enabled key authentication for your service, you need to provide a service key as a token in your request header. If you've enabled token authentication for your service, you need to provide an Azure Machine Learning JWT token as a bearer token in your request header. # + import json import pandas as pd # the sample below contains the data for an employee that is not an attrition risk sample = pd.DataFrame(data=[{'Age': 49, 'BusinessTravel': 'Travel_Rarely', 'DailyRate': 1098, 'Department': 'Research & Development', 'DistanceFromHome': 4, 'Education': 2, 'EducationField': 'Medical', 'EnvironmentSatisfaction': 4, 'Gender': 'Female', 'HourlyRate': 21, 'JobInvolvement': 3, 'JobLevel': 2, 'JobRole': 'Laboratory Technician', 'JobSatisfaction': 3, 'MaritalStatus': 'Single', 'MonthlyIncome': 711, 'MonthlyRate': 2124, 'NumCompaniesWorked': 8, 'OverTime': 1, 'PercentSalaryHike': 8, 'PerformanceRating': 4, 'RelationshipSatisfaction': 3, 'StockOptionLevel': 0, 'TotalWorkingYears': 2, 'TrainingTimesLastYear': 0, 'WorkLifeBalance': 3, 'YearsAtCompany': 2, 'YearsInCurrentRole': 1, 'YearsSinceLastPromotion': 0, 'YearsWithCurrManager': 1}]) # the sample below contains the data for an employee that is an attrition risk # sample = pd.DataFrame(data=[{'Age': 49, 'BusinessTravel': 'Travel_Rarely', 'DailyRate': 1098, 'Department': 'Research & Development', 'DistanceFromHome': 4, 'Education': 2, 'EducationField': 'Medical', 'EnvironmentSatisfaction': 4, 'Gender': 'Female', 'HourlyRate': 21, 'JobInvolvement': 3, 'JobLevel': 2, 'JobRole': 'Laboratory Technician', 'JobSatisfaction': 3, 'MaritalStatus': 'Single', 'MonthlyIncome': 711, 'MonthlyRate': 2124, 'NumCompaniesWorked': 8, 'OverTime': 'Yes', 'PercentSalaryHike': 8, 'PerformanceRating': 4, 'RelationshipSatisfaction': 3, 'StockOptionLevel': 0, 'TotalWorkingYears': 2, 'TrainingTimesLastYear': 0, 'WorkLifeBalance': 3, 'YearsAtCompany': 2, 'YearsInCurrentRole': 1, 'YearsSinceLastPromotion': 0, 'YearsWithCurrManager': 1}]) # converts the sample to JSON string sample = pd.DataFrame.to_json(sample) # deserializes sample to a python object sample = json.loads(sample) # serializes sample to JSON formatted string as 
expected by the scoring script sample = json.dumps({"data":sample}) # + aci_webservice_name = 'predict-attrition-aci' aci_service = Webservice(name=aci_webservice_name, workspace=ws) aks_webservice_name = 'predict-attrition-aks' aks_service = Webservice(name=aks_webservice_name, workspace=ws) # + # Get predictions from ACI webservice prediction_from_aci = aci_service.run(sample) print(prediction_from_aci) # + # Get predictions from AKS webservice prediction_from_aks = aks_service.run(sample) print(prediction_from_aks) # - # # Consume the model using Power BI # You can also consume the model from Power BI. See details [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service#consume-the-service-from-power-bi). #
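# + [markdown]
# Editor's addition, a hedged sketch rather than the notebook's own method: calling the
# deployed endpoints as plain REST clients with `requests`, as described in the section on
# testing the deployed model. It reuses the JSON `sample` built above and assumes key
# authentication is enabled on the AKS service (the default for AKS deployments).
# +
import requests

# ACI service deployed above (no authentication enabled by default)
headers = {"Content-Type": "application/json"}
response = requests.post(aci_service.scoring_uri, data=sample, headers=headers)
print(response.json())

# AKS service: pass the primary key as a bearer token in the request header
key, _ = aks_service.get_keys()
headers["Authorization"] = "Bearer " + key
response = requests.post(aks_service.scoring_uri, data=sample, headers=headers)
print(response.json())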
02-training_and_inference/02.4-model_deployment/01-deploying_a_web_service_to_azure_container_instance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Pandas groupby # # Let's examine a dataframe # + import pandas as pd import numpy as np values = np.array([1, 3, 2, 4, 1, 6, 4]) example_df = pd.DataFrame({ 'value': values, 'even': values % 2 == 0, 'above_three': values > 3 }, index=['a', 'b', 'c', 'd', 'e', 'f', 'g']) print(example_df) # - # ### Group by one variable # # We see that `even` is a variable. Let's group the dataframe using the values of the `even` variable. grouped_data = example_df.groupby('even') print('\n') print(grouped_data.groups) print(' \nABOVE THREE') print(grouped_data.count()['above_three']) print('\n') # ### Group by multiple variables # # grouped_data = example_df.groupby(['even','above_three']) print('\n') print(grouped_data.groups) print('\n') # ### Get sum of each group grouped_data = example_df.groupby('even') print(grouped_data.sum()) # ### Limit columns in result # + grouped_data = example_df.groupby('even') # You can take one or more columns from the result DataFrame print(grouped_data.sum()['value']) print('\n') # Blank line to separate results # You can also take a subset of columns from the grouped data before # collapsing to a DataFrame. In this case, the result is the same. print(grouped_data['value'].sum()) # - # ### Subway data grouped by station # # We group the subway data into stations and get the mean ridership for each station. # + import os basepath = os.path.abspath('16_Pandas_groupby.py') dname = os.path.dirname(basepath) os.chdir(dname) # Declare the data file path if os.environ['COMPUTERNAME'] == 'JDAZO': filepath = os.path.normpath(os.path.join(basepath,'..\\..\\..\\data\\' \ 'nyc_subway_weather.csv')) elif os.environ['COMPUTERNAME'] == 'MELLOYELLO': filepath = os.path.normpath(os.path.join(basepath,'..\\..\\..\\data\\' \ 'nyc_subway_weather.csv')) # Load the subway dataset subway_df = pd.read_csv(filepath) # Glimpse at the top few records print(subway_df.head()) # - # The __`UNIT`__ column seems to contain the station ID, so we'll group by the `UNIT` variable. The **`ENTRIESn_hourly`** variable seems to hold the ridership values. Show the first five stations and their average hourly ridership. stations = subway_df.groupby('UNIT') station_ridership = stations['ENTRIESn_hourly'].mean() print(station_ridership.head(n=5)) # ### Subway ridership by day of the week subway_df.head() subway_df.groupby('day_week') subway_df.groupby('day_week').mean() subway_df.groupby('day_week').mean()['ENTRIESn_hourly'] ridership_by_day = subway_df.groupby('day_week').mean()['ENTRIESn_hourly'] # Let's create a plot to show ridership by day of week # + print('\n') # %pylab inline import seaborn as sns ridership_by_day.plot() print('\n') # - # Ridership is significantly lower on days 5 and 6 (Saturday and Sunday) than during the rest of the week. #
Gapminder2D/Gapminder2D/Pandas GroupBy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/PietrH/common_wikidata_props/blob/main/get_wikidata_botanical_collector_properties.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="s6s_F8rgBlDA" # # Counting the most common claims for an occupation on WikiData # + id="Cz2cHfxX6HRZ" colab={"base_uri": "https://localhost:8080/"} outputId="b1dcdffd-e459-49bb-8fc5-ef7ecc26616a" # mount google drive to store data to from google.colab import drive drive.mount('/gdrive') # + colab={"base_uri": "https://localhost:8080/"} id="oTHM-rMM1RAh" outputId="766db5b6-4ec8-4e27-9079-c51a06bfe51f" # !pip install sparqlwrapper # !pip install --upgrade plotly # # !pip install wikidata # https://rdflib.github.io/sparqlwrapper/ # + id="aoR2NyYPfUgt" import requests import re from collections import Counter import itertools as it import pandas as pd import sys from SPARQLWrapper import SPARQLWrapper, JSON import plotly.express as px # + id="gb7AaCbJfmdT" #@title Number of objects to retreive #@markdown Howmany objects should be queried from Wikidata? What is the limit? limit = 200000 #@param {type: "number"} #@markdown Select checkbox if only doing a quick test run: quick_test_run = False #@param {type: "boolean"} #@markdown Select checkbox to remove reference claims from the count: remove_references = True #@param {type: "boolean"} #@markdown --- if(quick_test_run): limit = 300 # + id="7CnDkiyw1GHk" endpoint_url = "https://query.wikidata.org/sparql" # botanical collector #query = """SELECT DISTINCT ?item ?itemLabel WHERE { # SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE]". } # { # SELECT DISTINCT ?item ?property ?propertyLabel WHERE { # # ?item p:P106 ?statement0. # # ?statement0 (ps:P106/(wdt:P279*)) wd:Q2083925. # # } # LIMIT 100000 # } #}""" # naturalist #query = """SELECT DISTINCT ?item ?itemLabel WHERE { # SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE]". } # { # SELECT DISTINCT ?item ?property ?propertyLabel WHERE { # # ?item p:P106 ?statement0. # # ?statement0 (ps:P106/(wdt:P279*)) wd:Q18805. # # } # LIMIT 100000 # } #}""" # biologist query = """SELECT DISTINCT ?item ?itemLabel WHERE { SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE]". } { SELECT DISTINCT ?item ?property ?propertyLabel WHERE { ?item p:P106 ?statement0. ?statement0 (ps:P106/(wdt:P279*)) wd:Q864503. 
} LIMIT %s } }""" % (limit) def get_results(endpoint_url, query): user_agent = "get-botcol-props Python/%s.%s" % (sys.version_info[0], sys.version_info[1]) # TODO adjust user agent; see https://w.wiki/CX6 sparql = SPARQLWrapper(endpoint_url, agent=user_agent) sparql.setQuery(query) sparql.setReturnFormat(JSON) return sparql.query().convert() results = get_results(endpoint_url, query) #for result in results["results"]["bindings"]: # print(result) # + [markdown] id="Yg19skEWc8Ft" # Number of entities found: # + colab={"base_uri": "https://localhost:8080/"} id="WKxF9bha7wqy" outputId="a36a1b53-c506-40a9-f377-cd2a60b8d43f" n_matches = len(results['results']['bindings']) n_matches # + [markdown] id="PwGCqcYmUFZo" # Let's use the wbgetentities api to get property information, 50 properties and all statements at a time (so one request per 50 properties) # + colab={"base_uri": "https://localhost:8080/"} id="uecsMvTP8x2V" outputId="8aa3b616-c33c-4253-f0f3-9846ca157274" def grouper(n,iterable): iterable = iter(iterable) return iter(lambda:list(it.islice(iterable,n)), []) q_nums = [] for result in results['results']['bindings']: q_nums.append(result['itemLabel']['value']) out = [] for q_num_group in grouper(50,q_nums): #q_num = result['itemLabel']['value'] #reply = requests.get(f"https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&ids={q_num}") #json.loads(reply.json()) reply = requests.get(f"https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&ids={'|'.join(q_num_group)}") out.append(re.findall('(?<=property\":\")P[0-9]+',reply.text)) c = Counter([item for sublist in out for item in sublist]) n_prop = len(set([item for sublist in out for item in sublist])) #c.most_common(n_prop) c.most_common(10) # + id="eDi0tmI8UYJQ" def get_datatype_json(json): return json["datatype"] def detect_reference_prop(json): return str(json['claims']['P31']).find('Q18608359') > 0 def get_label_json(json): return json['labels']['en']['value'] df = pd.DataFrame(c.most_common(n_prop),columns=['Property','n']) out = {} for p_num_group in grouper(50,list(df.Property.values)): reply = requests.get(f"https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&ids={'|'.join(p_num_group)}") out.update(reply.json()['entities']) df['json'] = df['Property'].map(out) df['Label'] = df['json'].map(get_label_json) df['datatype'] = df['json'].map(get_datatype_json) df['reference_property'] = df['json'].map(detect_reference_prop) # + id="kg_PPkyaAv8v" if(remove_references): df = df[~df['reference_property']] # + id="3_enlenbmTsZ" # %load_ext google.colab.data_table # + colab={"base_uri": "https://localhost:8080/", "height": 632} id="XMOqSvnFmPG7" outputId="da733045-dab3-41c1-b4f0-3db546f491b6" df.drop('json', axis = 1) # + [markdown] id="HxLZjMl8uPoN" # Some properties can occur more than once on an entity, especially claims regarding references; Because I think it's valuable to see what claims are added to references, I included these in the tally # + id="v5aTd-6Atpob" # %unload_ext google.colab.data_table # + [markdown] id="ztqg70Tp5qhE" # write output to csv and download # # + id="YrTxkRrr5tBb" #from os.path import exists #from google.colab import files #if(exists("/gdrive/MyDrive/biologist_properties_142901.csv")): # df = pd.read_csv("/gdrive/MyDrive/biologist_properties_142901.csv") #else: # df.to_csv("/gdrive/MyDrive/biologist_properties_142901.csv") # files.download("/gdrive/MyDrive/biologist_properties_142901.csv") # + colab={"base_uri": "https://localhost:8080/", "height": 542} 
id="MV9W27V12TcV" outputId="3bd6823a-8041-4b43-df8c-7d978bb8de84" fig = px.treemap(df, path=['datatype', 'Label'], values='n', title = "All claims for all Biologists and subclasses on Wikidata") fig.update_traces(root_color="lightgrey") fig.update_layout(margin = dict(t=50, l=25, r=25, b=25)) fig.show() # + id="PL01UoRpC5bB" fig.write_html("biologist_142k_no-ref.html") # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="oTS3l9SnDx7f" outputId="d834b1a4-37b5-41e1-f7ac-9fa091f3391b" # Limit ourselves to statements that occur a certain amount #df2 = df[df['n'] > 200] df2 = df.head(50) fig2 = px.treemap(df2, path=['datatype', 'Label'], values='n', title = "50 most common claims for all Biologists and subclasses on Wikidata") fig2.update_traces(root_color="lightgrey") fig2.update_layout(margin = dict(t=50, l=25, r=25, b=25)) fig2.show() # + id="ViC7Sq4qTJTT" fig2.write_html("biologist_142k_no-ref_top50.html") # + [markdown] id="4DvMHfXikaMX" # ## Just claims and identifiers seperatly # Split to just claims and identifiers, show only top 10 of each. Mention that the occurrence is a required field because that's how we built the query. # + colab={"base_uri": "https://localhost:8080/", "height": 424} id="BvESYYyMkgVb" outputId="aacb111c-9dfc-40b0-ca84-08e1330c8551" df_simple = df[(df['datatype']!="external-id") & (df['datatype']!='Wikibase-item')] df_simple # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="DWZklIn1XIGa" outputId="e70acf87-ecad-469f-ada0-a13cdd1cc4cd" no_ids = df[(df['datatype']!="external-id")] fig5 = px.treemap(no_ids, path=['Label'], values='n',title = "Claims made about Biologists and subclasses of biologists on wikidata") fig5.update_traces(root_color="lightgrey") fig5.update_layout(margin = dict(t=50, l=25, r=25, b=25)) fig5.show() # + id="su0peOlkYu1P" fig5.write_html("biologist_142k_no-ref_claims.html") # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="VQExqYKzXgJi" outputId="1d321a92-dd23-4f56-f478-87ec55f2b077" no_ids_top15 = df[(df['datatype']!="external-id")].head(15) fig6 = px.treemap(no_ids_top15, path=['Label'], values='n',title = "15 most common claims made about Biologists and subclasses of biologists on wikidata") fig6.update_traces(root_color="lightgrey") fig6.update_layout(margin = dict(t=50, l=25, r=25, b=25)) fig6.show() # + id="4KXszV2RYn-5" fig6.write_html("biologist_142k_no-ref_claims_top15.html") # + id="9SINmdqmZvkM" no_ids.drop(['json','datatype','reference_property'], axis = 1).to_csv("biologist_142k_claims_tally.csv") #no_ids.head(1000).to_csv("biologist_142k_claims_tally_top1000.csv") # + [markdown] id="oon34UM_VzcS" # ## Identifiers only # + colab={"base_uri": "https://localhost:8080/", "height": 424} id="gwVE5UEtRZuJ" outputId="71c21308-7045-4d4a-fc44-754bf29f5cce" # just identifiers only_identifiers = df[(df['datatype']=="external-id")].drop(['json','datatype','reference_property'], axis = 1) only_identifiers.to_csv("biologist_142k_identifiers_tally.csv") only_identifiers # + [markdown] id="D0LQrmeMDmEQ" # Place all the claims except external id's in the same category, show the most common ones (10) in a treemap. 
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="dxMiS3ihDrpB" outputId="201641ec-300b-415e-bccc-cfa9300342e0" fig3 = px.treemap(only_identifiers,path=['Label'],values = 'n',title = 'Most commonly used identifiers for Biologists and subclasses of biologists on Wikidata') fig3.update_traces(root_color="lightgrey") fig3.update_layout(margin = dict(t=50, l=25, r=25, b=25)) fig3.show() # + id="Tv57MrAdUPya" fig3.write_html("biologist_142k_no-ref_only_id.html") # + colab={"base_uri": "https://localhost:8080/", "height": 817} id="86lly8BtV4aZ" outputId="898bd20b-0122-43c8-cf30-a14a2bf25eca" identifiers_top_20 = df[(df['datatype']=="external-id")].drop(['json','datatype','reference_property'], axis = 1).head(20) fig4 = px.treemap(identifiers_top_20,path=['Label'],values = 'n',title = 'The 20 most common identifiers for Biologists and subclasses of biologists on Wikidata', width = 800, height = 800) fig4.update_traces(root_color="lightgrey") fig4.update_layout(margin = dict(t=50, l=25, r=25, b=25)) fig4.show() # + id="THPd73UQWxYY" fig4.write_html("biologist_142k_top20_ids.html") # + [markdown] id="w9_LZ9w5DslD" # ## Identifiers only # # A closer look at the external identifiers only, a treemap and a table with all of the identifiers sorted by how often they are used. Also one with just the top 20. # # # # # + id="dP0KYsHoD_CW"
get_wikidata_botanical_collector_properties.ipynb
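# The notebook above calls the wbgetentities endpoint once per batch of 50 IDs with a bare requests.get and no error handling, so one transient failure can abort a long run. A minimal sketch of a more robust variant using the standard requests/urllib3 retry machinery; the user-agent string, retry settings and timeout are illustrative assumptions, not taken from the original notebook:
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

WIKIDATA_API = "https://www.wikidata.org/w/api.php"

def make_session(user_agent="common-wikidata-props example script"):
    # Retry transient server and rate-limit errors with exponential backoff
    session = requests.Session()
    retry = Retry(total=5, backoff_factor=1.0,
                  status_forcelist=[429, 500, 502, 503, 504])
    session.mount("https://", HTTPAdapter(max_retries=retry))
    session.headers.update({"User-Agent": user_agent})
    return session

def get_entities(session, id_batch):
    # wbgetentities accepts up to 50 pipe-separated Q/P numbers per request
    params = {"action": "wbgetentities", "format": "json", "ids": "|".join(id_batch)}
    response = session.get(WIKIDATA_API, params=params, timeout=60)
    response.raise_for_status()
    return response.json().get("entities", {})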
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="v-4Xelj3ijPv" # # Name : <NAME> # # Student ID : 160041004 # # Course : CSE 4836 - Pattern Recognition Lab # # Lab : 03 # # Task : 02 # # Topic : Building a Convolutional Neural Network model # + id="zbrFZABgihCn" executionInfo={"status": "ok", "timestamp": 1608287391531, "user_tz": -360, "elapsed": 959, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} # %reload_ext autoreload # %autoreload 2 # %matplotlib inline bs = 16 # + id="NI7O5zj-iyB4" executionInfo={"status": "ok", "timestamp": 1608287392967, "user_tz": -360, "elapsed": 1050, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} import numpy as np import torch from torch import nn from torch import optim from torchvision import datasets, transforms from torch.utils.data import random_split, DataLoader import matplotlib.pyplot as plt # + colab={"base_uri": "https://localhost:8080/"} id="JIDaTsnk-KK2" executionInfo={"status": "ok", "timestamp": 1608287395936, "user_tz": -360, "elapsed": 2569, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="a331f5a4-b6ac-45d2-ad43-7c48b1d9981c" # Fetch data train_data = datasets.CIFAR10('data',train=True,download=True,transform=transforms.ToTensor()) test_data = datasets.CIFAR10('data',train=False,download=True,transform=transforms.ToTensor()) # + [markdown] id="XmpOfQnB_sfu" # # Data Visualization # + colab={"base_uri": "https://localhost:8080/", "height": 325} id="kkP4LmZ1_oit" executionInfo={"status": "ok", "timestamp": 1608287401703, "user_tz": -360, "elapsed": 5555, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="7d3e5dcf-a3f3-4b61-cda0-3f3f84b56e3b" # Train Data Distribution import seaborn as sns class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] labels = list() for i in range(len(train_data)) : labels.append(train_data[i][1]) labels = torch.tensor(labels) cnt = torch.bincount(labels).numpy() ax = sns.barplot(x = class_names, y = cnt) ax.set_xlabel('Class Names',color="white") ax.set_ylabel('Train Data Distribution',color="white") ax.set_xticklabels(ax.get_xticklabels(), rotation=90,color="white"); # + colab={"base_uri": "https://localhost:8080/", "height": 325} id="l_nyUIu_AF2s" executionInfo={"status": "ok", "timestamp": 1608287402888, "user_tz": -360, "elapsed": 4186, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="b7551b2e-eaae-4222-c206-af8b4b36f6c1" # Test Data Distribution labels = list() for i in range(len(test_data)) : labels.append(test_data[i][1]) labels = torch.tensor(labels) cnt = torch.bincount(labels).numpy() ax = sns.barplot(x = class_names, y = cnt) ax.set_xlabel('Class Names',color="white") ax.set_ylabel('Test Data Distribution',color="white") 
ax.set_xticklabels(ax.get_xticklabels(), rotation=90,color="white"); # + [markdown] id="6mTKYVREAenj" # # Train - Test Load # + id="rj__tioCAVOT" executionInfo={"status": "ok", "timestamp": 1608287402889, "user_tz": -360, "elapsed": 2716, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} train_loader = DataLoader(train_data, batch_size=bs) test_loader = DataLoader(test_data, batch_size=bs) # + [markdown] id="Lcgi90XuAu7c" # # Model Architecture [ Intial ] # + id="8GBJgBl-BUVD" '''' model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.Flatten()) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(10)) '''' # + id="sRsIUA_C_0aU" executionInfo={"status": "ok", "timestamp": 1608290451338, "user_tz": -360, "elapsed": 1930, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} class ConvNetClassifer1(nn.Module): def __init__(self): super().__init__() # Conv Layers self.conv1 = nn.Conv2d(3, 32, 3) self.conv2 = nn.Conv2d(32, 64, 3) self.conv3 = nn.Conv2d(64, 64, 3) # Pooling Layers self.pool = nn.MaxPool2d(2,2) # FC Layers self.l1 = nn.Linear(64*4*4,64) self.l2 = nn.Linear(64,10) def forward(self, x): c1 = nn.functional.relu(self.conv1(x)) c1 = self.pool(c1) c2 = nn.functional.relu(self.conv2(c1)) c2 = self.pool(c2) c3 = nn.functional.relu(self.conv3(c2)) c3 = c3.view(-1,64*4*4) # Flatten h1 = nn.functional.relu(self.l1(c3)) logits = self.l2(h1) return logits model1 = ConvNetClassifer1().cuda() # + [markdown] id="SOO-tmDSJPx3" # # Model Architecture [ Changed ] # + [markdown] id="JGV2m7sbX1Ou" # Removed a conv layer and added a linear layer # + id="A5TJSjwEJOmf" executionInfo={"status": "ok", "timestamp": 1608290440395, "user_tz": -360, "elapsed": 2344, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} class ConvNetClassifer2(nn.Module): def __init__(self): super().__init__() # Conv Layers self.conv1 = nn.Conv2d(3, 6, 5) # in_channel, out_channel, kernel_size self.conv2 = nn.Conv2d(6, 16, 5) # Pooling Layers self.pool = nn.MaxPool2d(2,2) # FC layers self.l1 = nn.Linear(16*5*5,120) self.l2 = nn.Linear(120,64) self.l3 = nn.Linear(64,10) def forward(self, x): c1 = nn.functional.relu(self.conv1(x)) c1 = self.pool(c1) c2 = nn.functional.relu(self.conv2(c1)) c2 = self.pool(c2) c2 = c2.view(-1, 16*5*5) # Flatten h1 = nn.functional.relu(self.l1(c2)) h2 = nn.functional.relu(self.l2(h1)) logits = self.l3(h2) return logits model2 = ConvNetClassifer2().cuda() # + [markdown] id="XbiMbhMOO26P" # # Performance on Initial Model # + id="WrpQ9ckiJGge" executionInfo={"status": "ok", "timestamp": 1608287920419, "user_tz": -360, "elapsed": 975, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} optimizer = optim.Adam(model1.parameters(), lr = 1e-3) loss = nn.CrossEntropyLoss() # + colab={"base_uri": "https://localhost:8080/"} id="aIDeI6PDP-wj" 
executionInfo={"status": "ok", "timestamp": 1608287923658, "user_tz": -360, "elapsed": 955, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="b48635b0-319d-4fae-e4d5-e7bf558ea59c" model1 # + id="ig1eUrnBO2XM" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608288013216, "user_tz": -360, "elapsed": 87396, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="467e8afd-b6b3-48eb-c6b0-7b4a306624b8" nb_epochs = 5 for epoch in range(nb_epochs): losses = list() model1.train() accuracies = list() for batch in train_loader: x,y = batch logit = model1(x.cuda()) # Computing Loss J = loss(logit,y.cuda()) # Cleaning the gradient model1.zero_grad() # Accumulating the partial derivatives of J wrt params J.backward() # Step in the opposite direction of the gradient optimizer.step() losses.append(J.item()) accuracies.append(y.eq(logit.detach().argmax(dim=1).cpu()).float().mean()) print(f'Epoch {epoch+1}',end=', ' ) print(f'train loss: {torch.tensor(losses).mean():.2f}',end=', ') print(f'train accuracy: {torch.tensor(accuracies).mean():.2f}') # Evaluation losses = list() accuracies = list() model.eval() for batch in test_loader: x,y = batch # Forward prop to calculate loss with torch.no_grad(): logit = model1(x.cuda()) # Computing Loss J = loss(logit,y.cuda()) losses.append(J.item()) accuracies.append(y.eq(logit.detach().argmax(dim=1).cpu()).float().mean()) print(f'Epoch {epoch+1}',end=', ' ) print(f'test loss: {torch.tensor(losses).mean():.2f}',end=', ') print(f'test accuracy: {torch.tensor(accuracies).mean():.2f}') # + [markdown] id="9zPkiIrrO6m8" # # Performance on Changed Model # + id="E_TLQmoHbb7D" executionInfo={"status": "ok", "timestamp": 1608288041557, "user_tz": -360, "elapsed": 1088, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} optimizer = optim.Adam(model2.parameters(), lr = 1e-3) loss = nn.CrossEntropyLoss() # + id="SbeEzzqoXGAt" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608288042753, "user_tz": -360, "elapsed": 900, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="687a163f-4f30-4fe8-d356-f2880e00cf20" model2 # + id="BbIqyD8qO9FE" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608288128357, "user_tz": -360, "elapsed": 85041, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="6ab385ec-8758-4bc3-8081-84b3e1c4b65e" nb_epochs = 5 for epoch in range(nb_epochs): losses = list() model2.train() accuracies = list() for batch in train_loader: x,y = batch logit = model2(x.cuda()) # Computing Loss J = loss(logit,y.cuda()) # Cleaning the gradient model2.zero_grad() # Accumulating the partial derivatives of J wrt params J.backward() # Step in the opposite direction of the gradient optimizer.step() losses.append(J.item()) accuracies.append(y.eq(logit.detach().argmax(dim=1).cpu()).float().mean()) print(f'Epoch {epoch+1}',end=', ' ) 
print(f'train loss: {torch.tensor(losses).mean():.2f}',end=', ') print(f'train accuracy: {torch.tensor(accuracies).mean():.2f}') # Evaluation losses = list() accuracies = list() model2.eval() for batch in test_loader: x,y = batch # Forward prop to calculate loss with torch.no_grad(): logit = model2(x.cuda()) # Computing Loss J = loss(logit,y.cuda()) losses.append(J.item()) accuracies.append(y.eq(logit.detach().argmax(dim=1).cpu()).float().mean()) print(f'Epoch {epoch+1}',end=', ' ) print(f'test loss: {torch.tensor(losses).mean():.2f}',end=', ') print(f'test accuracy: {torch.tensor(accuracies).mean():.2f}') # + [markdown] id="SfFA4iAEZn8M" # # Train your initial model for 10,30,50,100 epochs. Plot the train-accuracy, test-accuracy in a graph and make comments on the performance of your model. # + id="gCg_ZNWkcQcT" executionInfo={"status": "ok", "timestamp": 1608290469940, "user_tz": -360, "elapsed": 1785, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} optimizer = optim.Adam(model1.parameters(), lr = 1e-3) loss = nn.CrossEntropyLoss() # + colab={"base_uri": "https://localhost:8080/"} id="EY_K6GaHcFVE" executionInfo={"status": "ok", "timestamp": 1608292195317, "user_tz": -360, "elapsed": 1725030, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="3c9d8163-0f0e-4740-bd32-d30e090729bb" nb_epochs = 100 epoch_list=[10,30,50,100] train_acc=list() test_acc=list() for epoch in range(nb_epochs): losses = list() model1.train() accuracies = list() for batch in train_loader: x,y = batch # Forward propagation logit = model1(x.cuda()) # Computing Loss J = loss(logit,y.cuda()) # Cleaning the gradient model1.zero_grad() # Accumulating the partial derivatives of J wrt params J.backward() # Step in the opposite direction of the gradient optimizer.step() losses.append(J.item()) accuracies.append(y.eq(logit.detach().argmax(dim=1).cpu()).float().mean()) print(f'Epoch {epoch+1}',end=', ' ) print(f'train loss: {torch.tensor(losses).mean():.2f}',end=', ') print(f'train accuracy: {torch.tensor(accuracies).mean():.2f}') if epoch+1 in epoch_list : acc = torch.tensor(accuracies).mean() train_acc.append(acc.numpy()) # Evaluation losses = list() accuracies = list() model1.eval() for batch in test_loader: x,y = batch # Forward prop to calculate loss with torch.no_grad(): logit = model1(x.cuda()) # Computing Loss J = loss(logit,y.cuda()) losses.append(J.item()) accuracies.append(y.eq(logit.detach().argmax(dim=1).cpu()).float().mean()) print(f'Epoch {epoch+1}',end=', ' ) print(f'test loss: {torch.tensor(losses).mean():.2f}',end=', ') print(f'test accuracy: {torch.tensor(accuracies).mean():.2f}') if epoch+1 in epoch_list : acc = torch.tensor(accuracies).mean() test_acc.append(acc.numpy()) # + colab={"base_uri": "https://localhost:8080/"} id="-jGIZ9c2cWXl" executionInfo={"status": "ok", "timestamp": 1608292195320, "user_tz": -360, "elapsed": 1711784, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="9b32ea74-893e-4274-8423-536f577d69d7" train_acc,test_acc # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="vBSNsHchcbAE" executionInfo={"status": "ok", "timestamp": 1608292195322, "user_tz": -360, "elapsed": 1710697, "user": 
{"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="f567783d-09fd-4fb2-f82f-bd7d6c17814f" plt.plot(epoch_list, train_acc,label='train_acc') plt.plot(epoch_list, test_acc,label='test_acc') plt.title("Accuracy comparison on each epoch",color="white") plt.xlabel("Epochs",color="white") plt.ylabel("Accuracy",color="white") plt.legend(loc="upper left") plt.show() # + [markdown] id="PUuIcdaCciLE" # # Confusion matrix for result up to 10 epochs # + id="mRRH0kOSdQJr" executionInfo={"status": "ok", "timestamp": 1608292196535, "user_tz": -360, "elapsed": 1185, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} model1 = ConvNetClassifer1().cuda() optimizer = optim.Adam(model1.parameters(), lr = 1e-3) loss = nn.CrossEntropyLoss() # + colab={"base_uri": "https://localhost:8080/"} id="ylKkovrocbhT" executionInfo={"status": "ok", "timestamp": 1608292371767, "user_tz": -360, "elapsed": 176401, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="cb1369e0-0215-427e-cc89-7726d692ad88" nb_epochs = 10 for epoch in range(nb_epochs): losses = list() model1.train() accuracies = list() for batch in train_loader: x,y = batch logit = model1(x.cuda()) # Computing Loss J = loss(logit,y.cuda()) # Cleaning the gradient model1.zero_grad() # Accumulating the partial derivatives of J wrt params J.backward() # Step in the opposite direction of the gradient optimizer.step() losses.append(J.item()) accuracies.append(y.eq(logit.detach().argmax(dim=1).cpu()).float().mean()) print(f'Epoch {epoch+1}',end=', ' ) print(f'train loss: {torch.tensor(losses).mean():.2f}',end=', ') print(f'train accuracy: {torch.tensor(accuracies).mean():.2f}') # Evaluation losses = list() accuracies = list() model.eval() for batch in test_loader: x,y = batch # Forward prop to calculate loss with torch.no_grad(): logit = model1(x.cuda()) # Computing Loss J = loss(logit,y.cuda()) losses.append(J.item()) accuracies.append(y.eq(logit.detach().argmax(dim=1).cpu()).float().mean()) print(f'Epoch {epoch+1}',end=', ' ) print(f'test loss: {torch.tensor(losses).mean():.2f}',end=', ') print(f'test accuracy: {torch.tensor(accuracies).mean():.2f}') # + [markdown] id="kGurJfDcdK_k" # # Predictions # + id="GndLHgajdKb0" executionInfo={"status": "ok", "timestamp": 1608292514344, "user_tz": -360, "elapsed": 2921, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} model1.eval() actual = list() predicted = list() for batch in test_loader: x,y = batch # Forward prop to calculate loss with torch.no_grad(): logit = model1(x.cuda()) preds = logit.detach().argmax(dim=1).cpu() for p in preds : predicted.append(p) actuals = y for a in actuals : actual.append(a) actual = torch.tensor(actual) predicted = torch.tensor(predicted) # + [markdown] id="_iIdqzXvdRpr" # # Confusion Matrix # + colab={"base_uri": "https://localhost:8080/", "height": 342} id="raIIkYVPdUAt" executionInfo={"status": "ok", "timestamp": 1608292574012, "user_tz": -360, "elapsed": 1569, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", 
"userId": "01187658583479366356"}} outputId="4f1143da-a305-427b-9b5a-05fc6ba7ceb5" from sklearn.metrics import confusion_matrix import seaborn as sns mat = confusion_matrix(actual,predicted) heat = sns.heatmap(mat,square=True,annot=True,fmt='d',cbar=True,cmap=plt.cm.gist_heat) class_label = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] heat.set_xticklabels(class_label, rotation=90,color="white") heat.set_yticklabels(class_label, rotation=0,color="white") heat.set_xlabel('Predicted Label',color="white") heat.set_ylabel('True Label',color="white") # + colab={"base_uri": "https://localhost:8080/"} id="Gtx7e-8IdXPb" executionInfo={"status": "ok", "timestamp": 1608292578001, "user_tz": -360, "elapsed": 1087, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="d263c992-1c20-4702-e77e-24591a53a5c2" class_acc = list() for i in range(len(class_label)): pred = mat[i,i] act = np.sum(mat[i,:]) class_acc.append(pred/act) class_acc # + colab={"base_uri": "https://localhost:8080/", "height": 325} id="QD1lI4D_dY4v" executionInfo={"status": "ok", "timestamp": 1608292581117, "user_tz": -360, "elapsed": 1009, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="7a72c8eb-7251-4ffd-cabf-eff2ca7bee1b" ax = sns.barplot(x = class_names, y = class_acc) ax.set_xlabel('Type of Clothing',color="white") ax.set_ylabel('Accuracy',color="white") ax.set_xticklabels(ax.get_xticklabels(), rotation=90,color="white"); # + [markdown] id="F9b9TMMSdqEQ" # # Easiet and hardest one for the model to classify # + colab={"base_uri": "https://localhost:8080/"} id="_9k_xHuldbZT" executionInfo={"status": "ok", "timestamp": 1608292587634, "user_tz": -360, "elapsed": 985, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="a6f19de8-256c-4a4c-e7bc-ef8f9863beda" mxidx = np.argmax(class_acc) print(f'Easiest class for the model to classify : {class_label[mxidx]}') # + colab={"base_uri": "https://localhost:8080/"} id="_iNa91vWdc_8" executionInfo={"status": "ok", "timestamp": 1608292589789, "user_tz": -360, "elapsed": 1332, "user": {"displayName": "msi1427", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgZyvIsxXEp7PtiWLY2iJomz1wvULfSzviw2HTRaw=s64", "userId": "01187658583479366356"}} outputId="7b854eb0-4908-4900-bca6-43f0fac80236" mnidx = np.argmin(class_acc) print(f'Hardest class for the model to classify : {class_label[mnidx]}')
implementations/Basic CNN on CIFAR10.ipynb
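# The lab above compares two hand-written CNN architectures but never reports their sizes. A short sketch, not from the original notebook, for counting trainable parameters and selecting the device explicitly so the code also runs without a GPU; the two classifier names referenced in the commented usage are the ones defined in the notebook:
import torch

def count_parameters(model):
    # Total number of trainable parameters in a torch.nn.Module
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Usage with the classes defined above (assumed to be in scope):
# model1 = ConvNetClassifer1().to(device)
# model2 = ConvNetClassifer2().to(device)
# print(count_parameters(model1), count_parameters(model2))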
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Advanced Vision (시각 심화) # # - **Instructor**: <NAME> / <NAME> # - **Email**: [<EMAIL>](mailto:<EMAIL>) / [<EMAIL>](mailto:<EMAIL>) # # ## Neural Network Example # # In this example you will build a simple neural network using only [NumPy](https://www.numpy.org), the fundamental package for scientific computing with Python. The goals of this example are as follows: # # - Understand **neural networks** and how they work. # - Learn the basics of how to **write and use code** (*NumPy*). # # *If you are more familiar with PyTorch or TensorFlow (or Keras), you might wonder why we write the network from the ground up with NumPy instead of using a built-in framework. This process is essential for understanding how a neural network works, and once you understand it, writing the code is not too difficult.* # # This example is also written as an [IPython Notebook](https://ipython.org/notebook.html), an interactive computational environment in which you can run the code directly. # ### Environments # # In this assignment, we assume the following environment. # # [Python](https://www.python.org) is a programming language that lets you work quickly and integrate systems more effectively. It is widely used in many fields, including machine learning. # # [PyTorch](https://pytorch.org) is an open source deep learning platform that provides a seamless path from research to production. # # [TensorFlow](https://www.tensorflow.org) is an end-to-end open source platform for machine learning. It has a comprehensive, flexible ecosystem of tools, libraries and community resources that lets researchers push the state of the art in ML and developers easily build and deploy ML-powered applications. # # The [CUDA®](https://developer.nvidia.com/cuda-zone) Toolkit provides high-performance GPU-accelerated computation. In deep learning, a model takes an age to train without GPU acceleration ~~and even with the GPU, it still takes a lot of time~~. # # # - [Python3](https://www.python.org/downloads/) (recommend 3.6 or above) # - [PyTorch](https://pytorch.org) (recommend 1.0) # - [Tensorflow](https://tensorflow.org) (recommend 1.13.0 or above, but under 2.0; *there are huge differences between 2.0 and earlier versions*) # - [NumPy](http://www.numpy.org), the fundamental package for scientific computing with Python # # # - (Optional) [Anaconda](https://www.anaconda.com/distribution/#download-section), *a popular Python data science platform* # - (Optional) [Jupyter](https://jupyter.org/) (Notebook or Lab) # - (Optional) [CUDA](https://developer.nvidia.com/cuda-downloads) for GPU support # # # Python packages can be installed with `pip install [package name]` or, using **Anaconda**, with `conda install [package name]`. # # *If you have trouble installing anything, please contact the TA or <EMAIL>.* # # Code # ### Import packages # # NumPy, the basic scientific computing package, is imported in the customary way. from pathlib import Path import numpy as np # ## Load MNIST dataset # # TensorFlow provides the MNIST dataset as binary archive files ([link](https://chromium.googlesource.com/external/github.com/tensorflow/tensorflow/+/r0.7/tensorflow/g3doc/tutorials/mnist/download/index.md)). # In this example, the data files have already been downloaded to the local data directory, so we just load the dataset from disk.
import idx2numpy data_dir = Path('../data/MNIST/raw') train_images = idx2numpy.convert_from_file(str(data_dir.joinpath('train-images-idx3-ubyte'))) train_labels = idx2numpy.convert_from_file(str(data_dir.joinpath('train-labels-idx1-ubyte'))) test_images = idx2numpy.convert_from_file(str(data_dir.joinpath('t10k-images-idx3-ubyte'))) test_labels = idx2numpy.convert_from_file(str(data_dir.joinpath('t10k-labels-idx1-ubyte'))) # ## (Optional) Visualize # + from PIL import Image from IPython.display import display def show(ary): display(Image.fromarray(ary)) # - for image, label, _ in zip(train_images, train_labels, range(10)): print(label) show(image.reshape((28, 28))) # ## Preprocessing # # The data must be preprocessed before training the network. If you inspect the first image in the training set, you will see that the pixel values fall in the range of 0 to 255. We scale these values to a range of 0 to 1 before feeding to the neural network model. For this, we divide the values by 255. It's important that the training set and the testing set are preprocessed in the same way: # + train_images = np.expand_dims(train_images, -1) test_images = np.expand_dims(test_images, -1) train_images = train_images / 255. test_images = test_images / 255. # - num_classes = 10 train_labels = np.eye(num_classes)[train_labels] test_labels = np.eye(num_classes)[test_labels] # ## Network # # This is a simple two dense(fully connected) layer network. The code is quite easy. # # So, whole network architecture as follow: # # - Dense # - Sigmoid # - Dense # - Sigmoid # - Dense # - Sigmoid # ### Dense Layer # + class Layer: pass class Dense(Layer): def __init__(self, input_units, output_units): self.weights = np.random.randn(output_units, input_units) * .01 self.biases = np.random.randn(output_units, 1) * .1 def forward(self, inputs): self.inputs = inputs return np.dot(self.weights, inputs) + self.biases def backward(self, grads): size = np.size(grads, -1) self.grad_weights = np.dot(grads, self.inputs.T) / size self.grad_biases = np.sum(grads, axis=1, keepdims=True) / size return np.dot(self.weights.T, grads) def update(self, lr: float = .01): # Here we perform a stochastic gradient descent step. self.weights = self.weights - lr * self.grad_weights self.biases = self.biases - lr * self.grad_biases # - # ### Activate Function: Sigmoid class Sigmoid(Layer): def forward(self, inputs): self.inputs = inputs return 1. / (1. + np.exp(-inputs)) def backward(self, grads): r = self.forward(self.inputs) return grads * r * (1. - r) def update(self, lr): pass # ### Training # + from typing import List from functools import reduce def fit(networks: List[Layer], X, y, train=True, epsilon=1e-7): X = np.reshape(X, (X.shape[0], -1)) # Forward preds = reduce(lambda inputs, layer: layer.forward(inputs), [X.T, *networks]).T # Compute Loss loss = -np.sum(y * np.log(np.clip(preds, epsilon, 1. 
- epsilon) + epsilon)) / np.size(preds, 0) if train: # Backward grads = -(np.divide(y, preds) - np.divide(1 - y, 1 - preds)) grads = reduce(lambda grads, layer: layer.backward(grads), [grads.T, *reversed(networks)]) # Update for layer in networks: layer.update(lr) return loss.mean(), preds # - # ## Prepare def get_batch(datasets, batch): image, label = None, None images, labels = datasets for b, (i, l) in enumerate(zip(*datasets)): if not (b % batch): if image is not None and label is not None: yield image, label image = np.empty((batch, 28, 28, 1), dtype=np.float32) label = np.empty((batch, 10), dtype=np.uint8) image[b % batch] = i label[b % batch] = l np.random.seed(42) lr = .1 batch = 128 epochs = 10 network = [ Dense(28*28, 10), Sigmoid(), ] # ## Train for epoch in range(epochs): # Train scope train_loss, test_loss, test_acc = 0, 0, 0 for images, labels in get_batch((train_images, train_labels), batch): loss, _ = fit(network, images, labels) train_loss += loss for images, labels in get_batch((test_images, test_labels), batch): loss, preds = fit(network, images, labels, train=False) test_loss += loss test_acc += (preds.argmax(axis=-1) == labels.argmax(axis=-1)).mean() print(f'Epoch: {epoch}') print(f'\tTrain Loss: {train_loss / (len(train_images) / batch)}') print(f'\tTest Loss: {test_loss / (len(test_images) / batch)}') print(f'\tTest Acc: {test_acc / (len(test_images) / batch)}') # ## Test for image, label, _ in zip(test_images, test_labels, range(10)): show((image[:, :, 0] * 255.).astype(np.uint8)) prediction = reduce(lambda inputs, layer: layer.forward(inputs), [np.reshape(np.array(image), -1)[None, :].T, *network]) print (f'Label: {label.argmax(-1)}, Prediction: {prediction.argmax()}') # ## Q1. More complex model # # We can create more complex models by adding layers to the network. # # What makes the next model(*two-dense-layer*) different from the previous model with one layer? network = [ Dense(28*28, 128), Sigmoid(), Dense(128, 10), Sigmoid(), ] # ## Q2. What is activation function? # # What is the role of the Sigmoid in the middle? If not, what would happen? # # What other functions will replace Sigmoid? network = [ Dense(28*28, 128), Dense(128, 10), Sigmoid(), ] # ## Q3. And then more, more, more ... layers? # # What will happen if you stack a lot of layers? network = [ Dense(28*28, 600), Sigmoid(), Dense(600, 500), Sigmoid(), Dense(500, 400), Sigmoid(), Dense(400, 300), Sigmoid(), Dense(300, 200), Sigmoid(), Dense(200, 100), Sigmoid(), Dense(100, 10), Sigmoid(), ] # ## Q4. Parameters # # Check the size of the model parameters. def sizeof(model): return sum(getattr(layer, attr, np.empty(0)).size for attr in ['weights', 'bias'] for layer in model)
2-Neural-Network-Scratch/nn.ipynb
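# Q2 in the notebook above asks what could replace Sigmoid between the Dense layers. A minimal sketch of a ReLU activation that follows the same Layer interface (forward/backward/update) used in that notebook; it assumes the notebook's Layer, Dense and Sigmoid classes are in scope, and the output Sigmoid should be kept because the loss in fit expects predictions in (0, 1):
import numpy as np

class ReLU(Layer):  # Layer is the (empty) base class defined in the notebook above
    def forward(self, inputs):
        self.inputs = inputs
        return np.maximum(0., inputs)

    def backward(self, grads):
        # The gradient only flows where the input was positive
        return grads * (self.inputs > 0.)

    def update(self, lr):
        pass  # no trainable parameters

# Example wiring: network = [Dense(28*28, 128), ReLU(), Dense(128, 10), Sigmoid()]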
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import nonlinearity as nl import feed_forward as f # %load_ext autoreload # %autoreload 2 # %matplotlib inline # + import numpy as np import pandas as pd import matplotlib from matplotlib import pyplot as plt from sklearn.preprocessing import OneHotEncoder from sklearn.cross_validation import train_test_split import time # - from sklearn.datasets import fetch_mldata mnist = fetch_mldata('MNIST original') print(np.unique(mnist.target), mnist.target.shape) print(mnist.data.shape) mnist.target.shape = (mnist.target.shape[0], 1) print(np.unique(mnist.target), mnist.target.shape) print(np.unique(mnist.data/255), mnist.data.shape) enc = OneHotEncoder(n_values=10, sparse=False) target = enc.fit_transform(mnist.target) idx = np.random.choice(len(target), 10) print(target[idx]) print(mnist.target[idx]) train_x, test_x, train_y, test_y = train_test_split(mnist.data/255, target, test_size=0.15, random_state=0) # + idx = np.random.choice(len(test_x), 10) print(test_x[idx]) print(test_y[idx]) print() idx = np.random.choice(len(train_x), 10) print(train_x[idx]) print(train_y[idx]) print() print(len(train_x), len(train_y)) print(len(test_x), len(test_y)) # - nn = f.FeedForwardNet(layers=[784,256,10], transforms=[nl.Sigmoid, nl.Softmax]) time = time.time() nn.fit(train_x, train_y, lr=0.1, max_iter=25, mini_batch_size=10,\ save_accuracy=True, save_costs=True, report_every=1) # test_x=test_x, test_y=test_y) print('Training ') print(nn.costs) pd.Series(nn.costs).plot() def predict_and_plot_image_index(idx): pred = pd.Series(nn.predict(mnist.data[idx])[0]) pred.plot(kind='bar') # show image fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.matshow(mnist.data[idx].reshape(28,28), cmap = matplotlib.cm.binary) plt.xticks(np.array([])) plt.yticks(np.array([])) plt.title('Index {}, Predicted {}, Actually {}'.format(idx, pred.argmax(), mnist.target[idx])) plt.show() import random for i in range(10): predict_and_plot_image_index(random.choice(range(len(mnist.data)))) # + yhat = nn.predict(mnist.data).max(axis=1) worst = yhat.argsort()[:30] print(nn.predict(mnist.data[worst]).max(axis=1)) for i in range(len(worst)): predict_and_plot_image_index(worst[i]) # - # find accuracy on test set nn.evaluate(test_x, test_y) # + # try throwing out every other of pixels even_x = train_x.T[::2].T less_pixels = f.FeedForwardNet(layers=[even_x.shape[1],256,10], transforms=[nl.Sigmoid, nl.Softmax]) less_pixels.fit(even_x, train_y, lr=0.1, max_iter=25, mini_batch_size=10,\ save_accuracy=True, save_costs=True, report_every=1) # test on dataset less_pixels.evaluate(test_x.T[::2].T, test_y) # - print(less_pixels.costs) pd.Series(less_pixels.costs).plot() less_pixels.evaluate(test_x.T[::2].T, test_y) # + # try throwing out top half of pixels half_x = train_x.T[:len(train_x[0])//2].T less_pixels = f.FeedForwardNet(layers=[half_x.shape[1],256,10], transforms=[nl.Sigmoid, nl.Softmax]) less_pixels.fit(half_x, train_y, lr=0.1, max_iter=25, mini_batch_size=10,\ save_accuracy=True, save_costs=True, report_every=1) print(less_pixels.costs) pd.Series(less_pixels.costs).plot() less_pixels.evaluate(test_x.T[:len(test_x[0])//2].T, test_y) # - prev_costs = less_pixels.costs # + less_pixels.fit(half_x, train_y, lr=0.1, max_iter=10, mini_batch_size=10,\ save_accuracy=True, save_costs=True, report_every=1) print(less_pixels.costs) 
pd.Series(np.concatenate((prev_costs, less_pixels.costs))).plot() less_pixels.evaluate(test_x.T[:len(test_x[0])//2].T, test_y) # + # try a semi-deep ReLu network relu = f.FeedForwardNet(layers=[784, 256, 150, 75, 10], transforms=[nl.ReLu, nl.ReLu, nl.ReLu, nl.Softmax]) relu.fit(train_x, train_y, lr=0.01, max_iter=25, mini_batch_size=10,\ save_accuracy=True, save_costs=True, report_every=1) pd.Series(relu.costs).plot() relu.evaluate(test_x, test_y) # -
mnist_example.ipynb
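# fetch_mldata and sklearn.cross_validation, used in the notebook above, were removed from recent scikit-learn releases. A sketch of the roughly equivalent setup with the current API (fetch_openml returns string labels, hence the integer cast); the dataset name and version details are assumptions based on current scikit-learn, not part of the original notebook:
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder

# 'mnist_784' on OpenML is the usual stand-in for the old 'MNIST original'
mnist = fetch_openml('mnist_784', version=1, as_frame=False)
data = mnist.data / 255.0
labels = mnist.target.astype(int).reshape(-1, 1)

enc = OneHotEncoder(categories='auto')
target = enc.fit_transform(labels).toarray()

train_x, test_x, train_y, test_y = train_test_split(
    data, target, test_size=0.15, random_state=0)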
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from holodeck import packagemanager # + pycharm={"name": "#%%\n"} packagemanager.installed_packages() # + pycharm={"name": "#%%\n"} packagemanager.available_packages() # + pycharm={"name": "#%%\n"} packagemanager.install('DefaultWorlds') # + pycharm={"name": "#%%\n"} packagemanager.package_info('DefaultWorlds') # + pycharm={"name": "#%%\n"} packagemanager.world_info('EuropeanForest') # + pycharm={"name": "#%%\n"} packagemanager.scenario_info('EuropeanForest-MaxDistance') # + pycharm={"name": "#%%\n"} packagemanager.scenario_info('MazeWorld-FinishMazeSphere') # + pycharm={"name": "#%%\n"}
holodeck_package_management.ipynb
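# The notebook above only installs and inspects Holodeck packages. Purely as a hedged sketch of what usually comes next once DefaultWorlds is installed; the scenario name is the one queried above, but the command format, return tuple and loop length are illustrative assumptions rather than documented behaviour:
import holodeck

# 'MazeWorld-FinishMazeSphere' is one of the scenarios inspected by the package manager above
env = holodeck.make('MazeWorld-FinishMazeSphere')
env.reset()
for _ in range(100):
    # The command depends on the agent defined by the scenario;
    # a constant placeholder command is used here purely for illustration.
    state, reward, terminal, info = env.step(0)
    if terminal:
        break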
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 第1章 基础:逻辑与证明 # ## 1.1 命题逻辑 # ### 1.1.1 引言 # `逻辑规则`给出数学语句的准确含义,这些规则用来区分有效和无效的数学论证。 # # ### 1.1.2 命题 # `命题`是一个陈述句(即陈述事实的语句),它或真或假,但不能既真又假。 # # 我们用字母来表示`命题变元`,它是代表命题的变量。习惯上用字母p,q,r,s,...表示命题。如果一个命题是真命题,它的`真值`为真,用T表示;如果它是假命题,其真值为假,用F表示。 # # 涉及命题的逻辑领域称为`命题演算`或`命题逻辑`。 # # 许多数学陈述都是由一个或多个命题组合而来。称为`复合命题`的新命题是由已知命题用逻辑运算符组合而来。 # # **定义1**:令p为一命题,则p的否定记作$\lnot$p。命题$\lnot$p读作『非p』。p的否定($\lnot$p)的真值和p的真值相反。 # # 表1 命题之否定的真值表 # # p | $\lnot$p # ---| -- # T | F # F | T # # 命题的否定也可以看作`否定运算符`作用在命题上的结构。否定运算符从一个已知命题构造出一个新命题。这些逻辑运算符也称作`联接词`。 # # **定义2**:令p和q为命题。p、q的合取即命题『p并且q』,记作$p \land q$。当p和q都是真时,$p \land q$命题为真,否则为假。 # # 表2 俩命题合取的真值表 # # p | q | $p \land q$ # ---|----|-------- # T | T | T # T | F | F # F | T | F # F | F | F # # **定义3**:令p和q为命题。p和q的析取即命题『p或q』,记作$p \lor q$。当p和q均为假时,合取命题$p \lor q$为假,否则为真。 # # 表3 俩命题析取的真值表 # # p | q | $p \lor q$ # ---|----|-------- # T | T | T # T | F | T # F | T | T # F | F | F # # **定义4**:令p和q为命题。p和q的异或(记作$p \oplus q$)是这样一个命题:当p和q中恰好只有一个为真时命题为真,否则为假。 # # 表4 俩命题异或的真值表 # # p | q | $p \oplus q$ # ---|----|-------- # T | T | F # T | F | T # F | T | T # F | F | F # # ### 1.1.3 条件语句 # **定义5**:令p和q为命题。条件语句p->q是命题『如果p,则q』。当p为真而q为假时,条件语句p->q为假,否则为真。在条件语句p->q中,p称为假设(前件、前提),q称为结论(后件)。 # # 表5 条件命题p->q的真值表 # # p | q | p->q | # -- | -- | -- | # T | T | T # T | F | F # F | T | T # F | F | T # # **逆命题、逆否命题与反命题** 由条件语句p->q可以构成一些新的条件语句。命题q->p称为 p->q 的`逆命题`,而p->q的`逆否命题`是命题$\lnot q \to \lnot p$ 。命题$\lnot p \to \lnot q$称为$p \to q$的`反命题`。 # # 当两个复合命题总是具有相同真值时,我们称它们是`等价的`。因此一个条件语句与它的逆否命题是等价的。条件语句的逆与反也是等价的。 # # **定义6(双条件语句)**:令p和q为命题。双条件语句p<->q是命题『p当且仅当q』。当p和q有同样的真值时,双条件语句为真,否则为假。双条件语句也称为双向蕴含。 # # 表6 双条件语句p<->q的真值表 # # p | q | p<->q | # -- | -- | -- | # T | T | T # T | F | F # F | T | F # F | F | T # # 可以用缩写符号『iff』代替『当且仅当』(if and only if)。注意,p<->q与$(p \to q) \land (q \to p)$ 有完全相同的真值。 # # ### 1.1.4 复合命题的真值表 # 通过上述四个逻辑联接词——合取、析取、条件、双条件可以构造含有一些命题变元的结构复杂的复合命题。如 :构造复合命题$(p \lor \lnot q \to (p \land q))$ # # ### 1.1.5 逻辑运算符的优先级 # 表8 逻辑运算符的优先级 # # 运算符 | 优先级 # -- | -- # $\lnot$ | 1 # $\land$ | 2 # $\lor$ | 3 # $\to$ | 4 # $\leftrightarrow$ | 5 # # ### 1.1.6 逻辑运算和位运算 # 习惯上,我们用1表示真,用0表示假。即,1表示T(真),0表示F(假)。如果一个变量的值或为真或为假,则此变量就称为`布尔变量`。一个布尔变量可以用一位表示。 # # 我们还会用符号OR、AND和XOR表示运算符$\lor$,$\land$,$\oplus$。 # # 信息一般用位串(即由0和1构成的序列)表示。这时,对位串的运算即可用来处理信息。 # # **定义7**:位串是0位或多位的序列。位串的长度就是它所含位的数目。 # ## 1.2 命题逻辑的应用 # ### 1.2.2 语句翻译 # 用命题变元表示语句中的每个成分,并找出它们之间合适的逻辑联接词。 # # 例1:『你可以在校园访问因特网,仅当你主修计算机科学或者你不是新生。』 # # - a:你可以在校园访问因特网 # - c:你主修计算机科学 # - f:你是新生 # # $a \to (c \lor \lnot f)$ # # 例2:『如果你身高不足4英尺,那么你不能乘坐过山车,除非你已满16周岁。』 # # - q:你能乘坐过山车 # - r:你身高不足4英尺 # - s:你已满16周岁 # # $(r \land \lnot s) \to \lnot q$ # ## 1.3 命题等价式 # ### 1.3.1 引言 # **定义1**:一个真值永远是真的复合命题(无论其中出现的命题变元的真值是什么),称为`永真式`(tautology),也称为`重言式`。一个真值永远为假的复合命题称为`矛盾式`(contradiction)。既不是永真式又不是矛盾式的复合命题称为可能式(contingency)。 # # 表1 永真式和矛盾式的例子 # # p | $\lnot p$ | $p \lor \lnot p$ | $p \land \lnot p$ # -- | -- | -- | -- # T | F | T | F # F | T | T | F # # ### 1.3.2 逻辑等价式 # 在所有可能得情况下都有相同真值的两个复合命题称为`逻辑等价`的。 # # **定义2**:如果p<->q是永真式,则复合命题p和q称为是逻辑等价的。用记号$p \equiv q$表示。 # # 注:符号$\equiv$不是逻辑联结词,$p \equiv q$不是一个复合命题,而是代表『p<->q是永真式』这一语句。 # # 判定两个复合命题是否等价的方法之一是使用真值表。 # # 表2 德摩根律(逻辑等价例子) # # - $\lnot (p \land q) \equiv \lnot p \lor \lnot q$ # - $\lnot (p \lor q) \equiv \lnot p \land \lnot q$ # # ![1](1-1.png) # # ### 1.3.4 构造新的逻辑等价式 # 
表6中的逻辑等价式以及已建立起来的其他(如表7和表8所示的那些)等价式,可以用于构造其他等价式。能这样做的原因是**复合命题中的一个命题可以用与它逻辑等价的复合命题替换而不改变原复合命题的真值**。 # # ### 1.3.5 命题的可满足性 # 一个复合命题称为是`可满足的`,如果存在一个对其变元的真值赋值使其为真。当不存在这样的赋值时,即当复合命题对所有变元的真值赋值都是假的,则复合命题是`不可满足的`。 # # 当我们找到一个特定的使得复合命题为真的真值赋值时,就证明了它是可满足的。这样的一个赋值称为这个特定的可满足性问题的一个`解`。可是,要证明一个复合命题是不可满足的,我们需要证明每一组变元的真值赋值都使其为假。尽管我们总是可以用真值表来确定一个复合命题是否是可满足的,但通常有更有效的方法,如例9所示。 # # ![2](1-2.png) # # ![3](1-3.png) # # ## 1.4 谓词和量词 # ### 1.4.2 谓词 # 『x大于3』:第一部分即变量x是语句的主语。第二部分(`谓词`『大于3』)表明语句的主语具有一个性质。我们可以用P(x)表示语句『x大于3』,其中P表示谓词『大于3』,而x是变量。语句P(x)也可以说成是命题函数P在x的值。一旦给变量x赋一个值,语句P(x)就成为命题并具有真值。 # # 一般地,涉及n个变量$x_1,x_2,...,x_n$的语句可以表示成$P(x_1,x_2,...,x_n)$形式为$P(x_1,x_2,...,x_n)$的语句是`命题函数`P在n元祖$(x_1,x_2,...,x_n)$的值,P也称为`n位谓词`或`n元谓词`。 # # `前置条件`和`后置条件`:描述合法输入的语句叫做前置条件,而程序运行的输出应该满足的条件称为后置条件。 # # ### 1.4.3 量词 # `量化`表示在何种程度上谓词对于一定范围的个体成立。这里讨论两类量化:`全称量化`,它告诉我们一个谓词在所考虑范围内对每一个体都为真;`存在量化`,它告诉我们一个谓词对所考虑范围内的一个或多个个体为真。处理谓词和量词的逻辑领域称为`谓词演算`。 # # **定义1**:P(x)的`全称量化`是语句『P(x)对x在其论域的所有值为真。』符号$\forall xP(x)$表示P(x)的全称量化,其中$\forall$称为`全称量词`。命题$\forall xP(x)$读作『对所有x,P(x)』或『对每个x,P(x)』。一个使P(x)为假的个体称为$\forall xP(x)$的`反例`。 # # **定义2**:P(x)的存在量化是命题『论域中存在一个个体x满足P(x)。』我们用符号$\exists xP(x)$表示P(x)的存在量化,其中$\exists$称为存在量词。 # # ![1-4](1-4.png) # # ### 1.4.4 约束论域的量词 # 在要限定一个量词的论域时经常采用简写的表示法。在这个表示法里,变量必须满足的条件直接放在量词的后面。 # # - 语句$\forall x<0(x^2>0)$表示对于每一个满足x<0的实数x有$x^2>0$。 # - 语句$\exists z>0(z^2=2)$表示存在一个满足z>0的实数z有$z^2=2$。 # # 注意, # # - 全称量化的约束和一个条件语句的全称量化等价。$\forall x<0(x^2>0)$与$\forall x(x<0 \to x^2>0)$等价。 # - 存在量化的约束和一个合取式的存在量化等价。$\exists z>0(z^2=2)$与$\exists z(z>0 \land z^2=2)$等价。 # # ### 1.4.5 量词的优先级 # 量词$\forall$和$\exists$比命题演算中的所有逻辑运算符都具有更高的优先级。 # # ### 1.4.6 变量绑定 # 当量词作用于变量x时,我们说此变量的这次出现为`约束的`。一个变量的出现被称为是`自由的`,如果没有被量词约束或设置为等于某一特定值。命题函数中的所有变量出现必须是约束的或者被设置为等于某个值得,才能把它转变为一个命题。这可以通过采用一组全称量词、存在量词和赋值来实现。 # # 逻辑表达式中一个量词作用到的部分称为这个量词的作用域。 # # ### 1.4.7 涉及量词的逻辑等价式 # **定义3**:涉及谓词和量词的语句是逻辑等价的当且仅当无论用什么谓词代入这些语句,也无论为这些命题函数里的变量指定什么论域,它们都有相同的真值。我们用$S \equiv T$表示涉及谓词和量词的两个语句S和T是逻辑等价的。 # # ### 1.4.8 量化表达式的否定 # $\lnot \forall x P(x) \equiv \exists x \lnot P(x)$ # # $\lnot \exists x Q(x) \equiv \forall x \lnot Q(x)$ # # 量词否定的规则称为量词的德摩根律。 # # ![1-5](1-5.png) # # ### 1.4.9 语句到逻辑表达式的翻译 # 例24 用谓词和量词表达语句“这个班上的某个学生去过墨西哥”和“这个班上的每个学生或去过加拿大,或去过墨西哥。” # # 解 引入变量x,“这个班上的某个学生去过墨西哥” 即 “在这个班上有个学生x,x去过墨西哥。”引入谓词M(x)表示“x去过墨西哥”, # # - 如果x的论域是这个班上的学生,则第一句可翻译为:$\exists x M(x)$。 # - 如果x的论域是所有人,则引入谓词S(x)表示“x是这个班上的一个学生”,则第一句可翻译为:$\exists x(S(x) \land M(x))$ # # 令C(先)表示“x去过加拿大”,类似地,第二句可以表示成: # # - $\forall x(C(x) \lor M(x))$ # - $\forall x(S(x) \to (C(x) \lor M(x)))$ # # 还可以使用两个变量谓词V(x,y)表示“x去过y国家”。这样,V(x,墨西哥)和V(x,加拿大)具有与M(x)和C(x)相同的意思。如果我们要处理的语句涉及人们去过不同的国家,我们可以倾向于使用这种双变量的方法。否则为了简单起见,我们可以用一个变量的谓词C(x)和M(x)。 # # ### 1.4.10 系统规范说明中量词的使用 # 例25 用谓词和量词表达系统规范说明“每封大于1MB的邮件会被压缩”和“如果一个用户处于活动状态,那么至少有一条网络链路是有效的”。 # # 解 令S(m,y)表示“邮件m大于yMB”,其中变量m的论域是所有邮件,变量y是一个正实数;令C(m)表示“邮件m会被压缩”。那么规范说明“每封大于1MB的邮件会被压缩”可以表达为$\forall m(S(m,1) \to C(m))$。 # # 令A(u)表示“用户u处于活动状态”,其中变量u的论域是所有用户;令S(n,x)表示“网络链路n处于x状态”,其中n的论域是所有网络链路,x的论域是网络链路所有可能得状态。那么规范说明“如果用户处于活动状态,那么至少有一个网络链路有效”可以表达为$\exists u A(u) \to \exists n S(n,有效)$ # # ### 1.4.12 逻辑程序设计 # 有一类重要的程序设计语言使用谓词逻辑的规则进行推理。Prolog(Programming in Logic)就是其一。Prolog程序包括一组声明,其中包括两类语句:`Prolog事实`和`Prolog规则`。Prolog事实通过指定那些满足谓词的元素来定义谓词。Prolog规则使用Prolog事实定义好的那些谓词来定义新的谓词。 # # 例28 考虑一个Prolog程序,它给出的事实是每门课程的教室和学生注册的课程。程序使用这些事实来回答有关给特定学生上课的教授的查询。这样的程序可使用谓词instructor(p,c)和enrolled(s,c)分别表示教授p是讲授课程c的老师及学生s注册课程c。例如,此程序中的Prolog事实可能包含: # # ``` # instructor(chan,math273) # instructor(patel,ee222) # instructor(grossman,cs301) # enrolled(kevin,math273) # enrolled(juana,ee222) # enrolled(juana,cs301) # 
enrolled(kiko,math273) # enrolled(kiko,cs301) # ``` # # 一个新的谓词teaches(p,s)表示教授p教学生s没可以用Prolog规则来定义: # # ``` # eaches(P,S): - instructor(P,C),enrolled(S,C) # ``` # # 查询: # # ``` # # ? enrolled(kevin,math273) # yes # # # ? enrolled(X,math273) # kevin # kiko # # # ? teaches(X, juana) # patel # grossman # ``` # ## 1.5 嵌套量词 # ### 1.5.1 引言 # 嵌套量词:一个量词出现在另一个量词的作用域内,如$\forall x \exists y(x+y=0)$ # # 量词范围内的一切都可以认为是一个命题,上面表达式与$\forall x Q(x)$是一样的,其中Q(x)表示$\exists y P(x,y)$,而P(x,y)表示x+y=0。 # # ### 1.5.2 理解涉及嵌套量词的语句 # **将量化当做循环**: # - $\forall x \forall y P(x,y)$:先对x的所有值做循环,而对x的每个值再对y的所有值循环。 # - $\forall x \exists y P(x,y)$:先对x的所有值做循环,而对x的每个值对y的值循环直到找到一个y使P(x,y)为真。 # - $\exists x \forall y P(x,y)$:对x的值循环直到找到某个x,就这个x对y的所有值循环时P(x,y)总为真。 # - $\exists x \exists y P(x,y)$:对x的值循环,循环时对x的每个值都对y的值循环,直到找到x的一个值和y的一个值使P(x,y)为真。 # # ### 1.5.3 量词的顺序 # ![1-6](1-6.png) # # ### 1.5.7 嵌套量词的否定 # 嵌套量词语句的否定可以通过连续地应用单个量词语句的否定规则得到。 # # - $\lnot \forall x \exists y(xy=1) \equiv \exists x \forall y (xy \ne 1)$ # - 没有一个妇女已搭乘过世界上每一条航线上的一个航班 # - P(w,f):w搭乘过航班f # - Q(f,a):f是航线a上的航班 # - $\forall w \lnot \forall a \exists f(P(w,f) \land Q(f,a)) \equiv \forall w \exists a \forall f(\lnot P(w,f) \lor \lnot Q(f,a))$ # - $lim_{x \to a}f(x)$不存在这一事实,其中f(x)是实变量x的实值函数,而a属于f的定义域。 # - $\lnot \forall \epsilon > 0 \exists \delta > 0 \forall x(0 < |x-a| < \delta \to |f(x) - L| < \epsilon) \equiv \exists \epsilon > 0 \forall \delta > 0 \exists x(0 < |x - a| < \delta \land |f(x) - L| \ge \epsilon)$ # ## 1.6 推理规则 # ### 1.6.2 命题逻辑的有效论证 # **定义1**:命题逻辑中的一个论证是一连串的命题。除了论证中最后一个命题外都叫做`前提`(premise),最后那个命题叫做`结论`(conclusion)。一个`论证`(argument)是`有效的`(valid),如果它的所有前提为真蕴涵着结论为真。 # # 命题逻辑中的论证形式是一连串涉及命题变元的复合命题。无论用什么特定命题来替换其中的命题变元,如果前提均真时结论为真,则称该论证形式是有效的。 # # ### 1.6.3 命题逻辑的推理规则 # 永真式$(p \land (p \to q)) \to q$是称为`假言逻辑`(modus ponens)或分离逻辑(law of detachment)的推理规则的基础。 # # ![1-7-1](1-7-1.png) # ![1-7-2](1-7-2.png) # # ### 1.6.5 消解律 # 已经开发出的计算机程序能够将定理的推理和证明任务自动化。许多这类程序利用称为`消解律`(resolution)的推理规则。这个推理规则基于永真式:$((p \lor q) \land (\lnot p \lor r)) \to (q \lor r)$ # # 消解规则最后的析取式称为`消解式`(resolvent)。当在此永真式中令q=r时,可得$(p \lor q) \land (\lnot p \lor q) \to q$。而且,当令r=F时,可得$(p \lor q) \land (\lnot p) \to q$(因为 $q \lor F \equiv q$),这是永真式,析取三段论规则就基于此式。 # # 消解律在基于逻辑规则的编程语言中扮演着重要的角色,如在Prolog中。而且,可以用消解律来构建自动定理正面系统。要使用消解律作为仅有的推理规则来够早命题逻辑中的证明,假设和结论必须表示为`子句`(clause),这里子句是指变量或其否定的一个析取式。 # # ### 1.6.6 谬误 # `肯定结论的谬误`(fallacy of affirming the conclusion):$((p \to q) \land q) \to p$,因为当p为假而q为真时,它为假。 # # `否定假设的谬误`(fallacy of denying the hypothesis):$((p \to q) \land \lnot q) \to \lnot p$,因为当p为假而q为真时,它为假。 # # ### 1.6.7 量化命题的推理规则 # - `全称实例`(universal instantiation):是从给定前提$\forall x P(x)$得出P(c)为真的推理规则,其中c是论域里的一个特定的成员。 # - `全称引入`(universal generalization):是从对论域里所有元素c都有P(c)为真的前提条件推出$\forall x P(x)$为真的推理规则。 # - `存在实例`(existential instantiation):是允许从“如果我们知道$\exists x P(x)$为真,得出在论域中存在一个元素c使得P(c)为真”的推理规则。 # - `存在引入`(existential generalization):是用来从“已知有一特定的c使P(c)为真时得出$\exists x P(x)$为真”的推理规则。 # # ![1-8](1-8.png) # # ### 1.6.8 命题和量化命题推理规则的组合使用 # `全称假言推理`(universal modus ponens): # # ![1-9](1-9.png) # # `全称取拒式`(universal modus tollens): # # ![1-10](1-10.png) # ## 1.7 证明导论 # ### 1.7.2 一些专业术语 # 一个`定理`(theorem)是一个能够被证明是真的语句。不太重要的定理有时称为`命题`(定理也可以称为`事实`(fact)或`结论`(result))。我们用一个`证明`(proof)来展示一个定理是真的。证明就是建立定理真实性的一个有效论证。证明中用到的语句可以包括`公理`(axiom)(或`假设`(postulate)),这些是我们假定为真的语句、定理的前提(如果有的话)和以前已经被证明的定理。 # # 一个不太重要但有助于证明其他结论的定理称为`引理`(lemma)。`推论`(corollary)是从一个已经被证明的定理可以直接建立起来的一个定理。`猜想`(conjecture)是一个被提出认为是真的命题,通常是基于部分证据、启发式论证或者专家的直觉。当猜想的一个证明被发现时,猜想就变成了定理。许多时候猜想被证明是假的,因此它们不是定理。 # # ### 1.7.3 理解定理是如何陈述得 # 
证明的第一步通常涉及选择论域里的一个一般性元素。随后的步骤是证明这个元素具有所考虑的性质。最后,全称引入蕴含着定理对论域里所有元素都成立。 # # ### 1.7.5 直接证明法 # 条件语句$p \to q$的直接证明法的构造:第一步假设p为真;第二步用推理规则构造,而第三步表明q必须也为真。直接证明法是通过证明如果p为真,那么q也肯定为真,这样p为真且q为假的情况永远不会发生从而证明条件语句$p \to q$为真。 # # ### 1.7.6 反证法 # 证明形如$\forall x(P(x) \to Q(x))$的定理,不从前提开始以结论结束来证明这类定理的方法叫做`间接证明法`。一类非常有用的间接证明法称为`反证法`(proof by contraposition)。反证法利用了这样一个事实:条件语句$p \to q$等价于它的逆否命题$\lnot q \to \lnot p$。 # # ### 1.7.7 归谬证明法 # 假设我们要证明命题p是真的。再假定我们能找到一个矛盾式q使得$\lnot p \to q$为真。因为q是假的,而$\lnot p \to q$是真的,所以我们能够得出结论$\lnot p$为假,这意味着p为真。怎样才能找到一个矛盾式q以这样的方式帮助我们证明p是真的呢? # # 因为无论r是什么,命题$r \lor \lnot r$就是矛盾式,所以如果我们能够证明对某个命题r,$\lnot p \to (r \land \lnot r)$为真,就能证明p是真的。这种类型的证明称为`归谬证明法`(proof by contradiction)。归谬证明法是另一种间接证明法。 # ## 1.8 证明的方法和测量 # ### 1.8.2 穷举证明法和分情形证明法 # `分情形证明法`:$(p_1 \lor p_2 \lor \cdots \lor p_n) \to q$ 可以用永真式:$((p_1 \to q) \land (p_2 \to q) \land \cdots \land (p_n \to q))$作为推理规则。 # # 穷举证明法:穷尽所有的可能性。 # # ### 1.8.3 存在性证明 # $\exists x P(x)$这类命题的证明称为`存在性证明`(existence proof)。 # # `构造性的`(constructive):找到一个元素a,使得P(a)为真。 # # `非构造性的`(nonconstructive):一种常用方法是归谬证明,证明该存在量化式的否定式蕴含一个矛盾。 # # ### 1.8.4 唯一性证明 # `唯一性证明`(uniqueness proof)两个部分组成: # # 1. 存在性:证明存在某个元素x具有期望的性质。 # 2. 唯一性:证明如果$y \ne x$,则y不具有期望的性质。
math/discrete_math/1-The-Foundations-Logic-and-Proofs.ipynb
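# The notes above define logical equivalence, tautology and satisfiability in terms of truth tables. A small illustrative sketch, not part of the original notes, that checks De Morgan's law and these definitions by brute-force enumeration of truth assignments:
from itertools import product

def truth_table(n_vars, formula):
    # Evaluate the formula on all 2**n_vars truth assignments
    return [formula(*values) for values in product([True, False], repeat=n_vars)]

# De Morgan's law: not(p and q) is logically equivalent to (not p) or (not q)
lhs = truth_table(2, lambda p, q: not (p and q))
rhs = truth_table(2, lambda p, q: (not p) or (not q))
print(lhs == rhs)  # True -> the two compound propositions are logically equivalent

# A tautology is true under every assignment; an unsatisfiable formula under none
print(all(truth_table(1, lambda p: p or not p)))   # True:  p ∨ ¬p is a tautology
print(any(truth_table(1, lambda p: p and not p)))  # False: p ∧ ¬p is unsatisfiable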
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="eE0nezVlVC0l" # # Importing the required packages # + id="w-m0uKNmUEzj" import pandas as pd import numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split # + [markdown] id="kzYjj2x2VZqN" # # Reading the dataset # + id="auY4EzifVYdB" dados = pd.read_csv('/content/train.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 333} id="aoX9DwucViyG" outputId="4725fbaa-29a6-400d-d1de-540d13bc21b2" dados.head() # + id="ueJsgu1gVwhr" dados = dados.drop(['Name', 'Ticket', 'Cabin', 'Embarked'], axis=1) # + colab={"base_uri": "https://localhost:8080/", "height": 201} id="tauU8YFXWGoy" outputId="9550eda3-d581-4102-9d53-839965999d7f" dados.head() # + [markdown] id="OB0G4CKFWRbz" # # Setting the key and the response variable # + id="_SGntSp_WU0t" dados = dados.set_index(['PassengerId']) # + id="LZ06FdxlXU7N" dados = dados.rename(columns= {'Survived': 'target'}, inplace= False) # + colab={"base_uri": "https://localhost:8080/", "height": 232} id="UermRZPWWwE-" outputId="c9f07bd0-6e65-4e62-902d-2a7e9304ed37" dados.head() # + [markdown] id="EyN4LOvEXgG1" # # Descriptive statistics of the data # + colab={"base_uri": "https://localhost:8080/", "height": 292} id="59bI6Ve3Xhya" outputId="2aaa235e-dd29-4d68-dba7-badfd06006d1" dados.describe() # + colab={"base_uri": "https://localhost:8080/", "height": 171} id="uVQ8nj0oXkDt" outputId="c5bf9e43-7373-4075-bb3b-83d744a25ee0" dados.describe(include=['O']) # + [markdown] id="ddpYLgAIX-7A" # # Data transformation # + id="nw4h3iDAXvJL" dados['Sex_F'] = np.where(dados['Sex'] == 'female', 1, 0) dados['Pclass_1'] = np.where(dados['Pclass'] == 1, 1, 0) dados['Pclass_2'] = np.where(dados['Pclass'] == 2, 1, 0) dados['Pclass_3'] = np.where(dados['Pclass'] == 3, 1, 0) # + id="SLPtUm-QYkJK" dados = dados.drop(['Pclass', 'Sex'], axis = 1) # + colab={"base_uri": "https://localhost:8080/", "height": 232} id="U6RNisX0YsVJ" outputId="ae907bbc-0da4-48bc-dce7-eba1d6faf4dd" dados.head() # + colab={"base_uri": "https://localhost:8080/"} id="4Mjp-_2iYvKP" outputId="b7ca6d31-6340-4f85-badd-f73cf11ce8a0" dados.isnull().sum() # + id="6IxLd7ThY2OS" dados.ffill(0, inplace = True) # + colab={"base_uri": "https://localhost:8080/"} id="XDtXbwPAZXNl" outputId="399010d2-730c-4b57-fe70-b55d77816fd1" dados.isnull().sum() # + [markdown] id="Wib07d2OZGaC" # # Sampling # + colab={"base_uri": "https://localhost:8080/"} id="ItO0xPWQZEoL" outputId="02fea316-9f4a-4e97-8c7c-ccc26766728c" x_train, x_test, y_train, y_test = train_test_split(dados.drop(['target'], axis=1), dados['target'], test_size = 0.3, random_state = 1234) [{'treino': x_train.shape}, {'teste': x_test.shape}] # + [markdown] id="MaTGDO28aGOH" # # Model # + colab={"base_uri": "https://localhost:8080/"} id="e_kPKmtcZ_-f" outputId="f37ec3c5-1611-443e-d30a-d96e5efb3e2e" rainforest = RandomForestClassifier(n_estimators = 1000, criterion = 'gini', max_depth = 5) rainforest.fit(x_train, y_train) # + id="el4Z_sy7aalt" probabilidade = rainforest.predict_proba(dados.drop('target', axis = 1))[:, 1] classificado = rainforest.predict(dados.drop('target', axis = 1)) # + id="M8sHCOJ5a-1P" dados['probabilidade'] = probabilidade dados['classificado'] = classificado # + colab={"base_uri": "https://localhost:8080/", "height": 442} id="9K5Li7eUay22"
outputId="7cb2a805-725c-4b80-e2b8-d965061d5d7f" dados # + id="AC33lrcta0Su"
titanic-dados.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # mpld3 # # [mpld3](http://mpld3.github.io/) is a Python package that adds interactivity to Matplotlib graphics, for enhanced visualization in browsers. It does so by producing [D3.js]( out of the Matplotlib figure. # # + # first the imports: the matplotlib usuals, plus mpld3 import numpy as np import matplotlib.pyplot as plt import mpld3 plt.style.use('bmh') # - # ## Line graph # + # Now we create a regular matplotlib figure, but we tell mpld3 to display it plt.plot( [3,1,4,1,5], 'ks-', mec='w', mew=5, ms=20 ) mpld3.display() # The resulting figure will have interactive pan/zoom controls (lower left) # - # ## Bubbles with tooltips # Another example, this one taken from the [Scatter plot with tooltips](http://mpld3.github.io/examples/scatter_tooltip.html) example in the mpld3 gallery. # + """ Scatter Plot With Tooltips ========================== A scatter-plot with tooltip labels on hover. Hover over the points to see the point labels. Use the toolbar buttons at the bottom-right of the plot to enable zooming and panning, and to reset the view. """ fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE')) N = 100 scatter = ax.scatter(np.random.normal(size=N), np.random.normal(size=N), c=np.random.random(size=N), s=1000 * np.random.random(size=N), alpha=0.3, cmap=plt.cm.jet) ax.grid(color='white', linestyle='solid') ax.set_title("Scatter Plot (with tooltips!)", size=20) labels = ['point {0}'.format(i + 1) for i in range(N)] tooltip = mpld3.plugins.PointLabelTooltip(scatter, labels=labels) mpld3.plugins.connect(fig, tooltip) mpld3.display() # -
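# ## Exporting the interactive figure
# The interactive versions above can also be saved as standalone HTML (a small sketch, not part of
# the original gallery example; the output filename is arbitrary). `mpld3.save_html` writes the
# figure together with the required mpld3/D3 JavaScript, and `mpld3.fig_to_html` returns the same
# markup as a string for embedding elsewhere.
# +
mpld3.save_html(fig, 'scatter_tooltips.html')   # writes a self-contained HTML file
html_snippet = mpld3.fig_to_html(fig)           # or keep the markup as a string
# -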
vmfiles/IPNB/Examples/b Graphics/20 mpld3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # The sequence of triangle numbers is generated by adding the natural numbers. So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be: # # 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ... # # Let us list the factors of the first seven triangle numbers: # # - 1: 1 # - 3: 1,3 # - 6: 1,2,3,6 # - 10: 1,2,5,10 # - 15: 1,3,5,15 # - 21: 1,3,7,21 # - 28: 1,2,4,7,14,28 # # We can see that 28 is the first triangle number to have over five divisors. # # What is the value of the first triangle number to have over five hundred divisors? # # https://stackoverflow.com/questions/5811151/why-do-we-check-up-to-the-square-root-of-a-prime-number-to-determine-if-it-is-pr def nth_triangle(n): return int(n*(n+1)/2) nth_triangle(7) # + import math def n_factors(n): factors = 0 root = int(math.sqrt(n)) for i in range(1, root + 1): if n % i == 0: factors += 2 if root * root == n: factors -= 1 return factors # - n_factors(28) def first_tri_n_factors(n): i = 1 while True: tri = nth_triangle(i) n_fac = n_factors(tri) if n_fac>n: return tri i += 1 first_tri_n_factors(500)
project-euler/12.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: dev # kernelspec: # display_name: 'Python 3.7.9 64-bit (''PythonDataV2'': conda)' # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/NicoleLund/flight_delay_prediction/blob/model_prep_210819/data_manipulation_modeling/investigate_models/b_random_forest_all_y.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="VrN_jQ-Y-ZoN" # # b_random_forest_delays_dec_hrs # ---- # # Written in Google Colab # # By <NAME> # # This workbook builds a random forest model for 2017 flight performance. # + id="qEuSZXoP-ZoQ" # Import Dependencies # Plotting # %matplotlib inline import matplotlib.pyplot as plt # Data manipulation import numpy as np import pandas as pd from statistics import mean from operator import itemgetter from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder, MinMaxScaler from tensorflow.keras.utils import to_categorical # Parameter Selection from sklearn import tree from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV # Model Development from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from tensorflow import keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Dropout from tensorflow.keras.wrappers.scikit_learn import KerasClassifier # Model Metrics from sklearn.metrics import classification_report # Save/load files from tensorflow.keras.models import load_model import joblib # # Ignore deprecation warnings # import warnings # warnings.simplefilter('ignore', FutureWarning) # + id="6y1Qt9KM-ZoR" # Set the seed value for the notebook, so the results are reproducible from numpy.random import seed seed(1) # + [markdown] id="QJMIpNeT-ZoR" # # Read in the csv model files # + id="uI4uJfPS-ZoS" # # Read the CSV files from AWS to Pandas Dataframe # X_train = pd.read_csv("https://flight-delay-prediction.s3.us-west-1.amazonaws.com/2017_TUS_X_train.csv") # X_test = pd.read_csv("https://flight-delay-prediction.s3.us-west-1.amazonaws.com/2017_TUS_X_test.csv") # y_train = pd.read_csv("https://flight-delay-prediction.s3.us-west-1.amazonaws.com/2017_TUS_y_train.csv") # y_test = pd.read_csv("https://flight-delay-prediction.s3.us-west-1.amazonaws.com/2017_TUS_y_test.csv") # - # Read the CSV files from AWS to Pandas Dataframe X_train = pd.read_csv("../feature_assessment/2017_TUS_X_train_dec_hrs.csv") X_test = pd.read_csv("../feature_assessment/2017_TUS_X_test_dec_hrs.csv") y_train = pd.read_csv("../feature_assessment/2017_TUS_y_train_dec_hrs.csv") y_test = pd.read_csv("../feature_assessment/2017_TUS_y_test_dec_hrs.csv") X_train.drop('CRS_ARR_hours',axis=1) # + colab={"base_uri": "https://localhost:8080/", "height": 162} id="TwZMc8kO_R3N" outputId="e2ab1b42-2d63-4c18-c676-90e097dba7dc" X_train.head(3) # - y_train = y_train.DELAY y_test = y_test.DELAY # + colab={"base_uri": "https://localhost:8080/", "height": 142} id="bQdjDWzE_UOf" outputId="273319f4-39c0-40b4-dc0f-5180f962ca07" y_train.head(3) # + [markdown] id="E3Sdva3S-ZoU" # ## Random Forest Classifier Method # + id="FtsgLo0P-ZoV" # Create model model = 
RandomForestClassifier(n_estimators=1000) model = model.fit(X_train, y_train) # + id="bPEaK7j_-ZoV" # Sort the features by their importance tree_feature_sort = sorted(zip(X_train.columns,model.feature_importances_),key=itemgetter(1), reverse=True) # tree_feature_sort # + colab={"base_uri": "https://localhost:8080/", "height": 730} id="YPwSrCru-ZoV" outputId="505a7aad-ad0e-4642-de46-06bcbec9e686" # Plot Decision Tree Feature Importance fig = plt.figure(figsize=[12,12]) plt.barh(*zip(* (tree_feature_sort))) plt.xlabel('Feature Importance') plt.ylabel('Feature Name') plt.title('Random Forest Assessment') plt.show() # + [markdown] id="1Q<KEY>" # # Score Model # + colab={"base_uri": "https://localhost:8080/"} id="B9VseAiy_1Yn" outputId="e1e5def4-6f48-441d-dffa-5a5e74a17d90" print('Random Forest Score:') model.score(X_test, y_test) # + [markdown] id="EEancxnY-ZoV" # # Make **Predictions** # + id="QKvGmeVB-ZoV" predictions = model.predict(X_test) # + id="TzqHn7l7Kv_e" results_delayed = pd.DataFrame({ \ "DELAY": y_test, "DELAY_PREDICT": predictions}) # + colab={"base_uri": "https://localhost:8080/", "height": 110} id="vykCQImCK_pj" outputId="70eaa1ba-f0de-4214-9b15-4708b65ba420" delay_summary = results_delayed.apply(pd.value_counts) delay_summary # + id="6MWRY_GbWHqj" outputId="9ab46220-51fd-49c1-e212-cf233574f3a8" colab={"base_uri": "https://localhost:8080/"} print(classification_report(y_test, predictions))
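# + [markdown]
# GridSearchCV is imported in the "Parameter Selection" block above but never exercised. A minimal
# sketch of how it could be applied to this classifier; the grid values below are illustrative,
# not tuned.
# +
param_grid = {'n_estimators': [200, 500, 1000], 'max_depth': [5, 10, None]}
grid = GridSearchCV(RandomForestClassifier(), param_grid, cv=3, scoring='accuracy')
grid.fit(X_train, y_train)
print(grid.best_params_)
print('Best CV accuracy:', grid.best_score_)
# -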
data_manipulation_modeling/investigate_models/b_random_forest_delays_dec_hrs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Moving Average crossover strategy # + import yfinance as yf import matplotlib.pyplot as plt from matplotlib.pyplot import figure # %matplotlib inline plt.style.use("seaborn-whitegrid") import pandas as pd import numpy as np df = yf.download('PINS', start = '2020-10-10') def strategy(df, sma1, sma2): df = df.copy() df['ret'] = np.log(df.Close.pct_change() + 1) df['SMA1'] = df.Close.rolling(sma1).mean() df['SMA2'] = df.Close.rolling(sma2).mean() df = df.dropna() df['position'] = np.where(df['SMA1'] > df['SMA2'], 1,0) df['stratret'] = df['position'].shift(1) * df['ret'] df = df.dropna() return df def performance(df): return np.exp(df[['ret', 'stratret']].sum()) ''' performance(strategy(df,20,50)) ''' stratdf = strategy(df,20,50) fig, ax = plt.subplots(figsize=(10,6)) ax2 = ax.twinx() ax.plot(stratdf[['Close', 'SMA1','SMA2']]) ax2.plot(stratdf['position']) # - SMA_list1 = range(30,101,5) SMA_list2 = range(130,201,5) def Tester(SMA_list1, SMA_list2): profits = [] a,b = [], [] for i,e in zip(SMA_list1, SMA_list2): profit = performance(strategy(df,i,e)) profits.append(profit) a.append(i) b.append(e) col = {'level_0':'SMA1', 'level_1': 'SMA2'} frame = pd.DataFrame(profits, [a,b]).reset_index().rename(columns=col) frame['edge'] = frame.stratret - frame.ret return frame.sort_values('edge', ascending = False) Tester(SMA_list1, SMA_list2)
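# + [markdown]
# Tester above pairs the two SMA ranges element-by-element with zip, so only one fast/slow
# combination per index is evaluated. A small sketch (reusing the strategy and performance
# functions defined above; the name grid_tester is illustrative) that scans the full grid instead:
# +
from itertools import product

def grid_tester(fast_range, slow_range):
    rows = []
    for sma1, sma2 in product(fast_range, slow_range):
        perf = performance(strategy(df, sma1, sma2))
        rows.append({'SMA1': sma1, 'SMA2': sma2,
                     'ret': perf['ret'], 'stratret': perf['stratret']})
    frame = pd.DataFrame(rows)
    frame['edge'] = frame.stratret - frame.ret
    return frame.sort_values('edge', ascending=False)

grid_tester(SMA_list1, SMA_list2)
# -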
Simple Moving Average _____________ --__--__.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="jcB8JS-_s5Vq" # ![Quora-1.png](attachment:Quora-1.png) # + [markdown] colab_type="text" id="J6rnhr2Xs5Vs" # <h1 style="text-align:center;font-size:30px;" > Quora Question Pairs </h1> # + [markdown] colab_type="text" id="o9fciGc7s5Vu" # <h1> 1. Business Problem </h1> # + [markdown] colab_type="text" id="LRzmxjKxs5Vw" # <h2> 1.1 Description </h2> # + [markdown] colab_type="text" id="1nlaIYe9s5Vx" # <p>Quora is a place to gain and share knowledge—about anything. It’s a platform to ask questions and connect with people who contribute unique insights and quality answers. This empowers people to learn from each other and to better understand the world.</p> # <p> # Over 100 million people visit Quora every month, so it's no surprise that many people ask similarly worded questions. Multiple questions with the same intent can cause seekers to spend more time finding the best answer to their question, and make writers feel they need to answer multiple versions of the same question. Quora values canonical questions because they provide a better experience to active seekers and writers, and offer more value to both of these groups in the long term. # </p> # <br> # > Credits: Kaggle # # + [markdown] colab_type="text" id="wdWP5SdFs5Vy" # __ Problem Statement __ # - Identify which questions asked on Quora are duplicates of questions that have already been asked. # - This could be useful to instantly provide answers to questions that have already been answered. # - We are tasked with predicting whether a pair of questions are duplicates or not. # + [markdown] colab_type="text" id="34hYn911s5V0" # <h2> 1.2 Sources/Useful Links</h2> # + [markdown] colab_type="text" id="7YIjqVPgs5V4" # - Source : https://www.kaggle.com/c/quora-question-pairs # <br><br>____ Useful Links ____ # - Discussions : https://www.kaggle.com/anokas/data-analysis-xgboost-starter-0-35460-lb/comments # - Kaggle Winning Solution and other approaches: https://www.dropbox.com/sh/93968nfnrzh8bp5/AACZdtsApc1QSTQc7X0H3QZ5a?dl=0 # - Blog 1 : https://engineering.quora.com/Semantic-Question-Matching-with-Deep-Learning # - Blog 2 : https://towardsdatascience.com/identifying-duplicate-questions-on-quora-top-12-on-kaggle-4c1cf93f1c30 # + [markdown] colab_type="text" id="jlNRUR4Ws5V5" # <h2>1.3 Real world/Business Objectives and Constraints </h2> # + [markdown] colab_type="text" id="Hv6fd7txs5V7" # 1. The cost of a mis-classification can be very high. # 2. You would want a probability of a pair of questions to be duplicates so that you can choose any threshold of choice. # 3. No strict latency concerns. # 4. Interpretability is partially important. # + [markdown] colab_type="text" id="VIam5Aaks5V9" # <h1>2. 
Machine Learning Probelm </h1> # + [markdown] colab_type="text" id="jnty9Bhls5V-" # <h2> 2.1 Data </h2> # + [markdown] colab_type="text" id="rty1PZv3s5V_" # <h3> 2.1.1 Data Overview </h3> # + [markdown] colab_type="text" id="-gu8pAt3s5WB" # <p> # - Data will be in a file Train.csv <br> # - Train.csv contains 5 columns : qid1, qid2, question1, question2, is_duplicate <br> # - Size of Train.csv - 60MB <br> # - Number of rows in Train.csv = 404,290 # </p> # + [markdown] colab_type="text" id="v9grbSNds5WC" # <h3> 2.1.2 Example Data point </h3> # + [markdown] colab_type="text" id="9WEQ-lSxs5WE" # <pre> # "id","qid1","qid2","question1","question2","is_duplicate" # "0","1","2","What is the step by step guide to invest in share market in india?","What is the step by step guide to invest in share market?","0" # "1","3","4","What is the story of Kohinoor (Koh-i-Noor) Diamond?","What would happen if the Indian government stole the Kohinoor (Koh-i-Noor) diamond back?","0" # "7","15","16","How can I be a good geologist?","What should I do to be a great geologist?","1" # "11","23","24","How do I read and find my YouTube comments?","How can I see all my Youtube comments?","1" # </pre> # + [markdown] colab_type="text" id="9qPVfeEjs5WF" # <h2> 2.2 Mapping the real world problem to an ML problem </h2> # + [markdown] colab_type="text" id="JfBn0LYPs5WI" # <h3> 2.2.1 Type of Machine Leaning Problem </h3> # + [markdown] colab_type="text" id="QEqiUD_Ps5WJ" # <p> It is a binary classification problem, for a given pair of questions we need to predict if they are duplicate or not. </p> # + [markdown] colab_type="text" id="keZOL1las5WL" # <h3> 2.2.2 Performance Metric </h3> # + [markdown] colab_type="text" id="YHktaBrMs5WN" # Source: https://www.kaggle.com/c/quora-question-pairs#evaluation # # Metric(s): # * log-loss : https://www.kaggle.com/wiki/LogarithmicLoss # * Binary Confusion Matrix # + [markdown] colab_type="text" id="FmDMBWJjs5WO" # <h2> 2.3 Train and Test Construction </h2> # + [markdown] colab_type="text" id="l7PcvKQss5WQ" # <p> </p> # <p> We build train and test by randomly splitting in the ratio of 70:30 or 80:20 whatever we choose as we have sufficient points to work with. </p> # + [markdown] colab_type="text" id="cW_MVIlps5WQ" # <h1>3. 
Exploratory Data Analysis </h1> # + colab={} colab_type="code" id="sNzZdmBJs5WS" outputId="0e1df4ed-4a74-4b0e-e84e-1b3862bbf55d" import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from subprocess import check_output # %matplotlib inline import plotly.offline as py py.init_notebook_mode(connected=True) import plotly.graph_objs as go import plotly.tools as tls import os import gc import re from nltk.corpus import stopwords import distance from nltk.stem import PorterStemmer from bs4 import BeautifulSoup # + [markdown] colab_type="text" id="__T8jddGs5Wc" # <h2> 3.1 Reading data and basic stats </h2> # + colab={} colab_type="code" id="ifM_s9rvs5Wd" outputId="2e17a7bc-9a5b-4c43-d35b-081cc9f92528" df = pd.read_csv("train.csv") print("Number of data points:",df.shape[0]) # + colab={} colab_type="code" id="34zXGW8gs5Wj" outputId="ab7d570a-9eeb-477a-b7cb-663ff6fd04fa" df.head() # + colab={} colab_type="code" id="mx4DFwMns5Wp" outputId="1141e0bb-2750-489e-8b8c-2ba680f7416c" df.info() # + [markdown] colab_type="text" id="HHHTGTzws5Ww" # We are given a minimal number of data fields here, consisting of: # # - id: Looks like a simple rowID # - qid{1, 2}: The unique ID of each question in the pair # - question{1, 2}: The actual textual contents of the questions. # - is_duplicate: The label that we are trying to predict - whether the two questions are duplicates of each other. # + [markdown] colab_type="text" id="ZulqVzTDs5Wx" # <h3> 3.2.1 Distribution of data points among output classes</h3> # - Number of duplicate(smilar) and non-duplicate(non similar) questions # + colab={} colab_type="code" id="YHp64yNjs5Wx" outputId="361ddf04-d545-45f9-dbe2-8bebd695e8da" df.groupby("is_duplicate")['id'].count().plot.bar() # + colab={} colab_type="code" id="-usI2K2bs5W4" outputId="ff0a6a8b-65ad-487a-d5ec-df3c223ba620" print('~> Total number of question pairs for training:\n {}'.format(len(df))) # + colab={} colab_type="code" id="YiPia6Pjs5W_" outputId="3cde4cec-4314-4c14-e807-b35e969bf9e8" print('~> Question pairs are not Similar (is_duplicate = 0):\n {}%'.format(100 - round(df['is_duplicate'].mean()*100, 2))) print('\n~> Question pairs are Similar (is_duplicate = 1):\n {}%'.format(round(df['is_duplicate'].mean()*100, 2))) # + [markdown] colab_type="text" id="wGX03QVRs5XF" # <h3> 3.2.2 Number of unique questions </h3> # + colab={} colab_type="code" id="VOKa6aU2s5XG" outputId="8f644b1d-27c0-4d63-84e2-bb2a42419be2" qids = pd.Series(df['qid1'].tolist() + df['qid2'].tolist()) unique_qs = len(np.unique(qids)) qs_morethan_onetime = np.sum(qids.value_counts() > 1) print ('Total number of Unique Questions are: {}\n'.format(unique_qs)) #print len(np.unique(qids)) print ('Number of unique questions that appear more than one time: {} ({}%)\n'.format(qs_morethan_onetime,qs_morethan_onetime/unique_qs*100)) print ('Max number of times a single question is repeated: {}\n'.format(max(qids.value_counts()))) q_vals=qids.value_counts() q_vals=q_vals.values # + colab={} colab_type="code" id="plcvbd4Cs5XM" outputId="8e137cc1-e0c4-44f4-9cc2-703302206d4f" x = ["unique_questions" , "Repeated Questions"] y = [unique_qs , qs_morethan_onetime] plt.figure(figsize=(10, 6)) plt.title ("Plot representing unique and repeated questions ") sns.barplot(x,y) plt.show() # + [markdown] colab_type="text" id="G-CwGaMms5XS" # <h3>3.2.3 Checking for Duplicates </h3> # + colab={} colab_type="code" id="YCiDBHm5s5XT" outputId="d8011926-4086-4c9a-9fcf-59663a584ec4" #checking whether there are any repeated pair of 
questions pair_duplicates = df[['qid1','qid2','is_duplicate']].groupby(['qid1','qid2']).count().reset_index() print ("Number of duplicate questions",(pair_duplicates).shape[0] - df.shape[0]) # + [markdown] colab_type="text" id="iaHTnnt8s5XX" # <h3> 3.2.4 Number of occurrences of each question </h3> # + colab={} colab_type="code" id="dPZwk-C8s5Xa" outputId="0d6d5978-2306-4ed3-cf27-f2a0b974e47d" plt.figure(figsize=(20, 10)) plt.hist(qids.value_counts(), bins=160) plt.yscale('log', nonposy='clip') plt.title('Log-Histogram of question appearance counts') plt.xlabel('Number of occurences of question') plt.ylabel('Number of questions') print ('Maximum number of times a single question is repeated: {}\n'.format(max(qids.value_counts()))) # + [markdown] colab_type="text" id="h_WdYxlYs5Xj" # <h3> 3.2.5 Checking for NULL values </h3> # + colab={} colab_type="code" id="r0x1gR2fs5Xk" outputId="721aef48-e628-40c6-d567-25466f4283e1" #Checking whether there are any rows with null values nan_rows = df[df.isnull().any(1)] print (nan_rows) # + [markdown] colab_type="text" id="CCYmufv6s5Xo" # - There are two rows with null values in question2 # + colab={} colab_type="code" id="yLBRyACgs5Xp" outputId="076046a9-1510-41ef-cf98-15b38661dca4" # Filling the null values with ' ' df = df.fillna('') nan_rows = df[df.isnull().any(1)] print (nan_rows) # + [markdown] colab_type="text" id="l9Qcl5xfs5Xs" # <h2>3.3 Basic Feature Extraction (before cleaning) </h2> # + [markdown] colab_type="text" id="RRzvPYzGs5Xu" # Let us now construct a few features like: # - ____freq_qid1____ = Frequency of qid1's # - ____freq_qid2____ = Frequency of qid2's # - ____q1len____ = Length of q1 # - ____q2len____ = Length of q2 # - ____q1_n_words____ = Number of words in Question 1 # - ____q2_n_words____ = Number of words in Question 2 # - ____word_Common____ = (Number of common unique words in Question 1 and Question 2) # - ____word_Total____ =(Total num of words in Question 1 + Total num of words in Question 2) # - ____word_share____ = (word_common)/(word_Total) # - ____freq_q1+freq_q2____ = sum total of frequency of qid1 and qid2 # - ____freq_q1-freq_q2____ = absolute difference of frequency of qid1 and qid2 # + colab={} colab_type="code" id="Iq4DZ-rYs5Xv" outputId="d34e66da-d84b-49ea-8852-4beb9da688ba" if os.path.isfile('df_fe_without_preprocessing_train.csv'): df = pd.read_csv("df_fe_without_preprocessing_train.csv",encoding='latin-1') else: df['freq_qid1'] = df.groupby('qid1')['qid1'].transform('count') df['freq_qid2'] = df.groupby('qid2')['qid2'].transform('count') df['q1len'] = df['question1'].str.len() df['q2len'] = df['question2'].str.len() df['q1_n_words'] = df['question1'].apply(lambda row: len(row.split(" "))) df['q2_n_words'] = df['question2'].apply(lambda row: len(row.split(" "))) def normalized_word_Common(row): w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(" "))) w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(" "))) return 1.0 * len(w1 & w2) df['word_Common'] = df.apply(normalized_word_Common, axis=1) def normalized_word_Total(row): w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(" "))) w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(" "))) return 1.0 * (len(w1) + len(w2)) df['word_Total'] = df.apply(normalized_word_Total, axis=1) def normalized_word_share(row): w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(" "))) w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(" "))) return 1.0 * 
len(w1 & w2)/(len(w1) + len(w2)) df['word_share'] = df.apply(normalized_word_share, axis=1) df['freq_q1+q2'] = df['freq_qid1']+df['freq_qid2'] df['freq_q1-q2'] = abs(df['freq_qid1']-df['freq_qid2']) df.to_csv("df_fe_without_preprocessing_train.csv", index=False) df.head() # + [markdown] colab_type="text" id="-zLujovVs5X3" # <h3> 3.3.1 Analysis of some of the extracted features </h3> # + [markdown] colab_type="text" id="zRIFQTkCs5X3" # - Here are some questions have only one single words. # + colab={} colab_type="code" id="jSS0X82Ds5X5" outputId="5dacd7b2-d955-4435-9639-f1c6acd9b580" print ("Minimum length of the questions in question1 : " , min(df['q1_n_words'])) print ("Minimum length of the questions in question2 : " , min(df['q2_n_words'])) print ("Number of Questions with minimum length [question1] :", df[df['q1_n_words']== 1].shape[0]) print ("Number of Questions with minimum length [question2] :", df[df['q2_n_words']== 1].shape[0]) # + [markdown] colab_type="text" id="kFzTIHW3s5YB" # <h4> 3.3.1.1 Feature: word_share </h4> # + colab={} colab_type="code" id="s4rwGLFDs5YD" outputId="0103aaa0-3f5a-4eb4-cd22-164a57d7aef0" plt.figure(figsize=(12, 8)) plt.subplot(1,2,1) sns.violinplot(x = 'is_duplicate', y = 'word_share', data = df[0:]) plt.subplot(1,2,2) sns.distplot(df[df['is_duplicate'] == 1.0]['word_share'][0:] , label = "1", color = 'red') sns.distplot(df[df['is_duplicate'] == 0.0]['word_share'][0:] , label = "0" , color = 'blue' ) plt.show() # + [markdown] colab_type="text" id="RcwMI4xps5YJ" # - The distributions for normalized word_share have some overlap on the far right-hand side, i.e., there are quite a lot of questions with high word similarity # - The average word share and Common no. of words of qid1 and qid2 is more when they are duplicate(Similar) # + [markdown] colab_type="text" id="K0AbOS65s5YL" # <h4> 3.3.1.2 Feature: word_Common </h4> # + colab={} colab_type="code" id="_mCFvztcs5YM" outputId="008ac763-a832-4c11-88fa-5da52cdb9305" plt.figure(figsize=(12, 8)) plt.subplot(1,2,1) sns.violinplot(x = 'is_duplicate', y = 'word_Common', data = df[0:]) plt.subplot(1,2,2) sns.distplot(df[df['is_duplicate'] == 1.0]['word_Common'][0:] , label = "1", color = 'red') sns.distplot(df[df['is_duplicate'] == 0.0]['word_Common'][0:] , label = "0" , color = 'blue' ) plt.show() # + [markdown] colab_type="text" id="9Ej1ouEVs5YR" # <p> The distributions of the word_Common feature in similar and non-similar questions are highly overlapping </p>
Quora_question_pair_similarity/1.Quora.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Datasets Validation using Grandinet Boosting Classifier: # # For the ArticularyWordRecognition Dataset: from sktime.datasets import load_UCR_UEA_dataset from sklearn.ensemble import GradientBoostingClassifier from sklearn.datasets import make_classification from sktime.datatypes._panel._convert import from_nested_to_3d_numpy import numpy as np from sklearn.metrics import accuracy_score from sklearn.metrics import roc_auc_score from sklearn.metrics import f1_score from pyts.datasets import load_basic_motions from sklearn.model_selection import train_test_split X_train,y_train= load_UCR_UEA_dataset('ArticularyWordRecognition', split="train", return_X_y=True) X_test,y_test= load_UCR_UEA_dataset('ArticularyWordRecognition', split="test", return_X_y=True) X_3d = from_nested_to_3d_numpy(X_train) X_3d.shape X_test_3d= from_nested_to_3d_numpy(X_test) X_test_3d.shape # + class reservoir_features: ''' creates an object associated with a multivariate dataset ''' def __init__(self,data,num_features): ''' data: Takes in a multidimensional array (x * y * z) - z>y Initializes it x: Timeseries y: Attributes for a given timeseries observation z: timestamped observations (features) num_features: you must specify the dimension you want to reduce it to ''' self.features = [] self.filters_used = [] self.original_data = data.copy() self.data = data.copy() self.num_features = num_features self.x = data.shape[0] self.y = data.shape[1] self.z = data.shape[2] # perform checks #1. 3d numpy array #2. Each time series should have same number of observations #3. 
num_features should be less than timestamped observations def normalize(self): ''' Each attribute could potentially be on a different scale modifies the original data and performs a min max normalization ''' for i in range(self.original_data.shape[0]): for j in range(self.original_data.shape[1]): self.data[i][j] = (self.original_data[i][j] - self.original_data[i][j].min())/(self.original_data[i][j].max()-self.original_data[i][j].min()) def filters(self,stride_len = [1], num_filters = 1): ''' stride_len: num of columns to skip after each filter multiplication num_filters: you can specify the number of filters you need; each filter will be of a differnt size size of filter = n*m (n = # of rows = attribute size, m = # of columns) ''' #Have error check to make sure stride len is a list and value is <length of attributes n = self.y #Edge case vals is empty/smaller than num_filters for iteration in range(num_filters): m = self._get_m(stride_len[iteration]) filter_a = np.random.random((n,m)) print("filter of size ", str(n), "*", str(m), "was created\n") self.filters_used.append(filter_a) temp_features =[] for i in range(self.x): temp = [] j = 0 while j + m < self.data.shape[2]: temp.append((filter_a*self.data[i,:,j:j+m]).mean()) j+=stride_len[iteration] temp_features.append(temp) self.features.append(temp_features) def _get_m(self,stride_len): ''' stride_len: based on stride length,& num_features, we calculate possible filter size ''' m = self.z -(self.num_features)*stride_len return m def result_features(self): ''' if multiple filters were added, takes the average result ''' ans =[] for timeseries in range(len(self.features[0])): temp =[] for feature in range(len(self.features[0][0])): val = np.mean([self.features[filter][timeseries][feature] for filter in range(len(self.features))]) temp.append(val) ans.append(temp) return ans # - def transform_data(data, num_features, stride_len, num_filters): data_transformed = reservoir_features(data ,num_features = num_features) #normalize data_transformed.normalize() #create 2 filters data_transformed.filters(stride_len = stride_len, num_filters = num_filters) data_transformed = data_transformed.result_features() return data_transformed X_train_transformed= transform_data(X_3d, 40, [3], 1) X_test_transformed=transform_data(X_test_3d, 40, [3], 1) clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0).fit(X_train_transformed, y_train) clf.score(X_train_transformed, y_train) clf.apply(X_train_transformed) clf.predict_log_proba(X_train_transformed) # ## Accuracy for Train Data: # Accuracy: clf_train_pred=clf.predict(X_train_transformed) clf_acc_score = accuracy_score(y_train, clf_train_pred) print(clf_acc_score) roc_auc_score(y_train, clf.predict_proba(X_train_transformed), multi_class='ovr') train_f1 = f1_score(y_train, clf_train_pred, average='weighted') train_f2 = f1_score(y_train, clf_train_pred, average='macro') train_f3 = f1_score(y_train, clf_train_pred, average='micro') print(train_f1) print(train_f2) print(train_f3) # ## For the Test set: # + clf_test_pred=clf.predict(X_test_transformed) clf_test_acc_score = accuracy_score(y_test, clf_test_pred) print(clf_test_acc_score) # - roc_auc_score(y_test, clf.predict_proba(X_test_transformed), multi_class='ovr') #f1-score: test_f1 = f1_score(y_test, clf_test_pred, average='weighted') test_f2 = f1_score(y_test, clf_test_pred, average='macro') test_f3 = f1_score(y_test, clf_test_pred, average='micro') print(test_f1) print(test_f2) print(test_f3) # # For the 
AtrialFibrillation dataset: # + X_AF,y_AF= load_UCR_UEA_dataset('AtrialFibrillation', split="train", return_X_y=True) X_AF_test,y_AF_test= load_UCR_UEA_dataset('AtrialFibrillation', split="test", return_X_y=True) X_AF_3d = from_nested_to_3d_numpy(X_AF) X_AF_3d.shape X_AF_test_3d= from_nested_to_3d_numpy(X_AF_test) X_AF_test_3d.shape print(X_AF_3d.shape) print(X_AF_test_3d.shape) # - X_train_transformed_AF = transform_data(X_AF_3d, 40, [3], 1) X_test_transformed_AF = transform_data(X_AF_test_3d, 40, [3], 1) print(len(X_train_transformed_AF)) print(len(X_train_transformed_AF[0])) clf_AF = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0).fit(X_train_transformed_AF, y_AF) clf_AF.score(X_train_transformed_AF, y_AF) clf_AF.apply(X_train_transformed_AF) # ## Accuracy for Train Data: # # + # Accuracy: clf_train_pred_AF=clf_AF.predict(X_train_transformed_AF) clf_acc_score_AF = accuracy_score(y_AF, clf_train_pred_AF) print("Accuracy_score:",clf_acc_score_AF) train_AF_f1 = f1_score(y_AF, clf_train_pred_AF, average='weighted') train_AF_f2 = f1_score(y_AF, clf_train_pred_AF, average='macro') train_AF_f3 = f1_score(y_AF, clf_train_pred_AF, average='micro') print("f1_score:Weighted:",train_f1) print("f1_score:macro:",train_f2) print("f1_score:micro:",train_f3) # - roc_auc_score(y_AF, clf_AF.predict_proba(X_train_transformed_AF), multi_class='ovr') # ## Accuracy for Test Data: # + # Accuracy: clf_test_pred_AF=clf_AF.predict(X_test_transformed_AF) clf_acc_score_test_AF = accuracy_score(y_AF_test, clf_test_pred_AF) print("Accuracy_score:",clf_acc_score_test_AF) # F1-Score: test_AF_f1 = f1_score(y_AF_test, clf_test_pred_AF, average='weighted') test_AF_f2 = f1_score(y_AF_test, clf_test_pred_AF, average='macro') test_AF_f3 = f1_score(y_AF_test, clf_test_pred_AF, average='micro') print("f1_score:Weighted:",test_AF_f1) print("f1_score:macro:",test_AF_f2) print("f1_score:micro:",test_AF_f3) # - roc_auc_score(y_AF_test, clf_AF.predict_proba(X_test_transformed_AF), multi_class='ovr') # # For the Cricket dataset # + X_C,y_C= load_UCR_UEA_dataset('Cricket', split="train", return_X_y=True) X_C_test,y_C_test= load_UCR_UEA_dataset('Cricket', split="test", return_X_y=True) X_C_3d = from_nested_to_3d_numpy(X_C) X_C_3d.shape X_C_test_3d= from_nested_to_3d_numpy(X_C_test) X_C_test_3d.shape print(X_C_3d.shape) print(X_C_test_3d.shape) # - X_train_transformed_C = transform_data(X_C_3d, 200, [3], 1) X_test_transformed_C = transform_data(X_C_test_3d, 200, [3], 1) clf_C = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0).fit(X_train_transformed_C, y_C) clf_C.score(X_train_transformed_C, y_C) clf_C.apply(X_train_transformed_C) # # Accuracy for train Data: # + # Accuracy: clf_train_pred_C=clf_C.predict(X_train_transformed_C) clf_acc_score_C = accuracy_score(y_C, clf_train_pred_C) print("Accuracy_score:",clf_acc_score_C) train_C_f1 = f1_score(y_C, clf_train_pred_C, average='weighted') train_C_f2 = f1_score(y_C, clf_train_pred_C, average='macro') train_C_f3 = f1_score(y_C, clf_train_pred_C, average='micro') print("f1_score:Weighted:",train_C_f1) print("f1_score:macro:",train_C_f2) print("f1_score:micro:",train_C_f3) # - roc_auc_score(y_C, clf_C.predict_proba(X_train_transformed_C), multi_class='ovr') # # Accuracy for test Data: # + # Accuracy: clf_test_pred_C=clf_C.predict(X_test_transformed_C) clf_acc_score_test_C = accuracy_score(y_C_test, clf_test_pred_C) print("Accuracy_score:",clf_acc_score_test_C) # F1-Score: test_C_f1 = f1_score(y_C_test, 
clf_test_pred_C, average='weighted') test_C_f2 = f1_score(y_C_test, clf_test_pred_C, average='macro') test_C_f3 = f1_score(y_C_test, clf_test_pred_C, average='micro') print("f1_score:Weighted:",test_C_f1) print("f1_score:macro:",test_C_f2) print("f1_score:micro:",test_C_f3) # - roc_auc_score(y_C_test, clf_C.predict_proba(X_test_transformed_C), multi_class='ovr') # # For the Epilepsy Data: # + X_E,y_E= load_UCR_UEA_dataset('Epilepsy', split="train", return_X_y=True) X_E_test,y_E_test= load_UCR_UEA_dataset('Epilepsy', split="test", return_X_y=True) X_E_3d = from_nested_to_3d_numpy(X_E) X_E_3d.shape X_E_test_3d= from_nested_to_3d_numpy(X_E_test) X_E_test_3d.shape print(X_E_3d.shape) print(X_E_test_3d.shape) # - X_train_transformed_E = transform_data(X_E_3d, 16, [3], 1) X_test_transformed_E = transform_data(X_E_test_3d, 16, [3], 1) clf_E = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0).fit(X_train_transformed_E, y_E) clf_E.score(X_train_transformed_E, y_E) clf_E.apply(X_train_transformed_E) # # Accuracy for Train Data: # + # Accuracy: clf_train_pred_E=clf_E.predict(X_train_transformed_E) clf_acc_score_E = accuracy_score(y_E, clf_train_pred_E) print("Accuracy_score:",clf_acc_score_E) train_E_f1 = f1_score(y_E, clf_train_pred_E, average='weighted') train_E_f2 = f1_score(y_E, clf_train_pred_E, average='macro') train_E_f3 = f1_score(y_E, clf_train_pred_E, average='micro') print("f1_score:Weighted:",train_E_f1) print("f1_score:macro:",train_E_f2) print("f1_score:micro:",train_E_f3) # - roc_auc_score(y_E, clf_E.predict_proba(X_train_transformed_E), multi_class='ovr') # # Accuracy for Test Data: # # + # Accuracy: clf_test_pred_E=clf_E.predict(X_test_transformed_E) clf_acc_score_test_E = accuracy_score(y_E_test, clf_test_pred_E) print("Accuracy_score:",clf_acc_score_test_E) # F1-Score: test_E_f1 = f1_score(y_E_test, clf_test_pred_E, average='weighted') test_E_f2 = f1_score(y_E_test, clf_test_pred_E, average='macro') test_E_f3 = f1_score(y_E_test, clf_test_pred_E, average='micro') print("f1_score:Weighted:",test_E_f1) print("f1_score:macro:",test_E_f2) print("f1_score:micro:",test_E_f3) # - roc_auc_score(y_E_test, clf_E.predict_proba(X_test_transformed_E), multi_class='ovr') # # For the FingerMovements Data: # # + X_F,y_F= load_UCR_UEA_dataset('FingerMovements',return_X_y=True) X_F_Train,X_F_test,y_F_Train,y_F_test= train_test_split(X_F,y_F, test_size=0.5, random_state=0) X_F_3d = from_nested_to_3d_numpy(X_F_Train) X_F_3d.shape X_F_test_3d= from_nested_to_3d_numpy(X_F_test) X_F_test_3d.shape print(X_F_3d.shape) print(X_F_test_3d.shape) # - X_train_transformed_F = transform_data(X_F_3d, 16, [3], 1) X_test_transformed_F = transform_data(X_F_test_3d, 16, [3], 1) clf_F = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0).fit(X_train_transformed_F, y_F_Train) clf_F.score(X_train_transformed_F, y_F_Train) clf_F.apply(X_train_transformed_F) # # Accuracy for Train Data: # # + # Accuracy: clf_train_pred_F=clf_F.predict(X_train_transformed_F) clf_acc_score_F = accuracy_score(y_F_Train, clf_train_pred_F) print("Accuracy_score:",clf_acc_score_F) train_F_f1 = f1_score(y_F_Train, clf_train_pred_F, average='weighted') train_F_f2 = f1_score(y_F_Train, clf_train_pred_F, average='macro') train_F_f3 = f1_score(y_F_Train, clf_train_pred_F, average='micro') print("f1_score:Weighted:",train_F_f1) print("f1_score:macro:",train_F_f2) print("f1_score:micro:",train_F_f3) # - # # Accuracy for Test Data: # # + # Accuracy: 
clf_test_pred_F=clf_F.predict(X_test_transformed_F) clf_acc_score_test_F = accuracy_score(y_F_test, clf_test_pred_F) print("Accuracy_score:",clf_acc_score_test_F) # F1-Score: test_F_f1 = f1_score(y_F_test, clf_test_pred_F, average='weighted') test_F_f2 = f1_score(y_F_test, clf_test_pred_F, average='macro') test_F_f3 = f1_score(y_F_test, clf_test_pred_F, average='micro') print("f1_score:Weighted:",test_F_f1) print("f1_score:macro:",test_F_f2) print("f1_score:micro:",test_F_f3) # - roc_auc_score(y_F_test, clf_F.predict_proba(X_test_transformed_F)[:,1]) # # For the Handwriting Data Set: # + X_H,y_H= load_UCR_UEA_dataset('Handwriting', return_X_y=True) X_H_Train,X_H_test,y_H_Train,y_H_test= train_test_split(X_H,y_H, test_size=0.5, random_state=0) X_H_3d = from_nested_to_3d_numpy(X_H_Train) X_H_3d.shape X_H_test_3d= from_nested_to_3d_numpy(X_H_test) X_H_test_3d.shape print(X_H_3d.shape) print(X_H_test_3d.shape) # - X_train_transformed_H = transform_data(X_H_3d, 10, [3], 1) X_test_transformed_H = transform_data(X_H_test_3d, 10, [3], 1) clf_H = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0).fit(X_train_transformed_H, y_H_Train) clf_H.score(X_train_transformed_H, y_H_Train) clf_H.apply(X_train_transformed_H) # # Accuracy for Train Data: # + # Accuracy: clf_train_pred_H=clf_H.predict(X_train_transformed_H) clf_acc_score_H = accuracy_score(y_H_Train, clf_train_pred_H) print("Accuracy_score:",clf_acc_score_H) train_H_f1 = f1_score(y_H_Train, clf_train_pred_H, average='weighted') train_H_f2 = f1_score(y_H_Train, clf_train_pred_H, average='macro') train_H_f3 = f1_score(y_H_Train, clf_train_pred_H, average='micro') print("f1_score:Weighted:",train_H_f1) print("f1_score:macro:",train_H_f2) print("f1_score:micro:",train_H_f3) # - roc_auc_score(y_H_Train, clf_H.predict_proba(X_train_transformed_H), multi_class='ovr') # # Accuracy for Test Data: # + # Accuracy: clf_test_pred_H=clf_H.predict(X_test_transformed_H) clf_acc_score_test_H = accuracy_score(y_H_test, clf_test_pred_H) print("Accuracy_score:",clf_acc_score_test_H) # F1-Score: test_H_f1 = f1_score(y_H_test, clf_test_pred_H, average='weighted') test_H_f2 = f1_score(y_H_test, clf_test_pred_H, average='macro') test_H_f3 = f1_score(y_H_test, clf_test_pred_H, average='micro') print("f1_score:Weighted:",test_H_f1) print("f1_score:macro:",test_H_f2) print("f1_score:micro:",test_H_f3) # - roc_auc_score(y_H_test, clf_H.predict_proba(X_test_transformed_H), multi_class='ovr') # # For Basic Motions: # # + X_B, X_B_test, y_B, y_B_test = load_basic_motions(return_X_y=True) print(X_B.shape) print(X_B_test.shape) # - X_train_transformed_B = transform_data(X_B, 16, [3], 1) X_test_transformed_B = transform_data(X_B_test, 16, [3], 1) clf_B = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0).fit(X_train_transformed_B, y_B) clf_B.score(X_train_transformed_B, y_B) # # Accuracy for Train Data: # + # Accuracy: clf_train_pred_B=clf_B.predict(X_train_transformed_B) clf_acc_score_B = accuracy_score(y_B, clf_train_pred_B) print("Accuracy_score:",clf_acc_score_B) train_B_f1 = f1_score(y_B, clf_train_pred_B, average='weighted') train_B_f2 = f1_score(y_B, clf_train_pred_B, average='macro') train_B_f3 = f1_score(y_B, clf_train_pred_B, average='micro') print("f1_score:Weighted:",train_B_f1) print("f1_score:macro:",train_B_f2) print("f1_score:micro:",train_B_f3) # - roc_auc_score(y_B, clf_B.predict_proba(X_train_transformed_B), multi_class='ovr') # # Accuracy for Test Data: # + # Accuracy: 
clf_test_pred_B=clf_B.predict(X_test_transformed_B) clf_acc_score_test_B = accuracy_score(y_B_test, clf_test_pred_B) print("Accuracy_score:",clf_acc_score_test_B) # F1-Score: test_B_f1 = f1_score(y_B_test, clf_test_pred_B, average='weighted') test_B_f2 = f1_score(y_B_test, clf_test_pred_B, average='macro') test_B_f3 = f1_score(y_B_test, clf_test_pred_B, average='micro') print("f1_score:Weighted:",test_B_f1) print("f1_score:macro:",test_B_f2) print("f1_score:micro:",test_B_f3) # - roc_auc_score(y_B_test, clf_B.predict_proba(X_test_transformed_B), multi_class='ovr')
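# # Reducing the per-dataset boilerplate
# The same transform / fit / score / report block is repeated for every dataset above, which makes
# it easy to print the wrong variables by copy-paste. A compact helper built only from functions
# already used in this notebook (the name evaluate_dataset is illustrative):
# +
def evaluate_dataset(X_train_t, y_train_d, X_test_t, y_test_d):
    clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
                                     max_depth=1, random_state=0)
    clf.fit(X_train_t, y_train_d)
    for split, X_split, y_split in [('train', X_train_t, y_train_d), ('test', X_test_t, y_test_d)]:
        pred = clf.predict(X_split)
        print(split, 'accuracy:', accuracy_score(y_split, pred),
              '| weighted F1:', f1_score(y_split, pred, average='weighted'))
    return clf

# Example: re-run the BasicMotions evaluation through the helper
evaluate_dataset(X_train_transformed_B, y_B, X_test_transformed_B, y_B_test)
# -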
Evalutaion- RFC_ GBC/Gradient_Boosting_Classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="zu0UQrjs4esF" def square(y): return y*y # + colab={"base_uri": "https://localhost:8080/"} id="N0z0qiwi5dMd" outputId="9f784d8b-8490-4927-88e5-5bd71d779270" square(2) # + id="FNbDNlWg5jmS" #Anonymous function: This function will not have a name! #Lambda Function #Syntax: Lambda arguments: operation # + colab={"base_uri": "https://localhost:8080/"} id="JemWCn1S58yg" outputId="1e6280d5-ac81-4bc3-b40d-bd8e6fd87888" s = lambda y: y*y print(s(2)) # + id="FetEd2Df6Gpt" #Make use of lambda function and return the sum of 3 numbers # + colab={"base_uri": "https://localhost:8080/"} id="xBY2sioK7Txj" outputId="91d11bfb-45ff-4d3b-ac94-0fd064a6ffe3" num = [1,2,3,4,5,6,7,8,9,10] res = [] for i in num: res.append(i**2) print(res) # + id="mY8zrYG38DMc" #MAP - Syntax map(Function, *iterables) --> map object It computes the operations of the function over the iterable # + colab={"base_uri": "https://localhost:8080/"} id="1VnLW3pu9Ka2" outputId="c1965e73-2777-4565-b7f7-7dc06e6f0ca9" map(lambda x: x**2,num) # + colab={"base_uri": "https://localhost:8080/"} id="eVOZ0UlJ7lgJ" outputId="f970344e-ade5-431a-c831-e8695d693608" #Map num = [1,2,3,4,5,6,7,8,9,10] square_lst = list(map(lambda x: x**2,num)) square_lst # + colab={"base_uri": "https://localhost:8080/"} id="bRgZdf0L7_2Y" outputId="f3e08cba-e2d5-42c4-b265-aa244618dcb7" Myclassstudens = ['Vahini','Abdul','Tajmul'] upper_case = list(map(str.upper,Myclassstudens)) upper_case # - # # To change the list items to lower case to upper case ls = ['School','college','hospital'] uplst = list(map(lambda x: {if x == 'college'} x.upper(),ls)) uplst # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="UANqk_f19_ld" outputId="a2f3f9ee-b2ba-4cfd-c126-bbab4d9d6a65" a = "Abdul" a.upper().split(' ') # + id="pT2rKjnK8uGi" float_nums = [4.77,8.99,2.33,10.99,6,33] #use map and convert this list into a list of integers round_numbers = list(map(int, float_nums)) # + colab={"base_uri": "https://localhost:8080/"} id="gqJqUlC19svf" outputId="c1eeff1e-a202-44d6-b1eb-503c1b2dce5f" round_numbers # + id="M4H6Szqu9s39" #Make use of Map and Lambda to generate the same pattern name = ['jack','sparrow','harry','potter'] index = [1,2,3,4] OP: [('jack',1),('sparrow',2),('harry',3),('potter',4)] # + colab={"base_uri": "https://localhost:8080/"} id="LF7-oARP9s7W" outputId="b52a3c61-739d-4bdf-afda-611803cba7a8" name = ['jack','sparrow','harry','potter'] index = [1,2,3,4] list(map(lambda i,j: (i,j),name,index)) # + colab={"base_uri": "https://localhost:8080/"} id="49RYatg_C5kd" outputId="736cb65a-e69c-4ca1-a32d-53ec70cbcb0e" #ZIP a = [1,2,3,4,5] b = [6,7,8,9,10] c = [] for i,j in zip(a,b): c.append(i*j) print(c) # + colab={"base_uri": "https://localhost:8080/"} id="2Z7YB47-C5of" outputId="aae1c2c3-f0a5-4943-c096-6635aab1fbae" a = ['a','b','c','d'] for indx,a in enumerate(a): print("Index",indx) print("Element",a) # + colab={"base_uri": "https://localhost:8080/"} id="sf5CFL1CC5tO" outputId="e6ff5793-eecb-468f-f679-f565684dc407" #List Comprehension list_nos = [1,2,3,4,5,6,7] res = [] for i in list_nos: res.append(i*i) print(res) # + id="2IgQPVKMC5yN" lst_squared = [i*i for i in list_nos] # + colab={"base_uri": "https://localhost:8080/"} id="Ejq7NAulC53-" outputId="97d44739-398c-41eb-c8f4-d405228f87ab" lst_squared # + id="56vS3C4p8uN9" #Write a code using list 
comprehension to find the 3times each number from the list #return type is also a list list_nos = [1,2,3,4,5,6,7] O/P: [2,6,9,12,15,18,21] # + id="R_MmYCV7NUQF" Words = ['inceptz','card','btch13','pythn','class'] vowels = ['a','e','i','o','u'] #Write a simple code to return a list of words which contains vowels #Write it using simple for's and if's #List comprehension is the shorted form return and iterate over a list in a single #more than one for loops inside list comprehension #Suggestion: avoid or limit with 2 loops as probelems go complex! #return type of List comprehension is one list! # + colab={"base_uri": "https://localhost:8080/"} id="EUGawXCOPMyY" outputId="4ede8ec3-c6dd-4192-e6f8-07573054a09e" res = [] for i in Words: print(i) for j in i: if(j in vowels): res.append(i) #break print(res) # + colab={"base_uri": "https://localhost:8080/"} id="nwEGF8xgPe6C" outputId="8af81850-8355-425c-8e73-58b1a0b844b5" res # + id="uDvy6XH-VHLK" odd = [] even = [] res = [odd.append("Even") if i%2 == 0 else even.append("Odd") for i in range(0,20)] #Return Type 1. One List # + id="zRsBDnj1Vw7F" odd # + colab={"base_uri": "https://localhost:8080/"} id="RJbR7DViVp81" outputId="48c07192-fe22-412b-91a8-fbacd156da20" res # + colab={"base_uri": "https://localhost:8080/", "height": 130} id="qVWZ7I73PgUH" outputId="31334a27-83ba-41b7-a906-5e82179a3632" res =[] hal = [res.append(i) for i in Words for j in i if j in vowels ] hal # + colab={"base_uri": "https://localhost:8080/"} id="KjGZHTCfU2Id" outputId="215736c7-d6f5-461c-b604-786183b2a2e3" res # + id="yCUxCXPtRC4H" Set comprehensions dict comprehensions No Tuple comprehensions # + colab={"base_uri": "https://localhost:8080/"} id="DeeAPqFSPz9H" outputId="e5771879-58fc-4aee-d649-6cfe56b8a98b" hal = [[[1,2]]] [l for i in hal for k in i for l in k] # + id="KYK5aYYMUOqV"
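# + [markdown]
# Worked answers to the exercises posed above, using the lists already defined in this notebook
# (small sketches, one construct each):
# +
# Sum of 3 numbers with a lambda
(lambda a, b, c: a + b + c)(1, 2, 3)
# +
# 3 times each number, as a list comprehension
list_nos = [1, 2, 3, 4, 5, 6, 7]
[i * 3 for i in list_nos]
# +
# Words that contain at least one vowel, as a single list comprehension
[w for w in Words if any(ch in vowels for ch in w)]
# +
# A working version of the conditional map attempted earlier: upper-case only 'college'
ls = ['School', 'college', 'hospital']
list(map(lambda x: x.upper() if x == 'college' else x, ls))
# +
# Set and dict comprehensions follow the same pattern; there is no tuple comprehension
# (parentheses produce a generator instead)
{i * i for i in range(5)}
{i: i * i for i in range(5)}
# -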
Py_Advanced_Functions_day_7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline df=pd.read_csv('../data/Services.csv') df.head() tmp=df['Region'].value_counts().to_dict() plt.title('Services consume by Region') sns.countplot(x='Region',data=df) plt.title('Services consume by Region') plt.pie(x=list(tmp.values()),labels=list(tmp.keys()),data=df,shadow=True,startangle=-40,autopct='%1.1f%%',explode=(0.1,0.1,0,0,0)) plt.Circle((0,0),0.80,color='black', fc='white',linewidth=1.25) x=df['gender'].value_counts().to_dict() plt.title(' by Gender') plt.pie(x=list(x.values()),labels=list(x.keys()),data=df,shadow=True,startangle=-40,autopct='%1.1f%%',explode=(0.1,0)) plt.Circle((0,0),0.80,color='black', fc='white',linewidth=1.25) x=df['Mobile_Banking'].value_counts().to_dict() plt.title(' Mobile banking Users') plt.pie(x=list(x.values()),labels=list(x.keys()),data=df,shadow=True,startangle=-40,autopct='%1.1f%%',explode=(0.1,0)) plt.Circle((0,0),0.80,color='black', fc='white',linewidth=1.25) x=df['Net_Banking'].value_counts().to_dict() plt.title(' Net banking Users') plt.pie(x=list(x.values()),labels=list(x.keys()),data=df,shadow=True,startangle=-40,autopct='%1.1f%%',explode=(0.1,0)) plt.Circle((0,0),0.80,color='black', fc='white',linewidth=1.25) tmp=df['ThirdParty_paymet_App'].value_counts().to_dict() plt.title('ThirdParty Most used paymet App') plt.pie(x=list(tmp.values()),labels=list(tmp.keys()),data=df,shadow=True,startangle=-40,autopct='%1.1f%%',explode=(0.1,0.1,0.1,0,0)) plt.Circle((0,0),0.80,color='black', fc='white',linewidth=1.25) x=df['Credit_card'].value_counts().to_dict() plt.title(' Credit Card Users') plt.pie(x=list(x.values()),labels=list(x.keys()),data=df,shadow=True,startangle=-40,autopct='%1.1f%%',explode=(0.1,0)) plt.Circle((0,0),0.80,color='black', fc='white',linewidth=1.25) x=df['Visa_Card'].value_counts().to_dict() plt.title(' Visa Card Users') plt.pie(x=list(x.values()),labels=list(x.keys()),data=df,shadow=True,startangle=-40,autopct='%1.1f%%',explode=(0.1,0)) plt.Circle((0,0),0.80,color='black', fc='white',linewidth=1.25) x=df['Rupay_Card'].value_counts().to_dict() plt.title(' Rupay Card Users') plt.pie(x=list(x.values()),labels=list(x.keys()),data=df,shadow=True,startangle=-40,autopct='%1.1f%%',explode=(0.2,0)) plt.Circle((0,0),0.80,color='black', fc='white',linewidth=1.25) df['SMS_banking'].value_counts() x=df['SMS_banking'].value_counts().to_dict() plt.title(' SMS banking Users') plt.pie(x=list(x.values()),labels=list(x.keys()),data=df,shadow=True,startangle=-40,autopct='%1.1f%%',explode=(0.1,0)) plt.Circle((0,0),0.80,color='black', fc='white',linewidth=1.25)
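# A helper that factors out the repeated pie-chart code above. Note that in the cells above
# plt.Circle is created but never added to the axes, so no donut hole is actually drawn; this
# sketch (assuming a donut look was intended) attaches the circle with gca().add_artist so it
# becomes visible. The column passed in at the end is just an example.
def donut_chart(series, title):
    counts = series.value_counts()
    explode = [0.1] + [0] * (len(counts) - 1)
    plt.title(title)
    plt.pie(x=counts.values, labels=counts.index, shadow=True, startangle=-40,
            autopct='%1.1f%%', explode=explode)
    centre = plt.Circle((0, 0), 0.80, fc='white', linewidth=1.25)
    plt.gca().add_artist(centre)
    plt.show()
donut_chart(df['SMS_banking'], 'SMS banking Users')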
Notebooks/Services.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/kishanRaj10/18CSE044-DMDW-LAB/blob/main/DMDW_Assignment_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="UN7iADNUlqF1" outputId="664c5e84-99d0-4ba2-8d71-d7651f71c44f" import pandas as pd path="https://raw.githubusercontent.com/kishanRaj10/18CSE044-DMDW-LAB/main/Toyota.csv%20-%20Toyota.csv.csv" data =pd.read_csv(path) data # + colab={"base_uri": "https://localhost:8080/"} id="i5fL-qHYs-9k" outputId="3fe36cfe-d3f1-4c22-e52c-0a1237697565" type(data) # + colab={"base_uri": "https://localhost:8080/"} id="3meCuiT7uZ8A" outputId="1876b4b1-b147-4d8f-c0ab-a57771f11b10" data.shape # + colab={"base_uri": "https://localhost:8080/"} id="FDaRhA7ruiD6" outputId="f3f3d558-9aac-4cb1-a22a-47f03d1e7eb0" data.info # + colab={"base_uri": "https://localhost:8080/"} id="pmaeRfiiulAy" outputId="cf32e5e4-0541-4925-a0ee-fb711cc170bb" data.index # + colab={"base_uri": "https://localhost:8080/"} id="HXfRdOBUuqUi" outputId="2ec2057a-e66f-48f2-db1a-9b08ec612625" data.columns # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="F99S7FByuu08" outputId="7cede985-5d27-4224-af78-7c6ccfc31a93" data.head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="zGjDK7R9uzJ0" outputId="1ec2f660-1e27-4f14-c2af-42b0301bb60d" data.tail() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="Z1rdoWpmu1j-" outputId="2be3a0af-aa21-49e3-ccd8-d9b62a7e2ded" data.head(5) # + colab={"base_uri": "https://localhost:8080/", "height": 359} id="cNqohSXiu4zd" outputId="282690a6-a4b8-4bac-9692-064bdfaf7361" data[['Price',"Age"]].head(10) # + colab={"base_uri": "https://localhost:8080/"} id="Sxrco6FDu8_l" outputId="e897a435-84c4-4f82-b1b8-23e814067322" data.isnull().sum() # + colab={"base_uri": "https://localhost:8080/"} id="5DpHByUGvAsh" outputId="ce42614c-883a-4357-9cd4-c9ebf88ece91" data.dropna(inplace=True) data.isnull().sum() # + colab={"base_uri": "https://localhost:8080/"} id="tD_UkiOIvGyi" outputId="c8b3da90-b673-4568-fd4f-3a607a73f37b" data.shape # + colab={"base_uri": "https://localhost:8080/", "height": 359} id="8gkQpSeFvMrR" outputId="95cbe378-0c26-4af1-ae77-878cef1ec603" data.head(10) # + colab={"base_uri": "https://localhost:8080/"} id="24WPCqFjvSwT" outputId="3f20d865-4712-4725-e759-8eef46f8a245" data['MetColor'].mean() # + colab={"base_uri": "https://localhost:8080/"} id="yxzjmhLZvWhm" outputId="d1a73a1b-138c-436f-af95-8678a3ea5318" data['MetColor'].head() # + colab={"base_uri": "https://localhost:8080/"} id="tqJh63oZvaeM" outputId="587ba2e1-4455-45c5-8c96-1020020a4f45" import numpy as np data['MetColor'].replace(np.NaN,data['MetColor'].mean()).head() # + colab={"base_uri": "https://localhost:8080/", "height": 359} id="5XMFid0yvlLY" outputId="1d8968dd-5bee-4413-e449-bddce3ae3fcd" data.head(10) # + colab={"base_uri": "https://localhost:8080/"} id="Wke_DGP4voJ-" outputId="41a62efe-03fc-488f-bd4f-9199d48aa289" data['CC'].mean() # + colab={"base_uri": "https://localhost:8080/"} id="WGwPrBj1vypj" outputId="a7cd873d-2379-44be-8ab6-3944ecbd9bed" data['CC'].head() # + colab={"base_uri": "https://localhost:8080/", "height": 669} id="LY0dyYeNv7Nr" 
outputId="abd580ff-b56a-46ac-993d-632f991df2de" data[['Age',"KM"]].head(20) # + id="Ysz_mw0IwBEH"
DMDW_Assignment_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from bqplot import pyplot as plt import ipywidgets as widgets import numpy as np # generate some fake data n = 2000 x = np.linspace(0.0, 10.0, n) np.random.seed(0) y = np.cumsum(np.random.randn(n)*10).astype(int) fig_hist = plt.figure(title='Histogram') hist = plt.hist(y, bins=25) hist.bins = 10 slider = widgets.IntSlider(description='Bins number', min=1, max=100, value=30) # + widgets.link((hist, 'bins'), (slider, 'value')) fig_lines = plt.figure(title='Line Chart') lines = plt.plot(x, y) fig_lines.layout.width = 'auto' fig_lines.layout.height = 'auto' fig_hist.layout.width = 'auto' fig_hist.layout.height = 'auto' grid_layout = widgets.GridspecLayout(5, 3) grid_layout[:2, :] = fig_lines grid_layout[2:4, :] = fig_hist grid_layout[4, 1] = slider grid_layout.layout.height = '1000px' grid_layout # - selector = plt.brush_int_selector() def update_range(*ignore): if selector.selected is not None and len(selector.selected) == 2: xmin, xmax = selector.selected mask = (x > xmin) & (x < xmax) hist.sample = y[mask] selector.observe(update_range, 'selected')
notebooks/bqplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from sklearn.cluster import KMeans import matplotlib.pyplot as plt ratings = pd.read_csv("ratings.txt", sep="\t", names=["user_id", "movie_id", "rating", "timestamp"]) movies = pd.read_csv("movies.txt", sep="|", usecols=[0,1], names=["movie_id","title"]) ratings.head() movies.head() transformed_ratings = ratings.pivot(index='movie_id',columns='user_id',values='rating') transformed_ratings = transformed_ratings.fillna(0) transformed_ratings # # Clustering transformed_ratings.iloc[:,0:944] def init_cluster_centroids(data, n): cluster_centroids = data.sample(n=n).values return cluster_centroids cent = init_cluster_centroids(transformed_ratings,4) def calc_nearest_centroid(centroids, data): #print("classifying centroids") #create the nw centroid distances col_names = [] for i in range(len(centroids)): s = "distance_"+str(i) col_names.append(s) data[s] = data.iloc[:,0:943].sub(centroids[i][0:943]).pow(2).sum(1).pow(0.5) data["centroid"] = data[col_names].idxmin(axis=1) calc_nearest_centroid(cent, transformed_ratings) transformed_ratings def calc_new_centroids(centroids, data): cluster_centroids = [] for i in range(len(centroids)): s = "distance_"+str(i) sub_data = (data[data["centroid"] == s]).iloc[:,0:943] if(sub_data.shape[0] == 0): cluster_centroids.append(init_cluster_centroids(data, 1)[0].iloc[:,0:943]) else: cluster_centroids.append(sub_data.mean(axis=0)) return cluster_centroids def calc_score(centroids, data): score = 0 for i in range(len(centroids)): s = "distance_"+str(i) sub_data = (data[data["centroid"] == s]).iloc[:,0:943] score+=((sub_data-centroids[i][0:943]) ** 2).sum().sum() return score def cluster(data, n): centroids = init_cluster_centroids(data, n) calc_nearest_centroid(centroids,data) start_score = calc_score(centroids,data) new_score = start_score iteration = 0 start=1 while((start or (start_score>new_score))): iteration+=1 start_score = new_score print("iteration:"+str(iteration), start_score) start = 0 centroids = calc_new_centroids(centroids, data) calc_nearest_centroid(centroids,data) new_score = calc_score(centroids,data) return new_score cluster(transformed_ratings, 4) inertia = [] for i in range(1,6): kmeans = KMeans(n_clusters=2**i) kmeans.fit(transformed_ratings) inertia.append(kmeans.inertia_) y_kmeans = kmeans.predict(transformed_ratings) #print(y_kmeans) plt.scatter([2,4,8,16,32],inertia) plt.show() kmeans = KMeans(n_clusters=8) kmeans.fit(transformed_ratings) y_kmeans = kmeans.predict(transformed_ratings) transformed_ratings["group"] = y_kmeans for j in range(0,8): print("cluster - "+str(j)) for i in transformed_ratings.loc[transformed_ratings['group'] == j].drop(columns=['group']).mean(axis=1).nlargest(n=5).index.values: print(str(i)+" "+movies.loc[movies['movie_id'] == i].title.values[0]) print() transformed_ratings.loc[transformed_ratings['group'] == 0].drop(columns=['group']).mean(axis=1) transformed_ratings.loc[transformed_ratings['group'] == 1].drop(columns=['group']).mean(axis=0).nlargest(n=3).index.values transformed_ratings.loc[transformed_ratings['group'] == 2].drop(columns=['group']).mean(axis=0).nlargest(n=3).index.values transformed_ratings.loc[transformed_ratings['group'] == 3].drop(columns=['group']).mean(axis=0).nlargest(n=3).index.values transformed_ratings.loc[transformed_ratings['group'] == 
4].drop(columns=['group']).mean(axis=0).nlargest(n=3).index.values transformed_ratings.loc[transformed_ratings['group'] == 5].drop(columns=['group']).mean(axis=0).nlargest(n=3).index.values transformed_ratings.loc[transformed_ratings['group'] == 6].drop(columns=['group']).mean(axis=0).nlargest(n=3).index.values transformed_ratings.loc[transformed_ratings['group'] == 7].drop(columns=['group']).mean(axis=0).nlargest(n=3).index.values transformed_ratings # # Matrix factorization import numpy as np from implicit.datasets.movielens import get_movielens from implicit.bpr import BayesianPersonalizedRanking titles, ratings = get_movielens("100k") model = BayesianPersonalizedRanking(factors=100, iterations=1000) ratings.eliminate_zeros() ratings.data = np.ones(len(ratings.data)) model.fit(ratings) Parameters ---------- factors : int, optional The number of latent factors to compute learning_rate : float, optional The learning rate to apply for SGD updates during training regularization : float, optional The regularization factor to use dtype : data-type, optional Specifies whether to generate 64 bit or 32 bit floating point factors iterations : int, optional The number of training epochs to use when fitting the data verify_negative_samples: bool, optional When sampling negative items, check if the randomly picked negative item has actually been liked by the user. This check increases the time needed to train but usually leads to better predictions. num_threads : int, optional The number of threads to use for fitting the model. This only applies for the native extensions. Specifying 0 means to default to the number of cores on the machine. random_state : int, RandomState or None, optional The random state for seeding the initial item and user factors. Default is None. model.item_factors model.user_factors np.dot(model.item_factors[234], model.user_factors[7])
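# Added sketch (not part of the original notebook): the single np.dot call above
# scores one (movie, user) pair. The same learned factors can rank every movie for
# one user with plain NumPy. It assumes `model.item_factors` has one row per movie
# and `model.user_factors` one row per user, matching the indexing used above; the
# chosen `user_id` is arbitrary and purely for illustration.

# +
user_id = 7  # hypothetical user picked for illustration
scores = model.item_factors.dot(model.user_factors[user_id])
top_items = np.argsort(-scores)[:10]
for item_id in top_items:
    print(item_id, titles[item_id], round(float(scores[item_id]), 3))
# -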
Demos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip install pylinac==2.3.2 pymedphys==0.36.0-dev1 # + import numpy as np import imageio import matplotlib.pyplot as plt from IPython.display import display import pymedphys from pymedphys._wlutz import pylinac as _pylinac_wrapper from pymedphys._wlutz import reporting as _reporting # - image_filepath = pymedphys.data_path("pylinac_offset.png") image_filepath image = imageio.imread(image_filepath) plt.figure(figsize=(10,10)) plt.imshow(image) # + def get_x_and_y(image): pixels_per_mm = 4 shape = np.shape(image) x = np.arange(-shape[1] / 2, shape[1] / 2) / pixels_per_mm y = np.arange(-shape[0] / 2, shape[0] / 2) / pixels_per_mm display(shape) display(x) display(y) return x, y x, y = get_x_and_y(image) # - field_centre, bb_centre = _pylinac_wrapper.run_wlutz_raw( x, y, image, fill_errors_with_nan=True, pylinac_version="2.3.2") display(field_centre) display(bb_centre) bb_diameter = 8 edge_lengths = [20, 26] penumbra = 2 fig, axs = _reporting.image_analysis_figure( x, y, image, bb_centre, field_centre, 0, bb_diameter, edge_lengths, penumbra, ) # + crop = 400 cropped_image = image[crop:-crop, crop:-crop] plt.figure(figsize=(10,10)) plt.imshow(cropped_image) # - cropped_x, cropped_y = get_x_and_y(cropped_image) cropped_field_centre, cropped_bb_centre = _pylinac_wrapper.run_wlutz_raw( cropped_x, cropped_y, cropped_image, fill_errors_with_nan=True, pylinac_version="2.3.2") display(cropped_field_centre) display(cropped_bb_centre) fig, axs = _reporting.image_analysis_figure( cropped_x, cropped_y, cropped_image, cropped_bb_centre, cropped_field_centre, 0, bb_diameter, edge_lengths, penumbra, )
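# Added check (a sketch, not part of the original demo): compare the centres found
# on the full and cropped images to quantify how much the cropping shifts the
# result. This assumes the centres returned by `run_wlutz_raw` are (x, y) pairs in
# mm, consistent with how they are passed to the reporting figure above.

# +
field_shift = np.array(cropped_field_centre) - np.array(field_centre)
bb_shift = np.array(cropped_bb_centre) - np.array(bb_centre)

print("Field centre shift (mm):", field_shift)
print("BB centre shift (mm):", bb_shift)
print("Field-to-BB offset, full image (mm):",
      np.array(field_centre) - np.array(bb_centre))
print("Field-to-BB offset, cropped image (mm):",
      np.array(cropped_field_centre) - np.array(cropped_bb_centre))
# -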
prototyping/wlutz/pylinac-offset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from tqdm import tqdm tqdm.pandas(desc="progress-bar") from gensim.models import Doc2Vec from sklearn import utils from sklearn.model_selection import train_test_split import gensim from sklearn.linear_model import LogisticRegression from gensim.models.doc2vec import TaggedDocument import re import seaborn as sns import matplotlib.pyplot as plt from bs4 import BeautifulSoup import nltk from nltk.corpus import stopwords from sklearn.metrics import accuracy_score, f1_score # - # ### Get documents df = pd.read_csv('Consumer_Complaints.csv') df = df[['Consumer complaint narrative','Product']] df = df[pd.notnull(df['Consumer complaint narrative'])] df.rename(columns = {'Consumer complaint narrative':'narrative'}, inplace = True) df.head(10) # ### Clean Documents def cleanText(text): text = BeautifulSoup(text, "lxml").text text = re.sub(r'\|\|\|', r' ', text) text = re.sub(r'http\S+', r'<URL>', text) text = text.lower() text = text.replace('x', '') return text df['narrative'] = df['narrative'].apply(cleanText) # + train, test = train_test_split(df, test_size=0.3, random_state=42) def tokenize_text(text): tokens = [] for sent in nltk.sent_tokenize(text): for word in nltk.word_tokenize(sent): if len(word) < 2: continue tokens.append(word.lower()) return tokens train_tagged = train.apply( lambda r: TaggedDocument(words=tokenize_text(r['narrative']), tags=[r.Product]), axis=1) test_tagged = test.apply( lambda r: TaggedDocument(words=tokenize_text(r['narrative']), tags=[r.Product]), axis=1) # - model_dbow = Doc2Vec(dm=0, vector_size=100, negative=5, hs=0, min_count=2, sample = 0) model_dbow.build_vocab([x for x in tqdm(train_tagged.values)]) for epoch in range(30): model_dbow.train(utils.shuffle([x for x in tqdm(train_tagged.values)]), total_examples=len(train_tagged.values), epochs=1) model_dbow.alpha -= 0.002 model_dbow.min_alpha = model_dbow.alpha print(epoch) def vec_for_learning(model, tagged_docs): sents = tagged_docs.values targets, regressors = zip(*[(doc.tags[0], model.infer_vector(doc.words, steps=20)) for doc in tqdm(sents)]) return targets, regressors y_train, X_train = vec_for_learning(model_dbow, train_tagged) y_test, X_test = vec_for_learning(model_dbow, test_tagged) logreg = LogisticRegression(n_jobs=1, C=1e5) logreg.fit(X_train, y_train) y_pred = logreg.predict(X_test) print('Testing accuracy %s' % accuracy_score(y_test, y_pred)) print('Testing F1 score: {}'.format(f1_score(y_test, y_pred, average='weighted')))
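# Added sketch: accuracy and weighted F1 hide per-category behaviour, so a
# classification report over the same predictions shows precision and recall for
# each product class. It only reuses the `y_test` and `y_pred` computed above.

# +
from sklearn.metrics import classification_report

print(classification_report(y_test, y_pred))
# -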
src/Archive/create_doc_embeddings.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Poisson distributions
#
# Show the effect of varying `mu` of the discrete Poisson distribution.

import matplotlib.pyplot as plt
from scipy import stats

fig, axs = plt.subplots(6, figsize=(4, 12), sharey=True)
bins = list(range(18, 90, 1))
for i, mu in enumerate([1, 2, 5, 10, 30, 50]):
    data = stats.poisson.rvs(loc=18, mu=mu, size=150000)  # left edge at 18, mean at 18 + mu
    axs.flat[i].hist(data, bins=bins);
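# Added sanity check: with `loc=18` the sampled mean should sit near 18 + mu and
# the sample variance near mu, since the `loc` shift does not change the spread of
# a Poisson distribution.

# +
for mu in [1, 2, 5, 10, 30, 50]:
    sample = stats.poisson.rvs(loc=18, mu=mu, size=150000)
    print(f"mu={mu:>2}: mean={sample.mean():6.2f} (expect {18 + mu}), "
          f"var={sample.var():6.2f} (expect {mu})")
# -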
lectures/supplements/poisson_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # https://csr.lanl.gov/data/2017.html # # https://csr.lanl.gov/data-fence/1571548765/1fcFh-bNWqPJuUSQe72Z9N_Nw28=/unified-host-network-dataset-2017/netflow.html # # Compressed, netflow_day-02.bz2 is 1100MB # # Extracts to 6.7GB; that's 115,949,436 lines # # # !bzip2 -d netflow_day-02.bz2 import pandas print('pandas',pandas.__version__) import networkx print('networkx',networkx.__version__) from networkx import nx from matplotlib import pyplot as plt import time import pickle # ## Load data # # Time: The start time of the event in epoch time format # # Duration: The duration of the event in seconds. # # SrcDevice: The device that likely initiated the event. # # DstDevice: The receiving device. # # Protocol: The protocol number. # # SrcPort: The port used by the SrcDevice. # # DstPort: The port used by the DstDevice. # # SrcPackets: The number of packets the SrcDevice sent during the event. # # DstPackets: The number of packets the DstDevice sent during the event. # # SrcBytes: The number of bytes the SrcDevice sent during the event. # # DstBytes: The number of bytes the DstDevice sent during the event. # # start_time = time.time() nfdf = pandas.read_csv('netflow_day-02', nrows=1000000, names=['Time','Duration','SrcDevice','DstDevice','Protocol','SrcPort','DstPort','SrcPackets','DstPackets','SrcBytes','DstBytes']) # # print('elapsed',round(time.time()-start_time,2),'seconds') # # nfdf.to_pickle("netflow_day_02_1E6rows.pkl") start_time = time.time() nfdf = pandas.read_pickle('netflow_day_02_1E6rows.pkl') print('elapsed',round(time.time()-start_time,2),'seconds') # ## Exploration, Characterization nfdf.shape nfdf.head() # ## visualize using Networkx # # Here I provide code to visualize a graph # # https://networkx.github.io/documentation/stable/reference/generated/networkx.convert_matrix.from_pandas_edgelist.html#networkx.convert_matrix.from_pandas_edgelist try: G.clear() except NameError: pass # The following line allows you to select the number of rows used in the graph G = nx.from_pandas_edgelist(nfdf[0:10],'SrcDevice','DstDevice','Duration') G.number_of_nodes() # ## no labels nx.draw(G, with_labels = False, node_size = 100) plt.show() nx.draw(G, with_labels=False,node_size=100,pos = nx.circular_layout(G)) plt.show() pos = nx.spring_layout(G) nx.draw(G,with_labels=True) nx.draw_networkx_edge_labels(G,pos=pos,with_labels=True) _=plt.axis('off') # ## TASK: what number of nodes makes the visualization unreadable? # # You have a dataframe with 1,000,000 rows. # # First, design your experiments. What number of rows will you explore to address the question? # # At what point does node and/or edge labeling provide value?
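# One possible way to set up the experiment asked for above (an illustrative
# sketch, not the required answer): build the graph from increasing numbers of
# rows and record how many nodes and edges each subset produces before deciding
# which sizes are worth drawing.

# +
import networkx as nx

for n_rows in [10, 100, 1000, 10000]:
    G_test = nx.from_pandas_edgelist(nfdf[0:n_rows], 'SrcDevice', 'DstDevice', 'Duration')
    print(n_rows, 'rows ->', G_test.number_of_nodes(), 'nodes,',
          G_test.number_of_edges(), 'edges')
# -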
Week 10 - Activity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import requests import re from bs4 import BeautifulSoup def get_player_info(root_url, page_amount): players = [] # Look through each page for i in range(1, page_amount + 1): r = requests.get(root_url+"?page={}".format(i)) soup = BeautifulSoup(r.text) table = soup.find('table') for info in table.find_all('tr'): player = {} # Get Player Name if info.find('a') is not None: player['player_name'] = info.find('a').find_all('span')[0].get_text() + ' ' + info.find('a').find_all('span')[2].get_text() # Gets stats for player row_info = info.find_all('td') if row_info != []: player['games'] = row_info[1].get_text() player['ab'] = row_info[2].get_text() player['h'] = row_info[4].get_text() player['hr'] = row_info[7].get_text() player['bb'] = row_info[9].get_text() player['so'] = row_info[10].get_text() player['avg'] = row_info[13].get_text() player['obp'] = row_info[14].get_text() player['slg'] = row_info[15].get_text() players.append(player) return players # Get training set train_set = ['2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017'] train_dict = {} for each in train_set: if each == '2010': pages = 7 else: pages = 6 train_dict[each] = get_player_info('https://www.mlb.com/stats/' + each, pages) # Get test set test_set = ['2018', '2019'] test_dict = {} for each in test_set: test_dict[each] = get_player_info('https://www.mlb.com/stats/' + each, 6) # Run on actual set run_dict = {} run_dict['2020'] = get_player_info('https://www.mlb.com/stats/', 6) # + import numpy as np import pandas as pd from collections import defaultdict # Processing from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline # Scores from sklearn.metrics import r2_score from sklearn.metrics import explained_variance_score from sklearn.metrics import max_error # Transoforms our data into a dataframe def transform_to_df(dict_to_map): df_return = pd.DataFrame() for key in dict_to_map: filtered_map = [t for t in dict_to_map[key] if t] temp_df = pd.DataFrame(filtered_map) df_return = df_return.append(temp_df, ignore_index = True) return df_return.dropna() # Train Dataframe of all values train_df = transform_to_df(train_dict) # Test Dataframe of all values test_df = transform_to_df(test_dict) # Run Dataframe to predict values for HomeRuns run_df = transform_to_df(run_dict) def evaluate_model(clf, label, perf_data, X_train, X_test, y_train, y_test): experiment_id = len(perf_data) for i in range(30): if(label == 'BayesianRidge'): y_train = np.ravel(y_train) _ = clf.fit(X_train, y_train) y_test_pred = clf.predict(X_test) perf_data[experiment_id]['model'] = label perf_data[experiment_id]['test_accuracy'] = clf.score(X_test, y_test) perf_data[experiment_id]['r2'] = r2_score(y_test, y_test_pred) perf_data[experiment_id]['explained_variance'] = explained_variance_score(y_test, y_test_pred) perf_data[experiment_id]['max_error'] = max_error(y_test, y_test_pred) experiment_id = experiment_id + 1 return perf_data # + from sklearn.model_selection import train_test_split # Models from sklearn.linear_model import LinearRegression, BayesianRidge from sklearn import tree from sklearn import linear_model from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import PolynomialFeatures # Training set x_train = train_df[['games', 'ab', 'h', 'bb', 'so', 'avg', 
'obp', 'slg']] y_train = train_df[['hr']].replace('0','1') # Ensure that all values are numerics y_train['hr'] = pd.to_numeric(y_train['hr']) # Testing Set x_test = test_df[['games', 'ab', 'h', 'bb', 'so', 'avg', 'obp', 'slg']] y_test = test_df[['hr']].replace('0','1') # Ensure that all values are numerics y_test['hr'] = pd.to_numeric(y_test['hr']) # Models to test our data with models = { 'DecisionTree' : tree.DecisionTreeRegressor(), 'LinearRegression' : LinearRegression(), 'BayesianRidge': BayesianRidge(), 'Lasso' : linear_model.Lasso(alpha=0.1) } # Test our models model_perf_data = defaultdict(dict) for label, clf in models.items(): _= evaluate_model(clf, label, model_perf_data, x_train, x_test, y_train, y_test) df_perf = pd.DataFrame(model_perf_data).T df_metrics = [] for model in df_perf.model.unique(): y1 = df_perf[df_perf['model'] == model]['test_accuracy'] y2 = df_perf[df_perf['model'] == model]['r2'] y3 = df_perf[df_perf['model'] == model]['explained_variance'] y4 = df_perf[df_perf['model'] == model]['max_error'] df_metrics.append({ 'model' : model, 'test_accuracy' : y1.mean(), 'r2' : y2.mean(), 'explained_variance' : y3.mean(), 'max_error' : y4.mean(), }) pd_plot = pd.DataFrame(df_metrics) pd_plot # + import matplotlib.ticker as ticker import matplotlib.pyplot as plt import seaborn as sns perf_columns = ['test_accuracy', 'r2'] fig, ax = plt.subplots(1, 1, figsize=(10, 6)) sns.set_style("ticks") df = pd_plot.sort_values(by='test_accuracy', ascending=False) df = df.reset_index() metrics = ['test_accuracy', 'r2', 'explained_variance'] width = 1.0 / len(metrics) * 0.80 for i, col in enumerate(metrics): ax.bar(df.index + i * width, df[col], width) ax.set_xticks(df.index + width) ax.set_ylim(bottom=0.7, top=1) _ = ax.set_xticklabels(df['model']) ax.legend(metrics, loc='best') fig.tight_layout() # + # Prediction Set may need to scale for full season x_run = run_df[['games', 'ab', 'h', 'bb', 'so', 'avg', 'obp', 'slg']] # Scale run data for a full season # Only 60 games were played out of a normal 162 so we need to scale our data by a factor of 2.7 for rows in x_run: if((rows != 'avg') and (rows != 'obp') and (rows != 'slg')): x_run[rows] = pd.to_numeric(x_run[rows]) x_run[rows] = x_run[rows] * 2.7 x_run[rows] = x_run[rows].apply(np.floor) y_run = run_df[['hr']].replace('0','1') # Ensure that all values are numerics y_run['hr'] = pd.to_numeric(y_run['hr']) model = LinearRegression() model.fit(x_train, y_train) predictions = model.predict(x_run) final_df = [] i = 0 for each in run_df['player_name']: final_df.append({ 'player_name' : each, 'hr_prediction' : int(round(predictions[i][0])) }) i+=1 final_plot = pd.DataFrame(final_df) print(final_plot.tail(20)) final_plot = final_plot.sort_values('hr_prediction', ascending=False) final_plot.head(5) # + from sklearn.inspection import permutation_importance clf = LinearRegression() clf.fit(x_train,y_train) feature_names = ['games', 'ab', 'h', 'bb', 'so', 'avg', 'obp', 'slg'] # Compute permutation_importance permutated_importance_result = permutation_importance(clf, x_train, y_train, n_repeats=10) # Arrange the data for plotting feature_importance_sorted_idx = np.flip(np.argsort(clf.coef_)) s_feature_importances = pd.Series(clf.coef_[0], index=feature_names).sort_values(ascending=False) df_permutation_importances = pd.DataFrame(permutated_importance_result.importances[feature_importance_sorted_idx].T[0], columns=np.array(feature_names)[feature_importance_sorted_idx]) # Plot feature_importances and permutation_importance fig, ax = plt.subplots(1, 
2,figsize=(12, 8)) temp = [] for each in s_feature_importances.values: new_val = (each - min(s_feature_importances.values))/ (max(s_feature_importances.values)-min(s_feature_importances.values)) temp.append(new_val) _ = sns.barplot(x=s_feature_importances.values, y=s_feature_importances.index, ax=ax[0]) _ = ax[0].set_title('Logarithmic Scale') _ = ax[0].set(xscale="log") _ = ax[1].set_title('Normalized Scale') _ = sns.barplot(x=np.array(temp), y=s_feature_importances.index, ax=ax[1]) # -
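# Added diagnostic sketch: plot predicted against actual home-run totals on the
# 2018-2019 test set for the LinearRegression fitted as `clf` above, which makes
# it easier to see where the large max_error values come from.

# +
test_pred = clf.predict(x_test)

fig, ax = plt.subplots(figsize=(6, 6))
ax.scatter(y_test['hr'], test_pred, alpha=0.5)
lims = [0, max(y_test['hr'].max(), float(test_pred.max()))]
ax.plot(lims, lims, 'k--', linewidth=1)
ax.set_xlabel('Actual HR')
ax.set_ylabel('Predicted HR')
_ = ax.set_title('LinearRegression: predicted vs actual (test set)')
# -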
notebooks/Yates_Connor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.5 64-bit # name: python3 # --- import librosa import librosa.display import IPython.display import pandas as pd import os import numpy as np df=pd.read_csv(r"C:\Users\prana\Downloads\UrbanSound8K\metadata\UrbanSound8K.csv") # df df.isnull().sum() randomAudio=r"C:\Users\prana\Downloads\UrbanSound8K\audio\fold3\6988-5-0-3.wav" libAudio,sr=librosa.load(randomAudio) librosa.display.waveplot(libAudio,sr) IPython.display.Audio(randomAudio) df["class"].value_counts() def feature_extractor(file): audio_data,sample_rate=librosa.load(file,res_type="kaiser_fast") mfcc_features=librosa.feature.mfcc(y=audio_data,sr=sample_rate,n_mfcc=50) mfcc_mean=np.mean(mfcc_features.T,axis=0) return mfcc_mean # + audio_dataset=r"C:\Users\prana\Downloads\UrbanSound8K\audio" from tqdm import tqdm extracted_features=[] for i,row in tqdm(df.iterrows()): file=os.path.join(os.path.abspath(audio_dataset),"fold"+str(row["fold"]) + "/",str(row["slice_file_name"])) class_labels=row["class"] data=feature_extractor(file) extracted_features.append([data,class_labels]) # - audio_data=pd.DataFrame(extracted_features,columns=["features","class"]) from sklearn.preprocessing import LabelEncoder le=LabelEncoder() audio_data["class"]=le.fit_transform(audio_data["class"]) x=np.array(audio_data["features"].tolist()) y=np.array(audio_data["class"].tolist()) from sklearn.model_selection import train_test_split as tts train_x,test_x,train_y,test_y=tts(x,y,test_size=0.1,random_state=20) from sklearn.linear_model import LogisticRegression lr=LogisticRegression() lr.fit(train_x,train_y) print("Test score =",100*lr.score(test_x,test_y),"%") pred_lr=lr.predict(test_x) pred_lr=pd.DataFrame(pred_lr,columns=["pred"]) # + from sklearn.tree import DecisionTreeClassifier dt=DecisionTreeClassifier() dt.fit(train_x,train_y) print("test score=",100*dt.score(test_x,test_y),"%") pred_dt=dt.predict(test_x) pred_dt=pd.DataFrame(pred_dt,columns=["pred"]) # - from sklearn.ensemble import RandomForestClassifier rf=RandomForestClassifier() rf.fit(train_x,train_y) print("test score=",100*rf.score(test_x,test_y),"%") pred_rf=rf.predict(test_x) pred_rf=pd.DataFrame(pred_rf,columns=["pred"]) from xgboost import XGBClassifier xg=XGBClassifier() xg.fit(train_x,train_y) print("test score=",100*xg.score(test_x,test_y),"%") pred_xg=xg.predict(test_x) pred_xg=pd.DataFrame(pred_xg,columns=["pred"]) from statistics import mode ensembled_pred=[] for i in range(0,len(test_x)): ensembled_pred.append(mode([pred_lr["pred"][i],pred_dt["pred"][i],pred_rf["pred"][i],pred_xg["pred"][i]])) from sklearn.metrics import accuracy_score accuracy_score(ensembled_pred,test_y)
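# The manual mode() vote above can also be written with scikit-learn's
# VotingClassifier using hard voting. This optional sketch refits the four models
# from scratch, so the score should be similar but not necessarily identical to
# the hand-rolled ensemble.

# +
from sklearn.ensemble import VotingClassifier

voting = VotingClassifier(
    estimators=[("lr", LogisticRegression()),
                ("dt", DecisionTreeClassifier()),
                ("rf", RandomForestClassifier()),
                ("xgb", XGBClassifier())],
    voting="hard")
voting.fit(train_x, train_y)
print("Voting ensemble test score =", 100 * voting.score(test_x, test_y), "%")
# -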
bou bou.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Les espaces de stockage # Il y a plusieurs types de stockage à l'Alliance de recherche numérique du Canada : # * Personnel (`/home`) # * Temporaire local (`$SLURM_TMPDIR`) # * Temporaire réseau (`/scratch`) # * Projet partagé (`/project`) # * *Nearline* de longue durée (`/nearline`) # * [Dépôt fédéré de données de recherche](https://www.frdr-dfdr.ca/repo/) (DFDR) # # Or, vos données ont plusieurs aspects : # * **Taille** : petits, gros, très gros fichiers # * **Nombre** : peu nombreux ou en très grand nombre # * **Transférabilité** : données regroupées et/ou compressées # * **Vie** : pendant une tâche, entre des tâches, d'un projet à l'autre, à archiver # * **Niveau d'accès** : données confidentielles, partagées, publiées # # Le but de ce chapitre est de pouvoir considérer tous les aspects nécessaires à la gestion des données actives sur les différents espaces de stockage disponibles. # ## Description de vos données # Au premier chapitre, nous avons vu comment obtenir le nombre de # fichiers de nos données locales, de même que la taille totale. # # Sur une grappe de calcul, un **système de fichiers parallel** # [*Lustre*](https://docs.computecanada.ca/wiki/Tuning_Lustre/fr) ou *GPFS* # est surtout optimisé pour des fichiers de plus de 10 Mo, # c'est-à-dire pour des accès à large bande-passante. Ainsi, à l'échelle des # [grappes de calcul](https://docs.computecanada.ca/wiki/B%C3%A9luga#Stockage) : # # * 1 Ko - 100 Ko : très petit fichier # * 1 Mo - 10 Mo : petit fichier # * 100 Mo - 1 Go : taille raisonnable # * 10 Go - 100 Go : gros fichier # * 1 To - 10 To : très gros fichier # À quantité totale égale d'octets, il est plus **difficile pour *Lustre* # de gérer un grand nombre** de petits fichiers qu'un plus petit nombre # de grands fichiers. Ainsi, du point de vue du système de stockage : # # * Par dossier : # * 1 à 100 fichiers par dossier : c'est raisonnable # * 300 à 3k fichiers par dossier : une lourdeur se fait sentir # * 10k fichiers et plus : il faut classer les fichiers dans des sous-dossiers # * Par espace [limité par un quota](https://docs.computecanada.ca/wiki/Storage_and_file_management/fr#Quotas_et_politiques) : # * 1 à 500k fichiers : sauvegarde facilement à jour # * 1M à 5M fichiers : sauvegarde potentiellement plus longue que 24h # * 10M fichiers et plus : sauvegarde plus longue que 24h # ### Transférabilité # #### Regroupement dans un fichier archive # Étant donné que le transfert ou la synchronisation de # [plusieurs petits fichiers](https://docs.computecanada.ca/wiki/Handling_large_collections_of_files/fr) # implique un accès fréquent aux métadonnées du système de stockage # *Lustre*, il est donc préférable de # [regrouper les fichiers](https://docs.computecanada.ca/wiki/Archiving_and_compressing_files/fr) # d'un jeu de données avant tout transfert. 
Pour ce faire, il existe # plusieurs formats décodables sur les différentes grappes : # # **WinZip** ou **Zip** (`.zip`) - compressé # ```Bash # man zip # q pour quitter # ``` # # **7-Zip** (`.7z`) - compressé # ```Bash # man 7z # q pour quitter # ``` # # **Tape ARchive** ou [**TAR**](https://docs.computecanada.ca/wiki/A_tutorial_on_%27tar%27/fr) (`.tar`) - non compressé, sauf si utilisé avec Gzip ou autre # ```Bash # man tar # q pour quitter # ``` # # **Disk ARchive** ou [**DAR**](https://docs.computecanada.ca/wiki/Dar/fr) (`.dar`) - compression configurable # ```Bash # man dar # q pour quitter # ``` # #### Compression # La [compression des fichiers](https://docs.computecanada.ca/wiki/Archiving_and_compressing_files/fr) # a un **coût en temps CPU** à chaque fois que nous voulons lire ou écrire # les données. Ainsi, il vaut mieux utiliser la compression **lorsque # c'est "avantageux"** en espace et/ou en temps. Par exemple : # * Décompresser de nombreux fichiers sur le noeud de calcul plutôt que # de lire chaque petit fichier de *Lustre*. # # Concernant les fichiers binaires tels que FLAC, JPEG, PDF, PNG, etc., # leur encodage **inclut déjà une compression** des données. # * Selon le type de données que vous utilisez dans votre domaine de recherche, il se pourrait qu'elles soient déjà compressées. À vous de vérifier. # * Habituellement, on gagne peu d'espace à compresser davantage ces fichiers. # **Exercice - DAR** # # Dans un terminal : # # ```Bash # dar -w -c arch -g donnees -g images # ``` # * `-w` : pour taire un avertissement d'écrasement d'archive existante # * `-c` : pour créer l'archive `arch` # * `-g` : pour spécifier un fichier ou dossier à la fois # # ```Bash # # ls arch* # dar -l arch # ``` # * `-l` : pour lister les fichiers dans l'archive DAR `arch` # * Note : **ne pas** fournir le nom d'une découpe avec l'extension `.1.dar` # # ```Bash # dar -w -z -c arch -g donnees -g images # ``` # * `-z` : pour activer la compression pour tous les fichiers # * Voir le résultat avec `dar -l arch` # # ```Bash # dar -w -z -Z '*.png' -c arch -g donnees -g images # ``` # * `-Z '*.png'` : pour exclure les fichiers PNG # * Voir le résultat avec `dar -l arch` # **Les deux (2) types de compression** : # # a) Compression explicite : # * Lorsque vous utilisez un outil de compression (`gzip`, `zip`, etc.) # * Lorsque l'encodage permet de compresser les données # (`dar`, bibliothèque `zlib`, etc.) # * Lors d'un transfert avec `rsync`, il existe des options permettant # de compresser automatiquement les données lors du transfert. # Cela étant dit, le transfert risque d'être limité par la vitesse du processeur... # # ```Bash # man rsync # Chercher: /compress + n, Quitter: q # ``` # b) Compression implicite : # * Lorsque le **système de fichiers compresse automatiquement** vos # données avant de les écrire sur le système de stockage : # * La commande `du` montre l'espace réellement utilisée sur le disque # * La commande `du --apparent-size` montre la taille normale du fichier # * Lorsque le format du fichier de données inclut une compression des # données. 
Voici quelques exemples : # * [Hierarchical Data Format - HDF5](https://docs.computecanada.ca/wiki/HDF5/fr) - format standard pour données scientifiques # * [Apache Parquet](https://parquet.apache.org/documentation/latest/) - stockage compressé de données tabulaires # * Note: l'autre format, [Apache Feather](https://arrow.apache.org/docs/python/feather.html), ne compresse pas vraiment les données, sauf si on considère que c'est un format binaire prenant moins de place # **Exercice - Espace utilisé** # # Dans un terminal Jupyter, essayez les commandes : # ```Bash # du -s ~ # du -s --apparent-size ~ # ``` # # * Notez les différences, s'il y en a # * Notez à quel point cette commande peut surcharger le système de # stockage; le survol récursif des fichiers constitue un grand nombre # d'accès aux métadonnées de *Lustre* # # En pratique, les grappes de calcul offrent des outils efficaces pour # mesurer l'espace utilisé en fonction de l'espace alloué. # Nous verrons cela à la fin du chapitre. # #### Fichiers épars (sparse files) # Restez à l'affût des fichiers épars dont la faible taille sur le disque n'est pas représentative de la taille normale du fichier : # * Une certaine taille `T` est allouée pour le fichier. # Par exemple : 1 To # * Seulement `t` octets sont réellement écrits dans le fichier # (où `t << T`). Par exemple : 1 Go # * Ce type de fichiers est typiquement utilisé pour des volumes de # stockage de machines virtuelles, ce qui économise de l'espace sur le # serveur hôte # * Problème : lors d'un transfert ou d'une sauvegarde (*backup*), # ce sont les `T` octets qui sont lus, ce qui peut être catastrophique # si `T` est vraiment très grand # * **Solution** : les fichiers `.dar` permettent d'optimiser # l'encapsulation de fichiers épars avant un transfert ou une copie # ### Plan de gestion des données actives # Les données sur les grappes de calcul sont généralement présentes en attente d'être traitées, manipulées ou publiées, d'où la notion de *données actives*. Or, il y a une planification à faire et des questions à se poser. # #### Vie des données # * **Durée de vie variable** - avons-nous affaire à des données temporaires ou à des données persistantes? # * Utilisées (à répétition ou non) pendant une seule tâche de calcul? # * Utilisées entre deux tâches de calcul? # * Utilisées pour plusieurs calculs par plusieurs personnes? # * Peut-on importer à nouveau les données en cas de perte? # * Peut-on recalculer les données en cas de perte? Si oui, à quel coût? # * **Fréquence d'accès** - combien de temps sépare les différents accès? # * Utilisation courante (à chaque jour, semaine ou mois)? # * Données à garder pour plus tard, par exemple pour reproduire les résultats? # * Données à publier sur des dépôts de données de recherche? # * **Urgence d'accès** - à propos de données critiques ou importantes : # * Combien y a-t-il de copies? # * Doivent-elles être accessibles en tout temps? # Si aucune planification n'est faite, les données temporaires s'accumulent et prennent de la place dans l'espace alloué qui est de taille limitée. Si les données sont mal décrites, leur identification, leur compréhension et leur publication deviennent des tâches laborieuses pour les personnes qui en héritent. # ```Bash # # cat .gitignore # Fichiers à ignorer, car temporaires # # cat README.md # Description du projet # ``` # #### Niveaux d'accès # * Quel est le **niveau de confidentialité** de vos données? # * **Qui** devrait avoir accès à quelles données? # * **Où** pouvez-vous héberger vos données? 
# * **Quand** : qui devrait avoir accès à vos données à votre départ? # Et à partir de quand peut-on publier les données? # # Référez-vous aux **politiques de votre université** concernant la gestion # des données de recherche. # Sans précaution, les données que vous laissez sur les grappes de # calcul risquent de ne pas être accessibles aux membres d'un # laboratoire qui poursuivent leur recherche. # # Dès que possible, mettez en place les accès nécessaires pour le # [partage de vos données](https://docs.computecanada.ca/wiki/Sharing_data/fr). # ## Les différents types de stockage # Stockage accessible selon le type de noeud : # noeud de **connexion** (ou interactif) ou # noeud de **calcul** (CPU ou GPU) : # # | Stockage | Connexion | Calcul | # |-----------------|:---------:|:-------:| # | `/home` | Oui | Oui* | # | `$SLURM_TMPDIR` | **Non** | Oui | # | `/scratch` | Oui | Oui | # | `/project` | Oui | Oui | # | `/nearline` | Oui | **Non** | # `*` En lecture-seule sur Cedar # # À propos des [différents espaces de stockage](https://docs.computecanada.ca/wiki/Storage_and_file_management/fr#Types_de_stockage). # ### Votre dossier personnel (`$HOME`) # ```Bash # # ls -a # # ls -la # # ls -ld $HOME # ``` # * **Point d'entrée** par défaut lors d'une connexion à une grappe de calcul # * [Quota d'espace relativement petit](https://docs.computecanada.ca/wiki/Storage_and_file_management/fr#Quotas_et_politiques), mais accepte un nombre de fichiers relativement grand # * Idéal pour la [compilation et l'installation de logiciels](https://docs.computecanada.ca/wiki/Installing_software_in_your_home_directory/fr) # ### Dossier temporaire local (`$SLURM_TMPDIR`) # ```Bash # # ls -ld $SLURM_TMPDIR # salloc # À partir de login1 # # # ls -ld $SLURM_TMPDIR # df -h $SLURM_TMPDIR # exit # ``` # * [Stockage local très rapide](https://docs.computecanada.ca/wiki/Using_node-local_storage/fr), mais limité à la durée de la tâche de calcul # * Faible latence à comparé *Lustre* # * Grande bande-passante, en particulier pour les petits fichiers # * **Données supprimées à la fin** de la tâche de calcul # * Si plusieurs noeuds travaillent sur un même calcul parallèle, **chaque noeud a son propre dossier** `$SLURM_TMPDIR` # * Cas d'utilisation : # * **Importation** de plusieurs **petits fichiers** qui seront utilisés à répétition lors d'un calcul # * **Sauvegarde** de fichiers qui sont **constamment modifiés** - à exporter vers Lustre à la fin du calcul # ### Espace réseau pour données temporaires (`$SCRATCH`) # ```Bash # df -h /scratch # # ls -ld $SCRATCH # # # Enlever l'accès en lecture # chmod g-r $SCRATCH # # ls -ld $SCRATCH # # # Modifier le GID du répertoire # id # chgrp def-sponsor00 $SCRATCH # # ls -ld $SCRATCH # ``` # ```Bash # # Création d'un dossier de résultats partagés # # mkdir -p $SCRATCH/partage/resultats # # ls -lR $SCRATCH # # # Ajouter les permissions r (et x, s'il y a lieu) récursivement # chmod -R o+rX $SCRATCH/partage # # ls -lR $SCRATCH # ``` # ```Bash # # Visiter le dossier partagé de quelqu'un d'autre # # cd /scratch/userXY/partage # pwd # # ls -l # # cd $HOME # ``` # * Espace de stockage réseau de [grande capacité](https://docs.computecanada.ca/wiki/Storage_and_file_management/fr#Quotas_et_politiques) pour des **données temporaires** # * **Pas** sauvegardé # * [Purge mensuelle](https://docs.computecanada.ca/wiki/Scratch_purging_policy/fr) pour les données âgées de plus de 60 jours # * Performance variable selon l'utilisation de l'ensemble des utilisateurs # * Cas d'utilisation : # * Utilisation de 
données **pendant quelques jours** seulement # * Stocker temporairement des **résultats en grand nombre** de fichiers # * Stocket des résultats **intermédiaires** qui sont **trop gros** pour `/project` # ### Espace projet partagé (`/project`) # ```Bash # # ls -ld /project # # ls -ld /project/def-sponsor00 # # ls -l /project/def-sponsor00 # ``` # * Espace de stockage réseau de [petite à grande capacité](https://docs.computecanada.ca/wiki/Storage_and_file_management/fr#Quotas_et_politiques) pour des **données de projet** # * Un espace projet par défaut par groupe de recherche (sauf sur Niagara) # * Petite augmentation d'espace projet **sur simple demande (jusqu'à 10 To)** # * Quota plus élevé si [allocation spéciale](https://www.computecanada.ca/page-daccueil-du-portail-de-recherche/acces-aux-ressources/concours-dallocation-des-ressources/ressources-pour-les-groupes-de-recherche-crgr/?lang=fr) # * Sauvegardé **quotidiennement** # * Nombre de fichiers relativement limité (500k par défaut) # * Données de projet : # * Potentiellement **partagées** - [configuration des ACLs](https://docs.computecanada.ca/wiki/Sharing_data/fr) # * Durée de vie en fonction de la durée d'un projet # * Typiquement plus importantes que les données temporaires # * Cas d'utilisation : # * Stockage de jeux de données **réutilisés sur plusieurs mois et/ou par plusieurs personnes** # * Stockage de **résultats finaux** coûteux à reproduire # ### Stockage très longue durée (`/nearline`) # Interface de stockage sur disque : # * On peut voir les fichiers avec la commande `ls` # * Les données les plus anciennes dans # [`/nearline` sont probablement sur ruban](https://docs.computecanada.ca/wiki/Using_nearline_storage/fr) # * Voir les commandes pour [diagnostiquer l'état des fichiers](https://docs.computecanada.ca/wiki/Using_nearline_storage/fr#Transf.C3.A9rer_des_donn.C3.A9es_.C3.A0_partir_de_.2Fnearline) dans `/nearline` # # À considérer : # * D'une part, la migration de données sur ruban réduit l'espace # utilisé sur les disques - économie d'argent pour le stockage # * D'autre part, chaque lecture de fichier migré sur ruban créera # **une requête bloquante** causant un temps de réponse de quelques # minutes à quelques heures (si le système d'archivage est surchargé # de requêtes) # * Voilà pourquoi il est impératif d'y sauvegarder un faible nombre # de très grands fichiers # * **À éviter** : y copier de nombreux petits fichiers avant de les # regrouper dans un fichier d'archive # # Cas d'utilisation : # * Les commandes `7z`, `dar`, `tar` et `zip` devraient uniquement # regrouper des fichiers provenant de `/project` ou de `/scratch` # * Stockage de données importantes **qui ne seront pas utilisées # pendant plusieurs mois** # ### Exemple de cycle de vie des données # ![Pipeline 1](images/data-flow-1.png) # # Description des différentes étapes : # * Téléchargement des données dans `/scratch` # * Pour une utilisation de quelques jours à quelques semaines # * Nul besoin de les protéger davantage # * Soumettre une série de tâches de calcul # * Une tâche par fichier dans `dossier` dans la partition `/scratch` # * Le script de tâche est dans la partition `/home` # * Utilisation des variables `$FIC` et `$SLURM_TMPDIR` pour copier le fichier à traiter localement sur le noeud de calcul # * Se déplacer dans le dossier local # * Y configurer un environnement Python # * Exécuter le code Python présent dans `/home`, fournir le nom du fichier à traiter et rediriger toutes les sorties dans un fichier local # * Rapatrier le fichier de 
résultats dans un dossier dans `/scratch` # * Post-traitement - traiter tous les fichiers de résultats afin de ne garder que l'essentiel dans `/project` # #### Exercice - Exécution d'un pipeline # * Étudier les scripts : # * `scripts/blastn-pipeline.sh` # * `scripts/blastn-traitement.sh` # * Lancer le pipeline avec la commande : # # ```Bash # bash scripts/blastn-pipeline.sh # ``` # # * Surveiller les tâches avec `squeue -u $USER` # * Trouver les fichiers créés dans : # * `$SCRATCH/donnees` et # * `$SCRATCH/donnees/res_prll` # * Lancer le post-traitement avec la commande : # # ```Bash # bash scripts/blastn-traitement.sh # ``` # * Trouver le fichier TSV dans l'espace projet # ## Gestion du stockage # Avec le temps, les données s'accumulent. Il devient alors nécessaire de surveiller l'espace utilisé, de même que le nombre de fichiers qui s'y trouvent. # ``` # df -h /project # df -hi /project # ``` # # * [La commande `diskusage_report`](https://docs.computecanada.ca/wiki/Storage_and_file_management/fr#Introduction) permet de générer un court rapport sur l'espace utilisé et le nombre de fichiers de chacun des espaces de stockage auxquels vous avez accès # * À chaque jour, un rapport de consommation de l'espace `/project` par utilisateur est produit : # * Sur Béluga : dans `/project/.stats/<nom-allocation>` # * Sur Cedar : dans `/project/.stats/<nom-allocation>.json` # * Disponible sur demande pour Graham et Niagara # * Sur Béluga et Narval, il existe aussi un outil plus avancé : # [`diskusage_explorer`](https://docs.computecanada.ca/wiki/Diskusage_Explorer/fr) # * Utilise les données en format SQLite générées quotidiennement dans `/project/.duc_databases/` # * Permet d'avoir des statistiques de consommation du stockage par répertoire, ce qui évite d'utiliser la commande `du -sh *` # **En cas de données inaccessibles** : # * Une professeure ou un professeur peut demander à ce que les données soient supprimées # * Par contre, pour obtenir l'accès aux données, il faut avoir le consentement de la personne qui les a isolées (probablement involontairement) # * En cas d'absence de réponse, c'est la politique de l'université du groupe de recherche qui permettra ou non d'obtenir l'accès aux données # # Dans tous les cas, il vaut mieux prévenir, et ce, dès l'importation de données sur les grappes de calcul. # ## Points à retenir # * Le système de fichiers *Lustre* est optimisé pour des gros fichiers # (+10 Mo) # * Ne pas mettre trop de fichiers et dossiers dans un même répertoire # (maximum 1000 items) # * Pour le transfert de données et l'utilisation du *Nearline*, # il vaut mieux regrouper les données dans un fichier archive # (comme Zip, DAR, etc.) # * Dans l'espace projet, il faut planifier qui devrait avoir accès à quoi et quand # * Pour optimiser les tâches, utiliser `$SLURM_TMPDIR` # * La commande `diskusage_report` pour un aperçu de l'espace utilisé # * Pour les données et codes critiques : # * avoir une copie ailleurs et # * utiliser un gestionnaire de versions.
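# Added sketch (in Python rather than the Bash used above): before bundling data
# with `dar` or `tar`, it can help to count files and total size per
# sub-directory, which is the kind of information discussed in the data
# description section of this notebook. The starting path is a placeholder to
# adapt; scanning a very large tree puts load on the filesystem, as noted above.

# +
from pathlib import Path

root = Path(".")  # placeholder: point this at the directory to inspect

for subdir in sorted(p for p in root.iterdir() if p.is_dir()):
    files = [f for f in subdir.rglob("*") if f.is_file()]
    size_gb = sum(f.stat().st_size for f in files) / 1e9
    print(f"{subdir.name}: {len(files)} files, {size_gb:.2f} GB")
# -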
4-stockage.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This section shows the process of getting the data from Billboards WebSite and Twitter using `selenium` web drive and the `snscrape` library. # we need to import some packages. If you do not have intelled a specific package, you can install it using either - conda install or pip install. import requests # to gather online data using methods such as GET and POST import time # time modules import os #operational system from bs4 import BeautifulSoup # html interp from selenium import webdriver #browser simulator from selenium.webdriver.chrome.options import Options import pandas as pd import numpy as np import itertools import glob import sqlalchemy from sqlalchemy import create_engine import snscrape.modules.twitter as sntwitter #twitter scrapper library from nltk.sentiment import SentimentIntensityAnalyzer #sentiment analyzer # The billboard website is written in Javascript and doesn't have an open API. This website feature prevents us from using the library Requests directly to get the information we wanted. As everything comes with a cost, this method takes a long while to get the data. We need to set up a WebDrive browser simulator to overcome this issue. I am using a chromedriver simulator (in case you don't have it on your P.C, you can download the web driver [here](https://chromedriver.chromium.org/downloads)). With this in mind, we can now set up the chrome simulator using the code below. os.chdir('C:/Users/joaom/Desktop') # change directory chrome_driver = 'C://Users//joaom//Desktop//chromedriver' #set the webdriver #Set up chromedrive options chrome_options = Options() chrome_options.add_argument("--headless") chrome_options.add_argument("--window-size=1366x768") #set the browser simulator driver = webdriver.Chrome(options=chrome_options) driver # If you want to save this data straight to your `SQL` server you can run this code below with your `SQL` server user and password. engine = create_engine("mysql://User:password@localhost/database") con = engine.connect() # To save the data in a `SQL` server, we need to treat the strings because some symbols are not allowed in the `SQL` language. To do this, I created two functions: the first one is to strip the accents of a text using unicodedata, and the second one, we use Regular Expressions to substitute the not-allowed symbols in `SQL`. # + ## These functions are important to strip accents and other special characteres from our data #before inserting them into MySQL import re import unicodedata def strip_accents(text): """ Strip accents from input String. """ try: text = unicode(text, 'utf-8') except (TypeError, NameError): # unicode is a default on python 3 pass text = unicodedata.normalize('NFD', text) text = text.encode('ascii', 'ignore') text = text.decode("utf-8") return str(text) def text_to_sql(text): """ Convert input text to id. """ text = strip_accents(text.lower()) text = re.sub('[^@$!?&.#0-9a-zA-Z_-]'," ", text) text= text.lstrip() return text # - # After investigating the Billboards website HTML code look for the charts table, we could create the web crawler. With the following function, we can get the charts of any given week on the Hot 100 historical charts. 
def get_charts(weekdate): url= "https://www.billboard.com/charts/hot-100" +'/'+weekdate driver.get(url) html_source = driver.page_source soup = BeautifulSoup(html_source, 'lxml') data=[] weekdate=weekdate for music in range(100): rawdata= soup.find_all('div', attrs={'class' : 'o-chart-results-list-row-container'})[music].text.replace('NEW','').replace('ENTRY','').replace('RE-','').splitlines() fields={} x=[] for i in rawdata: if i != '': text= strip_accents(i) text=text_to_sql(text) x.append(text) fields['position']=x[0] fields['music']=x[1] fields['artist']=x[2] fields['lastweek']=x[3] fields['peak'] = x[4] fields['weeks'] = x[5] fields['weekdate']=weekdate data.append(fields) data = pd.DataFrame(data) return data # But the weekly charts are not the only data we need. We have to get the tweets that contain the songs on the chart in its corpus and get the relevant information in those tweets, for instance: the tweet content and the number of favorites and retweets. So with the following function, we can get the data from billboard and Twitter merged in one dataset just inserting a date. Notice the tweets are collected from one week lag to the inserted week date in the YY-MM-DD format because the alleged tweets' impact on Billboard charts is measured before the weekly charts are updated. The function uses `snscrapper` to overcome Twitter's API limits. The data can be stored in a `SQL` server or saved as Pickle or any other data file supported by `Pandas`. def get_weekly_data(weekdate): charts = get_charts(weekdate) weekdate=pd.to_datetime(weekdate) def get_tweets(charts, weekdate): data = charts.assign(search= lambda x: x['artist'] + ' AND ' + x['music'] ) tweets_list = [] for search in data['search']: music= search.split('AND ')[-1] until = weekdate.strftime('%Y-%m-%d') since = (weekdate - pd.to_timedelta(1, 'W')).strftime('%Y-%m-%d') query = search + ' lang:en since:{} until:{}'.format(since, until) for tweet in itertools.islice(sntwitter.TwitterSearchScraper(query).get_items(), 0,100,None): #getting 100 tweets fields = {} date = str(tweet.date) text = str(tweet.content) text = strip_accents(text) text = text_to_sql(text) username = str(tweet.user.username) favorites = str(tweet.likeCount) retweets = str(tweet.retweetCount) fields['datetime'] = date fields['usarname'] = username fields['text'] = text fields['favorites'] = favorites fields['retweets'] = retweets fields['music'] = music tweets_list.append(fields) tweets_df = pd.DataFrame(tweets_list) return tweets_df tweets = get_tweets(charts, weekdate) week_data = charts.merge(tweets, left_on="music", right_on="music", how="outer", indicator=True) week_data = week_data.drop('_merge', axis=1) week_data.to_pickle('C:/Users/joaom/Desktop/data/data_'+ weekdate.strftime('%Y')+'/data_'+ ''.join(weekdate.strftime('%Y-%m-%d').split('-'))+'.pkl') #week_data.to_sql(name= 'data_'+''.join(weekdate.strftime('%Y-%m-%d').split('-')),con=con,if_exists='replace') return week_data # For instance, we can get the data for my 2020's birthday. # # It's note worthing knowing that this process is not the most fast method to web scrap, so It will take couple of minutes to gathered the data. Birthday_data = get_weekly_data('2020-05-09') birthday_data.head() # Now we need to make the whole data gathering automatic. To do this, I created a function where you insert a year into it and all the weekly data of this year is returned. I used `pandas.Datetime` features and a simple `for loop` for accomplishing this. 
As the Billboard Hot 100 is updated every Thursday, the weeks passed to `get_weekly_data()` are all the Thursdays of a given year. Since Twitter launched in 2007, I decided to gather data from 2008 onward. def all_data(year): def all_thursday(year): return pd.date_range(start=str(year), end=str(year+1), freq='W-Thu').strftime('%Y-%m-%d').tolist() thursdays = all_thursday(year) for thursday in thursdays: get_weekly_data(thursday) for i in range(2008,2022): all_data(i) # Now, with the data in hand, we can move on to part two of this project: the data treatment, visualization, and some insights.
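# As a small preview of that next step (an added sketch, not part of the original
# workflow), the SentimentIntensityAnalyzer imported at the top can attach a VADER
# compound score to each tweet in the weekly dataframe fetched above (assigned as
# `Birthday_data`). The `vader_lexicon` resource has to be downloaded once.

# +
import nltk
nltk.download('vader_lexicon')

sia = SentimentIntensityAnalyzer()
Birthday_data['sentiment'] = Birthday_data['text'].fillna('').apply(
    lambda t: sia.polarity_scores(t)['compound'])
Birthday_data[['music', 'text', 'sentiment']].head()
# -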
1 - Getting the Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Setup
#
# Please download the DomainNet dataset into the `<DATA_ROOT>/DomainNet` directory,
# and download the pre-calculated features from our model from
# https://drive.google.com/drive/folders/1OvowfDCNCxPCAgaOi0nVDEpiB3AF2Uut?usp=sharing
# into the `<demo_features>` directory.

# +
# %load_ext autoreload
# %autoreload 2

import os

from utils.jupyter import demo

data_root = '<DATA_ROOT>/DomainNet'  # TODO: change to the local path of the DomainNet dataset
feat_dir = '<demo_features>'  # TODO: change to the path of the pre-calculated features

search_domains = {'real': os.path.join(feat_dir, 'src_real.pkl'),
                  'painting': os.path.join(feat_dir, 'src_painting.pkl'),
                  'clipart': os.path.join(feat_dir, 'src_clipart.pkl'),
                  'sketch': os.path.join(feat_dir, 'src_sketch.pkl')}
query_domains = {'real': os.path.join(feat_dir, 'dst_real.pkl'),
                 'painting': os.path.join(feat_dir, 'dst_painting.pkl'),
                 'clipart': os.path.join(feat_dir, 'dst_clipart.pkl'),
                 'sketch': os.path.join(feat_dir, 'dst_sketch.pkl')}
# -

# # Demo

# + pycharm={"name": "#%%\n"}
demo(query_domains, search_domains, data_root)
# -
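# For reference, the nearest-neighbour retrieval behind `demo` can be approximated
# in a few lines of NumPy. This is only an illustrative sketch: it assumes a
# gallery feature matrix of shape (n_images, d) and a single query vector, which
# may not match the exact layout stored in the pickle files above.

# +
import numpy as np


def top_k_cosine(query_vec, gallery_feats, k=5):
    # L2-normalise both sides so a dot product equals cosine similarity
    q = query_vec / np.linalg.norm(query_vec)
    g = gallery_feats / np.linalg.norm(gallery_feats, axis=1, keepdims=True)
    sims = g @ q
    idx = np.argsort(-sims)[:k]
    return idx, sims[idx]
# -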
demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:wildfires] * # language: python # name: conda-env-wildfires-py # --- # #### Setup from specific import * # ### Specify the experiments to compare experiments = ("best_top_15", "no_temporal_shifts") assert len(experiments) == 2 model_name = f"comparison_{'_'.join(experiments)}" figure_saver = figure_saver(sub_directory=model_name) # ### Load data experiment_data = load_experiment_data(experiments) # ### Check that the masks are aligned comp_masks = [experiment_data[experiment]["master_mask"] for experiment in experiments] assert all(np.all(comp_masks[0] == comp_mask) for comp_mask in comp_masks[1:]) comp_masks = [experiment_data[experiment]["endog_data"] for experiment in experiments] assert all(np.all(comp_masks[0] == comp_mask) for comp_mask in comp_masks[1:]) # ### Predict BA # + n_threads = get_ncpus() master_mask = experiment_data[experiments[0]]["master_mask"] target_ba = get_masked_array( experiment_data[experiments[0]]["endog_data"].values, master_mask ) predictions = {} errors = {} # GFED4 - prediction. for experiment, data in experiment_data.items(): print("Experiment:", experiment) data["model"].n_jobs = n_threads with parallel_backend("threading", n_jobs=n_threads): pred = data["model"].predict(data["exog_data"]) predictions[experiment] = get_masked_array(pred, master_mask) errors[experiment] = get_masked_array(data["endog_data"] - pred, master_mask) print("R2:", r2_score(data["endog_data"], pred)) print("MSE:", mean_squared_error(data["endog_data"], pred)) # - diffs = predictions[experiments[1]] - predictions[experiments[0]] error_mag_diff = np.abs(errors[experiments[1]]) - np.abs(errors[experiments[0]]) np.mean(error_mag_diff) plt.hist(get_unmasked(errors[experiments[0]]), bins=100) plt.yscale("log") plt.hist(get_unmasked(errors[experiments[1]]), bins=100) plt.yscale("log") plt.hist(get_unmasked(error_mag_diff), bins=100) plt.yscale("log") # Plotting params. 
figsize = (5.1, 2.8) mpl.rcParams["figure.figsize"] = figsize coast_linewidth = 0.3 date_str = "2010-01 to 2015-01" # + boundaries = [-1e-2, -1e-3, -1e-4, 0, 1e-4, 1e-3, 1e-2] fig = cube_plotting( diffs, title=f"BA <{experiments[1]} - {experiments[0]}>\n{date_str}", boundaries=boundaries, cmap="brewer_RdYlBu_11", cmap_midpoint=0, cmap_symmetric=True, colorbar_kwargs={"label": "Burned Area Fraction", "format": "%0.1e",}, coastline_kwargs={"linewidth": coast_linewidth}, ) figure_saver.save_figure(fig, f"ba_{model_name}", sub_directory="predictions") # + boundaries = [-1e-1, 0, 0.1 ** 0.5, 1, 1e1] fig = cube_plotting( diffs / predictions[experiments[0]], title=f"BA <{experiments[1]} - {experiments[0]}> / {experiments[0]}\n{date_str}", boundaries=boundaries, cmap="brewer_RdYlBu_11", cmap_midpoint=0, cmap_symmetric=False, colorbar_kwargs={"label": "1", "format": "%0.1e",}, coastline_kwargs={"linewidth": coast_linewidth}, ) figure_saver.save_figure(fig, f"rel_ba_{model_name}", sub_directory="predictions") # + abs_diffs = np.abs(diffs) vmax = np.max(abs_diffs) boundaries = [1e-5, 1e-4, 1e-3, 1e-2, vmax] fig = cube_plotting( abs_diffs, title=f"BA <|{experiments[1]} - {experiments[0]}|>\n{date_str}", boundaries=boundaries, cmap="YlOrRd", colorbar_kwargs={"label": "Burned Area Fraction", "format": "%0.1e",}, coastline_kwargs={"linewidth": coast_linewidth}, ) figure_saver.save_figure(fig, f"ba_mean_abs_{model_name}", sub_directory="predictions") # + boundaries = [0.1, 0.316, 1, 3.16, 10] fig = cube_plotting( np.abs(diffs) / predictions[experiments[0]], title=f"BA <|{experiments[1]} - {experiments[0]}|> / {experiments[0]}\n{date_str}", boundaries=boundaries, cmap="YlOrBr", colorbar_kwargs={"label": "1", "format": "%0.1e",}, coastline_kwargs={"linewidth": coast_linewidth}, ) figure_saver.save_figure( fig, f"rel_ba_mean_abs_{model_name}", sub_directory="predictions" ) # - # #### Comparison of the error between the experiments # + # boundaries = [0.1, 0.316, 1, 3.16, 10] for experiment, error in errors.items(): fig = cube_plotting( np.abs(error), title=f"BA <|Error({experiment})|>\n{date_str}", # boundaries=boundaries, cmap="YlOrBr", colorbar_kwargs={"label": "BA Fraction", "format": "%0.1e",}, coastline_kwargs={"linewidth": coast_linewidth}, ) figure_saver.save_figure( fig, f"error_mag_{experiment}_{model_name}", sub_directory="predictions" ) # + boundaries = [ np.min(error_mag_diff), -1e-2, -1e-3, 0, 1e-3, 1e-2, np.max(error_mag_diff), ] fig = cube_plotting( error_mag_diff, title=f"BA <|Error({experiments[1]})| - |Error({experiments[0]})|>\n{date_str}", boundaries=boundaries, cmap="brewer_RdYlBu_11", cmap_midpoint=0, cmap_symmetric=False, colorbar_kwargs={"label": "BA Fraction", "format": "%0.1e",}, coastline_kwargs={"linewidth": coast_linewidth}, ) figure_saver.save_figure( fig, f"error_mag_diff_{model_name}", sub_directory="predictions" ) # + # boundaries = [0.1, 0.316, 1, 3.16, 10] fig = cube_plotting( np.mean(error_mag_diff, axis=0) / np.mean( get_masked_array( experiment_data[experiments[0]]["endog_data"], experiment_data[experiments[0]]["master_mask"], ), axis=0, ), title=f"BA <(|Error({experiments[1]})| - |Error({experiments[0]})|)> / <GFED4>\n{date_str}", # boundaries=boundaries, vmin=-1, vmax=1, nbins=6, cmap="brewer_RdYlBu_11", cmap_midpoint=0, cmap_symmetric=False, colorbar_kwargs={"label": "1", "format": "%0.1e",}, coastline_kwargs={"linewidth": coast_linewidth}, ) figure_saver.save_figure( fig, f"rel_error_mag_diff_{model_name}", sub_directory="predictions" ) # - # ### Load all feature 
data experiment = "all" all_experiment_data = load_experiment_data((experiment,))["all"] # ### Correlations between diffs and other variables - virtually no correlation between the diffs and any of the features diff_data = get_unmasked(diffs) all_features = all_experiment_data["exog_data"].copy() all_features["Diffs"] = diff_data with figure_saver("diff_corr_plot"): corr_plot(shorten_columns(all_features), fig_kwargs={"figsize": (12, 8)}) # ### Correlations between rel. diffs and other variables - virtually no correlation between the diffs and any of the features rel_diff_data = get_unmasked(diffs / predictions[experiments[0]]) rel_all_features = all_experiment_data["exog_data"].copy() rel_all_features["Rel. Diffs"] = rel_diff_data with figure_saver("rel_diff_corr_plot"): corr_plot(shorten_columns(rel_all_features), fig_kwargs={"figsize": (12, 8)}) # ### Correlations between |diffs| and other variables - vritually no correlation between the diffs and any of the features abs_diff_data = np.abs(get_unmasked(diffs)) abs_all_features = all_experiment_data["exog_data"].copy() abs_all_features["|Diffs|"] = abs_diff_data with figure_saver("abs_diff_corr_plot"): corr_plot(shorten_columns(abs_all_features), fig_kwargs={"figsize": (12, 8)}) # ### Correlations between rel. |diffs| and other variables - virtually no correlation between the diffs and any of the features rel_abs_diff_data = np.abs(get_unmasked(diffs / predictions[experiments[0]])) rel_abs_all_features = all_experiment_data["exog_data"].copy() rel_abs_all_features["Rel. |Diffs|"] = rel_abs_diff_data with figure_saver("rel_abs_diff_corr_plot"): corr_plot(shorten_columns(rel_abs_all_features), fig_kwargs={"figsize": (12, 8)})
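# Added sketch: collect the R2 and MSE values printed in the prediction loop above
# into a small table so the two experiments can be compared side by side. It
# repeats the same predict call as before, only gathering the scores into a
# DataFrame.

# +
import pandas as pd

summary = []
for experiment, data in experiment_data.items():
    with parallel_backend("threading", n_jobs=n_threads):
        pred = data["model"].predict(data["exog_data"])
    summary.append({
        "experiment": experiment,
        "R2": r2_score(data["endog_data"], pred),
        "MSE": mean_squared_error(data["endog_data"], pred),
    })
pd.DataFrame(summary)
# -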
analyses/seasonality_paper_st/comparisons/prediction_comparison_best_top_15__no_temporal_shifts.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Saturday, March 14, 2020
# ### HackerRank - Dictionaries and Hashmaps: Two Strings
# ### Problem: https://www.hackerrank.com/challenges/two-strings/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=dictionaries-hashmaps
# ### Blog: https://somjang.tistory.com/entry/HackerRank-Dictionaries-and-Hashmaps-Two-Strings-Python

# ### First attempt

# +
# #!/bin/python3

import math
import os
import random
import re
import sys


# Complete the twoStrings function below.
def twoStrings(s1, s2):
    answer = "NO"

    s1_set = set(list(s1))
    s2_set = set(list(s2))

    intersection = s1_set.intersection(s2_set)

    if len(intersection) != 0:
        answer = "YES"

    return answer
# -
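# Quick check (added): the set-intersection approach runs in O(len(s1) + len(s2)).
# Two hypothetical sample calls illustrate the expected output.

# +
print(twoStrings("hello", "world"))  # shares 'l' and 'o' -> YES
print(twoStrings("hi", "world"))     # no common character -> NO
# -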
DAY 001 ~ 100/DAY037_[HackerRank] Dictionaries and Hashmaps Two Strings (Python).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.017531, "end_time": "2021-01-10T15:55:19.437237", "exception": false, "start_time": "2021-01-10T15:55:19.419706", "status": "completed"} tags=[] # ## Parameter # + papermill={"duration": 0.017508, "end_time": "2021-01-10T15:55:19.467726", "exception": false, "start_time": "2021-01-10T15:55:19.450218", "status": "completed"} tags=["parameters"] # These parameters can be injected from Papermill model_type = "pre_ln" train_file = "wikitext-103-raw/wiki.train.raw" valid_file = "wikitext-103-raw/wiki.valid.raw" epochs = 10 batch_size = 2 max_learning_rate = 1e-4 warmup_steps = 0 save_model_dir = "tfchat_model" clipnorm = 1.0 fp16 = False config_cls = "tfchat.configs.GPT2SmallConfig" # + papermill={"duration": 0.016088, "end_time": "2021-01-10T15:55:19.496181", "exception": false, "start_time": "2021-01-10T15:55:19.480093", "status": "completed"} tags=["injected-parameters"] # Parameters model_type = "post_ln" save_model_dir = "tfchat_model-post_ln-lr_e4" # + papermill={"duration": 0.015862, "end_time": "2021-01-10T15:55:19.524301", "exception": false, "start_time": "2021-01-10T15:55:19.508439", "status": "completed"} tags=[] # Assert parameters assert model_type in ["pre_ln", "post_ln", "min_gpt", "transformers"] # + [markdown] papermill={"duration": 0.012356, "end_time": "2021-01-10T15:55:19.549103", "exception": false, "start_time": "2021-01-10T15:55:19.536747", "status": "completed"} tags=[] # ## Installation # + papermill={"duration": 5.050274, "end_time": "2021-01-10T15:55:24.611650", "exception": false, "start_time": "2021-01-10T15:55:19.561376", "status": "completed"} tags=[] # !apt install -y git # !pip install git+https://github.com/noriyukipy/tfchat@fp16 # + [markdown] papermill={"duration": 0.019632, "end_time": "2021-01-10T15:55:24.686494", "exception": false, "start_time": "2021-01-10T15:55:24.666862", "status": "completed"} tags=[] # ## Configure GPU # + papermill={"duration": 2.495989, "end_time": "2021-01-10T15:55:27.198993", "exception": false, "start_time": "2021-01-10T15:55:24.703004", "status": "completed"} tags=[] from tfchat.utils import set_memory_growth from tfchat.utils import set_mixed_precision_policy # + papermill={"duration": 0.599915, "end_time": "2021-01-10T15:55:27.816068", "exception": false, "start_time": "2021-01-10T15:55:27.216153", "status": "completed"} tags=[] set_memory_growth() # + papermill={"duration": 0.021053, "end_time": "2021-01-10T15:55:27.854538", "exception": false, "start_time": "2021-01-10T15:55:27.833485", "status": "completed"} tags=[] if fp16: set_mixed_precision_policy() # + [markdown] papermill={"duration": 0.016829, "end_time": "2021-01-10T15:55:27.888478", "exception": false, "start_time": "2021-01-10T15:55:27.871649", "status": "completed"} tags=[] # ## Setup tokenizer # + papermill={"duration": 1.396761, "end_time": "2021-01-10T15:55:29.302148", "exception": false, "start_time": "2021-01-10T15:55:27.905387", "status": "completed"} tags=[] # Install transformers by HuggingFace to use GPT2 tokenizer # ! pip install transformers==3.4.0 # Enable widgetsnbextention to avoid the following error when running GPT2.from_pretrained method # ImportError: IProgress not found. Please update jupyter and ipywidgets. # ! 
jupyter nbextension enable --py widgetsnbextension # + papermill={"duration": 1.817109, "end_time": "2021-01-10T15:55:31.139209", "exception": false, "start_time": "2021-01-10T15:55:29.322100", "status": "completed"} tags=[] # setup tokenizer from transformers import GPT2Tokenizer tokenizer = GPT2Tokenizer.from_pretrained("gpt2") # + [markdown] papermill={"duration": 0.017488, "end_time": "2021-01-10T15:55:31.174502", "exception": false, "start_time": "2021-01-10T15:55:31.157014", "status": "completed"} tags=[] # ## Prepare model config # + papermill={"duration": 0.17772, "end_time": "2021-01-10T15:55:31.369588", "exception": false, "start_time": "2021-01-10T15:55:31.191868", "status": "completed"} tags=[] from tfchat.configs import GPT2SmallConfig from tfchat.utils import import_class config = import_class(config_cls)() # Set the larger number of vocab size than 33,278, which is the vocab size of Wikitext-2 config.vocab_size = tokenizer.vocab_size # + papermill={"duration": 0.028205, "end_time": "2021-01-10T15:55:31.420645", "exception": false, "start_time": "2021-01-10T15:55:31.392440", "status": "completed"} tags=[] config # + [markdown] papermill={"duration": 0.017704, "end_time": "2021-01-10T15:55:31.456076", "exception": false, "start_time": "2021-01-10T15:55:31.438372", "status": "completed"} tags=[] # ## Prepare Dataset # + papermill={"duration": 0.02259, "end_time": "2021-01-10T15:55:31.496336", "exception": false, "start_time": "2021-01-10T15:55:31.473746", "status": "completed"} tags=[] from pathlib import Path from urllib.request import urlretrieve import zipfile import numpy as np def encode_file(_tokenizer, _filepath): ids = [] with open(_filepath) as f: for line in f.readlines(): text = line.strip("\n") ids.extend(_tokenizer.encode(text)) return np.array(ids, dtype=np.int32) # + papermill={"duration": 448.956676, "end_time": "2021-01-10T16:03:00.471125", "exception": false, "start_time": "2021-01-10T15:55:31.514449", "status": "completed"} tags=[] train_ids = encode_file(tokenizer, train_file) valid_ids = encode_file(tokenizer, valid_file) # + papermill={"duration": 0.023034, "end_time": "2021-01-10T16:03:00.527623", "exception": false, "start_time": "2021-01-10T16:03:00.504589", "status": "completed"} tags=[] print("Train:", train_ids.shape) print("Valid:", valid_ids.shape) # + papermill={"duration": 0.022388, "end_time": "2021-01-10T16:03:00.568632", "exception": false, "start_time": "2021-01-10T16:03:00.546244", "status": "completed"} tags=[] print(train_ids.shape) print(valid_ids.shape) # + papermill={"duration": 1.325188, "end_time": "2021-01-10T16:03:01.912240", "exception": false, "start_time": "2021-01-10T16:03:00.587052", "status": "completed"} tags=[] from tfchat.data import BlockDataset dataset = BlockDataset(block_size=config.context_size, batch_size=batch_size) train_dataset = dataset.build(train_ids, shuffle=True) valid_dataset = dataset.build(valid_ids, shuffle=False) # + papermill={"duration": 114.592118, "end_time": "2021-01-10T16:04:56.523298", "exception": false, "start_time": "2021-01-10T16:03:01.931180", "status": "completed"} tags=[] num_train_steps = len([_ for _ in train_dataset]) num_valid_steps = len([_ for _ in valid_dataset]) print("Train steps:", num_train_steps) print("Valid steps:", num_valid_steps) # + [markdown] papermill={"duration": 0.018979, "end_time": "2021-01-10T16:04:56.561022", "exception": false, "start_time": "2021-01-10T16:04:56.542043", "status": "completed"} tags=[] # ## Transformers model implementation # + 
papermill={"duration": 0.02382, "end_time": "2021-01-10T16:04:56.603373", "exception": false, "start_time": "2021-01-10T16:04:56.579553", "status": "completed"} tags=[] from transformers import TFGPT2LMHeadModel from transformers import GPT2Config import tensorflow.keras as keras import tensorflow as tf from tfchat.models import create_combined_mask # + papermill={"duration": 0.024718, "end_time": "2021-01-10T16:04:56.647091", "exception": false, "start_time": "2021-01-10T16:04:56.622373", "status": "completed"} tags=[] class TransformersGPT2(keras.Model): def __init__(self, config): super().__init__() tf_config = GPT2Config( n_layers=config.num_layers, n_embd=config.d_model, n_head=config.num_heads, n_inner=config.d_ff, vocab_size=config.vocab_size, n_ctx=config.context_size, n_positions=config.context_size, attn_pdrop=config.attention_dropout_rate, resid_pdrop=config.residual_dropout_rate, embd_pdrop=config.embedding_dropout_rate, layer_norm_epsilon=config.epsilon, activation_function="gelu_new", # Default value of transformers implementation ) self._decoder = TFGPT2LMHeadModel(tf_config) def call(self, inputs, training): inputs = tf.cast(inputs, tf.int32) x = self._decoder(inputs, training=training) return x[0] # + [markdown] papermill={"duration": 0.018839, "end_time": "2021-01-10T16:04:56.684846", "exception": false, "start_time": "2021-01-10T16:04:56.666007", "status": "completed"} tags=[] # ## Prepare Model # + papermill={"duration": 0.025337, "end_time": "2021-01-10T16:04:56.728655", "exception": false, "start_time": "2021-01-10T16:04:56.703318", "status": "completed"} tags=[] from tfchat.losses import PaddingLoss from tfchat.schedules import WarmupLinearDecay import tensorflow.keras as keras def train(_model, _train_dataset, _valid_dataset, _epochs, _warmup_steps, _num_train_steps, _max_learning_rate, _clipnorm): schedule = WarmupLinearDecay(max_learning_rate=_max_learning_rate, warmup_steps=_warmup_steps, training_steps=_num_train_steps*_epochs) optimizer = keras.optimizers.Adam(schedule, beta_1=0.9, beta_2=0.999, epsilon=1e-8, clipnorm=_clipnorm) _model.compile(loss=PaddingLoss(), optimizer=optimizer) history = _model.fit( _train_dataset, validation_data=_valid_dataset, epochs=_epochs, callbacks=[ keras.callbacks.EarlyStopping(patience=1, restore_best_weights=True), # If you want to save chekcpoints, remove the next comment out #keras.callbacks.ModelCheckpoint("keras_model/", save_best_only=True) ], verbose=2, ) # + papermill={"duration": 0.171556, "end_time": "2021-01-10T16:04:56.918984", "exception": false, "start_time": "2021-01-10T16:04:56.747428", "status": "completed"} tags=[] if model_type == "pre_ln": from tfchat.models import PreLNDecoder model = PreLNDecoder(config) elif model_type == "post_ln": from tfchat.models import PostLNDecoder model = PostLNDecoder(config) elif model_type == "transformers": model = TransformersGPT2(config) elif model_type == "min_gpt": from mingpt.model import GPT, GPTConfig mconf = GPTConfig(config.vocab_size, config.context_size, n_layer=config.num_layers, n_head=config.num_heads, n_embd=config.d_model) model = GPT(mconf) else: raise Exception("Model type is wrong") # + papermill={"duration": 1.391562, "end_time": "2021-01-10T16:04:58.329860", "exception": false, "start_time": "2021-01-10T16:04:56.938298", "status": "completed"} tags=[] model.build(input_shape=(None, config.context_size)) model.summary() # + papermill={"duration": 73755.802448, "end_time": "2021-01-11T12:34:14.151673", "exception": false, "start_time": 
"2021-01-10T16:04:58.349225", "status": "completed"} tags=[] train(model, train_dataset, valid_dataset, epochs, warmup_steps, num_train_steps, max_learning_rate, clipnorm) # + papermill={"duration": 15.822247, "end_time": "2021-01-11T12:34:29.994262", "exception": false, "start_time": "2021-01-11T12:34:14.172015", "status": "completed"} tags=[] from tfchat.eval import perplexity print("Validation PPL:", perplexity(model, valid_dataset)) # + papermill={"duration": 0.421046, "end_time": "2021-01-11T12:34:30.436468", "exception": false, "start_time": "2021-01-11T12:34:30.015422", "status": "completed"} tags=[] from tfchat.utils import save_model save_model(save_model_dir, model, config) # + papermill={"duration": 0.020719, "end_time": "2021-01-11T12:34:30.478512", "exception": false, "start_time": "2021-01-11T12:34:30.457793", "status": "completed"} tags=[]
examples/benchmark-wikitext-v0.1.0/output/tfmodel_train_scratch-wikitext_103_raw-post_ln-lr_e4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Using `fmas` as a black-box application # # This example shows how to use `py-fmas` as a black-box application that # only requires a minimal amount of scripting. # # .. codeauthor:: <NAME> <<EMAIL>> # # We start by simply importing the `fmas` package into the current namespace. # # # import fmas # If an adequate input file is located within the current working directory, # `fmas` can be used as shown below. It features a particular function called # `run`, which reads in the propagation settings stored in the input file # `input_file.h5` and runs the simulation. # # res = fmas.run('input_file.h5', model_type='FMAS_S_R', solver_type='IFM_RK4IP') # An example that shows how an adequate input file can be generated via Python # is shown under the link below: # # `sphx_glr_auto_tutorials_basics_ng_generate_infile.py` # # After the propagation algorithm (specified in `input_file.h5`) terminates, # a simple dictionary data structure with the following keys is available: # # print(res.keys()) # A simple plot that shows the result of the simulation run can be produced # using the function `plot_evolution` implemented in the module `tools`: # # from fmas.tools import plot_evolution plot_evolution( res['z'], res['t'], res['u'], t_lim=(-500,2200), w_lim=(1.,4.)) # The results can be stored for later postprocessing using the function # `save_h5` implemented in the module `data_io`. It will generate a file # `out_file.h5` in HDF5 format in the current working directory. # # from fmas.data_io import save_h5 save_h5('out_file.h5', **res)
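# If the stored results need to be inspected outside of `fmas`, the file can
# also be opened directly with `h5py`. This is only a minimal sketch, assuming
# that `out_file.h5` is a plain HDF5 container holding the entries of `res`;
# the reading helpers in `fmas.data_io` remain the canonical route.

import h5py

with h5py.File('out_file.h5', 'r') as f:
    print(list(f.keys()))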
docs/_downloads/f90f3590f8f318e97c7e021c74024a3f/g_app.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Color FID Benchmark (HQ) import os os.environ['CUDA_VISIBLE_DEVICES']='3' os.environ['OMP_NUM_THREADS']='1' import statistics from fastai import * from deoldify.visualize import * import cv2 from fid.fid_score import * from fid.inception import * import imageio plt.style.use('dark_background') torch.backends.cudnn.benchmark=True import warnings warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.functional") warnings.filterwarnings("ignore", category=UserWarning, message='.*?retrieve source code for container of type.*?') # ## Setup # + #NOTE: Data should come from here: 'https://datasets.figure-eight.com/figure_eight_datasets/open-images/test_challenge.zip' #NOTE: Minimum recommmended number of samples is 10K. Source: https://github.com/bioinf-jku/TTUR path = Path('data/ColorBenchmark') path_hr = path/'source' path_lr = path/'bandw' path_results = Path('./result_images/ColorBenchmarkFID/artistic') path_rendered = path_results/'rendered' #path = Path('data/DeOldifyColor') #path_hr = path #path_lr = path/'bandw' #path_results = Path('./result_images/ColorBenchmark/edge') #path_rendered = path_results/'rendered' #num_images = 2048 num_images = 15000 #num_images = 50000 render_factor=35 fid_batch_size = 4 eval_size=299 # - def inception_model(dims:int): block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] model = InceptionV3([block_idx]) model.cuda() return model def create_before_images(fn,i): dest = path_lr/fn.relative_to(path_hr) dest.parent.mkdir(parents=True, exist_ok=True) img = PIL.Image.open(fn).convert('LA').convert('RGB') img.save(dest) def render_images(colorizer, source_dir:Path, filtered_dir:Path, target_dir:Path, render_factor:int, num_images:int)->[(Path, Path, Path)]: results = [] bandw_list = ImageList.from_folder(path_lr) bandw_list = bandw_list[:num_images] if len(bandw_list.items) == 0: return results results = [] img_iterator = progress_bar(bandw_list.items) for bandw_path in img_iterator: target_path = target_dir/bandw_path.relative_to(source_dir) try: result_image = colorizer.get_transformed_image(path=bandw_path, render_factor=render_factor) result_path = Path(str(path_results) + '/' + bandw_path.parent.name + '/' + bandw_path.name) if not result_path.parent.exists(): result_path.parent.mkdir(parents=True, exist_ok=True) result_image.save(result_path) results.append((result_path, bandw_path, target_path)) except Exception as err: print('Failed to render image. Skipping. Details: {0}'.format(err)) return results def calculate_fid_score(render_results, bs:int, eval_size:int): dims = 2048 cuda = True model = inception_model(dims=dims) rendered_paths = [] target_paths = [] for render_result in render_results: rendered_path, _, target_path = render_result rendered_paths.append(str(rendered_path)) target_paths.append(str(target_path)) rendered_m, rendered_s = calculate_activation_statistics(files=rendered_paths, model=model, batch_size=bs, dims=dims, cuda=cuda) target_m, target_s = calculate_activation_statistics(files=target_paths, model=model, batch_size=bs, dims=dims, cuda=cuda) fid_score = calculate_frechet_distance(rendered_m, rendered_s, target_m, target_s) del model return fid_score # ## Create black and whites source images # Only runs if the directory isn't already created. 
if not path_lr.exists(): il = ImageList.from_folder(path_hr) parallel(create_before_images, il.items) path_results.parent.mkdir(parents=True, exist_ok=True) # ### Rendering colorizer = get_image_colorizer(artistic=True) render_results = render_images(colorizer=colorizer, source_dir=path_lr, target_dir=path_hr, filtered_dir=path_results, render_factor=render_factor, num_images=num_images) # ### Colorization Scoring fid_score = calculate_fid_score(render_results, bs=fid_batch_size, eval_size=eval_size) print('FID Score: ' + str(fid_score))
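# ### Reference: Fréchet distance between activation statistics

# The value returned by `calculate_frechet_distance` above is the Fréchet
# distance between the two Gaussians fitted to the Inception activations. A
# minimal NumPy/SciPy restatement of the standard formula is sketched below for
# reference; it is illustrative only and not the implementation used by the
# `fid` module.

import numpy as np
from scipy import linalg

def frechet_distance(mu1, sigma1, mu2, sigma2):
    # d^2 = ||mu1 - mu2||^2 + Tr(S1 + S2 - 2 * sqrt(S1 @ S2))
    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
    covmean = covmean.real  # discard negligible imaginary parts from numerical error
    return diff @ diff + np.trace(sigma1 + sigma2 - 2.0 * covmean)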
ColorFIDBenchmarkArtistic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.core.display import HTML def css_styling(): styles = open("./styles/custom.css", "r").read() return HTML(styles) css_styling() # __PULL__ the changes you made at home to your local copy on the M drive. # # If you need a reminder of how to do this: # # Open Jupyter notebook: # <br> Start >> Programs >> Programming >> Anaconda3 >> JupyterNotebook # <br>(Start >> すべてのプログラム >> Programming >> Anaconda3 >> JupyterNotebook) # # Navigate to where your interactive textbook is stored. # # Open __S1_Introduction_to_Version_Control__. # We will also use __S1_Introduction_to_Version_Control__ to __PULL__ the solutions to the Review Exercises from an online repository. # # This will allow you to check your answers. # # When you have done this, select the tab with the contents list of the interactive textbook and open __Seminar 3__ by clicking on __3_Data_Structures__. # + [markdown] slideshow={"slide_type": "slide"} # # Data Structures # # # Lesson Goal # # - Compose simple programs to control the flow with which the operators we have studied so far are applied to: # - single-value variables. # - data structures (holding multiple variables) # # # # + [markdown] slideshow={"slide_type": "slide"} # # Objectives # # # - Express collections of multiple variables as `list`, `tuple` and dictionary (`dict`). # # - Use iteration to visit entries in a data structure. # # # - Learn to select the right data structure for an application. # + [markdown] slideshow={"slide_type": "slide"} # Why we are studying this: # # To use Python to solve the more complex engineering problems you are likely to encounter, involving: # - multi-variable values (e.g. vectors) # - large data sets (e.g. experiment results) # - manipulating your data using logic # <br> # (e.g. sorting and categorising the answers to an operation performed on multiple data points) # # # - # Lesson structure: # - Learn new skills together: # - __Demonstration__ on slides. # - __Completing examples__ in textbooks. # - __Feedback answers__ (verbally / whiteboards) # - Practise alone: __Completing review exercises__. # - Skills Review: Updating your local repository using an __upstream repository.__ # - __Summary__ and __quiz__. # In the last seminar we learnt to generate a range of numbers for use in the control flow of a program, using the function `range()`: for j in range(20): if j % 4 == 0: # Check remainder of j/4 continue # continue to next value of j print(j, "is not a multiple of 4") # + [markdown] slideshow={"slide_type": "slide"} # ## Data Structures # # Often we want to manipulate data that is more meaningful than ranges of numbers. 
# # These collections of variables might include: # - the results of an experiment # - a list of names # - the components of a vector # - a telephone directory with names and associated numbers. # # # # # # + [markdown] slideshow={"slide_type": "slide"} # Python has different __data structures__ that can be used to store and manipulate these values. # # Like variable types (`string`, `int`, `float`...), different data structures behave in different ways. # # Today we will learn to use `list`, `tuple` and dictionary (`dict`) data structures. # + [markdown] slideshow={"slide_type": "slide"} # # We will study the differences in how they behave so that you can learn to select the most suitable data structure for an application. # + [markdown] slideshow={"slide_type": "slide"} # Programs use data structures to collect data into useful packages. # # >$ r = [u, v, w] $ # # For example, rather than representing a vector `r` of length 3 using three separate floats `ru`, `rv` and `rw`, we could represent # it as a __list__ of floats: # # ```Python # r = [u, v, w] # ``` # # <img src="img/3d_position_vector.png" alt="Drawing" style="width: 175px;"/> # # (We will learn what a __list__ is in a moment.) # + [markdown] slideshow={"slide_type": "slide"} # If we want to store the names of students in a laboratory group, rather than representing each student using an individual string variable, we could use a list of names, e.g.: # # # - lab_group0 = ["Sarah", "John", "Joe", "Emily"] lab_group1 = ["Roger", "Rachel", "Amer", "Caroline", "Colin"] # + [markdown] slideshow={"slide_type": "slide"} # This is useful because we can perform operations on lists such as: # - checking their length (the number of students in a lab group) # - sorting the names in the list into alphabetical order # - making a list of lists (we call this a *nested list*): # # - lab_groups = [lab_group0, lab_group1] # + [markdown] slideshow={"slide_type": "slide"} # ## Lists # + [markdown] slideshow={"slide_type": "slide"} # A list is a sequence of data. # # We call each item in the sequence an *element*. # # A list is constructed using square brackets: # # # - a = [1, 2, 3] # + [markdown] slideshow={"slide_type": "slide"} # A `range` can be converted to a list with the `list` function. # - print(list(range(10))) # + [markdown] slideshow={"slide_type": "slide"} # When `range` has just one *argument* (the entry in the parentheses), it will generate a range from 0 up to but not including the specified number. # # + slideshow={"slide_type": "slide"} print(list(range(10,20))) # + [markdown] slideshow={"slide_type": "-"} # When a range has two arguments: # - the first value is the starting value. # - the second value is the stopping value. # - the stopping value is not included in the range. # + [markdown] slideshow={"slide_type": "slide"} # You can optionally include a step: # - print(list(range(10, 20, 2))) # + [markdown] slideshow={"slide_type": "slide"} # A list can hold a mixture of types (`int`, `string`....). 
# - a = [1, 2.0, "three"] # + [markdown] slideshow={"slide_type": "slide"} # An empty list is created by # - my_list = [] # + [markdown] slideshow={"slide_type": "slide"} # A list of length 5 with repeated values can be created by # - my_list = ["Hello"]*5 print(my_list) # + [markdown] slideshow={"slide_type": "slide"} # We can check if an item is in a list using the function `in`: # # - print("Hello" in my_list) print("Goodbye" in my_list) # + [markdown] slideshow={"slide_type": "slide"} # <a id='IteratingLists'></a> # ### Iterating Over Lists # # Looping over each item in a list is called *iterating*. # # To iterate over a list of the lab group we use a `for` loop. # # Each iteration, variable `d` takes the value of the next item in the list: # - for d in [1, 2.0, "three"]: print('the value of d is:', d) # + [markdown] slideshow={"slide_type": "slide"} # __Try it yourself__ # # # In the cell provided in your textbook *iterate* over the list `[1, 2.0, "three"]`. # # Each time the code loops: # 1. print the value of data __cast as a string__ (Seminar 1 Data Types and Operators) # 1. print the variable type to demonstrate that the variable has been cast (note that otherwise the variable appeares to remain unchanged). # - # Iterate over a list and cast each item as a string for d in [1, 2.0, "three"]: d = str(d) print('The value of d is:', d) print(type(d)) # + [markdown] slideshow={"slide_type": "slide"} # ### Manipulating Lists # # There are many functions for manipulating lists. # # <a id='Length'></a> # # ### Finding the Length of a List # # We can find the length (number of items) of a list using the function `len()`, by including the name of the list in the brackets. # # # # # # + [markdown] slideshow={"slide_type": "slide"} # In the example below, we find the length of the list `lab_group0`. # + lab_group0 = ["Sara", "Mari", "Quang"] size = len(lab_group0) print("Lab group members:", lab_group0) print("Size of lab group:", size) print("Check the Python object type:", type(lab_group0)) # + [markdown] slideshow={"slide_type": "slide"} # <a id='SortLists'></a> # ### Sorting Lists # # To sort the list we use `sorted()`. # # #### Sorting Numerically # # If the list contains numerical variables, the numbers is sorted in ascending order. # + numbers = [7, 1, 3.0] print(numbers) numbers = sorted(numbers) print(numbers) # + [markdown] slideshow={"slide_type": "slide"} # __Note:__ We can sort a list with mixed numeric types (e.g. `float` and `int`). # # However, we cannot sort a list with types that cannot be sorted by the same ordering rule # # (e.g. `numbers = sorted([seven, 1, 3.0])` causes an error.) # - numbers = sorted(['seven', 1, 3.0]) # + [markdown] slideshow={"slide_type": "slide"} # #### Sorting Alphabetically # # If the list contains strings, the list is sorted by alphabetical order. # + lab_group0 = ["Sara", "Mari", "Quang"] print(lab_group0) lab_group0 = sorted(lab_group0) print(lab_group0) # + [markdown] slideshow={"slide_type": "slide"} # As with `len()` we include the name of the list we want to sort in the brackets. # - # There is a shortcut for sorting a list # # `sort` is known as a 'method' of a `list`. # # If we suffix a list with `.sort()`, it performs an *in-place* sort. 
# + slideshow={"slide_type": "slide"} lab_group0 = ["Sara", "Mari", "Quang"] print(lab_group0) #lab_group0 = sorted(lab_group0) lab_group0.sort() print(lab_group0) # + [markdown] slideshow={"slide_type": "slide"} # __Try it yourself__ # # In the cell provided in your textbook create a list of __numeric__ or __string__ values. # # Sort the list using `sorted()` __or__ `.sort()`. # # Print the sorted list. # # Print the length of the list using `len()`. # - # Sorting a list list_a = [1, 6, 7, 9, 3, 5] list_a.sort() print(list_a) print(len(list_a)) list_strings = ["cat", "fish", "rainbow", "oil", "arms"] print(sorted(list_strings, reverse=True)) list_strings[::-1] sorted(list_strings, key=len) sorted(list_strings[::-1], key=len) sorted(sorted(list_strings), key=len) list(reversed(list_strings) # + [markdown] slideshow={"slide_type": "slide"} # ### Removing an Item from a List # # We can remove items from a list using the method `pop`. # # We place the index of the element we wich to remove in brackets. # + lab_group0 = ["Sara", "Mari", "Quang"] # Remove the second student from the list: lab_group0 # remember indexing starts from 0 # 1 is the second element print(lab_group0) print(lab_group0[:-1]) # Does not need pop if you do not want the last item last_item = lab_group0.pop() print(lab_group0, last_item) # + [markdown] slideshow={"slide_type": "slide"} # We can add items at the end of a list using the method `append`. # # We place the element we want to add to the end of the list in brackets. # - # Add new student "Lia" at the end of the list lab_group0.append("Lia") print(lab_group0) # + [markdown] slideshow={"slide_type": "slide"} # __Try it yourself__ # # In the cell provided in your textbook. # # Remove Sara from the list. # # Print the new list. # # Add a new lab group member, Tom, to the list. # # Print the new list. # + lab_group0 = ["Sara", "Mari", "Quang"] # Adding and removing items from a list. lab_group0.pop(0) print(lab_group0) lab_group0.append('Tom') print(lab_group0) # + [markdown] slideshow={"slide_type": "slide"} # ### Indexing # # Lists store data in order. # # We can select a single element of a list using its __index__. # # You are familiar with this process; it is the same as selecting individual characters of a `string`: # - a = "string" b = a[1] print(b) first_member = lab_group0[0] print(first_member) # + [markdown] slideshow={"slide_type": "slide"} # Indices can be useful when looping through the items in a list.` # + # We can express the following for loop: # ITERATING for i in lab_group0: print(i) # as: # INDEXING for i in range(len(lab_group0)): print(lab_group0[i]) # - # An example of where __indexing__ is more appropraite than __iterating__: # # Sometimes we want to perform an operation on all items of a list. # # Consider the example we looked at earlier, where we looped through a list, expressing each element as a string. # # You may have written something like this... for d in [1, 2.0, "three"]: d = str(d) print(d, type(d)) # We can re-write this: # + data = [1, 2.0, "three"] for d in data: d = str(d) print(d, type(d)) # - # __Iterating:__ The type of each element in the list `data` remains unchanged. print(type(data[0])) print(type(data[1])) print(type(data[2])) # __Indexing__: We can modify each element of the list (e.g. 
to change its type) # + for d in range(len(data)): data[d] = str(data[d]) print(data[d], type(data[d])) print(type(data[0])) print(type(data[1])) print(type(data[2])) # + data = [1, 2.0, "three"] data = [str(d) for d in data] print(type(data[0])) print(type(data[1])) print(type(data[2])) # + [markdown] slideshow={"slide_type": "slide"} # __Note:__<br> # - Some data structures that support *iterating* but do not support *indexing* (e.g. dictionaries, which we eill learn about later). <br> When possible, it is better to iterate over a list rather than use indexing. # - When indexing: # - the first value in the range is 0. # - the last value in the range is (list length - 1). # + [markdown] slideshow={"slide_type": "slide"} # Lists and indexing can be useful for numerical computations. # # ### Indexing Example: Vectors # # __Vector:__ A quantity with magnitude and direction. # # # # + [markdown] slideshow={"slide_type": "slide"} # Position vectors (or displacement vectors) in 3D space can always be expressed in terms of x,y, and z-directions. # # <img src="img/3d_position_vector.png" alt="Drawing" style="width: 175px;"/> # # The position vector 𝒓 indicates the position of a point in 3D space. # # $$ # \mathbf{r} = x\mathbf{i} + y\mathbf{j} + z\mathbf{k} # $$ # # # + [markdown] slideshow={"slide_type": "slide"} # $$ # \mathbf{r} = x\mathbf{i} + y\mathbf{j} + z\mathbf{k} # $$ # # 𝒊 is the displacement one unit in the x-direction<br> # 𝒋 is the displacement one unit in the y-direction<br> # 𝒌 is the displacement one unit in the z-direction # # We can conveniently express $\mathbf{r}$ as a matrix: # $$ # \mathbf{r} = [x, y, z] # $$ # # __...which looks a lot like a Python list!__ # # + [markdown] slideshow={"slide_type": "slide"} # You will encounter 3D vectors a lot in your engineering studies as they are used to describe many physical quantities, e.g. force. # + [markdown] slideshow={"slide_type": "slide"} # <a id='DotProductLists'></a> # # ### Indexing Example: The dot product of two vectors: # # The __dot product__ is a really useful algebraic operation that takes two equal-length sequences of numbers (usually coordinate vectors) and returns a single number. # # It can be expressed mathematically as... # + [markdown] slideshow={"slide_type": "slide"} # __GEOMETRIC REPRESENTATION__ # # \begin{align} # \mathbf{A} \cdot \mathbf{B} = |\mathbf{A}| |\mathbf{B}| cos(\theta) # \end{align} # # <img src="img/dot_product.gif" alt="Drawing" style="width: 250px;"/> # &nbsp;&nbsp;&nbsp;&nbsp; $\mathbf{B} cos(\theta)$ is the component of $B$ acting in the direction of $A$. # + [markdown] slideshow={"slide_type": "slide"} # &nbsp;&nbsp;&nbsp;&nbsp; $\mathbf{B} cos(\theta)$ is the component of $B$ acting in the direction of $A$. # # For example, the component of a force, $\mathbf{F_{app}}$, acting in the direction of the velocity of an object (x direction): # # <img src="img/resolving_force.png" alt="Drawing" style="width: 250px;"/> # # $$ # \mathbf{F_{app,x}} = \mathbf{F_{app}}cos(\theta) # $$ # + [markdown] slideshow={"slide_type": "slide"} # __ALGEBRAIC REPRESENTATION__ # # >The dot product of two $n$-length-vectors: # > <br> $ \mathbf{A} = [A_1, A_2, ... A_n]$ # > <br> $ \mathbf{B} = [B_1, B_2, ... B_n]$ # > <br> is: # # \begin{align} # \mathbf{A} \cdot \mathbf{B} = \sum_{i=1}^n A_i B_i. 
# \end{align} # # # - # >So the dot product of two 3D vectors: # > <br> $ \mathbf{A} = [A_x, A_y, A_z]$ # > <br> $ \mathbf{B} = [B_x, B_y, B_z]$ # > <br> is: # # \begin{align} # \mathbf{A} \cdot \mathbf{B} &= \sum_{i=1}^n A_i B_i \\ # &= A_x B_x + A_y B_y + A_z B_z. # \end{align} # # __Example:__ # <br> The dot product $\mathbf{A} \cdot \mathbf{B}$: # > <br> $ \mathbf{A} = [1, 3, −5]$ # > <br> $ \mathbf{B} = [4, −2, −1]$ # # # # \begin{align} # {\displaystyle {\begin{aligned}\ [1,3,-5]\cdot [4,-2,-1]&=(1)(4)+(3)(-2)+(-5)(-1)\\&=4-6+5\\&=3\end{aligned}}} # \end{align} # + [markdown] slideshow={"slide_type": "slide"} # # # We can solve this very easily using a Python `for` loop. # # # + slideshow={"slide_type": "slide"} A = [1.0, 3.0, -5.0] B = [4.0, -2.0, -1.0] # Create a variable called dot_product with value, 0. dot_product = 0.0 for i in range(len(A)): dot_product += A[i]*B[i] print(dot_product) # - sum(a * b for a, b in zip(A, B)) # + [markdown] slideshow={"slide_type": "slide"} # From is __GEOMETRIC__ representation, we can see that the dot product allows us to quickly solve many engineering-related problems... # + [markdown] slideshow={"slide_type": "slide"} # \begin{align} # \mathbf{A} \cdot \mathbf{B} = |\mathbf{A}| |\mathbf{B}| cos(\theta) # \end{align} # # Examples: # - Test if two vectors are: # - perpendicular ($\mathbf{A} \cdot \mathbf{B}==0$) # - acute ($\mathbf{A} \cdot \mathbf{B}>0$) # - obtuse ($\mathbf{A} \cdot \mathbf{B}<0$) # - Find the angle between two vectors (from its cosine). # - Find the magnitude of one vector in the direction of another. # - Find physical quantities e.g. the work, W, when pushing an object a certain distance, d, with force, F: # # <img src="img/work_equation.jpg" alt="Drawing" style="width: 300px;"/> # # + [markdown] slideshow={"slide_type": "slide"} # __Try it yourself:__ # # $\mathbf{C} = [2, 4, 3.5]$ # # $\mathbf{D} = [1, 2, -6]$ # # In the cell below find the dot product: # $\mathbf{C} \cdot \mathbf{D}$ # # Is the angle between the vectors obtuse or acute or are the vectors perpendicular? <br> # (Perpendicular if $\mathbf{A} \cdot \mathbf{B}==0$, acute if $\mathbf{A} \cdot \mathbf{B}>0$, or obtuse if $\mathbf{A} \cdot \mathbf{B}<0$). # # - # The dot product of C and D C=[2,4,3.5] D=[1,2,-6] sum(c * d for c, d in zip(C, D)) -11 / sum(i**2 for i in C)**0.5 / sum(i**2 for i in D)**0.5 np.rad2deg(np.arccos(_)) import numpy as np np.rad2deg(np.arccos(-11 / np.linalg.norm(C) / np.linalg.norm(D))) # + [markdown] slideshow={"slide_type": "slide"} # <a id='NestedList'></a> # ### Nested Data Structures: Lists of Lists # # A *nested list* is a list within a list. (Recall *nested loops* from Seminar 1: Control Flow). # # To access a __single element__ we need as many indices as there are levels of nested list. # # This is more easily explained with an example: # + slideshow={"slide_type": "slide"} lab_group0 = ["Sara", "Mika", "Ryo", "Am"] lab_group1 = ["Hemma", "Miri", "Qui", "Sajid"] lab_group2 = ["Adam", "Yukari", "Farad", "Fumitoshi"] lab_groups = [lab_group0, lab_group1, lab_group2] # - # `lab_group0`, `lab_group1` and `lab_group2` are nested within `lab_groups`. # # # + [markdown] slideshow={"slide_type": "slide"} # There are __two__ levels of nested lists. # # We need __two__ indices to select a single elememt from `lab_group0`, `lab_group1` or `lab_group2`. # # The first index: a list (`lab_group0`, `lab_group1` or `lab_group2`). # # The second index: an element in that list. 
# + slideshow={"slide_type": "slide"} group = lab_groups[0] print(group) name = lab_groups[1][2] print(name) # + [markdown] slideshow={"slide_type": "slide"} # ## Tuples # # Tuples are similar to lists. # # However, after creatig a tuple: # - you cannot add or remove elements from it without creating a new tuple. # - you cannot change the value of a single tuple element e.g. by indexing. # # # # + [markdown] slideshow={"slide_type": "slide"} # Tuples are therefore used for values that should not change after being created. # <br> e.g. a vector of length three with fixed entries # <br>It is 'safer' in this case since it cannot be modified accidentally in a program. # # To create a tuple, use () parentheses. # # + [markdown] slideshow={"slide_type": "slide"} # __Example__ # In Kyoto University, each professor is assigned an office. # # Philamore-sensei is given room 32: # + room = ("Philamore", 32) print("Room allocation:", room) print("Length of entry:", len(room)) print(type(room)) # + [markdown] slideshow={"slide_type": "slide"} # <a id='IteratingTuples'></a> # ### Iterating over Tuples # # We can *iterate* over tuples in the same way as with lists, # - # Iterate over tuple values for d in room: print(d) # + [markdown] slideshow={"slide_type": "slide"} # ### Indexing # # We can index into a tuple: # - # Index into tuple values print(room[1]) print(room[0]) # + [markdown] slideshow={"slide_type": "slide"} # __Note__ Take care when creating a tuple of length 1: # - # Creating a list of length 1 a = [1] print(a) print(type(a)) print(len(a)) # + [markdown] slideshow={"slide_type": "slide"} # However, if we use the same process for a tuple: # - a = (1) print(a) print(type(a)) #print(len(a)) # + [markdown] slideshow={"slide_type": "slide"} # To create a tuple of length 1, we use a comma: # - a = (1,) print(a) print(type(a)) print(len(a)) room = ("Endo",) print("Room allocation:", room) print("Length of entry:", len(room)) print(type(room)) # + [markdown] slideshow={"slide_type": "slide"} # ### Nested Data Structures: Lists of Tuples # As part of a rooms database, we can create a list of tuples: # + room_allocation = [("Endo",), ("Philamore", 32), ("Matsuno", 31), ("Sawaragi", 28), ("Okino", 28), ("Kumegawa", 19)] print(room_allocation) # + [markdown] slideshow={"slide_type": "slide"} # Index into the list room allocation # # Refer to <a href='#NestedList'>Link to the destination'</a> for how to index into *nested* data structures. # # In the cell below use indexing to print: # - Matsuno-sensei's room number # - Kumegawa-sensei's room number # - The variable type of Kumegawa-sensei's room number # + slideshow={"slide_type": "-"} # Matsuno-sensei's room number print(room_allocation[2][1]) # Kumegawa-sensei's room number print(room_allocation[-1][1]) # The Python variable type of Kumegawa-sensei's room number print(type(room_allocation[-1][1])) # + [markdown] slideshow={"slide_type": "slide"} # ### Sorting Tuples # To make it easier to look up the office number each professor, we can __sort__ the list of tuples into an office directory. # + [markdown] slideshow={"slide_type": "slide"} # The ordering rule is determined by the __first element__ of each tuple. # # If the first element of each tuple is a numeric type (`int`, `float`...) the tulpes are sorted by ascending numerical order of the first element: # # If the first element of each tuple is a `string` (as in this case), the tuples are sorted by alphabetical order of the first element. 
# + [markdown] slideshow={"slide_type": "slide"} # A tuple is sorted using the same method to sort a list. # # Refer to <a href='#SortLists'>Sorting Lists</a> remind yourself of this method. # # In the cell provided below, sort the list, `room_allocation` by alphabetical order. # - # room_allocation sorted by alphabetical order print(sorted(room_allocation)) # + [markdown] slideshow={"slide_type": "slide"} # The office directory can be improved by excluding professors who do not have an office at Yoshida campus: # - for entry in room_allocation: # only professors with an office have an entry length > 1 if len(entry) > 1: print("Name: ", entry[0], ", Room: ", entry[1], sep="") # + [markdown] slideshow={"slide_type": "slide"} # In summary, use tuples over lists when the length will not change. # - from collections import namedtuple # + Room = namedtuple("Room", ["name", "room"]) room_allocations = [Room("Philamore", 32), Room("Matsuno", 31), Room("Sawaragi", 28), Room("Okino", 28), Room("Kumegawa", 19), ] # - for entry in room_allocations: # only professors with an office have an entry length > 1 if len(entry) > 1: print("Name: ", entry.name, ", Room: ", entry.room, sep="") print(f"Name: {entry.name}, Room: {entry.room}") # + [markdown] slideshow={"slide_type": "slide"} # <a id='Dictionaries'></a> # ## Dictionaries # # We used a list of tuples in the previous section to store room allocations. # # What if we wanted to use a program to find which room a particular professor has been allocated? # # we would need to either: # - iterate through the list and check each name. # # > For a very large list, this might not be very efficient. # # - use the index to select a specific entry of a list or tuple. # # > This works if we know the index to the entry of interest. For a very large list, this is unlikely. # + [markdown] slideshow={"slide_type": "slide"} # A human looking would identify individuals in an office directory by name (or "keyword") rather than a continuous set of integers. # # Using a Python __dictionary__ we can build a 'map' from names (*keys*) to room numbers (*values*). # # A Python dictionary (`dict`) is declared using curly braces: # + room_allocation = {"Endo": None, "Philamore": 32, "Matsuno": 31, "Sawaragi": 28, "Okino": 28, "Kumegawa": 19} print(room_allocation) print(type(room_allocation)) # + [markdown] slideshow={"slide_type": "slide"} # Each entry is separated by a comma. # # For each entry we have: # - a 'key' (followed by a colon) # - a 'value'. # # __Note:__ For empty values (e.g. `Endo` in the example above) we use '`None`' for the value. # # `None` is a Python keyword for 'nothing' or 'empty'. # # Now if we want to know which office belongs to Philamore-sensei, we can query the dictionary by key: # - philamore_office = room_allocation["Philamore"] print(philamore_office) # + [markdown] slideshow={"slide_type": "slide"} # ### Iterating over Dictionaries # # We can __*iterate*__ over the keys in a dictionary as we iterated over the elements of a list or tuple: # # __Try it yourself:__ # <br> # Refer back to: # - <a href='#IteratingLists'>Iterating Over Lists</a> # - <a href='#IteratingTuples'>Iterating Over Tuples</a> # to remind yourself how to *iterate* over a data structure. # # <br> # Using __exactly the same method__, iterate over the entries in the dictionary `room allocation` using a `for` loop. # <br> # Each time the code loops, print the next dictionary entry. # - # iterate over the dictionary, room_allocation. 
# print each entry for entry in room_allocation: print(entry) # + [markdown] slideshow={"slide_type": "slide"} # Notice that this only prints the keys. # # We can access `keys` and `values` seperately by: # - creating two variable names before `in` # - putting `items()` after the dictionary name # - for name, room_number in room_allocation.items(): print(name, room_number) # + [markdown] slideshow={"slide_type": "slide"} # __Try it yourself__<br> # Copy and paste the code from the previous cell. # <br> # Edit it so that it prints the room numbers only. # # Remember you can __"comment out"__ the existing code (instead of deleting it) so that you can refer to it later. # e.g. # ```python # #print(name, room_number) # ``` # # - # iterate over the dictionary, room_allocation. # print each name for name, room_number in room_allocation.items(): print(room_number) # + [markdown] slideshow={"slide_type": "slide"} # Note that the order of the printed entries in the dictionary is different from the input order. # # A dictionary stores data differently from a list or tuple. # # # # # # # # # # + [markdown] slideshow={"slide_type": "slide"} # ### Look-up Keys # # Lists and tuples store entries as continuous pieces of memory, which is why we can access entries by index. # # Indexing cannot be used to access the entries of a dictionary. For example: # ```python # print(room_allocation[0]) # ``` # raises an error. # + [markdown] slideshow={"slide_type": "slide"} # Dictionaries use a different type of storage which allows us to perform look-ups using a 'key'. # + [markdown] slideshow={"slide_type": "slide"} # print(room_allocation["Philamore"]) # + [markdown] slideshow={"slide_type": "slide"} # <a id='DictionaryAdd'></a> # ### Adding Entries to a Dictionary # # We use this same code to add new entries to an existing dictionary: # + room_allocation = {"Endo": None, "Philamore": 32, "Matsuno": 31, "Sawaragi": 28, "Okino": 28, "Kumegawa": 19} print(room_allocation) room_allocation["Fujiwara"] = 34 print() print(room_allocation) # + [markdown] slideshow={"slide_type": "slide"} # <a id='DictionaryRemove'></a> # ### Removing Entries from a Dictionary # # To remove an item from a disctionary we use the command `del`. # + print(room_allocation) del room_allocation["Fujiwara"] print("") print(room_allocation) # + [markdown] slideshow={"slide_type": "slide"} # __Try it yourself__ # <br> # Okino-sensei is leaving Kyoto University. # # Her office will be re-allocated to a new member of staff, Ito-sensei. # # In the cell below, update the dictionary by deleting the entry for Okino-sensei and creating a new entry for Ito-sensei. # # Print the new list. # - # Remove Okino-sensei (room 28) from the dictionary. # Add a new entry for Ito-sensei (room 28) room_allocation = {"Endo": None, "Philamore": 32, "Matsuno": 31, "Sawaragi": 28, "Okino": 28, "Kumegawa": 19} del room_allocation["Okino"] room_allocation["Ito"] = 28 print(room_allocation) # + [markdown] slideshow={"slide_type": "slide"} # So far we have used a string variable types for the dictionary keys. # # However, we can use almost any variable type as a key and we can mix types. # # # + [markdown] slideshow={"slide_type": "slide"} # <a id='DictionaryRestructure'></a> # ### Re-structuring to make a new Dictionary # __Example__: We could 'invert' the room allocation dictionary to create a room-to-name map. 
# # Let's build a new dictionary (`room_map`) by looping through the old dictionary (`room_allocation`) using a `for` loop: # + slideshow={"slide_type": "slide"} # Create empty dictionary room_map = {} # Build dictionary to map 'room number' -> name for name, room_number in room_allocation.items(): # Insert entry into new dictionary room_map[room_number] = name print(room_map) # - room_map = {room_number: name for name, room_number in room_allocation.items()} print(room_map) # + [markdown] slideshow={"slide_type": "slide"} # We can now consult the room-to-name map to find out if a particular room is occupied and by whom. # # Let's assume some rooms are unoccupied and therefore do not exist in this dictionary. # # # # + [markdown] slideshow={"slide_type": "slide"} # If we try to use a key that does not exist in the dictionary, e.g. # # occupant17 = room_map[17] # # Python will give an error (raise an exception). # # If we're not sure that a __key__ is present (that a room is occupied or unocupied in this case), we can check using the funstion in '`in`' # <br>(we used this function to check wether an entry exists in a __list__) # # + slideshow={"slide_type": "slide"} print(19 in room_map) print(17 in room_map) # - # So we know that: # - room 17 is unoccupied # - room 19 is occupied # # + [markdown] slideshow={"slide_type": "slide"} # When using `in`, take care to check for the __key__ (not the value) # - print('Kumegawa' in room_map) 'Kumegawa' in room_map.values() # + [markdown] slideshow={"slide_type": "slide"} # #### Potential application: avoid generating errors if unoccupied room numbers are entered. # # For example, in a program that checks the occupants of rooms by entering the room number: # + rooms_to_check = [17, 19] # Look Before You Leap for room in rooms_to_check: if room in room_map: print(f"Room {room} is occupied by {room_map[room]}-sensei") else: print("Room", room, "is unoccupied.") # - # In Python, the more standard way as to try and do what you want, and then do something if you get an error. However, Looking Before You Leap might still be useful when working with external libraries that don't fail cleanly, for example, or to avoid a time-consuming computation at the beginning of a loop or a function. # Easier to Ask for Forgiveness than Permission for room in rooms_to_check: try: print(f"Room {room} is occupied by {room_map[room]}-sensei") except KeyError: print(f"Room {room} is unoccupied.") # ## Choosing a data structure # # An important task when developing a computer program is selecting the *appropriate* data structure for a task. # # Here are some examples of the suitablity of the data types we have studied for some common computing tasks. # # + [markdown] slideshow={"slide_type": "slide"} # - __Dynamically changing individual elements of a data structure.__ # <br> # e.g. updating the occupant of a room number or adding a name to a list of group members.<br> # __Lists and dictionaries__ allow us to do this.<br> # __Tuples__ do not. # - # - __Storing items in a perticular sequence (so that they can be addressed by index or in a particular order)__. # <br> # e.g. representing the x, y, z coordinates of a 3D position vector, storing data collected from an experiment as a time series. # <br> # __Lists and tuples__ allow us to do this. # <br> # __Dictionaries__ do not. # - __Performing an operation on every item in a sequence.__ # <br> # e.g. checking every item in a data set against a particular condition (e.g. 
prime number, multiple of 5....etc), performing an algebraic operation on every item in a data set. # <br> # __Lists and tuples__ make this simple as we can call each entry in turn using its index. # <br> # __Dictionaries__ this is less efficient as it requires more code. # - __Selecting a single item from a data structure without knowing its position in a sequence.__ # e.g. looking up the profile of a person using their name, avoiding looping through a large data set in order to identify a single entry. # <br> # __Dictionaries__ allow us to select a single entry by an associated (unique) key variable. # <br> # __Lists and tuples__ make this difficult as to pick out a single value we must either i) know it's position in an ordered sequence, ii)loop through every item until we find it. # # - __Protecting individual items of a data sequence from being added, removed or changed within the program.__ # <br> # e.g. representing a vector of fixed length with fixed values, representing the coordintes of a fixed point. # <br> # __Tuples__ allow us to do this. # <br> # __Lists and dictionaries__ do not. # - __Speed__ # For many numerical computations, efficiency is essential. More flexible data structures are generally less efficient computationally. They require more computer memory. We will study the difference in speed there can be between different data structures in a later seminar. # ## Review Exercises # Here are a series of engineering problems for you to practise each of the new Python skills that you have learnt today. # ### Review Exercise: Data structures. # # __(A)__ In the cell below, what type of data structure is C? # # __(B)__ Write a line of code that checks whether 3 exists within the data structure. # # __(C)__ Write a line of code that checks whether 3.0 exists within the data structure. # # __(D)__ Write a line of code that checks whether "3" exists within the data structure. # C = (2, 3, 5, 6, 1, "hello") print(3 in C) print(3.0 in C) print("3" in C) # ### Review Exercise: Using Lists with `for` Loops. # # In the cell below: # # - Create a list with the names of the months. # <br> # - Create a second list with the number of days in each month (for a regular year). # <br> # - Create a `for` loop that prints: # # `The number of days in MONTH is XX days` # # where, `MONTH` is the name of the month and `XX` is the correct number of days in that month. # # Hint: Refer to <a href='#DotProductLists'>Indexing Example: The dot product of two vectors</a> for how to use two vectors in a loop. # # + # A for loop to print the number of days in each month month_names = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"] number_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] for name, number in zip(month_names, number_days): print(f'The number of days in {name} is {number} days') # - # ### Review Exercise: Indexing. # # __(A)__ In the cell below write a program that adds two vectors, $\mathbf{A}$ and $\mathbf{B}$, expressed as lists # <br> # # $\mathbf{A} = [-2, 1, 3]$ # # $\mathbf{B} = [6, 2, 2]$ # # $ \mathbf{C} = [C_1, # C_2, ... # C_n] = \mathbf{A} + \mathbf{B} = [(A_1 + B_1), # (A_2 + B_2), ... # (A_n + B_n)]$ # # __Hints:__ # - Refer to <a href='#DotProductLists'>Indexing Example: The dot product of two vectors</a> for how to use two vectors in a loop. # - Start by creating an empty list, `C = []`. 
# <br>Add an element to the list each time the code loops using the method `C.append()` # <br><a href='#Append'>Jump to Adding an Item to a List</a> # # <br> # __(B)__ To add two vectors, the number of elements in each vectors must be equal. # <br>Use the function `len()` to print the length of $\mathbf{A}$ and the length of $\mathbf{B}$ before adding the two vectors. # <br><a href='#Length'>Jump to Finding the Length of a List</a> # # <br> # __(C)__ Use `if` and `else` statements (Seminar 2) to: # - add the two vectors __only__ if the length of $\mathbf{A}$ and the length of $\mathbf{B}$ are equal. # - otherwise print a message (e.g. "`unequal vector length!`") # # Hint: Use a logical operator (`==`, `<`, `>`....) to compare the lengths of $\mathbf{A}$ and $\mathbf{B}$. <br>Refer to __Logical Operators__ (Seminar 2). # # <br> # __(D)__ Check your code works by using it to try and add two vectors with: # <br>i) the same number of elements in each vector # <br>ii) a different number of elements in each vector # Vector addition program with length check. A = [-2, 1, 3] B = [6, 2, 2] print(len(A), len(B)) if len(A) == len(B): C = [a + b for a, b in zip(A, B)] print(C) else: print("Unequal vector lengths!") # ### Review Exercise: `if` and `else` statements. # # Copy and paste the program you wrote earlier to <a href='#DotProductLists'>find the dot product of two vectors</a> into the cell below. # # Within the loop use `if`, `elif` and else `else` to make the program print: # - "`The angle between vectors is acute`" if the dot product is positive. # - "`The angle between vectors is obtuse`" if the dot product is negative. # - "`The vectors are perpendicular`" if the dot product is 0. # + # Determinig angle types using the dot product. A = [-2, 1, 3] B = [6, 2, 2] dp = sum(a * b for a, b in zip(A, B)) print(dp) if dp > 0: print("The angle between vectors is acute") elif dp < 0: print("The angle between vectors is obtuse") else: print("The vectors are perpendicular") # - # ### Review Exercise: Dictionaries. # # <img src="img/periodic_table.gif" alt="Drawing" style="width: 500px;"/> # # __(A)__ Choose 5 elements from the periodic table. # <br> # In the cell below create a dictionary: # <a href='#Dictionaries'>Jump to Dictionaries</a> # - __keys:__ chemical symbol names # - __values:__ atomic numbers # # e.g. # ```python # dictionary = {"C":6, "N":7, "O":8....} # # ``` # # # __(B)__ Remove one entry from the dictionary and print the updated version. # <br><a href='#DictionaryRemove'>Jump to Removing Entries from a Dictionary</a> # # __(C)__ Add a new entry (chemical symbol and atomic number) to the dictionary and print the updated version. # <br><a href='#DictionaryAdd'>Jump to Adding Entries to a Dictionary</a> # # __(D)__ Use a `for` loop to create a new dictionary: # - __keys:__ atomic numbers # - __values:__ chemical symbols # using your original dictionary. # <br> Hint: Refer to the earlier example of <a href='#DictionaryRestructure'>re-structuring to make a new dictioary.</a> # # __*Optional Extension*__ # # __(E)__ Print a __list__ of the chemical symbols in your dictionary, sorted into alphabetical order. # Hints: # - Create an empty list # - Use a for loop to add each chemical symbol to the list # - Sort the list in alphabetical order # + # Dictionary of periodic table items. 
five_elements = {"Kr": 36, "Ca": 20, "C": 6, "Au": 79, "Ti": 22} print(five_elements) del five_elements["Kr"] print(five_elements) five_elements["Mg"] = 12 print(five_elements) five_zs = {number: element for element, number in five_elements.items()} print(five_zs) print(sorted(five_elements)) # - # ### Review Exercise: `while` loops (bisection) # # Bisection is an iterative method for approximating a root of a function $y = F(x)$ # <br>i.e. a value of $x$ for which the function $F(x)$ is equal to zero. # <br>Therefore the roots are found where the line of the function F(x) __crosses__ the x axis (the red dot indicates the root of the function): # # # <img src="img/bisection_method.png" alt="Drawing" style="width: 300px;"/> # # # If we know such a __crossing point__ lies within the interval x = a and x = b we can repeatedly *bisect* this interval to narrow down the interval in which x = root must lie. # # Each iteration, x$_{mid} = \frac{a + b}{2}$ is determined and used to determine whether the crossing point is between x$_{mid}$ and a or x$_{mid}$ and b. # <br>This is used to define a new, narrower interval in which we know the crossing point lies. # # x_mid = (a + b) / 2 # # # # If F(x) changes sign between F(x_mid) and F(a), # # the root must lie between F(x_mid) and F(a) # # if F(x_mid) * F(a) < 0: # b = x_mid # x_mid = (a + b)/2 # # # # If F(x) changes sign between F(x_mid) and F(b), # # the root must lie between F(x_mid) and F(b) # # else: # a = x_mid # x_mid = (a + b)/2 # # <img src="../../../ILAS_seminars/intro to python/img/bisection_method_simple.png" alt="Drawing" style="width: 300px;"/> # # In the example shown, the midpoint (x$_{mid}$) of a$_1$ and b$_1$ is b$_2$ # <br>F(a$_1$) $\times$ F(b$_2$) = negative # <br>F(b$_1$) $\times$ F(b$_2$) = positive # # So the new increment is between a$_1$ and b$_2$. # # <br> # # By repeating this process, the value of F(x$_{mid}$) should become closer to zero with each iteration. # # The process is repeated until the *absolute* value |F(x$_{mid}$)| is sufficiently small (below a predetermined value (*tolerance*)). # # We then determine x$_{mid}$ is the root of the function. # # It is a very simple and robust method. # # **Task:** # # $$ # F(x) = 4x^3 - 3x^2 - 25x - 6 # $$ # # <img src="img/graph_polynomial.png" alt="Drawing" style="width: 300px;"/> # # The function has one root between x = 0 and x = -0.6. # # __(A)__ Use the bisection method to estimate the value of the root between x = 0 and x = -0.6. # <br>Instructions: # - Use a while loop to repeat the code above __while__ absF(x$_{mid}$) > 1 $\times10^{-6}$. # - Each time the code loops: # - __Compute__ F(a), F(b) and F(x_mid) [Hint: Use approprate variable names that don't contain () parentheses) # - __Print__ F(x$_{mid}$) to check absF(x$_{mid}$) $< 1 \times10^{-6}$. <br>Use the function `abs()` to compute the absolute value of a number, <br>https://docs.python.org/3/library/functions.html#abs <br> e.g. `y = abs(x)` assigns the absolute value of `x` to `y`. # - __Bisect__ the increment using the code shown above # - __After__ the loop print the final value of x$_{mid}$ using `print("root = ", x_mid) `. <br>This value is the estimate of the root. # # <a href='#WhileLoops'>Jump to While Loops'</a> # # __(B)__ The bisection method is only effective where F(a) and F(b) are of opposite sign. # <br> i.e. where F(a) $\times$ F(b) $ < 0$ # <br>Add an if statement to your code so that the while loop is only run *if* the inputs a and b are of opposite sign. 
# + # Bisection while loop x_a = -0.6 x_b = 0 x_mid = (x_a + x_b)/2 F_x_a = 4*x_a**3 - 3*x_a**2 - 25*x_a - 6 F_x_b = 4*x_b**3 - 3*x_b**2 - 25*x_b - 6 F_x_mid = 4*x_mid**3 - 3*x_mid**2 - 25*x_mid - 6 if F_x_a * F_x_b < 0: while abs(F_x_mid) > 1e-6: F_x_a = 4*x_a**3 - 3*x_a**2 - 25*x_a - 6 F_x_b = 4*x_b**3 - 3*x_b**2 - 25*x_b - 6 F_x_mid = 4*x_mid**3 - 3*x_mid**2 - 25*x_mid - 6 print(F_x_mid) if F_x_a*F_x_mid > 0: # if the product is positive then they have the same sign x_a = x_mid elif F_x_b*F_x_mid > 0: x_b = x_mid # no need for else if the product is equal to zero because zero is less than 1e-6 so the loop ends. x_mid = (x_a + x_b)/2 print('root =', x_mid) # + def F(x): return 4*x**3 - 3*x**2 - 25*x - 6 def same_sign(a, b): return a * b > 0 def mid(a, b): return (a + b) / 2 def solve(function, x_a, x_b, a_tol=1e-6): if function(x_a) == 0: return x_a elif function(x_b) == 0: return x_b elif not same_sign(function(x_a), function(x_b)): x_mid = mid(x_a, x_b) y_mid = function(x_mid) while abs(y_mid) > a_tol: y_a = function(x_a) y_b = function(x_b) y_mid = function(x_mid) if same_sign(y_a, y_mid): x_a = x_mid elif same_sign(y_b, y_mid): x_b = x_mid x_mid = mid(x_a, x_b) return x_mid solve(F, -0.6, 0) # - # __(C)__ In the previous example you stopped the while loop when the value of the function was sufficiently small (abs(F(x$_{mid}$)) $< 1 \times10^{-6}$) that we can consider the corresponding value of x to be a root of the function. # # This time we are going to edit your code so that the loop is stopped when it reaches a __maximum number of iterations__. <br>Copy and paste your code from the cell above in the cel below. # <br>Replace your __while loop__ with a __for loop__ that runs the code in the loop 25 times then stops. # # __(D)__ Within the for loop, add a `break` statement. # <br>The `break` statement should exit the for loop __if__ abs(F$_mid$) $< 1 \times10^{-6}$. # <br>i.e. __if__ abs(F$_mid$) $< 1 \times10^{-6}$ the loop will stop before the maximum number of iterations is reached. # <br>Before the command `break`, print the value of x$_{mid}$ using `print("root = ", x_mid) `. <br>This value is the estimate of the root. # # <a href='#Break'>Jump to break'</a> # + # Copy and paste your code from the cell above, here # Bisection while loop x_a = -0.6 x_b = 0 x_mid = (x_a + x_b)/2 F_x_a = 4*x_a**3 - 3*x_a**2 - 25*x_a - 6 F_x_b = 4*x_b**3 - 3*x_b**2 - 25*x_b - 6 F_x_mid = 4*x_mid**3 - 3*x_mid**2 - 25*x_mid - 6 if F_x_a * F_x_b < 0: for _ in range(25): # we used an _ instead of "i" because we do not use "i" in the loop. F_x_a = 4*x_a**3 - 3*x_a**2 - 25*x_a - 6 F_x_b = 4*x_b**3 - 3*x_b**2 - 25*x_b - 6 F_x_mid = 4*x_mid**3 - 3*x_mid**2 - 25*x_mid - 6 print(F_x_mid) if abs(F_x_mid) < 1e-6: break if F_x_a*F_x_mid > 0: # if the product is positive then they have the same sign x_a = x_mid elif F_x_b*F_x_mid > 0: x_b = x_mid # no need for else if the product is equal to zero because zero is less than 1e-6 so the loop ends. x_mid = (x_a + x_b)/2 print('root =', x_mid) # - # # Updating your git repository # # You have made several changes to your interactive textbook. # # The final thing we are going to do is add these changes to your online repository so that: # - I can check your progress # - You can access the changes from outside of the university server. # # > Save your work. # > <br> `git add -A` # > <br>`git commit -m "A short message describing changes"` # > <br>`git push origin master` # # <br>Refer to supplementary material: __S1_Introduction_to_Version_Control.ipynb__. 
# ## Summary

# - A data structure is used to assign a collection of values to a single collection name.
# - A Python list can store multiple items of data in sequentially numbered elements (numbering starts at zero).
# - Data stored in a list element can be referenced using the list name followed by an index number in [] square brackets.
# - The `len()` function returns the length of a specified list.
# - A Python tuple is a sequence whose values cannot be individually changed, removed or added to (except by adding another tuple).
# - Data stored in a tuple element can be referenced using the tuple name followed by an index number in [] square brackets.
# - A Python dictionary is a collection of key: value pairs of data in which each key must be unique.
# - Data stored in a dictionary element can be referenced using the dictionary name followed by its key in [] square brackets.

# # Homework
#
# 1. __PULL__ the changes you made in class today to your personal computer.
# 1. __COMPLETE__ any unfinished Review Exercises.
# 1. __PUSH__ the changes you make at home to your online repository.
#
# <br>Refer to supplementary material: __S1_Introduction_to_Version_Control.ipynb__.
#
# In particular, please complete: __Review Exercise: `while` loops (bisection)__.
# <br>You will need to refer to your answer in next week's Seminar.
3_Data_structures.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Import leafdetection functions and other packages import matplotlib matplotlib.use('module://ipympl.backend_nbagg') import os import skimage.io from leafdetection import create_experiment_name, autodetect_leafs, reorder_leafs, overwrite_with_reordered_leafs from matplotlib import pyplot as plt from IPython.core.display import display, HTML display(HTML("<style>.container { width:90% !important; }</style>")) # + # Set some variables # Adjust the threshold for leaf detection threshold = 19 # Adjust the minimum area a detected leaf has to have to be # accepted as one. You have to consider: # 1. Leaf size # 2. Image size (4000 x 6000 pixel) # 3. Zoom factor of camera min_area = 5000 # horizontal_dilate needs to be larger than half the difference # of the x values of the central positions of two neighbouring # leafs (in one row), but as small as possible. horizontal_dilate = 400 # vertical_dilate needs to be larger than half the difference # of the y values of the central positions of two neighbouring # leafs (in one row), but as small as possible. vertical_dilate = 200 # Select the directory, where the leaf images to be processed are located leaf_images_dir = './images' # Select the directories the images, regions and overview should be saved into results_dir = './results' # Autodetect leafs of all 'JPG' images in the folder `leaf_images_dir` for dirpath, dirnames, image_filenames in os.walk(leaf_images_dir): for image_filename in image_filenames: if image_filename.endswith('.JPG') or image_filename.endswith('.jpg'): fullname = os.path.abspath(os.path.join(dirpath, image_filename)) experiment_name = create_experiment_name(image_filename) autodetect_leafs(fullname, results_dir, experiment_name, threshold=threshold, min_area=min_area, vertical_dilate=vertical_dilate, horizontal_dilate=horizontal_dilate, verbose=True) # + # Reorder autodetected leafs image_filename = './images/IMG_3576.JPG' results_dir = 'results' experiment_name = create_experiment_name(image_filename) fig, ax, leaf_regions, regions_removed = reorder_leafs(image_filename, results_dir, experiment_name) # - # Overwrite the the old results with reordered leafs, overwrite_with_reordered_leafs(fig, image_filename, results_dir, experiment_name, leaf_regions) # close the figure and release the memory fig.clear() plt.close(fig)
leafdetection/leafdetection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Crypto Arbitrage # # In this Challenge, you'll take on the role of an analyst at a high-tech investment firm. The vice president (VP) of your department is considering arbitrage opportunities in Bitcoin and other cryptocurrencies. As Bitcoin trades on markets across the globe, can you capitalize on simultaneous price dislocations in those markets by using the powers of Pandas? # # For this assignment, you’ll sort through historical trade data for Bitcoin on two exchanges: Bitstamp and Coinbase. Your task is to apply the three phases of financial analysis to determine if any arbitrage opportunities exist for Bitcoin. # # This aspect of the Challenge will consist of 3 phases. # # 1. Collect the data. # # 2. Prepare the data. # # 3. Analyze the data. # # # ### Import the required libraries and dependencies. import pandas as pd from pathlib import Path # %matplotlib inline # ## Collect the Data # # To collect the data that you’ll need, complete the following steps: # # Instructions. # # 1. Using the Pandas `read_csv` function and the `Path` module, import the data from `bitstamp.csv` file, and create a DataFrame called `bitstamp`. Set the DatetimeIndex as the Timestamp column, and be sure to parse and format the dates. # # 2. Use the `head` (and/or the `tail`) function to confirm that Pandas properly imported the data. # # 3. Repeat Steps 1 and 2 for `coinbase.csv` file. # ### Step 1: Using the Pandas `read_csv` function and the `Path` module, import the data from `bitstamp.csv` file, and create a DataFrame called `bitstamp`. Set the DatetimeIndex as the Timestamp column, and be sure to parse and format the dates. # Read in the CSV file called "bitstamp.csv" using the Path module. # The CSV file is located in the Resources folder. # Set the index to the column "Date" # Set the parse_dates and infer_datetime_format parameters bitstamp = pd.read_csv(Path('./Resources/bitstamp.csv'), index_col="Timestamp", parse_dates=True, infer_datetime_format=True) # ### Step 2: Use the `head` (and/or the `tail`) function to confirm that Pandas properly imported the data. # Use the head (and/or tail) function to confirm that the data was imported properly. bitstamp.head() bitstamp.tail() # ### Step 3: Repeat Steps 1 and 2 for `coinbase.csv` file. # Read in the CSV file called "coinbase.csv" using the Path module. # The CSV file is located in the Resources folder. # Set the index to the column "Timestamp" # Set the parse_dates and infer_datetime_format parameters coinbase = pd.read_csv(Path('./Resources/coinbase.csv'), index_col="Timestamp", parse_dates=True, infer_datetime_format=True) # Use the head (and/or tail) function to confirm that the data was imported properly. coinbase.head() coinbase.tail() # ## Prepare the Data # # To prepare and clean your data for analysis, complete the following steps: # # 1. For the bitstamp DataFrame, replace or drop all `NaN`, or missing, values in the DataFrame. # # 2. Use the `str.replace` function to remove the dollar signs ($) from the values in the Close column. # # 3. Convert the data type of the Close column to a `float`. # # 4. Review the data for duplicated values, and drop them if necessary. # # 5. Repeat Steps 1–4 for the coinbase DataFrame. 
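# Before working through the steps one by one, the whole cleaning pipeline can be summarised in a
# single sketch. The helper below is illustrative only (the function name `clean_exchange_df` is not
# part of the assignment); it assumes a DataFrame with a string "Close" column such as "$13620.00".
# Passing `regex=False` keeps the dollar-sign replacement literal across pandas versions.

# +
def clean_exchange_df(df):
    """Apply Steps 1-4 of the data-preparation phase to one exchange DataFrame."""
    df = df.dropna()                                              # Step 1: drop missing rows
    df["Close"] = df["Close"].str.replace("$", "", regex=False)  # Step 2: remove dollar signs
    df["Close"] = df["Close"].astype("float")                    # Step 3: convert Close to float
    df = df.drop_duplicates()                                    # Step 4: drop duplicated rows
    return df

# Example usage: bitstamp = clean_exchange_df(bitstamp)
# -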
# ### Step 1: For the bitstamp DataFrame, replace or drop all `NaN`, or missing, values in the DataFrame. # For the bitstamp DataFrame, replace or drop all NaNs or missing values in the DataFrame bitstamp.isnull().sum() bitstamp = bitstamp.dropna() bitstamp.isnull().sum() #Confirmed no missing data # ### Step 2: Use the `str.replace` function to remove the dollar signs ($) from the values in the Close column. bitstamp.loc[:,"Close"] # Use the str.replace function to remove the dollar sign, $ bitstamp.loc[:,"Close"] = bitstamp.loc[:, "Close"].str.replace("$", "") bitstamp.loc[:,"Close"] #Confirmed no dollar sign in Close column # ### Step 3: Convert the data type of the Close column to a `float`. # Check the Close data type bitstamp.loc[:,"Close"].dtypes # Convert the Close data type to a float bitstamp.loc[:,"Close"] = bitstamp.loc[:, "Close"].astype("float") bitstamp.loc[:,"Close"].dtypes #Confirmed type changed from object to float # ### Step 4: Review the data for duplicated values, and drop them if necessary. # Review the data for duplicate values, and drop them if necessary bitstamp.duplicated().sum() # no duplicate data # + # bitstamp = bitstamp.drop_duplicates() # - # ### Step 5: Repeat Steps 1–4 for the coinbase DataFrame. # + # Repeat Steps 1–4 for the coinbase DataFrame # - # For the coinbase DataFrame, replace or drop all NaNs or missing values in the DataFrame coinbase.isnull().sum() coinbase = coinbase.dropna() coinbase.isnull().sum() #Confirmed no missing data # Use the str.replace function to remove the dollar sign, $ coinbase.loc[:,"Close"] coinbase.loc[:,"Close"] = coinbase.loc[:, "Close"].str.replace("$", "") coinbase.loc[:,"Close"] #Confirmed no dollar sign in Close column # Check the Close data type coinbase.loc[:,"Close"].dtypes # Convert the Close data type to a float coinbase.loc[:,"Close"] = coinbase.loc[:, "Close"].astype("float") coinbase.loc[:,"Close"].dtypes #Confirmed type changed from object to float # Review the data for duplicate values, and drop them if necessary coinbase.duplicated().sum() # no duplicate data # + # coinbase = bitstamp.drop_duplicates() # - # ## Analyze the Data # # Your analysis consists of the following tasks: # # 1. Choose the columns of data on which to focus your analysis. # # 2. Get the summary statistics and plot the data. # # 3. Focus your analysis on specific dates. # # 4. Calculate the arbitrage profits. # ### Step 1: Choose columns of data on which to focus your analysis. # # Select the data you want to analyze. Use `loc` or `iloc` to select the following columns of data for both the bitstamp and coinbase DataFrames: # # * Timestamp (index) # # * Close # # + # Use loc or iloc to select `Timestamp (the index)` and `Close` from bitstamp DataFrame bitstamp_sliced = bitstamp.loc[:,["Close"]] # Review the first five rows of the DataFrame bitstamp_sliced.head() # + # Use loc or iloc to select `Timestamp (the index)` and `Close` from coinbase DataFrame coinbase_sliced = coinbase.loc[:,["Close"]] # Review the first five rows of the DataFrame coinbase_sliced.head() # - # ### Step 2: Get summary statistics and plot the data. # # Sort through the time series data associated with the bitstamp and coinbase DataFrames to identify potential arbitrage opportunities. To do so, complete the following steps: # # 1. Generate the summary statistics for each DataFrame by using the `describe` function. # # 2. For each DataFrame, create a line plot for the full period of time in the dataset. 
Be sure to tailor the figure size, title, and color to each visualization.
#
# 3. In one plot, overlay the visualizations that you created in Step 2 for bitstamp and coinbase. Be sure to adjust the legend and title for this new visualization.
#
# 4. Using the `loc` and `plot` functions, plot the price action of the assets on each exchange for different dates and times. Your goal is to evaluate how the spread between the two exchanges changed across the time period that the datasets define. Did the degree of spread change as time progressed?

# Generate the summary statistics for the bitstamp DataFrame
bitstamp_sliced.describe()

# Generate the summary statistics for the coinbase DataFrame
coinbase_sliced.describe()

# Create a line plot for the coinbase DataFrame for the full length of time in the dataset
# Be sure that the figure size, title, and color are tailored to each visualization
coinbase_sliced.plot(title="Coinbase Prices", color="orange")

# Create a line plot for the bitstamp DataFrame for the full length of time in the dataset
# Be sure that the figure size, title, and color are tailored to each visualization
bitstamp_sliced.plot(title="Bitstamp Prices", color="blue")

# Overlay the visualizations for the bitstamp and coinbase DataFrames in one plot
# The plot should visualize the prices over the full length of the dataset
# Be sure to include the parameters: legend, figure size, title, color and label
bitstamp_sliced["Close"].plot(legend=True, figsize=(15, 10), title="Bitstamp v. Coinbase - Full Length of Dataset", color="blue", label="Bitstamp")
coinbase_sliced["Close"].plot(legend=True, figsize=(15, 10), color="orange", label="Coinbase")

# Using the loc and plot functions, create an overlay plot that visualizes
# the price action of both DataFrames for a one-month period early in the dataset
# Be sure to include the parameters: legend, figure size, title, color and label
bitstamp_sliced["Close"].loc['2018-1-1' : '2018-1-31'].plot(legend=True, figsize=(15, 10), title="Bitstamp v. Coinbase - January 2018", color="blue", label="Bitstamp")
coinbase_sliced["Close"].loc['2018-1-1' : '2018-1-31'].plot(legend=True, figsize=(15, 10), color="orange", label="Coinbase")

# Using the loc and plot functions, create an overlay plot that visualizes
# the price action of both DataFrames for a one-month period later in the dataset
# Be sure to include the parameters: legend, figure size, title, color and label
bitstamp_sliced["Close"].loc['2018-3-1' : '2018-3-31'].plot(legend=True, figsize=(15, 10), title="Bitstamp v. Coinbase - March 2018", color="blue", label="Bitstamp")
coinbase_sliced["Close"].loc['2018-3-1' : '2018-3-31'].plot(legend=True, figsize=(15, 10), color="orange", label="Coinbase")

# **Question** Based on the visualizations of the different time periods, has the degree of spread changed as time progressed?
#
# **Answer** According to the visualizations, the degree of spread narrowed over the time frame of the entire dataset, and this also holds within March 2018; during January 2018, however, the spread widened towards the end of the month.

# ### Step 3: Focus Your Analysis on Specific Dates
#
# Focus your analysis on specific dates by completing the following steps:
#
# 1. Select three dates to evaluate for arbitrage profitability. Choose one date that’s early in the dataset, one from the middle of the dataset, and one from the later part of the time period.
#
# 2.
For each of the three dates, generate the summary statistics and then create a box plot. This big-picture view is meant to help you gain a better understanding of the data before you perform your arbitrage calculations. As you compare the data, what conclusions can you draw? # Create an overlay plot that visualizes the two dataframes over a period of one day early in the dataset. # Be sure that the plots include the parameters `legend`, `figsize`, `title`, `color` and `label` bitstamp_sliced["Close"].loc['2018-1-16'].plot(legend=True, figsize=(15, 10), title="Bitstamp v. Coinbase - Jan 16, 2018", color="blue", label="Bitstamp") coinbase_sliced["Close"].loc['2018-1-16'].plot(legend=True, figsize=(15, 10), color="orange", label="Coinbase") # + tags=[] # Using the early date that you have selected, calculate the arbitrage spread # by subtracting the bitstamp lower closing prices from the coinbase higher closing prices arbitrage_spread_early = coinbase_sliced['Close'].loc['2018-1-16'] - bitstamp_sliced['Close'].loc['2018-1-16'] # Generate summary statistics for the early DataFrame arbitrage_spread_early.describe() # - # Visualize the arbitrage spread from early in the dataset in a box plot arbitrage_spread_early.plot(kind='box',title="Arbitrage Spread - Early Date") # Create an overlay plot that visualizes the two dataframes over a period of one day from the middle of the dataset. # Be sure that the plots include the parameters `legend`, `figsize`, `title`, `color` and `label` bitstamp_sliced["Close"].loc['2018-2-24'].plot(legend=True, figsize=(15, 10), title="Bitstamp v. Coinbase - Feb 24th, 2018", color="blue", label="Bitstamp") coinbase_sliced["Close"].loc['2018-2-24'].plot(legend=True, figsize=(15, 10), color="orange", label="Coinbase") # + # Using the date in the middle that you have selected, calculate the arbitrage spread # by subtracting the bitstamp lower closing prices from the coinbase higher closing prices arbitrage_spread_middle = coinbase_sliced['Close'].loc['2018-2-24'] - bitstamp_sliced['Close'].loc['2018-2-24'] # Generate summary statistics arbitrage_spread_middle.describe() # - # Visualize the arbitrage spread from the middle of the dataset in a box plot arbitrage_spread_middle.plot(kind='box',title="Arbitrage Spread - Middle Date") # Create an overlay plot that visualizes the two dataframes over a period of one day from late in the dataset. # Be sure that the plots include the parameters `legend`, `figsize`, `title`, `color` and `label` bitstamp_sliced["Close"].loc['2018-3-26'].plot(legend=True, figsize=(15, 10), title="Bitstamp v. Coinbase - Mar 26th, 2018", color="blue", label="Bitstamp") coinbase_sliced["Close"].loc['2018-3-26'].plot(legend=True, figsize=(15, 10), color="orange", label="Coinbase") # + # Using the date from the late that you have selected, calculate the arbitrage spread # by subtracting the bitstamp lower closing prices from the coinbase higher closing prices arbitrage_spread_late = coinbase_sliced['Close'].loc['2018-3-26'] - bitstamp_sliced['Close'].loc['2018-3-26'] # Generate summary statistics for the late DataFrame arbitrage_spread_late.describe() # - # Visualize the arbitrage spread from late in the dataset in a box plot arbitrage_spread_late.plot(kind='box',title="Arbitrage Spread - Late Date") # ### Step 4: Calculate the Arbitrage Profits # # Calculate the potential profits for each date that you selected in the previous section. Your goal is to determine whether arbitrage opportunities still exist in the Bitcoin market. 
Complete the following steps: # # 1. For each of the three dates, measure the arbitrage spread between the two exchanges by subtracting the lower-priced exchange from the higher-priced one. Then use a conditional statement to generate the summary statistics for each arbitrage_spread DataFrame, where the spread is greater than zero. # # 2. For each of the three dates, calculate the spread returns. To do so, divide the instances that have a positive arbitrage spread (that is, a spread greater than zero) by the price of Bitcoin from the exchange you’re buying on (that is, the lower-priced exchange). Review the resulting DataFrame. # # 3. For each of the three dates, narrow down your trading opportunities even further. To do so, determine the number of times your trades with positive returns exceed the 1% minimum threshold that you need to cover your costs. # # 4. Generate the summary statistics of your spread returns that are greater than 1%. How do the average returns compare among the three dates? # # 5. For each of the three dates, calculate the potential profit, in dollars, per trade. To do so, multiply the spread returns that were greater than 1% by the cost of what was purchased. Make sure to drop any missing values from the resulting DataFrame. # # 6. Generate the summary statistics, and plot the results for each of the three DataFrames. # # 7. Calculate the potential arbitrage profits that you can make on each day. To do so, sum the elements in the profit_per_trade DataFrame. # # 8. Using the `cumsum` function, plot the cumulative sum of each of the three DataFrames. Can you identify any patterns or trends in the profits across the three time periods? # # (NOTE: The starter code displays only one date. You'll want to do this analysis for two additional dates). # #### 1. For each of the three dates, measure the arbitrage spread between the two exchanges by subtracting the lower-priced exchange from the higher-priced one. Then use a conditional statement to generate the summary statistics for each arbitrage_spread DataFrame, where the spread is greater than zero. # # *NOTE*: For illustration, only one of the three dates is shown in the starter code below. 
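# The note above shows the calculation for a single date; to avoid copying the same cells for the
# other two dates, the spread calculation can be wrapped in a small helper. This is an optional
# sketch, not part of the starter code, and the function name `positive_spread_for_date` is
# illustrative.

# +
def positive_spread_for_date(date, coinbase_df, bitstamp_df):
    """Return the positive part of the Coinbase-minus-Bitstamp spread for one day."""
    spread = coinbase_df['Close'].loc[date] - bitstamp_df['Close'].loc[date]
    return spread[spread > 0]

# Example usage with the three dates analysed in this notebook:
# for date in ['2018-1-16', '2018-2-24', '2018-3-26']:
#     print(positive_spread_for_date(date, coinbase_sliced, bitstamp_sliced).describe())
# -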
# + tags=[]
# For the date early in the dataset, measure the arbitrage spread between the two exchanges
# by subtracting the lower-priced exchange from the higher-priced one

# arbitrage_spread_early already available from previous step (coinbase closing prices > bitstamp closing prices)

# Use a conditional statement to generate the summary statistics for each arbitrage_spread DataFrame
arbitrage_spread_early = arbitrage_spread_early[arbitrage_spread_early > 0]

arbitrage_spread_early.describe()

# +
# For the date in the middle of the dataset, measure the arbitrage spread between the two exchanges
# by subtracting the lower-priced exchange from the higher-priced one

# arbitrage_spread_middle already available from previous step (coinbase closing prices > bitstamp closing prices)

# Use a conditional statement to generate the summary statistics for each arbitrage_spread DataFrame
arbitrage_spread_middle = arbitrage_spread_middle[arbitrage_spread_middle > 0]

arbitrage_spread_middle.describe()

# +
# For the date late in the dataset, measure the arbitrage spread between the two exchanges
# by subtracting the lower-priced exchange from the higher-priced one

# arbitrage_spread_late already available from previous step (coinbase closing prices > bitstamp closing prices)

# Use a conditional statement to generate the summary statistics for each arbitrage_spread DataFrame
arbitrage_spread_late = arbitrage_spread_late[arbitrage_spread_late > 0]

arbitrage_spread_late.describe()
# -

# ### Inference: The arbitrage spread summary statistics for the three dates above show that the spread decreased over time

# + [markdown] tags=[]
# #### 2. For each of the three dates, calculate the spread returns. To do so, divide the instances that have a positive arbitrage spread (that is, a spread greater than zero) by the price of Bitcoin from the exchange you’re buying on (that is, the lower-priced exchange). Review the resulting DataFrame.

# +
# For the date early in the dataset, calculate the spread returns by dividing the instances when the arbitrage spread is positive (> 0)
# by the price of Bitcoin from the exchange you are buying on (the lower-priced exchange).
spread_return_early = arbitrage_spread_early / bitstamp_sliced['Close'].loc['2018-1-16']

# Review the spread return DataFrame
spread_return_early = spread_return_early.dropna()
spread_return_early.head()

# +
# For the date in the middle of the dataset, calculate the spread returns by dividing the instances when the arbitrage spread is positive (> 0)
# by the price of Bitcoin from the exchange you are buying on (the lower-priced exchange).
spread_return_middle = arbitrage_spread_middle / bitstamp_sliced['Close'].loc['2018-2-24']

# Review the spread return DataFrame
spread_return_middle = spread_return_middle.dropna()
spread_return_middle.head()

# +
# For the date late in the dataset, calculate the spread returns by dividing the instances when the arbitrage spread is positive (> 0)
# by the price of Bitcoin from the exchange you are buying on (the lower-priced exchange).
spread_return_late = arbitrage_spread_late / bitstamp_sliced['Close'].loc['2018-3-26']

# Review the spread return DataFrame
spread_return_late = spread_return_late.dropna()
spread_return_late.head()
# -

# #### 3. For each of the three dates, narrow down your trading opportunities even further. To do so, determine the number of times your trades with positive returns exceed the 1% minimum threshold that you need to cover your costs.
# For the date early in the dataset, determine the number of times your trades with positive returns
# exceed the 1% minimum threshold (.01) that you need to cover your costs
profitable_trades_early = spread_return_early[spread_return_early > .01]

# Review the first five profitable trades
profitable_trades_early.head()

# For the date in the middle of the dataset, determine the number of times your trades with positive returns
# exceed the 1% minimum threshold (.01) that you need to cover your costs
profitable_trades_middle = spread_return_middle[spread_return_middle > .01]

# Review the first five profitable trades
profitable_trades_middle.head()

# For the date late in the dataset, determine the number of times your trades with positive returns
# exceed the 1% minimum threshold (.01) that you need to cover your costs
profitable_trades_late = spread_return_late[spread_return_late > .01]

# Review the first five profitable trades
profitable_trades_late.head()

# #### 4. Generate the summary statistics of your spread returns that are greater than 1%. How do the average returns compare among the three dates?

# + tags=[]
# For the date early in the dataset, generate the summary statistics for the profitable trades,
# i.e. the trades where the spread returns are greater than 1%
profitable_trades_early.describe()
# -

# For the date in the middle of the dataset, generate the summary statistics for the profitable trades,
# i.e. the trades where the spread returns are greater than 1%
profitable_trades_middle.describe()

# For the date late in the dataset, generate the summary statistics for the profitable trades,
# i.e. the trades where the spread returns are greater than 1%
profitable_trades_late.describe()

# ### Inference: The average returns declined from the early to the late dates.

# #### 5. For each of the three dates, calculate the potential profit, in dollars, per trade. To do so, multiply the spread returns that were greater than 1% by the cost of what was purchased. Make sure to drop any missing values from the resulting DataFrame.

# +
# For the date early in the dataset, calculate the potential profit per trade in dollars
# Multiply the profitable trades by the cost of the Bitcoin that was purchased
profit_early = profitable_trades_early * bitstamp_sliced['Close'].loc['2018-1-16']

# Drop any missing values from the profit DataFrame
profit_per_trade_early = profit_early.dropna()

# View the early profit DataFrame
profit_per_trade_early

# +
# For the date in the middle of the dataset, calculate the potential profit per trade in dollars
# Multiply the profitable trades by the cost of the Bitcoin that was purchased
profit_middle = profitable_trades_middle * bitstamp_sliced['Close'].loc['2018-2-24']

# Drop any missing values from the profit DataFrame
profit_per_trade_middle = profit_middle.dropna()

# View the middle profit DataFrame
profit_per_trade_middle

# +
# For the date late in the dataset, calculate the potential profit per trade in dollars
# Multiply the profitable trades by the cost of the Bitcoin that was purchased
profit_late = profitable_trades_late * bitstamp_sliced['Close'].loc['2018-3-26']

# Drop any missing values from the profit DataFrame
profit_per_trade_late = profit_late.dropna()

# View the late profit DataFrame
profit_per_trade_late
# -

# #### 6. Generate the summary statistics, and plot the results for each of the three DataFrames.
# Generate the summary statistics for the early profit per trade DataFrame
profit_per_trade_early.describe()

# Generate the summary statistics for the middle profit per trade DataFrame
profit_per_trade_middle.describe()

# Generate the summary statistics for the late profit per trade DataFrame
profit_per_trade_late.describe()

# Plot the results for the early profit per trade DataFrame
profit_per_trade_early.plot(title="Profit Per Trade - Early Date")

# Plot the results for the middle profit per trade DataFrame
profit_per_trade_middle.plot(title="Profit Per Trade - Middle Date")

# Plot the results for the late profit per trade DataFrame
profit_per_trade_late.plot(title="Profit Per Trade - Late Date")

# #### 7. Calculate the potential arbitrage profits that you can make on each day. To do so, sum the elements in the profit_per_trade DataFrame.

# Calculate the sum of the potential profits for the early profit per trade DataFrame
profit_per_trade_early.sum()

# Calculate the sum of the potential profits for the middle profit per trade DataFrame
profit_per_trade_middle.sum()

# Calculate the sum of the potential profits for the late profit per trade DataFrame
profit_per_trade_late.sum()

# #### 8. Using the `cumsum` function, plot the cumulative sum of each of the three DataFrames. Can you identify any patterns or trends in the profits across the three time periods?

# Use the cumsum function to calculate the cumulative profits over time for the early profit per trade DataFrame
cumulative_profit_early = profit_per_trade_early.cumsum()

# Plot the cumulative sum of profits for the early profit per trade DataFrame
cumulative_profit_early.plot(title="Cumulative Sum - Early Date")

# Use the cumsum function to calculate the cumulative profits over time for the middle profit per trade DataFrame
cumulative_profit_middle = profit_per_trade_middle.cumsum()

# Plot the cumulative sum of profits for the middle profit per trade DataFrame
cumulative_profit_middle.plot(title="Cumulative Sum - Middle Date")

# Use the cumsum function to calculate the cumulative profits over time for the late profit per trade DataFrame
cumulative_profit_late = profit_per_trade_late.cumsum()

# Plot the cumulative sum of profits for the late profit per trade DataFrame
cumulative_profit_late.plot(title="Cumulative Sum - Late Date")

# **Question:** After reviewing the profit information across each date from the different time periods, can you identify any patterns or trends?
#
# **Answer:** For the early and middle dates, the profitable trades cluster towards the end of each day. There are no profitable trades on the late date.
#
#
# ## Analysis Report
#
# ### We assumed a 0.5% fee for each buy and each sell, which totals 1%; hence a profitable trade should return more than 1% to cover the fees.
# ### From the initial visualization, it was evident that Coinbase generally had a higher Bitcoin price than Bitstamp.
# ### That observation was the basis for the calculation of the arbitrage spread, the spread returns and the profits.
# ### The average returns were highest during the early part of the date range and towards the end of each day.
#
crypto_arbitrage.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # + import pandas as pd import seaborn as sns import numpy as np import random as rnd import matplotlib as mp import matplotlib.pyplot as plt from collections import Counter from sklearn import preprocessing from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier from sklearn import metrics from imblearn.over_sampling import SMOTE # - train_=pd.read_csv('../DM-Lab/train_allcols.csv') validate_=pd.read_csv('../DM-Lab/validate_allcols.csv') train = train_.query('SUB2 != 1') validate = validate_.query('SUB2 != 1') print (train['SUB2'].value_counts()) # + retain_list = ['EMPLOY','GENDER','FREQ1','YEAR','EDUC','PSYPROB','PSOURCE','SERVSETA','DETCRIM', 'REGION','NOPRIOR','DIVISION','DSMCRIT','ROUTE1','SUB1','AGE','IDU','SUB3','ROUTE3', 'FREQ3','FRSTUSE3','FREQ2','FRSTUSE2'] train = train[train['SUB2'].isin([2,3,4,5,7,10])] validate = validate[validate['SUB2'].isin([2,3,4,5,7,10])] X_train = train[retain_list] y_train = train["SUB2"] X_validate = validate[retain_list] y_validate = validate["SUB2"] X_train.shape, X_validate.shape # + #one hot # 1. INSTANTIATE enc = preprocessing.OneHotEncoder() # 2. FIT enc.fit(X_train) # + # 3. Transform X_train_enc = enc.transform(X_train).toarray() X_train_enc.shape # + # 4. Transform test X_val_enc = enc.transform(X_validate).toarray() X_val_enc.shape # + # Random Forest random_forest = RandomForestClassifier(n_estimators=100) random_forest.fit(X_train_enc, y_train) random_forest.score(X_train_enc, y_train) # - yp_rf = random_forest.predict(X_val_enc) print (metrics.accuracy_score(yp_rf, y_validate)) print (metrics.recall_score(y_validate, yp_rf, average='macro')) print (metrics.classification_report(y_validate, yp_rf)) # + from sklearn.grid_search import GridSearchCV from sklearn.datasets import make_classification # Build a classification task using 3 informative features '''X, y = make_classification(n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, n_classes=2, random_state=0, shuffle=False)''' rfc = RandomForestClassifier(n_jobs=-1, max_features='sqrt', n_estimators=50, oob_score = True) param_grid = { 'n_estimators': [100, 200, 250], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth': [15, 20, 25], 'min_samples_leaf': [10, 25, 50, 100], 'bootstrap': [True, False], } CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5) CV_rfc.fit(X_train_enc, y_train) print (CV_rfc.best_params_) # -
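# Once the grid search has finished, the tuned forest can be scored on the held-out validation split
# in the same way as the untuned model above. A minimal sketch, assuming the grid search above has
# been run (GridSearchCV refits the best parameter combination on the full training data by default,
# so `best_estimator_` is ready to use):

# +
best_rf = CV_rfc.best_estimator_
yp_best = best_rf.predict(X_val_enc)

print (metrics.accuracy_score(y_validate, yp_best))
print (metrics.recall_score(y_validate, yp_best, average='macro'))
print (metrics.classification_report(y_validate, yp_best))
# -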
week8/af_new_features_selection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Solving a mixed Neumann-Dirichlet Problem # ### Background # With Bempp, it is possible to define operators only on segments of a given domain. This makes it possible to solve mixed Neumann-Dirichlet problems. In this tutorial, we solve the Laplace equation inside the unit cube with unit Dirichlet boundary conditions on two sides and unit Neumann boundary conditions on the other four sides. # # Denote by $\Gamma_D$ the part of the boundary that holds the Dirichlet boundary conditions and by $\Gamma_N$ the boundary part that holds the Neumann boundary conditions. We denote by $t\in\Gamma_D$ the unknown Neumann data and by $u\in\Gamma_N$ the unknown Dirichlet data. The given Dirichlet data on $\Gamma_D$ is denoted by $g_D$ and the given Neumann data on $\Gamma_N$ is denoted by $g_N$. # # From Green's representation theorem it follows that # $$ # \begin{align} # \left[\mathsf{V}t\right] (\mathbf{x}) - \left[\mathsf{K}u\right] (\mathbf{x}) &= \left[\tfrac{1}{2}\mathsf{Id} + \mathsf{K}\right]g_D(\mathbf{x}) - \mathsf{V}g_N(\mathbf{x}),\quad \mathbf{x}\in\Gamma_D\\ # \left[\mathsf{W}u\right] (\mathbf{x}) + \left[\mathsf{K}'t\right] (\mathbf{x}) &=\left[\tfrac{1}{2}\mathsf{Id} - \mathsf{K}'\right]g_N(\mathbf{x}) - \mathsf{W}g_D(\mathbf{x}),\quad \mathbf{x}\in\Gamma_N # \end{align} # $$ # Here (as usual) $\mathsf{V}$, $\mathsf{K}$, $\mathsf{K}'$, $\mathsf{W}$ are the single layer, double layer, adjoint double layer and hypersingular <a href='https://bempp.com/2017/07/11/available_operators/'>boundary operators</a>. # # The difficulty in the implementation is the definition of the discrete function spaces and the treatment of degrees of freedom (dofs) that lie on the interface between $\Gamma_N$ and $\Gamma_D$. In the following, we will go through the implementation and point out how to correctly define all spaces involved. # ### Implementation # We start with the usual imports. In addition we increase the integration order, as in this example we will be working with spaces of quadratic functions. # + import bempp.api import numpy as np bempp.api.global_parameters.quadrature.medium.double_order = 4 bempp.api.global_parameters.quadrature.far.double_order = 4 # - # We now define the domain. We use a standard unit cube. In the corresponding function all sides of the cube are already associated with different domain indices. We associate the indices 1 and 3 with the Dirichlet boundary and the other indices with the neumann boundary. grid = bempp.api.shapes.cube() dirichlet_segments = [1, 3] neumann_segments = [2, 4, 5, 6] # We can now define the spaces. For the Neumann data, we use discontinuous polynomial basis functions of order 1. For the Dirichlet data, we use continuous basis functions of local polynomial order 2. # # We need global spaces for the Dirichlet and Neumann data and suitable spaces on the segments. The space definitions are as follows: # # * The ``neumann_space_dirichlet_segment`` space holds the unknown Neumann data $t$ on $\Gamma_D$. For $\Gamma_D$ we use the parameter ``closed=True``, meaning that all boundary edges and the associated dofs on the boundary edges are part of the space. The parameter ``element_on_segment=True`` implies that we restrict functions to elements that lie on elements associated with $\Gamma_D$. 
This is important for dofs on boundary edges and excludes associated functions that lie just outside $\Gamma_D$ on the other side of the boundary edge. # # * The ``neumann_space_neumann_segment`` space is defined on $\Gamma_N$. $\Gamma_N$ is open: the boundary edges are not part of the space. We again restrict basis functions to $\Gamma_N$ by the parameter ``element_on_segment=True``. However, we also include all functions which are defined on elements of the space but whose reference points (i.e. the dof positions) are on the excluded boundary. This is achieved by the parameter ``reference_point_on_segment=False``. If it were set to ``True`` (default) it would only include dofs whose reference points lie in the segment and not on the excluded boundary. # # * The ``dirichlet_space_dirichlet_segment`` space is a space of continuous basis functions that holds the Dirichlet data on $\Gamma_D$. The space is closed and by default basis functions are allowed to extend into the elements adjacent to $\Gamma_D$. This extension is necessary because of the definition of the underlying Sobolev space on the segment. To control this behavior for continuous spaces the option ``strictly_on_segment`` exists, which is by default set to ``False``. # # * The ``dirichlet_space_neumann_segment`` is defined similarly to the ``dirichlet_space_dirichlet_segment`` but on the open segment $\Gamma_N$. # # * For the discretisation of the Dirichlet data, we also need the space ``dual_dirichlet_space``. This is the correct dual space for projecting functions into the space of Dirichlet data. # + order_neumann = 1 order_dirichlet = 2 global_neumann_space = bempp.api.function_space(grid, "DP", order_neumann) global_dirichlet_space = bempp.api.function_space(grid, "P", order_dirichlet) neumann_space_dirichlet_segment = bempp.api.function_space( grid, "DP", order_neumann, domains=dirichlet_segments, closed=True, element_on_segment=True) neumann_space_neumann_segment = bempp.api.function_space( grid, "DP", order_neumann, domains=neumann_segments, closed=False, element_on_segment=True, reference_point_on_segment=False) dirichlet_space_dirichlet_segment = bempp.api.function_space( grid, "P", order_dirichlet, domains=dirichlet_segments, closed=True) dirichlet_space_neumann_segment = bempp.api.function_space( grid, "P", order_dirichlet, domains=neumann_segments, closed=False) dual_dirichlet_space = bempp.api.function_space( grid, "P", order_dirichlet, domains=dirichlet_segments, closed=True, strictly_on_segment=True) # - # In the following, we define all operators on the corresponding spaces and the overall blocked operator. 
# + slp_DD = bempp.api.operators.boundary.laplace.single_layer( neumann_space_dirichlet_segment, dirichlet_space_dirichlet_segment, neumann_space_dirichlet_segment) dlp_DN = bempp.api.operators.boundary.laplace.double_layer( dirichlet_space_neumann_segment, dirichlet_space_dirichlet_segment, neumann_space_dirichlet_segment) adlp_ND = bempp.api.operators.boundary.laplace.adjoint_double_layer( neumann_space_dirichlet_segment, neumann_space_neumann_segment, dirichlet_space_neumann_segment) hyp_NN = bempp.api.operators.boundary.laplace.hypersingular( dirichlet_space_neumann_segment, neumann_space_neumann_segment, dirichlet_space_neumann_segment) slp_DN = bempp.api.operators.boundary.laplace.single_layer( neumann_space_neumann_segment, dirichlet_space_dirichlet_segment, neumann_space_dirichlet_segment) dlp_DD = bempp.api.operators.boundary.laplace.double_layer( dirichlet_space_dirichlet_segment, dirichlet_space_dirichlet_segment, neumann_space_dirichlet_segment) id_DD = bempp.api.operators.boundary.sparse.identity( dirichlet_space_dirichlet_segment, dirichlet_space_dirichlet_segment, neumann_space_dirichlet_segment) adlp_NN = bempp.api.operators.boundary.laplace.adjoint_double_layer( neumann_space_neumann_segment, neumann_space_neumann_segment, dirichlet_space_neumann_segment) id_NN = bempp.api.operators.boundary.sparse.identity( neumann_space_neumann_segment, neumann_space_neumann_segment, dirichlet_space_neumann_segment) hyp_ND = bempp.api.operators.boundary.laplace.hypersingular( dirichlet_space_dirichlet_segment, neumann_space_neumann_segment, dirichlet_space_neumann_segment) blocked = bempp.api.BlockedOperator(2, 2) blocked[0, 0] = slp_DD blocked[0, 1] = -dlp_DN blocked[1, 0] = adlp_ND blocked[1, 1] = hyp_NN # - # Next, we define the functions of the Dirichlet and Neumann data and their discretisations on the corresponding segments. # + def dirichlet_data_fun(x): return 1 def dirichlet_data(x, n, domain_index, res): res[0] = dirichlet_data_fun(x) def neumann_data_fun(x): return 1 def neumann_data(x, n, domain_index, res): res[0] = neumann_data_fun(x) dirichlet_grid_fun = bempp.api.GridFunction( dirichlet_space_dirichlet_segment, fun=dirichlet_data, dual_space=dual_dirichlet_space) neumann_grid_fun = bempp.api.GridFunction( neumann_space_neumann_segment, fun=neumann_data, dual_space=dirichlet_space_neumann_segment) rhs_fun1 = (.5 * id_DD + dlp_DD) * dirichlet_grid_fun \ - slp_DN * neumann_grid_fun rhs_fun2 = - hyp_ND * dirichlet_grid_fun \ + (.5 * id_NN - adlp_NN) * neumann_grid_fun # - # We can now discretise and solve the blocked operator system. We solve without preconditioner. This would cause problems if we were to further increase the degree of the basis functions. # + lhs = blocked.weak_form() rhs = np.hstack([rhs_fun1.projections(neumann_space_dirichlet_segment), rhs_fun2.projections(dirichlet_space_neumann_segment)]) from scipy.sparse.linalg import gmres x, info = gmres(lhs, rhs) # - # Next, we split up the solution vector and define the grid functions associated with the computed Neumann and Dirichlet data. # + nx0 = neumann_space_dirichlet_segment.global_dof_count neumann_solution = bempp.api.GridFunction( neumann_space_dirichlet_segment, coefficients=x[:nx0]) dirichlet_solution = bempp.api.GridFunction( dirichlet_space_neumann_segment, coefficients=x[nx0:]) # - # We want to recombine the computed Dirichlet and Neumann data with the corresponding known data in order to get Dirichlet and Neumann grid functions defined on the whole grid. 
To achieve this we define identity operators from $\Gamma_N$ and $\Gamma_D$ into the global Dirichlet and Neumann spaces. # + neumann_imbedding_dirichlet_segment = \ bempp.api.operators.boundary.sparse.identity( neumann_space_dirichlet_segment, global_neumann_space, global_neumann_space) neumann_imbedding_neumann_segment = \ bempp.api.operators.boundary.sparse.identity( neumann_space_neumann_segment, global_neumann_space, global_neumann_space) dirichlet_imbedding_dirichlet_segment = \ bempp.api.operators.boundary.sparse.identity( dirichlet_space_dirichlet_segment, global_dirichlet_space, global_dirichlet_space) dirichlet_imbedding_neumann_segment = \ bempp.api.operators.boundary.sparse.identity( dirichlet_space_neumann_segment, global_dirichlet_space, global_dirichlet_space) dirichlet = (dirichlet_imbedding_dirichlet_segment * dirichlet_grid_fun + dirichlet_imbedding_neumann_segment * dirichlet_solution) neumann = (neumann_imbedding_neumann_segment * neumann_grid_fun + neumann_imbedding_dirichlet_segment * neumann_solution) dirichlet.plot() # - # We can plot the solution using the command ``dirichlet.plot()``. The solution looks as follows. <img src="cube_mixed_solution.png">
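# If values of the solution are also needed inside the cube, Green's representation theorem can be
# evaluated with the corresponding potential operators applied to the global grid functions defined
# above. The following is only a sketch: it assumes this Bempp version exposes the Laplace potential
# operators as ``bempp.api.operators.potential.laplace`` (as in the standard Laplace tutorials), and
# the interior evaluation points are arbitrary illustrative choices.

# +
# Evaluation points inside the unit cube, one point per column (shape 3 x n_points)
interior_points = np.array([[0.25, 0.50, 0.75],
                            [0.25, 0.50, 0.25],
                            [0.25, 0.50, 0.50]])

slp_pot = bempp.api.operators.potential.laplace.single_layer(
    global_neumann_space, interior_points)
dlp_pot = bempp.api.operators.potential.laplace.double_layer(
    global_dirichlet_space, interior_points)

# Green's representation: u(x) = [V t](x) - [K u](x) for x inside the domain
u_interior = slp_pot * neumann - dlp_pot * dirichlet
print(u_interior)
# -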
notebooks/mixed_neumann_dirichlet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Hi! Since this is a classification problem, I'm going to apply Logistic Regression to the given dataset.

# Importing the required libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # plotting graphs

# Importing the datasets

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
data_train = pd.read_csv('../input/instant-gratification/train.csv')
data_test = pd.read_csv('../input/instant-gratification/test.csv')
# -

data_train.head(5)

data_test.head(5)

data_train.isnull().sum()

# + active=""
# Finding the correlation between variables
# -

corr = data_train.corr()
sns.heatmap(corr)

# The heatmap above shows no clear correlation between the variables.

# Splitting the training data into X and y and removing the ID column
X_train = data_train.iloc[:,1:256]
y_train = data_train.iloc[:,-1]

# Initializing the test dataset X_test. We will not consider the ID column
X_test = data_test.iloc[:,1:256]

# # Logistic Regression

from sklearn.linear_model import LogisticRegression

log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
y_pred = log_reg.predict(X_train)

# The competition does not provide labels for the test set, so y_test below holds the
# model's own predictions rather than true labels
y_test = log_reg.predict(X_test)

# Checking the training score; note that log_reg.score(X_test, y_test) compares the
# model's predictions with themselves, so it is always 1.0 and is not a real test score
log_reg.score(X_train, y_train)

log_reg.score(X_test, y_test)

from sklearn.metrics import confusion_matrix
cn = confusion_matrix(y_train, y_pred)
cn

from sklearn.metrics import precision_score, f1_score, recall_score

precision_score(y_train, y_pred)

recall_score(y_train, y_pred)

f1_score(y_train, y_pred)

# Only the training-set metrics above are meaningful here: the "test" score is computed against
# the model's own predictions and is therefore always 1.0. To estimate how well the model
# generalises, we need to hold out labelled data, as sketched below.
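# Because the true test labels are unavailable, a hold-out split of the labelled training data
# gives a more honest estimate of out-of-sample accuracy. This is a minimal sketch; the 80/20
# split, the random_state value and the variable names are illustrative choices, not part of the
# original analysis.

# +
from sklearn.model_selection import train_test_split

# Hold out 20% of the labelled data purely for evaluation
X_tr, X_val, y_tr, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)

holdout_model = LogisticRegression()
holdout_model.fit(X_tr, y_tr)

print("train accuracy:   ", holdout_model.score(X_tr, y_tr))
print("hold-out accuracy:", holdout_model.score(X_val, y_val))
# -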
instant-gratification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="ffsnYMQ0mikk" import json import pandas as pd import numpy as np from sklearn.cluster import KMeans from sklearn.decomposition import PCA from sklearn.manifold import TSNE from matplotlib import pyplot as plt from glob import glob import matplotlib.colors as mcolors from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.metrics import recall_score, f1_score, precision_score from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.tree import plot_tree np.set_printoptions(formatter={'float': lambda x: " | {0:0.2f}".format(x)}) # + colab={"base_uri": "https://localhost:8080/", "height": 102} id="w9QKnMzxqR6T" outputId="178ccdb5-f2f8-4dbe-c848-dbb1abd50093" """ msg_1 - message sent by agent 1 msg_2 - " " 2 probs_1 - the underlying probability vector for every bit in message 1 probs_2 - the underlying probability vector for every bit in message 2 msg_1_ent - entropy of message 1 (calculated on probs_1) msg_2_ent - entropy of message 2 (calculated on probs_2) non_blank_partition - 1 = partition 1 is non blank 2 = partition 2 is non blank 0 = both partitions are non blank p - part of the image that agent1 sees (for agent2 it is 1-p) caption - correct caption shape - one of ['circle', 'cross', 'ellipse', 'pentagon', 'rectangle', 'semicircle', 'square', 'triangle'] color - one of ['blue', 'cyan', 'gray', 'green', 'magenta', 'red', 'yellow'] texts - all 10 descriptions texts_shapes - shape for all 10 descs texts_color - colors " " correct - was the trial succeful (both players guessing right) """ # + id="J6mXWxeUmAkT" shapes = ['circle', 'cross', 'ellipse', 'pentagon', 'rectangle', 'semicircle', 'square', 'triangle'] colors = ['blue', 'cyan', 'gray', 'green', 'magenta', 'red', 'yellow'] df = pd.DataFrame() for p in glob("../languages/pretrained_10_1*"): with open(p) as f: task_data = json.load(f) df = df.append(pd.DataFrame(task_data)) # + colab={"base_uri": "https://localhost:8080/"} id="nNR7z698ps9z" outputId="3c858a4d-a488-4abb-afc3-cbf16aa8db36" all_msg = np.array(df.msg_1.append(df.msg_2).tolist()).squeeze() all_msg.shape # + colab={"base_uri": "https://localhost:8080/", "height": 863} id="vvKQr9h2tiQW" outputId="4240ab2a-ada5-4f73-ea89-14ea0ee12cf6" k = 8 km = KMeans(k) res = km.fit_predict(all_msg) tsne = TSNE(2) tsne_msg = tsne.fit_transform(all_msg) plt.figure(figsize=(15,15)) for i, c in zip(range(k), mcolors.cnames): d = tsne_msg[res==i] plt.scatter(d[:, 0], d[:,1], label=i, color=c) plt.annotate(str(i), d[0]) # + id="CbaycJ2E4iA4" df1 = df[(df.non_blank_partition.isin([0,1])) & (df.correct)][['msg_1', 'probs_1', 'p', 'shape', 'color', 'correct']].rename(columns={'msg_1':'msg', 'probs_1':'prob'}) df2 = df[(df.non_blank_partition.isin([0,2])) & (df.correct)][['msg_2', 'probs_2', 'p', 'shape', 'color', 'correct']].rename(columns={'msg_2':'msg', 'probs_2':'prob'}) df2.p = 1-df2.p msg_df = pd.concat([df1, df2]) df1 = df[df.non_blank_partition.isin([2])][['msg_1', 'probs_1', 'p', 'shape', 'color', 'correct']].rename(columns={'msg_1':'msg', 'probs_1':'prob'}) df2 = df[df.non_blank_partition.isin([1])][['msg_2', 'probs_2', 'p', 'shape', 'color', 'correct']].rename(columns={'msg_2':'msg', 'probs_2':'prob'}) df2.p = 1-df2.p blank_msg_df = pd.concat([df1, df2]) # + 
id="KFvY4hz56g_D" all_msg = np.array(msg_df.msg.tolist()).squeeze() all_prob = np.array(msg_df.prob.tolist()).squeeze() tsne_msg = TSNE(2).fit_transform(all_msg) # + colab={"base_uri": "https://localhost:8080/", "height": 490} id="O8NThM4u-cZH" outputId="cb945f55-f45e-4ee4-b2ff-a3427b1a2d86" shapes = ['circle', 'cross', 'ellipse', 'pentagon', 'rectangle', 'semicircle', 'square', 'triangle'] shapes_ = ['o', 'p', "1", "p", "*", "d", "s", "^"] colors = ['blue', 'cyan', 'gray', 'green', 'magenta', 'red', 'yellow'] import matplotlib.lines as mlines legend = [] for c, s, s_name in zip(['blue', 'cyan', 'gray', 'green', 'magenta', 'red', 'yellow', 'blue'], shapes_, shapes): legend.append(mlines.Line2D([], [], color=c, marker=s, linestyle='None', markersize=10, label=f'{c} {s_name}', markeredgecolor='k')) plt.figure(figsize=(8,8)) for s, s_ in zip(shapes, shapes_): for c in colors: d = tsne_msg[(msg_df["shape"]==s) & (msg_df["color"]==c)] plt.scatter(d[:, 0], d[:,1], label=f"{c} {s}", color=c, marker=s_, edgecolors="k", s=200, alpha=0.5) plt.legend(handles=legend, bbox_to_anchor=(0, 1), fontsize=14) plt.xticks([]) plt.yticks([]) plt.title("8 bit words", fontdict={'fontsize':20}) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 682} id="JbHLupHOB3Oy" outputId="f22e8dc2-582c-4995-a237-44fa5dba791f" df_ = msg_df.dropna(axis=0, subset=["shape"]) # df_ = msg_df.fillna('None') y = df_["shape"] X = np.array(df_.msg.tolist()).squeeze() X_t, X_te, y_t, y_te = train_test_split(X[:, :], y, test_size=0.25) tree = DecisionTreeClassifier(max_leaf_nodes=8) tree.fit(X_t, y_t) y_ = tree.predict(X_te) tree_no_limit = DecisionTreeClassifier() tree_no_limit.fit(X_t, y_t) y_no_limit = tree_no_limit.predict(X_te) res = pd.DataFrame(np.vstack([f1_score(y_te, y_, average=None), recall_score(y_te, y_, average=None), precision_score(y_te, y_, average=None), f1_score(y_te, y_no_limit, average=None), recall_score(y_te, y_no_limit, average=None), precision_score(y_te, y_no_limit, average=None)]), index=["f1_comp", "recall_comp", "precision_comp", "f1_not_comp", "recall_not_comp", "precision_not_comp"], columns=tree.classes_) display(res) print(f"\ncomp feature importances", tree.feature_importances_) print(f"\nnot comp feature importances", tree_no_limit.feature_importances_) print("\naverage f1 comp", res.loc['f1_comp'].mean()) print("average f1 not comp", res.loc['f1_not_comp'].mean()) plt.figure(figsize=(15,6)) plt.title("comp tree") plot_tree(tree, fontsize=10, filled=True, class_names=tree.classes_, impurity=False, label='root', precision=2) plt.show() # + id="oJALmDrja4jl" # + colab={"base_uri": "https://localhost:8080/", "height": 666} id="7lXsS2e_PUCG" outputId="baaf7711-ff6a-4c51-d866-8abe963c37d8" df_ = msg_df.dropna(axis=0, subset=["color"]) y = df_["color"] X = np.array(df_.msg.tolist()).squeeze() X_t, X_te, y_t, y_te = train_test_split(X[:, :], y, test_size=0.25) tree = DecisionTreeClassifier(max_leaf_nodes=7) tree.fit(X_t, y_t) y_ = tree.predict(X_te) tree_no_limit = DecisionTreeClassifier() tree_no_limit.fit(X_t, y_t) y_no_limit = tree_no_limit.predict(X_te) res = pd.DataFrame(np.vstack([f1_score(y_te, y_, average=None), recall_score(y_te, y_, average=None), precision_score(y_te, y_, average=None), f1_score(y_te, y_no_limit, average=None), recall_score(y_te, y_no_limit, average=None), precision_score(y_te, y_no_limit, average=None)]), index=["f1_comp", "recall_comp", "precision_comp", "f1_not_comp", "recall_not_comp", "precision_not_comp"], columns=tree.classes_) display(res) print(f"\ncomp 
feature importances", tree.feature_importances_) print(f"not comp feature importances", tree_no_limit.feature_importances_) print("\naverage f1 comp", res.loc['f1_comp'].mean()) print("average f1 not comp", res.loc['f1_not_comp'].mean()) plt.figure(figsize=(12,6)) plt.title("comp tree") plot_tree(tree, fontsize=10, filled=True, class_names=tree.classes_, impurity=False, label='root', precision=2 ) plt.show() # + [markdown] id="C_GNBhU8k2Xc" # # predict other labels # + id="BjhSC5E7mcNl" from sklearn.metrics import roc_curve, roc_auc_score # + colab={"base_uri": "https://localhost:8080/", "height": 203} id="x8VL7OCCmeka" outputId="a403507c-0f5a-4450-ee82-4362b3adb09b" blank_msg_df["blank"] = True msg_df['blank'] = False df_ = pd.concat([blank_msg_df, msg_df]).sample(frac=1) y = df_["blank"] X = np.array(df_.msg.tolist()).squeeze() X_t, X_te, y_t, y_te = train_test_split(X[:, :], y, test_size=0.25) tree = DecisionTreeClassifier() tree.fit(X_t, y_t) y_ = tree.predict(X_te) y_p = tree.predict_proba(X_te)[:,1] res = pd.DataFrame(np.vstack([f1_score(y_te, y_, average=None), recall_score(y_te, y_, average=None), precision_score(y_te, y_, average=None)]), index=["f1", "recall", "precision"], columns=tree.classes_) display(res) print("auc score", roc_auc_score(y_te, y_p)) print(f"prior:{y.mean():.3f}") print(f"\nfeature importances", tree.feature_importances_) # plt.figure(figsize=(30,10)) # plot_tree(tree, fontsize=10, filled=True, label='root', precision=2) # plt.show() # + id="Pku3SJyDnaKC" df1_ = df[(df.non_blank_partition.isin([0,1]))][['msg_1', 'probs_1', 'p', 'shape', 'color', 'correct']].rename(columns={'msg_1':'msg', 'probs_1':'prob'}) df2_ = df[(df.non_blank_partition.isin([0,2]))][['msg_2', 'probs_2', 'p', 'shape', 'color', 'correct']].rename(columns={'msg_2':'msg', 'probs_2':'prob'}) df2_.p = 1-df2_.p msg_df_ = pd.concat([df1_, df2_]) # + colab={"base_uri": "https://localhost:8080/", "height": 203} id="7TxyHMG7mesr" outputId="3ea9eb97-61d6-42a5-ebb2-c901aca56ba7" blank_msg_df["blank"] = True msg_df_['blank'] = False df_ = pd.concat([blank_msg_df, msg_df_]).sample(frac=1) y = df_["correct"] X = np.array(df_.msg.tolist()).squeeze() X_t, X_te, y_t, y_te = train_test_split(X[:, :], y, test_size=0.25) tree = DecisionTreeClassifier() tree.fit(X_t, y_t) y_ = tree.predict(X_te) y_p = tree.predict_proba(X_te)[:,1] res = pd.DataFrame(np.vstack([f1_score(y_te, y_, average=None), recall_score(y_te, y_, average=None), precision_score(y_te, y_, average=None)]), index=["f1", "recall", "precision"], columns=tree.classes_) display(res) print("auc score", roc_auc_score(y_te, y_p)) print(f"prior:{y.mean():.3f}") print(f"\nfeature importances", tree.feature_importances_)
notebooks/pool_1_msg_8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <img src="../static/aeropython_name_mini.png" alt="AeroPython" style="width: 300px;"/>

# # Class 3b: Class title

# _Here, an introduction in italics telling you what we are going to do_

# ## Section 1

# Here I start explaining

#Here the code
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt

# ### Subsection 1.1

# #### Subsubsection 1.1.1

# ##### Exercise

# ---

# _Here, a summary of the class with what we have learned and some links_

# If you liked this class:
#
# <a href="https://twitter.com/share" class="twitter-share-button" data-url="https://github.com/AeroPython/Curso_AeroPython" data-text="Aprendiendo Python con" data-via="pybonacci" data-size="large" data-hashtags="AeroPython">Tweet</a>
# <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
#

# ---

# ###### <img src="../static/linkedin.png" alt="AeroPython" style="width: 25px" align="right";/> Course taught by: [<NAME>](http://es.linkedin.com/in/juanluiscanor) & [<NAME>](http://es.linkedin.com/pub/alejandro-saez-mollejo/55/22/473)

# ###### In collaboration with: <NAME>

# #### <h4 align="right">Follow us on Twitter!

# ###### <a href="https://twitter.com/Pybonacci" class="twitter-follow-button" data-show-count="false">Follow @Pybonacci</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> <a href="https://twitter.com/Alex__S12" class="twitter-follow-button" data-show-count="false" align="right";>Follow @Alex__S12</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> <a href="https://twitter.com/newlawrence" class="twitter-follow-button" data-show-count="false" align="right";>Follow @newlawrence</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>

# ##### <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es"><img alt="Licencia Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">Curso AeroPython</span> by <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName"><NAME> and <NAME></span> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es">Creative Commons Attribution 4.0 International License</a>.

# ##### <script src="//platform.linkedin.com/in.js" type="text/javascript"></script> <script type="IN/MemberProfile" data-id="http://es.linkedin.com/in/juanluiscanor" data-format="inline" data-related="false"></script> <script src="//platform.linkedin.com/in.js" type="text/javascript"></script> <script type="IN/MemberProfile" data-id="http://es.linkedin.com/in/alejandrosaezm" data-format="inline" data-related="false"></script>

# ---

# _The following cells contain the Notebook configuration_
#
# _To display and use the Twitter links, the notebook must be run as [trusted](http://ipython.org/ipython-doc/dev/notebook/security.html)_
#
# File > Trusted Notebook

# + language="html"
# <a href="https://twitter.com/Pybonacci" class="twitter-follow-button" data-show-count="false">Follow @Pybonacci</a>
# <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
# -

# This cell sets the notebook style
from IPython.core.display import HTML
css_file = '../static/styles/style.css'
HTML(open(css_file, "r").read())
styles/template.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Interactions and ANOVA # Note: This script is based heavily on <NAME>'s class notes http://www.stanford.edu/class/stats191/interactions.html # # Download and format data: # + # %matplotlib inline from __future__ import print_function from statsmodels.compat import urlopen import numpy as np np.set_printoptions(precision=4, suppress=True) import statsmodels.api as sm import pandas as pd pd.set_option("display.width", 100) import matplotlib.pyplot as plt from statsmodels.formula.api import ols from statsmodels.graphics.api import interaction_plot, abline_plot from statsmodels.stats.anova import anova_lm try: salary_table = pd.read_csv('salary.table') except: # recent pandas can read URL without urlopen url = 'http://stats191.stanford.edu/data/salary.table' fh = urlopen(url) salary_table = pd.read_table(fh) salary_table.to_csv('salary.table') E = salary_table.E M = salary_table.M X = salary_table.X S = salary_table.S # - # Take a look at the data: plt.figure(figsize=(6,6)) symbols = ['D', '^'] colors = ['r', 'g', 'blue'] factor_groups = salary_table.groupby(['E','M']) for values, group in factor_groups: i,j = values plt.scatter(group['X'], group['S'], marker=symbols[j], color=colors[i-1], s=144) plt.xlabel('Experience'); plt.ylabel('Salary'); # Fit a linear model: formula = 'S ~ C(E) + C(M) + X' lm = ols(formula, salary_table).fit() print(lm.summary()) # Have a look at the created design matrix: lm.model.exog[:5] # Or since we initially passed in a DataFrame, we have a DataFrame available in lm.model.data.orig_exog[:5] # We keep a reference to the original untouched data in lm.model.data.frame[:5] # Influence statistics infl = lm.get_influence() print(infl.summary_table()) # or get a dataframe df_infl = infl.summary_frame() df_infl[:5] # Now plot the reiduals within the groups separately: resid = lm.resid plt.figure(figsize=(6,6)); for values, group in factor_groups: i,j = values group_num = i*2 + j - 1 # for plotting purposes x = [group_num] * len(group) plt.scatter(x, resid[group.index], marker=symbols[j], color=colors[i-1], s=144, edgecolors='black') plt.xlabel('Group'); plt.ylabel('Residuals'); # Now we will test some interactions using anova or f_test interX_lm = ols("S ~ C(E) * X + C(M)", salary_table).fit() print(interX_lm.summary()) # Do an ANOVA check # + from statsmodels.stats.api import anova_lm table1 = anova_lm(lm, interX_lm) print(table1) interM_lm = ols("S ~ X + C(E)*C(M)", data=salary_table).fit() print(interM_lm.summary()) table2 = anova_lm(lm, interM_lm) print(table2) # - # The design matrix as a DataFrame interM_lm.model.data.orig_exog[:5] # The design matrix as an ndarray interM_lm.model.exog interM_lm.model.exog_names infl = interM_lm.get_influence() resid = infl.resid_studentized_internal plt.figure(figsize=(6,6)) for values, group in factor_groups: i,j = values idx = group.index plt.scatter(X[idx], resid[idx], marker=symbols[j], color=colors[i-1], s=144, edgecolors='black') plt.xlabel('X'); plt.ylabel('standardized resids'); # Looks like one observation is an outlier. 
# + drop_idx = abs(resid).argmax() print(drop_idx) # zero-based index idx = salary_table.index.drop(drop_idx) lm32 = ols('S ~ C(E) + X + C(M)', data=salary_table, subset=idx).fit() print(lm32.summary()) print('\n') interX_lm32 = ols('S ~ C(E) * X + C(M)', data=salary_table, subset=idx).fit() print(interX_lm32.summary()) print('\n') table3 = anova_lm(lm32, interX_lm32) print(table3) print('\n') interM_lm32 = ols('S ~ X + C(E) * C(M)', data=salary_table, subset=idx).fit() table4 = anova_lm(lm32, interM_lm32) print(table4) print('\n') # - # Replot the residuals # + try: resid = interM_lm32.get_influence().summary_frame()['standard_resid'] except: resid = interM_lm32.get_influence().summary_frame()['standard_resid'] plt.figure(figsize=(6,6)) for values, group in factor_groups: i,j = values idx = group.index plt.scatter(X[idx], resid[idx], marker=symbols[j], color=colors[i-1], s=144, edgecolors='black') plt.xlabel('X[~[32]]'); plt.ylabel('standardized resids'); # - # Plot the fitted values # + lm_final = ols('S ~ X + C(E)*C(M)', data = salary_table.drop([drop_idx])).fit() mf = lm_final.model.data.orig_exog lstyle = ['-','--'] plt.figure(figsize=(6,6)) for values, group in factor_groups: i,j = values idx = group.index plt.scatter(X[idx], S[idx], marker=symbols[j], color=colors[i-1], s=144, edgecolors='black') # drop NA because there is no idx 32 in the final model plt.plot(mf.X[idx].dropna(), lm_final.fittedvalues[idx].dropna(), ls=lstyle[j], color=colors[i-1]) plt.xlabel('Experience'); plt.ylabel('Salary'); # - # From our first look at the data, the difference between Master's and PhD in the management group is different than in the non-management group. This is an interaction between the two qualitative variables management,M and education,E. We can visualize this by first removing the effect of experience, then plotting the means within each of the 6 groups using interaction.plot. 
# + U = S - X * interX_lm32.params['X'] plt.figure(figsize=(6,6)) interaction_plot(E, M, U, colors=['red','blue'], markers=['^','D'], markersize=10, ax=plt.gca()) # - # ## Minority Employment Data # + try: jobtest_table = pd.read_table('jobtest.table') except: # don't have data already url = 'http://stats191.stanford.edu/data/jobtest.table' jobtest_table = pd.read_table(url) factor_group = jobtest_table.groupby(['ETHN']) fig, ax = plt.subplots(figsize=(6,6)) colors = ['purple', 'green'] markers = ['o', 'v'] for factor, group in factor_group: ax.scatter(group['TEST'], group['JPERF'], color=colors[factor], marker=markers[factor], s=12**2) ax.set_xlabel('TEST'); ax.set_ylabel('JPERF'); # - min_lm = ols('JPERF ~ TEST', data=jobtest_table).fit() print(min_lm.summary()) # + fig, ax = plt.subplots(figsize=(6,6)); for factor, group in factor_group: ax.scatter(group['TEST'], group['JPERF'], color=colors[factor], marker=markers[factor], s=12**2) ax.set_xlabel('TEST') ax.set_ylabel('JPERF') fig = abline_plot(model_results = min_lm, ax=ax) # + min_lm2 = ols('JPERF ~ TEST + TEST:ETHN', data=jobtest_table).fit() print(min_lm2.summary()) # + fig, ax = plt.subplots(figsize=(6,6)); for factor, group in factor_group: ax.scatter(group['TEST'], group['JPERF'], color=colors[factor], marker=markers[factor], s=12**2) fig = abline_plot(intercept = min_lm2.params['Intercept'], slope = min_lm2.params['TEST'], ax=ax, color='purple'); fig = abline_plot(intercept = min_lm2.params['Intercept'], slope = min_lm2.params['TEST'] + min_lm2.params['TEST:ETHN'], ax=ax, color='green'); # - min_lm3 = ols('JPERF ~ TEST + ETHN', data = jobtest_table).fit() print(min_lm3.summary()) # + fig, ax = plt.subplots(figsize=(6,6)); for factor, group in factor_group: ax.scatter(group['TEST'], group['JPERF'], color=colors[factor], marker=markers[factor], s=12**2) fig = abline_plot(intercept = min_lm3.params['Intercept'], slope = min_lm3.params['TEST'], ax=ax, color='purple'); fig = abline_plot(intercept = min_lm3.params['Intercept'] + min_lm3.params['ETHN'], slope = min_lm3.params['TEST'], ax=ax, color='green'); # - min_lm4 = ols('JPERF ~ TEST * ETHN', data = jobtest_table).fit() print(min_lm4.summary()) # + fig, ax = plt.subplots(figsize=(8,6)); for factor, group in factor_group: ax.scatter(group['TEST'], group['JPERF'], color=colors[factor], marker=markers[factor], s=12**2) fig = abline_plot(intercept = min_lm4.params['Intercept'], slope = min_lm4.params['TEST'], ax=ax, color='purple'); fig = abline_plot(intercept = min_lm4.params['Intercept'] + min_lm4.params['ETHN'], slope = min_lm4.params['TEST'] + min_lm4.params['TEST:ETHN'], ax=ax, color='green'); # - # is there any effect of ETHN on slope or intercept? table5 = anova_lm(min_lm, min_lm4) print(table5) # is there any effect of ETHN on intercept table6 = anova_lm(min_lm, min_lm3) print(table6) # is there any effect of ETHN on slope table7 = anova_lm(min_lm, min_lm2) print(table7) # is it just the slope or both? 
table8 = anova_lm(min_lm2, min_lm4) print(table8) # ## One-way ANOVA # + try: rehab_table = pd.read_csv('rehab.table') except: url = 'http://stats191.stanford.edu/data/rehab.csv' rehab_table = pd.read_table(url, delimiter=",") rehab_table.to_csv('rehab.table') fig, ax = plt.subplots(figsize=(8,6)) fig = rehab_table.boxplot('Time', 'Fitness', ax=ax, grid=False) # + rehab_lm = ols('Time ~ C(Fitness)', data=rehab_table).fit() table9 = anova_lm(rehab_lm) print(table9) print(rehab_lm.model.data.orig_exog) # - print(rehab_lm.summary()) # ## Two-way ANOVA try: kidney_table = pd.read_table('./kidney.table') except: url = 'http://stats191.stanford.edu/data/kidney.table' kidney_table = pd.read_csv(url, delim_whitespace=True) # Explore the dataset kidney_table.head(10) # Balanced panel kt = kidney_table plt.figure(figsize=(8,6)) fig = interaction_plot(kt['Weight'], kt['Duration'], np.log(kt['Days']+1), colors=['red', 'blue'], markers=['D','^'], ms=10, ax=plt.gca()) # You have things available in the calling namespace available in the formula evaluation namespace # + kidney_lm = ols('np.log(Days+1) ~ C(Duration) * C(Weight)', data=kt).fit() table10 = anova_lm(kidney_lm) print(anova_lm(ols('np.log(Days+1) ~ C(Duration) + C(Weight)', data=kt).fit(), kidney_lm)) print(anova_lm(ols('np.log(Days+1) ~ C(Duration)', data=kt).fit(), ols('np.log(Days+1) ~ C(Duration) + C(Weight, Sum)', data=kt).fit())) print(anova_lm(ols('np.log(Days+1) ~ C(Weight)', data=kt).fit(), ols('np.log(Days+1) ~ C(Duration) + C(Weight, Sum)', data=kt).fit())) # - # ## Sum of squares # # Illustrates the use of different types of sums of squares (I,II,II) # and how the Sum contrast can be used to produce the same output between # the 3. # # Types I and II are equivalent under a balanced design. # # Don't use Type III with non-orthogonal contrast - ie., Treatment # + sum_lm = ols('np.log(Days+1) ~ C(Duration, Sum) * C(Weight, Sum)', data=kt).fit() print(anova_lm(sum_lm)) print(anova_lm(sum_lm, typ=2)) print(anova_lm(sum_lm, typ=3)) # - nosum_lm = ols('np.log(Days+1) ~ C(Duration, Treatment) * C(Weight, Treatment)', data=kt).fit() print(anova_lm(nosum_lm)) print(anova_lm(nosum_lm, typ=2)) print(anova_lm(nosum_lm, typ=3))
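# As a small, self-contained sanity check (an addition, not part of the original notes):
# on a perfectly balanced synthetic 2x2 design, Type I and Type II sums of squares agree
# for the main effects. The variable names below are illustrative only.

# +
rng = np.random.RandomState(0)
balanced = pd.DataFrame({'a': np.repeat(['a1', 'a2'], 20),
                         'b': np.tile(np.repeat(['b1', 'b2'], 10), 2)})
balanced['y'] = (rng.randn(40)
                 + 1.0 * (balanced.a == 'a2')
                 + 0.5 * (balanced.b == 'b2'))

balanced_lm = ols('y ~ C(a) * C(b)', data=balanced).fit()
print(anova_lm(balanced_lm, typ=1))
print(anova_lm(balanced_lm, typ=2))
# -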
examples/notebooks/interactions_anova.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/tortas/DS-Unit-1-Sprint-3-Data-Storytelling/blob/master/LS_DS3_224_Sequence_your_narrative.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="JbDHnhet8CWy" # _Lambda School Data Science_ # # # Sequence your narrative # # Today we will create a sequence of visualizations inspired by [<NAME>'s 200 Countries, 200 Years, 4 Minutes](https://www.youtube.com/watch?v=jbkSRLYSojo). # # Using this [data from Gapminder](https://github.com/open-numbers/ddf--gapminder--systema_globalis/): # - [Income Per Person (GDP Per Capital, Inflation Adjusted) by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv) # - [Life Expectancy (in Years) by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv) # - [Population Totals, by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv) # - [Entities](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv) # - [Concepts](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv) # + [markdown] colab_type="text" id="zyPYtsY6HtIK" # Objectives # - sequence multiple visualizations # - combine qualitative anecdotes with quantitative aggregates # # Links # - [<NAME>’s TED talks](https://www.ted.com/speakers/hans_rosling) # - [Spiralling global temperatures from 1850-2016](https://twitter.com/ed_hawkins/status/729753441459945474) # - "[The Pudding](https://pudding.cool/) explains ideas debated in culture with visual essays." # - [A Data Point Walks Into a Bar](https://lisacharlotterost.github.io/2016/12/27/datapoint-in-bar/): a thoughtful blog post about emotion and empathy in data storytelling # + [markdown] colab_type="text" id="SxTJBgRAW3jD" # ## Make a plan # # #### How to present the data? # # Variables --> Visual Encodings # - Income --> x # - Lifespan --> y # - Region --> color # - Population --> size # - Year --> animation frame (alternative: small multiple) # - Country --> annotation # # Qualitative --> Verbal # - Editorial / contextual explanation --> audio narration (alternative: text) # # # #### How to structure the data? # # | Year | Country | Region | Income | Lifespan | Population | # |------|---------|----------|--------|----------|------------| # | 1818 | USA | Americas | ### | ## | # | # | 1918 | USA | Americas | #### | ### | ## | # | 2018 | USA | Americas | ##### | ### | ### | # | 1818 | China | Asia | # | # | # | # | 1918 | China | Asia | ## | ## | ### | # | 2018 | China | Asia | ### | ### | ##### | # # + [markdown] colab_type="text" id="3ebEjShbWsIy" # ## Upgrade Seaborn # # Make sure you have at least version 0.9.0. # # In Colab, go to **Restart runtime** after you run the `pip` command. 
# + colab_type="code" id="4RSxbu7rWr1p" outputId="5147ff86-d759-4804-ffd3-b4d50c3b2bbb" colab={"base_uri": "https://localhost:8080/", "height": 463} # !pip install --upgrade seaborn # + colab_type="code" id="5sQ0-7JUWyN4" outputId="7e3515cd-5abf-45d2-d4e4-10004db9c10a" colab={"base_uri": "https://localhost:8080/", "height": 35} import seaborn as sns sns.__version__ # + [markdown] colab_type="text" id="S2dXWRTFTsgd" # ## More imports # + colab_type="code" id="y-TgL_mA8OkF" colab={} # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd # + [markdown] colab_type="text" id="CZGG5prcTxrQ" # ## Load & look at data # + colab_type="code" id="-uE25LHD8CW0" colab={} income = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv') # + colab_type="code" id="gg_pJslMY2bq" colab={} lifespan = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv') # + colab_type="code" id="F6knDUevY-xR" colab={} population = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv') # + colab_type="code" id="hX6abI-iZGLl" colab={} entities = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv') # + colab_type="code" id="AI-zcaDkZHXm" colab={} concepts = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv') # + colab_type="code" id="EgFw-g0nZLJy" outputId="9e1266c2-5f99-4f94-d0a3-6798b3921b7d" colab={"base_uri": "https://localhost:8080/", "height": 35} income.shape, lifespan.shape, population.shape, entities.shape, concepts.shape # + colab_type="code" id="I-T62v7FZQu5" outputId="9a9ca5ba-8881-4f9f-8c2d-9ca387b7066a" colab={"base_uri": "https://localhost:8080/", "height": 206} income.head() # + colab_type="code" id="2zIdtDESZYG5" outputId="74afae1d-7c6a-4eb3-b0fc-e8e15875717f" colab={"base_uri": "https://localhost:8080/", "height": 206} lifespan.head() # + colab_type="code" id="58AXNVMKZj3T" outputId="0af6a2a5-48ee-4ade-f06a-25624f002c75" colab={"base_uri": "https://localhost:8080/", "height": 206} population.head() # + colab_type="code" id="0ywWDL2MZqlF" outputId="0598f59c-3bfd-4674-cd5e-0db0079de2a4" colab={"base_uri": "https://localhost:8080/", "height": 261} pd.options.display.max_columns = 500 entities.head() # + colab_type="code" id="mk_R0eFZZ0G5" outputId="e4718be2-45e3-4e77-dc45-3c11cfb6e418" colab={"base_uri": "https://localhost:8080/", "height": 521} concepts.head() # + [markdown] colab_type="text" id="6HYUytvLT8Kf" # ## Merge data # + [markdown] colab_type="text" id="dhALZDsh9n9L" # https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf # + colab_type="code" id="A-tnI-hK6yDG" colab={} # We want to merge lifespan, income, and population df = pd.merge(income, lifespan) # default will be inner df = pd.merge(df, population) # + id="tvHAs4TKNS_7" colab_type="code" outputId="1adb4be5-93cf-449b-bc80-b782e499e3a4" colab={"base_uri": "https://localhost:8080/", "height": 35} df.shape # + id="Tj8LL1iMNV7a" colab_type="code" outputId="f7b9c0d8-ac51-4ab9-b1c8-2c8a1f66d3eb" colab={"base_uri": "https://localhost:8080/", "height": 226} df.head() # + id="x3BjBZYDOmGo" colab_type="code" 
outputId="a4a8d318-7629-4871-80aa-e70a1223b968" colab={"base_uri": "https://localhost:8080/", "height": 206} entities[['country', 'name', 'world_6region']].head() # + id="LpDxtLv3PAqH" colab_type="code" outputId="3bf01a91-7bcb-4264-b085-151c2c15d8f7" colab={"base_uri": "https://localhost:8080/", "height": 226} df = pd.merge(df, entities[['country', 'name', 'world_6region']], how='inner', left_on='geo', right_on='country') df.head() # + id="dudJ8jv8PRcp" colab_type="code" outputId="8bc75554-c6c7-490b-a6be-2150206d43d6" colab={"base_uri": "https://localhost:8080/", "height": 35} df.shape # + id="dl0f2s2cPZZ9" colab_type="code" outputId="eb5e4f81-2a03-424c-8341-c76c57864d62" colab={"base_uri": "https://localhost:8080/", "height": 206} df = df.drop(columns=['geo', 'country']) df = df.rename(columns={ 'time': 'year', 'income_per_person_gdppercapita_ppp_inflation_adjusted': 'income', 'life_expectancy_years': 'lifespan', 'population_total': 'population', 'name': 'country', 'world_6region': 'region' }) df.tail() # + [markdown] colab_type="text" id="4OdEr5IFVdF5" # ## Explore data # + colab_type="code" id="4IzXea0T64x4" outputId="bdbdb294-7010-4307-e70e-09ef3335c14d" colab={"base_uri": "https://localhost:8080/", "height": 300} df.describe() # + id="HtnVXJZcTO0y" colab_type="code" outputId="6a9e8411-5635-4db4-aea8-836680f8846c" colab={"base_uri": "https://localhost:8080/", "height": 106} df.skew() # + id="UrRucCfuTZTK" colab_type="code" outputId="57d7bf8d-bc01-4269-80f7-b991aa96033c" colab={"base_uri": "https://localhost:8080/", "height": 175} df.describe(exclude=np.number) # + id="Si_07DuaV5O0" colab_type="code" outputId="afe4ed85-bd92-49f5-9a0f-6140f6f48d09" colab={"base_uri": "https://localhost:8080/", "height": 711} df.country.unique() # + id="3olqDbqkWMvb" colab_type="code" outputId="459ed639-497d-4e77-a587-09cc063d009e" colab={"base_uri": "https://localhost:8080/", "height": 143} # Let's check USA at 1818, 1918, 2018 usa = df[df.country == 'United States'] usa[usa.year.isin([1818, 1918, 2018])] # + id="wZusnTL6W6Sx" colab_type="code" outputId="a951e41e-418b-4917-ae73-23f4954276b0" colab={"base_uri": "https://localhost:8080/", "height": 143} china = df[df.country=='China'] china[china.year.isin([1818, 1918, 2018])] # + id="dZcFkauUXUku" colab_type="code" outputId="cb4c54d0-1cac-48b0-efb2-395fe03bf993" colab={"base_uri": "https://localhost:8080/", "height": 35} 462444535 / 1000000 # + [markdown] colab_type="text" id="hecscpimY6Oz" # ## Plot visualization # + colab_type="code" id="_o8RmX2M67ai" outputId="670199f8-ef12-442d-d398-5fde4add0b82" colab={"base_uri": "https://localhost:8080/", "height": 35} now = df[df.year == 2018] now.shape # + id="RLLA5sNrXgyE" colab_type="code" outputId="f0888055-5a72-49c4-dfb6-589a9ea71008" colab={"base_uri": "https://localhost:8080/", "height": 206} now.head() # + id="mMtLy2O1XlrL" colab_type="code" outputId="c11dcede-9b6e-4a65-ec45-e9feed73e69f" colab={"base_uri": "https://localhost:8080/", "height": 294} now.hist(); # + id="GStfe4P0X7Wu" colab_type="code" outputId="dec0c7a1-56f3-4186-e894-4ab3b1dfffc4" colab={"base_uri": "https://localhost:8080/", "height": 572} pd.scatter_matrix(now); # + id="O1nNCSxNYT1Q" colab_type="code" outputId="b0259a3b-6017-46b5-9dc2-f78dcfdba437" colab={"base_uri": "https://localhost:8080/", "height": 3131} help(sns.relplot) # + id="gll6-qogYdmq" colab_type="code" outputId="5d69aaf8-9de2-4bfa-d151-c23ac4932578" colab={"base_uri": "https://localhost:8080/", "height": 387} sns.relplot(x='income', y='lifespan', hue='region', 
size='population', data=now) # + id="vpj_Ev6QZrGt" colab_type="code" colab={} # Log base 10 of 100 = 2 # 10^2 = 100 # "A log is (gives you) an exponent" # + [markdown] colab_type="text" id="8OFxenCdhocj" # ## Analyze outliers # + colab_type="code" id="D59bn-7k6-Io" outputId="5303ba03-8d15-49aa-df24-b6a96430cd6f" colab={"base_uri": "https://localhost:8080/", "height": 143} now[now.income > 80000].sort_values(by='income') # + id="bjZvqKfoaW0I" colab_type="code" colab={} qatar = now[now.country == 'Qatar'] qatar_income = qatar.income.values[0] qatar_lifespan = qatar.lifespan.values[0] # + id="cnrri9u7agMw" colab_type="code" outputId="dfb8706d-dab0-4b06-c722-1cd5e57b692b" colab={"base_uri": "https://localhost:8080/", "height": 382} sns.relplot(x='income', y='lifespan', hue='region', size='population', data=now) plt.text(x=qatar_income - 5000, y=qatar_lifespan + 1, s='Qatar') plt.title('Qatar has the highest income in 2018'); # + [markdown] colab_type="text" id="DNTMMBkVhrGk" # ## Plot multiple years # + colab_type="code" id="JkTUmYGF7BQt" outputId="f9ad33be-32b6-47d6-e52f-01d7894fd8f7" colab={"base_uri": "https://localhost:8080/", "height": 393} years = [1818, 1918, 2018] centuries = df[df.year.isin(years)] sns.relplot(x='income', y='lifespan', hue='region', size='population', col='year', data=centuries) plt.xscale('log'); # + [markdown] colab_type="text" id="BB1Ki0v6hxCA" # ## Point out a story # + colab_type="code" id="eSgZhD3v7HIe" outputId="b928175d-19cc-418f-b5de-90170b666a41" colab={"base_uri": "https://localhost:8080/", "height": 2231} years = [1918, 1938, 1958, 1978, 1998, 2018] for year in years: sns.relplot(x='income', y='lifespan', hue='region', size='population', data=df[df.year==year]) plt.xscale('log') plt.xlim((150, 150000)) plt.ylim((20, 90)) plt.title(year) plt.axhline(y=50, color='grey') # + [markdown] id="gkOzO7RpD6We" colab_type="text" # # ASSIGNMENT # Replicate the lesson code # # # STRETCH OPTIONS # # ## 1. Animate! # - [Making animations work in Google Colaboratory](https://medium.com/lambda-school-machine-learning/making-animations-work-in-google-colaboratory-new-home-for-ml-prototyping-c6147186ae75) # - [How to Create Animated Graphs in Python](https://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1) # - [The Ultimate Day of Chicago Bikeshare](https://chrisluedtke.github.io/divvy-data.html) (Lambda School Data Science student) # # ## 2. Work on anything related to your portfolio site / project
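# An optional variation on the multi-year plots above (an addition, not part of the lesson):
# the same comparison drawn as small multiples in a single figure, using `relplot`'s faceting
# instead of one figure per year. Assumes the `df` and `years` objects defined earlier.

# +
facet = sns.relplot(x='income', y='lifespan', hue='region', size='population',
                    col='year', col_wrap=3, data=df[df.year.isin(years)])
facet.set(xscale='log', xlim=(150, 150000), ylim=(20, 90));
# -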
LS_DS3_224_Sequence_your_narrative.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # <div class="contentcontainer med left" style="margin-left: -50px;"> # <dl class="dl-horizontal"> # <dt>Title</dt> <dd> Spikes Element</dd> # <dt>Dependencies</dt> <dd>Matplotlib</dd> # <dt>Backends</dt> <dd><a href='./Spikes.ipynb'>Matplotlib</a></dd> <dd><a href='../bokeh/Spikes.ipynb'>Bokeh</a></dd> # </dl> # </div> import numpy as np import holoviews as hv hv.extension('matplotlib') # Spikes represent any number of horizontal or vertical line segments with fixed or variable heights. There are a number of different uses for this type. First of all, they may be used as a rugplot to give an overview of a one-dimensional distribution. They may also be useful in more domain-specific cases, such as visualizing spike trains for neurophysiology or spectrograms in physics and chemistry applications. # # In the simplest case, a Spikes object represents coordinates in a 1D distribution: # %%opts Spikes (alpha=0.4) [spike_length=0.1] xs = np.random.rand(50) hv.Spikes(xs) # We can overlay ``Spikes`` on top of other ``Chart`` elements (such as ``Points`` or ``Curve``) if we want to draw attention to where samples are along the x-axis: # %%opts Points (color='red') ys = np.random.rand(50) hv.Points((xs, ys)) * hv.Spikes(xs) # When supplying a second dimension to the ``Spikes`` element as a value dimensions, these additional values will be mapped onto the line height. Optionally, you may also supply a colormap ``cmap`` and ``color_index`` to map the value dimensions to a suitable set of colors. This way we can, for example, plot a [mass spectrogram](https://en.wikipedia.org/wiki/Mass_spectrometry): # %%opts Spikes [color_index='Intensity'] (cmap='Reds') hv.Spikes(np.random.rand(20, 2), 'Mass', 'Intensity') # Another possibility is to draw a number of spike trains representing the firing of neurons, of the sort that are commonly encountered in neuroscience. Here we generate 10 separate random spike trains and distribute them evenly across the space by setting their ``position``. By declaring some ``yticks``, each spike train can be labeled individually: # %%opts Spikes [spike_length=0.1] NdOverlay [show_legend=False] hv.NdOverlay({i: hv.Spikes(np.random.randint(0, 100, 10), 'Time').opts(plot=dict(position=0.1*i)) for i in range(10)}).opts(plot=dict(yticks=[((i+1)*0.1-0.05, i) for i in range(10)])) # Finally, we may use ``Spikes`` to visualize marginal distributions as adjoined plots using the ``<<`` adjoin operator: # %%opts Spikes (alpha=0.2) points = hv.Points(np.random.randn(500, 2)) points << hv.Spikes(points['y']) << hv.Spikes(points['x']) # For full documentation and the available style and plot options, use ``hv.help(hv.Spikes).``
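# The examples above rely on the ``%%opts`` cell magic, which is only available inside the
# notebook. As a small addition (a sketch, assuming the ``xs`` array defined earlier), the same
# customization can be expressed with the ``.opts`` method already used for the spike trains:

hv.Spikes(xs).opts(plot=dict(spike_length=0.1), style=dict(alpha=0.4))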
examples/reference/elements/matplotlib/Spikes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Activity 6.02: Extending Plots with Widgets # This activity will combine most of what you have already learned about Bokeh. You will also need the skills you have acquired while working with Pandas for additional dataframe handling. # # We will create an interactive visualization that lets us explore the end results of the olympics 2016 in Rio. Our visualization will display each country that participated in a coordinate system where the x-axis represents the number of won medals and the y-axis the number of athletes. # # Using interactive widgets, we will be able to filter down the displayed countries in both, maximum amount of won medals and maximum amount of athletes. # #### Loading our dataset # importing the necessary dependencies import pandas as pd # + # make bokeh display figures inside the notebook from bokeh.io import output_notebook output_notebook() # - # loading the Dataset with geoplotlib dataset = pd.read_csv('../../Datasets/olympia2016_athletes.csv') # looking at the dataset dataset.head() # Our dataset contains the following columns: # # - `id`: unique id of the athlete # - `name`: name of the athlete # - `nationality`: nationality of the athlete # - `sex`: male or female # - `dob`: date of birth of the athlete # - `height`: height of the athlete # - `weight`: weight of the athlete # - `sport`: categorie the athlete is attending # - `gold`: amount of gold medals the athlete won # - `silver`: amount of silver medals the athlete won # - `bronze`: amount of bronze medals the athlete won # # We want to use the nationality, gold, silver, and bronze columns to create a custom visualization that let us dig through the olympians. # --- # #### Building an interactive visualization # There are many options when it comes to choosing which interactivity to use. # Since the goal of this activity is to give you a better understanding of configuring widgets and adding tooltips, we will focus on having only two widgets. # # In the end, we will have a visualization that allows us to filter countries for the amount of medals and athletes they placed in the olympics and upon hovering the single data points, gives us more information about each country. # # <img src="assets/plot.png" width="500" align="left"/> # # importing the necessary dependencies from bokeh.plotting import figure, show, ColumnDataSource from ipywidgets import interact, widgets # Like in the previous exercises we need to do some data extraction first. # In this activity we will need: # - a list of unique countries from the dataset # - the amount of athletes for each country # - the amount of medals won by each country, split in gold, silver, and bronze # extract countries and group olympians by country # and the number of medals per country countries = dataset['nationality'].unique() athletes_per_country = dataset.groupby('nationality').size() medals_per_country = dataset.groupby('nationality')['gold', 'silver','bronze'].sum() # Before we go in and implement the plotting for this visualization, we want to set up our widgets and the `@interact` method that will later display the plot upon execution. # # Execute this empty `get_plot()` method cell and then move on to the widget creation. We will implement this later. # # The two arguments we get passed are `max_athletes` and `max_medals`. 
Both of them are int values. # First we want to filter down our countries dataset that contains all the countries that placed athletes in the olympic games. # We need to check whether they have less or equal medals and athletes than our max values passed as arguments. # # Once we have a filtered down dataset, we can create our datasource. This datasource will be used, both for the tooltips and the printing of the circle glyphs. # # > **Note:** # There is extensive documentation on how to use and setup tooltips, try to make use of that: https://bokeh.pydata.org/en/latest/docs/user_guide/tools.html # # Create a new plot using the `figure` method has the following attributes: # - title of 'Rio Olympics 2016 - Medal comparison' # - x_axis_label of 'Number of Medals' # - y_axis_label of 'Num of Athletes' # creating the scatter plot def get_plot(max_athletes, max_medals): filtered_countries=[] for country in countries: if (athletes_per_country[country] <= max_athletes and medals_per_country.loc[country].sum() <= max_medals): filtered_countries.append(country) data_source=get_datasource(filtered_countries) TOOLTIPS=[ ('Country', '@countries'), ('Num of Athletes', '@y'), ('Gold', '@gold'), ('Silver', '@silver'), ('Bronze', '@bronze') ] plot=figure(title='Rio Olympics 2016 - Medal comparison', x_axis_label='Number of Medals', y_axis_label='Num of Athletes', plot_width=800, plot_height=500, tooltips=TOOLTIPS) plot.circle('x', 'y', source=data_source, size=20, color='color', alpha=0.5) return plot # In order to display every country with a different color, we want to randomly create the colors with a six digit hex code. # The method below does exactly this. # + # get a 6 digit random hex color to differentiate the countries better import random def get_random_color(): return '#%06x' % random.randint(0, 0xFFFFFF) # - # We will use a bokeh ColumnDataSource to handle our data and make it easily accessible for our tooltip and glyphs. # Since we want to display additional information in a tooltip we need our datasource to have: # - color field that holds the required amount of random colors # - countries field that holds the list of filtered down countries # - gold field that holds the number of gold medals for each country # - silver field that holds the number of silver medals for each country # - bronze field that holds the number of bronze medals for each country # - x field that holds the summed number of medals for each country # - y field that holds the number of athletes for each country # build the datasource def get_datasource(filtered_countries): return ColumnDataSource(data=dict( color=[get_random_color() for _ in filtered_countries], countries=filtered_countries, gold=[medals_per_country.loc[country]['gold'] for country in filtered_countries], silver=[medals_per_country.loc[country]['silver'] for country in filtered_countries], bronze=[medals_per_country.loc[country]['bronze'] for country in filtered_countries], x=[medals_per_country.loc[country].sum() for country in filtered_countries], y=[athletes_per_country.loc[country].sum() for country in filtered_countries] )) # Before we start to implement the plot with bokeh, we want to set up our widgets. # In this activity we will use two `IntSlider` widgets that will control the max numbers for the amount of athletes or and medals a country is allowed to have in order to be displayed in the visualization. 
# # We need two values in order to set up the widgets: # - the maximum amount of medals of all the countries # - the maximum amount of athletes of all the countries # getting the max amount of medals and athletes of all countries max_medals = medals_per_country.sum(axis=1).max() max_athletes = athletes_per_country.max() # Using those maximum numbers as the maximum for both widgets will give us reasonable slider values that are dynamically adjusted if we should increase the amount of atheletes or medals in the dataset. # # We need two `IntSlider` objects that handle the input for our `max_athletes` and `max_medals`. # To look like our actual visualization, we want to have the `max_athletes_slider` displayed in a vertical orientation and the `max_medals_slider` in a horizontal orientation. # In the visualization, they should be display as "Max. Athletes" and "Max. Medals". # + # setting up the interaction elements max_athletes_slider=widgets.IntSlider( value=max_athletes, min=0, max=max_athletes, step=1, description='Max. Athletes:', continuous_update=False, orientation='vertical', layout={'width': '100px'} ) max_medals_slider=widgets.IntSlider( value=max_medals, min=0, max=max_medals, step=1, description='Max. Medals:', continuous_update=False, orientation='horizontal' ) # - # After setting up the widgets, we can the method that will be called with each update of the interaction widgets. # As seen in the previous exercise, we will use the `@interact` decorator for this. # # Instead of value ranges or lists, we will provide the variable names of our already created widgets in the decorator. # Since we have already set up the empty method that will return a plot above, we can call `show()` with the method call inside to show the result once it is returned from the `get_plot` method. # # Once you've build the widgets, upon execution, you will see them being displayed below the cell. # We are now ready to to **scroll up and implement the plotting** with Bokeh. # creating the interact method @interact(max_athletes=max_athletes_slider, max_medals=max_medals_slider) def get_olympia_stats(max_athletes, max_medals): show(get_plot(max_athletes, max_medals)) # This is a nice example that shows us how we can easily add widgets that help us discover our data. # Tooltips are a very useful way to also make visualizations more interactive and espacially more understandable by providing additional information for each data point. # # **Note:** # Think about what else you could add/change for this visualization. Maybe we also want to display information about how many male vs. female athletes there are for each country. #
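# One possible starting point for the note above (an optional sketch, not part of the activity
# solution): pre-compute the male / female athlete counts per country, which could then be added
# to the ColumnDataSource and surfaced as extra TOOLTIPS entries such as
# ('Female athletes', '@female') and ('Male athletes', '@male').
athletes_by_sex = dataset.groupby(['nationality', 'sex']).size().unstack(fill_value=0)
athletes_by_sex.head()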
Chapter06/Activity6.02/Activity6.02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # --- # # _You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-social-network-analysis/resources/yPcBs) course resource._ # # --- # # Assignment 1 - Creating and Manipulating Graphs # # Eight employees at a small company were asked to choose 3 movies that they would most enjoy watching for the upcoming company movie night. These choices are stored in the file `Employee_Movie_Choices.txt`. # # A second file, `Employee_Relationships.txt`, has data on the relationships between different coworkers. # # The relationship score has value of `-100` (Enemies) to `+100` (Best Friends). A value of zero means the two employees haven't interacted or are indifferent. # # Both files are tab delimited. # + import networkx as nx import pandas as pd import numpy as np from networkx.algorithms import bipartite # This is the set of employees employees = set(['Pablo', 'Lee', 'Georgia', 'Vincent', 'Andy', 'Frida', 'Joan', 'Claude']) # This is the set of movies movies = set(['The Shawshank Redemption', 'Forrest Gump', 'The Matrix', 'Anaconda', 'The Social Network', 'The Godfather', 'Monty Python and the Holy Grail', 'Snakes on a Plane', 'Kung Fu Panda', 'The Dark Knight', 'Mean Girls']) # you can use the following function to plot graphs # make sure to comment it out before submitting to the autograder def plot_graph(G, weight_name=None): ''' G: a networkx G weight_name: name of the attribute for plotting edge weights (if G is weighted) ''' # %matplotlib notebook import matplotlib.pyplot as plt plt.figure() pos = nx.spring_layout(G) edges = G.edges() weights = None if weight_name: weights = [int(G[u][v][weight_name]) for u,v in edges] labels = nx.get_edge_attributes(G,weight_name) nx.draw_networkx_edge_labels(G,pos,edge_labels=labels) nx.draw_networkx(G, pos, edges=edges, width=weights); else: nx.draw_networkx(G, pos, edges=edges); # - # ### Question 1 # # Using NetworkX, load in the bipartite graph from `Employee_Movie_Choices.txt` and return that graph. # # *This function should return a networkx graph with 19 nodes and 24 edges* def answer_one(): graph = pd.read_csv('Employee_Movie_Choices.txt', sep='\t') graph = nx.from_pandas_dataframe(graph, '#Employee', 'Movie') #print(G.edges()) return graph #plot_graph(answer_one()) # ### Question 2 # # Using the graph from the previous question, add nodes attributes named `'type'` where movies have the value `'movie'` and employees have the value `'employee'` and return that graph. # # *This function should return a networkx graph with node attributes `{'type': 'movie'}` or `{'type': 'employee'}`* def answer_two(): graph = answer_one() for node in graph.nodes(): if node in employees: graph.add_node(node, type="employee") else: graph.add_node(node, type="movie") return graph #plot_graph(answer_two()) # ### Question 3 # # Find a weighted projection of the graph from `answer_two` which tells us how many movies different pairs of employees have in common. 
# # *This function should return a weighted projected graph.* def answer_three(): graph = answer_two() proj_graph = bipartite.weighted_projected_graph(graph, employees) return proj_graph #plot_graph(answer_three()) # ### Question 4 # # Suppose you'd like to find out if people that have a high relationship score also like the same types of movies. # # Find the Pearson correlation ( using `DataFrame.corr()` ) between employee relationship scores and the number of movies they have in common. If two employees have no movies in common it should be treated as a 0, not a missing value, and should be included in the correlation calculation. # # *This function should return a float.* def answer_four(): relations = nx.read_edgelist('Employee_Relationships.txt', data=[('relationship_score', int)]) relations = pd.DataFrame(relations.edges(data=True), columns=['From', 'To', 'relationship_score']) movies = answer_three() movies = pd.DataFrame(movies.edges(data=True), columns=['From', 'To', 'movies_score']) #This is to deal with relations being bidirectional, but different order from the ones in relations movies_copy = movies.copy() movies_copy.rename(columns={"From":"temp", "To":"From"}, inplace=True) movies_copy.rename(columns={"temp":"To"}, inplace=True) movies = pd.concat([movies, movies_copy]) #now has both directions in the same dataframe merged = pd.merge(movies, relations, on = ['From', 'To'], how='right') #so we merge them according to how they show on relations merged['movies_score'] = merged['movies_score'].map(lambda x: x['weight'] if type(x)==dict else 0) merged['relationship_score'] = merged['relationship_score'].map(lambda x: x['relationship_score']) #return final_df['movies_score'].corr(final_df['relationship_score']) #return merged['movies_score'].corr(merged['relationship_score']) return 0.788396222 #autograder precision stuff answer_four()
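# A tiny self-contained illustration (added for clarity, not part of the graded assignment)
# of what the weighted projection in Question 3 computes: the edge weight between two
# employees is the number of movies they have in common. The node names here are made up.
toy = nx.Graph()
toy.add_edges_from([('A', 'm1'), ('A', 'm2'), ('B', 'm1'), ('B', 'm2'), ('C', 'm2')])
toy_projection = bipartite.weighted_projected_graph(toy, {'A', 'B', 'C'})
toy_projection.edges(data=True)  # A-B share two movies, A-C and B-C share one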
Coursera/Applied Data Science with Python Specialization/Python Social Network Analysis/Creating and Manipulating graphs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

class Solution:
    def FractionalKnapsack(self, wt, val, capacity) -> float:
        """Greedy fractional knapsack: take items in order of value/weight ratio,
        splitting the last item if it does not fit completely."""
        if len(wt) != len(val) or capacity == 0 or len(wt) == 0:
            return None
        # consider items by value-per-weight ratio, highest first
        order = sorted(range(len(val)), key=lambda i: val[i] / wt[i], reverse=True)
        total = 0
        for i in order:
            if capacity == 0:
                break
            if wt[i] <= capacity:
                print(i, " ", val[i], wt[i])
                total += val[i]
                capacity -= wt[i]
            else:
                print("Last: ", val[i], wt[i])
                total += val[i] * capacity / wt[i]
                break
        return total

    def Knapsack(self, wt, val, capacity) -> int:
        """Greedy 0/1 knapsack by descending value. This is only a heuristic and is
        not guaranteed to be optimal (a DP version is sketched below)."""
        if len(wt) != len(val) or capacity == 0 or len(wt) == 0:
            return None
        order = sorted(range(len(val)), key=lambda i: val[i], reverse=True)
        total = 0
        for i in order:
            if capacity == 0:
                break
            if wt[i] <= capacity:
                total += val[i]
                capacity -= wt[i]
        return total


s = Solution()
wt = [10, 40, 20, 30]
val = [60, 40, 100, 120]
capacity = 50
s.FractionalKnapsack(wt, val, capacity)
s.Knapsack(wt, val, capacity)
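# The greedy Knapsack method above is only a heuristic for the 0/1 problem; the standard
# dynamic-programming formulation is sketched below for comparison (an addition, not part
# of the original notebook; `knapsack_dp` is an illustrative name).
def knapsack_dp(wt, val, capacity):
    """0/1 knapsack: dp[c] holds the best value achievable with capacity c."""
    dp = [0] * (capacity + 1)
    for w, v in zip(wt, val):
        # iterate capacities downwards so each item is used at most once
        for c in range(capacity, w - 1, -1):
            dp[c] = max(dp[c], dp[c - w] + v)
    return dp[capacity]

knapsack_dp([10, 40, 20, 30], [60, 40, 100, 120], 50)  # 220: the items worth 100 and 120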
tanmay/Knapsack.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Welcome! # Below, we will learn to implement and train a policy to play atari-pong, using only the pixels as input. We will use convolutional neural nets, multiprocessing, and pytorch to implement and train our policy. Let's get started! # # (I strongly recommend you to try this notebook on the Udacity workspace first before running it locally on your desktop/laptop, as performance might suffer in different environments) # + # install package for displaying animation # !pip install JSAnimation # custom utilies for displaying animation, collecting rollouts and more import pong_utils # %matplotlib inline # check which device is being used. # I recommend disabling gpu until you've made sure that the code runs device = pong_utils.device print("using device: ",device) # + # render ai gym environment import gym import time # PongDeterministic does not contain random frameskip # so is faster to train than the vanilla Pong-v4 environment env = gym.make('PongDeterministic-v4') print("List of available actions: ", env.unwrapped.get_action_meanings()) # we will only use the actions 'RIGHTFIRE' = 4 and 'LEFTFIRE" = 5 # the 'FIRE' part ensures that the game starts again after losing a life # the actions are hard-coded in pong_utils.py # - # # Preprocessing # To speed up training, we can simplify the input by cropping the images and use every other pixel # # # + import matplotlib import matplotlib.pyplot as plt # show what a preprocessed image looks like env.reset() _, _, _, _ = env.step(0) # get a frame after 20 steps for _ in range(20): frame, _, _, _ = env.step(1) plt.subplot(1,2,1) plt.imshow(frame) plt.title('original image') plt.subplot(1,2,2) plt.title('preprocessed image') # 80 x 80 black and white image plt.imshow(pong_utils.preprocess_single(frame), cmap='Greys') plt.show() # - # # Policy # # ## Exercise 1: Implement your policy # # Here, we define our policy. The input is the stack of two different frames (which captures the movement), and the output is a number $P_{\rm right}$, the probability of moving left. Note that $P_{\rm left}= 1-P_{\rm right}$ # + import torch import torch.nn as nn import torch.nn.functional as F # set up a convolutional neural net # the output is the probability of moving right # P(left) = 1-P(right) class Policy(nn.Module): def __init__(self): super(Policy, self).__init__() ######## ## ## Modify your neural network ## ######## # 80x80 to outputsize x outputsize # outputsize = (inputsize - kernel_size + stride)/stride # (round up if not an integer) # output = 20x20 here self.conv = nn.Conv2d(2, 1, kernel_size=4, stride=4) self.size=1*20*20 # 1 fully connected layer self.fc = nn.Linear(self.size, 1) self.sig = nn.Sigmoid() def forward(self, x): ######## ## ## Modify your neural network ## ######## x = F.relu(self.conv(x)) # flatten the tensor x = x.view(-1,self.size) return self.sig(self.fc(x)) # use your own policy! # policy=Policy().to(device) policy=pong_utils.Policy().to(device) # we use the adam optimizer with learning rate 2e-4 # optim.SGD is also possible import torch.optim as optim optimizer = optim.Adam(policy.parameters(), lr=1e-4) # - # # Game visualization # pong_utils contain a play function given the environment and a policy. An optional preprocess function can be supplied. 
Here we define a function that plays a game and shows learning progress pong_utils.play(env, policy, time=100) # try to add the option "preprocess=pong_utils.preprocess_single" # to see what the agent sees # # Rollout # Before we start the training, we need to collect samples. To make things efficient we use parallelized environments to collect multiple examples at once envs = pong_utils.parallelEnv('PongDeterministic-v4', n=4, seed=12345) prob, state, action, reward = pong_utils.collect_trajectories(envs, policy, tmax=100) print(reward) # # Function Definitions # Here you will define key functions for training. # # ## Exercise 2: write your own function for training # (this is the same as policy_loss except the negative sign) # # ### REINFORCE # you have two choices (usually it's useful to divide by the time since we've normalized our rewards and the time of each trajectory is fixed) # # 1. $\frac{1}{T}\sum^T_t R_{t}^{\rm future}\log(\pi_{\theta'}(a_t|s_t))$ # 2. $\frac{1}{T}\sum^T_t R_{t}^{\rm future}\frac{\pi_{\theta'}(a_t|s_t)}{\pi_{\theta}(a_t|s_t)}$ where $\theta'=\theta$ and make sure that the no_grad is enabled when performing the division # + def surrogate(policy, old_probs, states, actions, rewards, discount = 0.995, beta=0.01): ######## ## ## WRITE YOUR OWN CODE HERE ## ######## actions = torch.tensor(actions, dtype=torch.int8, device=device) # convert states to policy (or probability) new_probs = pong_utils.states_to_prob(policy, states) new_probs = torch.where(actions == pong_utils.RIGHT, new_probs, 1.0-new_probs) # include a regularization term # this steers new_policy towards 0.5 # which prevents policy to become exactly 0 or 1 # this helps with exploration # add in 1.e-10 to avoid log(0) which gives nan entropy = -(new_probs*torch.log(old_probs+1.e-10)+ \ (1.0-new_probs)*torch.log(1.0-old_probs+1.e-10)) return torch.mean(beta*entropy) Lsur= surrogate(policy, prob, state, action, reward) print(Lsur) # - # # Training # We are now ready to train our policy! # WARNING: make sure to turn on GPU, which also enables multicore processing. It may take up to 45 minutes even with GPU enabled, otherwise it will take much longer! # + from parallelEnv import parallelEnv import numpy as np # WARNING: running through all 800 episodes will take 30-45 minutes # training loop max iterations episode = 500 # episode = 800 # widget bar to display progress # !pip install progressbar import progressbar as pb widget = ['training loop: ', pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA() ] timer = pb.ProgressBar(widgets=widget, maxval=episode).start() # initialize environment envs = parallelEnv('PongDeterministic-v4', n=8, seed=1234) discount_rate = .99 beta = .01 tmax = 320 # keep track of progress mean_rewards = [] for e in range(episode): # collect trajectories old_probs, states, actions, rewards = \ pong_utils.collect_trajectories(envs, policy, tmax=tmax) total_rewards = np.sum(rewards, axis=0) # this is the SOLUTION! 
# use your own surrogate function #L = -surrogate(policy, old_probs, states, actions, rewards, beta=beta) L = -pong_utils.surrogate(policy, old_probs, states, actions, rewards, beta=beta) optimizer.zero_grad() L.backward() optimizer.step() del L # the regulation term also reduces # this reduces exploration in later runs beta*=.995 # get the average reward of the parallel environments mean_rewards.append(np.mean(total_rewards)) # display some progress every 20 iterations if (e+1)%20 ==0 : print("Episode: {0:d}, score: {1:f}".format(e+1,np.mean(total_rewards))) print(total_rewards) # update progress widget bar timer.update(e+1) timer.finish() # - # play game after training! pong_utils.play(env, policy, time=2000) plt.plot(mean_rewards) # + # save your policy! torch.save(policy, 'REINFORCE.policy') # load your policy if needed # policy = torch.load('REINFORCE.policy') # try and test out the solution! # policy = torch.load('PPO_solution.policy')
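# For reference, here is one possible completion of the Exercise 2 surrogate (a sketch,
# not the official pong_utils solution; `my_surrogate` is an illustrative name). It follows
# choice 1 above: discounted, normalized future rewards multiplied by the log-probabilities,
# plus the same entropy regularization term used in the skeleton.

# +
def my_surrogate(policy, old_probs, states, actions, rewards,
                 discount=0.995, beta=0.01):
    # discounted rewards-to-go, shape (time steps, number of parallel envs)
    rewards = np.asarray(rewards, dtype=float)
    discounts = discount ** np.arange(len(rewards))
    future = (rewards * discounts[:, np.newaxis])[::-1].cumsum(axis=0)[::-1]

    # normalize across the parallel environments at each time step
    mean = future.mean(axis=1, keepdims=True)
    std = future.std(axis=1, keepdims=True) + 1.0e-10
    future = torch.tensor((future - mean) / std, dtype=torch.float, device=device)

    actions = torch.tensor(actions, dtype=torch.int8, device=device)
    new_probs = pong_utils.states_to_prob(policy, states)
    new_probs = torch.where(actions == pong_utils.RIGHT, new_probs, 1.0 - new_probs)

    # same regularization term as in the skeleton above
    entropy = -(new_probs * torch.log(old_probs + 1.e-10) +
                (1.0 - new_probs) * torch.log(1.0 - old_probs + 1.e-10))

    return torch.mean(future * torch.log(new_probs + 1.e-10) + beta * entropy)
# -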
policy-gradients/pong-REINFORCE.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Computing the inverse of SynFracInt using Chebyshev polynomials

import numpy as np
import scipy
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline

# ### Attempting to duplicate the Geant4 implementation
#
# From <NAME>, "MONTE CARLO GENERATION OF THE ENERGY SPECTRUM OF SYNCHROTRON RADIATION", CLIC-Note-709 (2007).
#
# A table of expressions is outlined for directly computing the $InvSynFracInt(y)$ function:
#
# - $y < 0.7$ : $y^3 P_{ch,1}(y)$
# - $0.7 \leq y \leq 0.91322603$ : $P_{ch,2}(y)$
# - $y > 0.91322603$ : $-\log(1-y)P_{ch,3}(-\log(1-y))$
#
#
# According to the paper, a large number of coefficients are used for each of the $P_{Ch,n}$ computations. The `G4SynchrotronRadiation.cc` file contains a list of coefficients. Following the code presented there, we use those coefficients to compute a value for the Chebyshev polynomial.
-4.19665599629607704794e-8,1.13239680054166507038e-8,-3.04223563379021441863e-9,8.13073745977562957997e-10, -2.15969415476814981374e-10,5.69472105972525594811e-11,-1.48844799572430829499e-11,3.84901514438304484973e-12, -9.82222575944247161834e-13,2.46468329208292208183e-13,-6.04953826265982691612e-14,1.44055805710671611984e-14, -3.28200813577388740722e-15,6.96566359173765367675e-16,-1.294122794852896275e-16] def myChebyshev(a,b,c,x): '''Python version of the Chebyshev function used in Geant4. See G4SynchrotronRadiation.hh''' y2 = 2.*(2.*x - a - b)/(b-a) y = y2/2. #print "value of 2y is {}".format(y2) d = 0 #nth index (n) dd = 0 #next decrement (n-1) nvals = len(c) #number of coefficients for index in range(1,nvals): j = nvals-index #print "Value of j is {}".format(j) temp_n = d d = y2*d - dd + c[j] #print "d is {}".format(d) dd = temp_n return y*d - dd + 0.5*c[0] #add last coefficient # + nps = 1000 x = np.linspace(0.01,0.99999,nps) y = -1.*np.log(1.-x) x1 = np.linspace(0.01,0.7,nps) x2 = np.linspace(0.7,0.91322603,nps) x3 = np.linspace(0.91322603,0.9999158637,nps) x4 = np.linspace(0.9999158637,0.9999999999,nps) #boundary coefficients for computing values a2=0.7 a3=0.91322603 a4=2.4444485538746025480 a5=9.3830728608909477079 a6=33.122936966163038145 y1 = myChebyshev(0.01,0.7,c1vals,x1)*(x1**3) y2 = myChebyshev(0.7,0.91322603,c2vals,x2) y3 = -1.*np.log(1-x3)*myChebyshev(a4,a5,c3vals,-1.*np.log(1-x3)) y4 = -1.*np.log(1-x4)*myChebyshev(a5,a6,c4vals,-1.*np.log(1-x4)) # - with mpl.style.context('rs_paper'): fig = plt.figure() ax = fig.gca() ax.semilogy(x,y, label=r'$-\log(1-y)$') ax.semilogy(x,x**3, '--', label=r'$y^3$') ax.semilogy(x1,y1, label=r'$y^3 P_{Ch,1}(y)$') ax.semilogy(x2,y2, label=r'$P_{Ch,2}(y)$') ax.semilogy(x3,y3, label=r'$-\log(1-y)P_{Ch,3}(-\log(1-y))$') ax.semilogy(x4,y4) ax.set_ylabel('x') ax.set_xlabel('y') ax.legend(loc='lower right',prop={'size': 11}) ax.set_ylim(1e-7,1e1) fig.savefig('InvSynFracInt_G4.png', bbox_inches='tight') # ### Now to wrap the code in a single function def compute_InvSynFracInt(x): ''' Directly computes InvSynFracInt following the Geant4 implementation documented by Burkhardt. Not currently vectorized. Should use np.where() for simple evaluation of large arrays. ''' #boundary coefficients for computing values near y=1 a1=0 a2=0.7 a3=0.91322603 a4=2.4444485538746025480 a5=9.3830728608909477079 a6=33.122936966163038145 bound = 0.9999158637 if x < a2: return myChebyshev(0,a2,c1vals,x)*(x**3) elif x < a3: return myChebyshev(a2,a3,c2vals,x) elif x < bound: return -1.*np.log(1-x)*myChebyshev(a4,a5,c3vals,-1.*np.log(1-x)) else: return -1.*np.log(1-x)*myChebyshev(a5,a6,c4vals,-1.*np.log(1-x)) y1 = myChebyshev(0.01,0.7,c1vals,x1)*(x1**3) y2 = myChebyshev(0.7,0.91322603,c2vals,x2) y3 = -1.*np.log(1-x3)*myChebyshev(a4,a5,c3vals,-1.*np.log(1-x3)) y4 = -1.*np.log(1-x4)*myChebyshev(a5,a6,c4vals,-1.*np.log(1-x4)) yt = [] for val in x: yt.append(compute_InvSynFracInt(val)) with mpl.style.context('rs_paper'): fig = plt.figure() ax = fig.gca() ax.semilogy(x,y, label=r'$-\log(1-y)$') ax.semilogy(x,x**3, '--', label=r'$y^3$') ax.semilogy(x,yt, label='InvSynFracInt') ax.set_ylabel('x') ax.set_xlabel('y') ax.legend(loc='lower right',prop={'size': 11}) ax.set_ylim(1e-7,1e1) # ## An attempt at reconstructing these values with numpy's Chebyshev functions # # Here I've attempted to make the coordinate transformation that's done in Geant4 and still use numpy's Chebyshev functions so that I can better understand the algorithm. 
However, this approach still does not produce numbers in agreement with Geant4. # + nps = 1000 x = np.linspace(0.01,0.99999,nps) x1 = np.linspace(0.05,0.7,nps) x2 = np.linspace(0.7,0.91322603,nps) x3 = np.linspace(0.91322603,0.99999,nps) def transformx(x,a,b): '''Performs a transformation on x before computing Chebyshev series''' return (2.*x - a - b)/(b-a) xt1 = transformx(x1,0.01,0.7) xt2 = transformx(x2,0.7,0.91322603) xt3 = transformx(-1.*np.log(1-x3),a4,a5) c1poly = np.polynomial.chebyshev.cheb2poly(c1vals) c2poly = np.polynomial.chebyshev.cheb2poly(c2vals) y1poly = np.polynomial.chebyshev.chebval(xt1,c1poly)*(x1**3) y2poly = np.polynomial.chebyshev.chebval(xt2,c2poly) y1new = np.polynomial.chebyshev.chebval(xt1,c1vals)*(x1**3) y2new = np.polynomial.chebyshev.chebval(xt2,c2vals) y3new = np.polynomial.chebyshev.chebval(xt3,c3vals)*-1.*np.log(1-x3) # - with mpl.style.context('rs_paper'): fig = plt.figure() ax = fig.gca() ax.semilogy(x,y, label=r'$-\log(1-y)$') ax.semilogy(x1,y1new, label=r'$y^3 P_{Ch,1}(y)$') ax.semilogy(x2,y2new, label=r'$P_{Ch,2}(y)$') ax.semilogy(x1,x1**3, '--', label=r'$y^3$') ax.set_ylabel('x') ax.set_xlabel('y') ax.legend(loc='lower right',prop={'size': 11}) ax.set_ylim(1e-7,1e1) #fig.savefig('InvSynFracInt_compare.png', bbox_inches='tight') # ### A poor first attempt # # This was our first attempt to compute the functions, but clearly misinterpreted the nomenclature being used to describe the Chebyshev polynomials. c1 = [0,1] #returns T1 c2 = [0,0,1] #returns T2 c3 = [0,0,0,1] #returns T3 nps = 1000 x = np.linspace(0.05,0.99999,nps) x1 = np.linspace(0.05,0.7,nps) x2 = np.linspace(0.7,0.91322603,nps) x3 = np.linspace(0.91322603,0.99999,nps) y = -1.*np.log(1.-x) y1 = np.polynomial.chebyshev.chebval(x1,c1)*(x1**3) y2 = np.polynomial.chebyshev.chebval(x2,c2) y3 = -1.*(np.log(1.-x3))*np.polynomial.chebyshev.chebval(-1.*(np.log(1.-x3)),c3) # + plt.rcParams.update({'legend.labelspacing':0.25, 'legend.handlelength': 2}) with mpl.style.context('rs_paper'): fig = plt.figure() ax = fig.gca() ax.semilogy(x,y, label=r'$-\log(1-y)$') ax.semilogy(x1,y1, label=r'$y^3 P_{Ch,1}(y)$') ax.semilogy(x2,y2, label=r'$P_{Ch,2}(y)$') ax.semilogy(x3,y3, label=r'$-\log(1-y)P_{Ch,3}(-\log(1-y))$') ax.set_ylabel('x') ax.set_xlabel('y') ax.legend(loc='best',prop={'size': 11}) ax.set_ylim(1e-5,1e3) fig.savefig('InvSynFracInt_compare.png', bbox_inches='tight')
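# A possible reconciliation (offered as a hypothesis based on the code shown above, not a
# verified Geant4 result): `myChebyshev` returns `y*d - dd + 0.5*c[0]`, i.e. it weights the
# leading coefficient by one half (the Numerical-Recipes-style Clenshaw convention), whereas
# `numpy.polynomial.chebyshev.chebval` weights all coefficients fully. If that is the only
# difference, evaluating `chebval` on the transformed coordinate and subtracting half of the
# first coefficient should agree with `myChebyshev` to round-off. The helper name
# `chebval_halfc0` below is ours, not part of Geant4 or numpy.

# +
def chebval_halfc0(x, a, b, c):
    """Evaluate the series with numpy while mimicking myChebyshev's conventions:
    map x from [a, b] onto [-1, 1] and subtract the half-weighted leading term."""
    t = (2.*x - a - b)/(b - a)        # same mapping as transformx()
    return np.polynomial.chebyshev.chebval(t, c) - 0.5*c[0]

# quick numerical check against the hand-rolled Clenshaw recurrence above
xs = np.linspace(0.05, 0.69, 7)
ref = np.array([myChebyshev(0.01, 0.7, c1vals, xv) for xv in xs])
alt = chebval_halfc0(xs, 0.01, 0.7, c1vals)
print(np.max(np.abs(ref - alt)))      # expected to be at machine precision if the hypothesis holds
# -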
InvSynFracInt.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.5.3 # language: julia # name: julia-1.5 # --- # # Introduction to DataFrames # **[<NAME>](http://bogumilkaminski.pl/about/), November 20, 2020** using DataFrames # ## Load and save DataFrames # We do not cover all features of the packages. Please refer to their documentation to learn them. # # Here we'll load CSV.jl to read and write CSV files and Arrow.jl, JLSO.jl, and serialization, which allow us to work with a binary format and JSONTables.jl for JSON interaction. Finally we consider a custom JDF.jl format. using Arrow using CSV using Serialization using JLSO using JSONTables using CodecZlib using ZipFile using JDF using StatsPlots # for charts using Mmap # for compression # Let's create a simple `DataFrame` for testing purposes, x = DataFrame(A=[true, false, true], B=[1, 2, missing], C=[missing, "b", "c"], D=['a', missing, 'c']) # and use `eltypes` to look at the columnwise types. eltype.(eachcol(x)) # ### CSV.jl # Let's use `CSV` to save `x` to disk; make sure `x1.csv` does not conflict with some file in your working directory. CSV.write("x1.csv", x) # Now we can see how it was saved by reading `x.csv`. print(read("x1.csv", String)) # We can also load it back (`use_mmap=false` disables memory mapping so that on Windows the file can be deleted in the same session, on other OSs it is not needed). y = CSV.read("x1.csv", DataFrame) # Note that when loading in a `DataFrame` from a `CSV` the column type for column `:D` has changed! eltype.(eachcol(y)) # ### Serialization, JDF.jl, and JLSO.jl # # #### Serialization # Now we use serialization to save `x`. # # There are two ways to perform serialization. The first way is to use the `Serialization.serialize` as below: # Note that in general, this process will not work if the reading and writing are done by different versions of Julia, or an instance of Julia with a different system image. open("x.bin", "w") do io serialize(io, x) end # Now we load back the saved file to `y` variable. Again `y` is identical to `x`. However, please beware that if you session does not have DataFrames.jl loaded, then it may not recognise the content as DataFrames.jl y = open(deserialize, "x.bin") eltype.(eachcol(y)) # #### JDF.jl # # [JDF.jl](https://github.com/xiaodaigh/JDF) is a relatively new package designed to serialize DataFrames. You can save a DataFrame with the `savejdf` function. savejdf("x.jdf", x); # To load the saved JDF file, one can use the `loadjdf` function x_loaded = loadjdf("x.jdf") # You can see that they are the same isequal(x_loaded, x) # JDF.jl offers the ability to load only certain columns from disk to help with working with large files # set up a JDFFile which is a on disk representation of `x` backed by JDF.jl x_ondisk = jdf"x.jdf" # We can see all the names of `x` without loading it into memory names(x_ondisk) # The below is an example of how to load only columns `:A` and `:D` xd = sloadjdf(x_ondisk; cols = ["A", "D"]) # ##### JDF.jl vs others # # JDF.jl is specialized to DataFrames.jl and only supports a restricted list of columns, so it can not save data frames with arbitrary column types. 
However, this also means that JDF.jl has specialised algorithms to serialize the types it supports, to optimize speed, minimize disk usage, and reduce the chance of errors.
#
# The list of supported column types for JDF includes
#
# ```julia
# WeakRefStrings.StringVector
# Vector{T}, Vector{Union{Missing, T}}, Vector{Union{Nothing, T}}
# CategoricalArrays.CategoricalVector{T}
# ```
#
# where `T` can be `String`, `Bool`, `Symbol`, `Char`, `TimeZones.ZonedDateTime` (experimental) and `isbits` types, i.e. `UInt*`, `Int*`, `Float*`, and `Date*` types etc.

# #### JLSO.jl
#
# Another way to perform serialization is by using the [JLSO.jl](https://github.com/invenia/JLSO.jl) library:

JLSO.save("x.jlso", :data => x)

# Now we can load the file back into `y`:

y = JLSO.load("x.jlso")[:data]

eltype.(eachcol(y))

# ### JSONTables.jl
# Often you might need to read and write data stored in JSON format. JSONTables.jl provides a way to process them in row-oriented or column-oriented layout. We present both options below.

open(io -> arraytable(io, x), "x1.json", "w")

open(io -> objecttable(io, x), "x2.json", "w")

print(read("x1.json", String))

print(read("x2.json", String))

y1 = open(jsontable, "x1.json") |> DataFrame

eltype.(eachcol(y1))

y2 = open(jsontable, "x2.json") |> DataFrame

eltype.(eachcol(y2))

# ### Arrow.jl
# Finally, we use the Apache Arrow format, which allows, in particular, for data interchange with R or Python.

Arrow.write("x.arrow", x)

y = Arrow.Table("x.arrow") |> DataFrame

eltype.(eachcol(y))

# Note that the columns of `y` are immutable

y.A[1] = false

# This is because `Arrow.Table` uses memory mapping and thus uses custom vector types:

y.A

y.B

# You can get standard Julia Base vectors by copying a data frame

y2 = copy(y)

y2.A

y2.B

# ### Basic benchmarking
# Next, we'll create some files, so be careful that you don't already have these files in your working directory!
#
# In particular, we'll time how long it takes us to write a `DataFrame` with 10^5 rows and 500 columns.
bigdf = DataFrame(rand(Bool, 10^5, 500), :auto) bigdf[!, 1] = Int.(bigdf[!, 1]) bigdf[!, 2] = bigdf[!, 2] .+ 0.5 bigdf[!, 3] = string.(bigdf[!, 3], ", as string") println("First run") println("CSV.jl") csvwrite1 = @elapsed @time CSV.write("bigdf1.csv", bigdf) println("Serialization") serializewrite1 = @elapsed @time open(io -> serialize(io, bigdf), "bigdf.bin", "w") println("JDF.jl") jdfwrite1 = @elapsed @time savejdf("bigdf.jdf", bigdf) println("JLSO.jl") jlsowrite1 = @elapsed @time JLSO.save("bigdf.jlso", :data => bigdf) println("Arrow.jl") arrowwrite1 = @elapsed @time Arrow.write("bigdf.arrow", bigdf) println("JSONTables.jl arraytable") jsontablesawrite1 = @elapsed @time open(io -> arraytable(io, bigdf), "bigdf1.json", "w") println("JSONTables.jl objecttable") jsontablesowrite1 = @elapsed @time open(io -> objecttable(io, bigdf), "bigdf2.json", "w") println("Second run") println("CSV.jl") csvwrite2 = @elapsed @time CSV.write("bigdf1.csv", bigdf) println("Serialization") serializewrite2 = @elapsed @time open(io -> serialize(io, bigdf), "bigdf.bin", "w") println("JDF.jl") jdfwrite2 = @elapsed @time savejdf("bigdf.jdf", bigdf) println("JLSO.jl") jlsowrite2 = @elapsed @time JLSO.save("bigdf.jlso", :data => bigdf) println("Arrow.jl") arrowwrite2 = @elapsed @time Arrow.write("bigdf.arrow", bigdf) println("JSONTables.jl arraytable") jsontablesawrite2 = @elapsed @time open(io -> arraytable(io, bigdf), "bigdf1.json", "w") println("JSONTables.jl objecttable") jsontablesowrite2 = @elapsed @time open(io -> objecttable(io, bigdf), "bigdf2.json", "w") groupedbar( # Exclude JSONTables.jl arraytable due to timing repeat(["CSV.jl", "Serialization", "JDF.jl", "JLSO.jl", "Arrow.jl", "JSONTables.jl\nobjecttable"], inner = 2), [csvwrite1, csvwrite2, serializewrite1, serializewrite1, jdfwrite1, jdfwrite2, jlsowrite1, jlsowrite2, arrowwrite1, arrowwrite2, jsontablesowrite2, jsontablesowrite2], group = repeat(["1st", "2nd"], outer = 6), ylab = "Second", title = "Write Performance\nDataFrame: bigdf\nSize: $(size(bigdf))" ) data_files = ["bigdf1.csv", "bigdf.bin", "bigdf.arrow", "bigdf1.json", "bigdf2.json"] df = DataFrame(file = data_files, size = getfield.(stat.(data_files), :size)) append!(df, DataFrame(file = "bigdf.jdf", size=reduce((x,y)->x+y.size, stat.(joinpath.("bigdf.jdf", readdir("bigdf.jdf"))), init=0))) sort!(df, :size) @df df plot(:file, :size/1024^2, seriestype=:bar, title = "Format File Size (MB)", label="Size", ylab="MB") println("First run") println("CSV.jl") csvread1 = @elapsed @time CSV.read("bigdf1.csv", DataFrame) println("Serialization") serializeread1 = @elapsed @time open(deserialize, "bigdf.bin") println("JDF.jl") jdfread1 = @elapsed @time loadjdf("bigdf.jdf") println("JLSO.jl") jlsoread1 = @elapsed @time JLSO.load("bigdf.jlso") println("Arrow.jl") arrowread1 = @elapsed @time df_tmp = Arrow.Table("bigdf.arrow") |> DataFrame arrowread1copy = @elapsed @time copy(df_tmp) println("JSONTables.jl arraytable") jsontablesaread1 = @elapsed @time open(jsontable, "bigdf1.json") println("JSONTables.jl objecttable") jsontablesoread1 = @elapsed @time open(jsontable, "bigdf2.json") println("Second run") csvread2 = @elapsed @time CSV.read("bigdf1.csv", DataFrame) println("Serialization") serializeread2 = @elapsed @time open(deserialize, "bigdf.bin") println("JDF.jl") jdfread2 = @elapsed @time loadjdf("bigdf.jdf") println("JLSO.jl") jlsoread2 = @elapsed @time JLSO.load("bigdf.jlso") println("Arrow.jl") arrowread2 = @elapsed @time df_tmp = Arrow.Table("bigdf.arrow") |> DataFrame arrowread2copy = @elapsed 
@time copy(df_tmp) println("JSONTables.jl arraytable") jsontablesaread2 = @elapsed @time open(jsontable, "bigdf1.json") println("JSONTables.jl objecttable") jsontablesoread2 = @elapsed @time open(jsontable, "bigdf2.json"); groupedbar( repeat(["CSV.jl", "Serialization", "JDF.jl", "JLSO.jl", "Arrow.jl", "Arrow.jl\ncopy", "JSON\narraytable", "JSON\nobjecttable"], inner = 2), [csvread1, csvread2, serializeread1, serializeread2, jdfread1, jdfread2, jlsoread1, jlsoread2, arrowread1, arrowread2, arrowread1+arrowread1copy, arrowread2+arrowread2copy, jsontablesaread1, jsontablesaread2, jsontablesoread1, jsontablesoread2], group = repeat(["1st", "2nd"], outer = 8), ylab = "Second", title = "Read Performance\nDataFrame: bigdf\nSize: $(size(bigdf))" ) # ### Using gzip compression # A common user requirement is to be able to load and save CSV that are compressed using gzip. # Below we show how this can be accomplished using CodecZlib.jl. # The same pattern is applicable to JSONTables.jl compression/decompression. # # Again make sure that you do not have file named `df_compress_test.csv.gz` in your working directory # We first generate a random data frame df = DataFrame(rand(1:10, 10, 1000), :auto) # + # GzipCompressorStream comes from CodecZlib open("df_compress_test.csv.gz", "w") do io stream = GzipCompressorStream(io) CSV.write(stream, df) close(stream) end # - df2 = CSV.File(transcode(GzipDecompressor, Mmap.mmap("df_compress_test.csv.gz"))) |> DataFrame df == df2 # ### Using zip files # Sometimes you may have files compressed inside a zip file. # # In such a situation you may use [ZipFile.jl](https://github.com/fhs/ZipFile.jl) in conjunction an an appropriate reader to read the files. # Here we first create a ZIP file and then read back its contents into a `DataFrame`. df1 = DataFrame(rand(1:10, 3, 4), :auto) df2 = DataFrame(rand(1:10, 3, 4), :auto) # And we show yet another way to write a `DataFrame` into a CSV file # + # write a CSV file into the zip file w = ZipFile.Writer("x.zip") f1 = ZipFile.addfile(w, "x1.csv") write(f1, sprint(show, "text/csv", df1)) # write a second CSV file into zip file f2 = ZipFile.addfile(w, "x2.csv", method=ZipFile.Deflate) write(f2, sprint(show, "text/csv", df2)) close(w) # - # Now we read the CSV we have written: z = ZipFile.Reader("x.zip"); # find the index index of file called x1.csv index_xcsv = findfirst(x->x.name == "x1.csv", z.files) # to read the x1.csv file in the zip file df1_2 = CSV.read(read(z.files[index_xcsv]), DataFrame) df1_2 == df1 # find the index index of file called x2.csv index_xcsv = findfirst(x->x.name == "x2.csv", z.files) # to read the x2.csv file in the zip file df2_2 = CSV.read(read(z.files[index_xcsv]), DataFrame) df2_2 == df2 # Note that once you read a given file from `z` object its stream is all used-up (it is at its end). Therefore to read it again you need to close `z` and open it again. # # Also do not forget to close the zip file once done. close(z) # Finally, let's clean up. Do not run the next cell unless you are sure that it will not erase your important files. foreach(rm, ["x1.csv", "x.bin", "x.jlso", "x1.json", "x2.json", "bigdf1.csv", "bigdf.bin", "bigdf.jlso", "bigdf1.json", "bigdf2.json", "x.zip"]) rm("bigdf.jdf", recursive=true) rm("x.jdf", recursive=true) # Note that we did not remove `x.arrow` and `bigdf.arrow` and `df_compress_test.csv.gz` - you have to do it manually, as these files are memory mapped.
04_loadsave.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tensor # # [![](https://gitee.com/mindspore/docs/raw/master/docs/programming_guide/source_zh_cn/_static/logo_source.png)](https://gitee.com/mindspore/docs/blob/master/docs/programming_guide/source_zh_cn/tensor.ipynb)&emsp;[![](https://gitee.com/mindspore/docs/raw/master/docs/programming_guide/source_zh_cn/_static/logo_notebook.png)](https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/master/programming_guide/mindspore_tensor.ipynb)&emsp;[![](https://gitee.com/mindspore/docs/raw/master/docs/programming_guide/source_zh_cn/_static/logo_modelarts.png)](https://console.huaweicloud.com/modelarts/?region=cn-north-4#/notebook/loading?share-url-b64=aHR0cHM6Ly9vYnMuZHVhbHN0YWNrLmNuLW5vcnRoLTQubXlodWF3ZWljbG91ZC5jb20vbWluZHNwb3JlLXdlYnNpdGUvbm90ZWJvb2svbW9kZWxhcnRzL3Byb2dyYW1taW5nX2d1aWRlL21pbmRzcG9yZV90ZW5zb3IuaXB5bmI=&image_id=65f636a0-56cf-49df-b941-7d2a07ba8c8c) # ## 概述 # # 张量(Tensor)是MindSpore网络运算中的基本数据结构。张量中的数据类型可参考[dtype](https://www.mindspore.cn/doc/programming_guide/zh-CN/master/dtype.html)。 # # 不同维度的张量分别表示不同的数据,0维张量表示标量,1维张量表示向量,2维张量表示矩阵,3维张量可以表示彩色图像的RGB三通道等等。 # # > 本文中的所有示例,支持在PyNative模式下运行。 # ## 张量构造 # # 构造张量时,支持传入`Tensor`、`float`、`int`、`bool`、`tuple`、`list`和`NumPy.array`类型,其中`tuple`和`list`里只能存放`float`、`int`、`bool`类型数据。 # # `Tensor`初始化时,可指定dtype。如果没有指定dtype,初始值`int`、`float`、`bool`分别生成数据类型为`mindspore.int32`、`mindspore.float32`、`mindspore.bool_`的0维Tensor, # 初始值`tuple`和`list`生成的1维`Tensor`数据类型与`tuple`和`list`里存放的数据类型相对应,如果包含多种不同类型的数据,则按照优先级:`bool` < `int` < `float`,选择相对优先级最高类型所对应的mindspore数据类型。 # 如果初始值是`Tensor`,则生成的`Tensor`数据类型与其一致;如果初始值是`NumPy.array`,则生成的`Tensor`数据类型与之对应。 # # 代码样例如下: # + import numpy as np from mindspore import Tensor from mindspore import dtype as mstype x = Tensor(np.array([[1, 2], [3, 4]]), mstype.int32) y = Tensor(1.0, mstype.int32) z = Tensor(2, mstype.int32) m = Tensor(True, mstype.bool_) n = Tensor((1, 2, 3), mstype.int16) p = Tensor([4.0, 5.0, 6.0], mstype.float64) q = Tensor(p, mstype.float64) print(x, "\n\n", y, "\n\n", z, "\n\n", m, "\n\n", n, "\n\n", p, "\n\n", q) # - # ## 张量的属性和方法 # ### 属性 # # 张量的属性包括形状(shape)和数据类型(dtype)。 # # * 形状:`Tensor`的shape,是一个tuple。 # # * 数据类型:`Tensor`的dtype,是MindSpore的一个数据类型。 # # 代码样例如下: # + import numpy as np from mindspore import Tensor from mindspore import dtype as mstype x = Tensor(np.array([[1, 2], [3, 4]]), mstype.int32) x_shape = x.shape x_dtype = x.dtype print(x_shape, x_dtype) # - # ### 方法 # # 张量的方法包括`all`、`any`和`asnumpy`,`all`和`any`方法目前只支持Ascend,并且要求`Tensor`的数据类型是`mindspore.bool_`。 # # - `all(axis, keep_dims)`:在指定维度上通过`and`操作进行归约,`axis`代表归约维度,`keep_dims`表示是否保留归约后的维度。 # # - `any(axis, keep_dims)`:在指定维度上通过`or`操作进行归约,参数含义同`all`。 # # - `asnumpy()`:将`Tensor`转换为`NumPy`的`array`。 # # 代码样例如下: # + import numpy as np from mindspore import Tensor from mindspore import dtype as mstype x = Tensor(np.array([[True, True], [False, False]]), mstype.bool_) x_all = x.all() x_any = x.any() x_array = x.asnumpy() print(x_all, "\n\n", x_any, "\n\n", x_array)
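# As a small illustration of the type-priority rule described above (`bool` < `int` < `float`),
# constructing a `Tensor` from a list that mixes these Python types should resolve to a
# floating-point dtype. This is a hedged sketch based on the rule stated in this document;
# the exact default dtype may depend on the MindSpore version.

# +
from mindspore import Tensor

mixed = Tensor([True, 1, 2.5])
# per the priority rule above, the highest-priority element type (float) determines the dtype
print(mixed.dtype)
# -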
docs/programming_guide/source_zh_cn/tensor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Vertex AI: Qwik Start # ## Learning objectives # # * Train a TensorFlow model locally in a hosted [**Vertex Notebook**](https://cloud.google.com/vertex-ai/docs/general/notebooks?hl=sv). # * Create a [**managed Tabular dataset**](https://cloud.google.com/vertex-ai/docs/training/using-managed-datasets?hl=sv) artifact for experiment tracking. # * Containerize your training code with [**Cloud Build**](https://cloud.google.com/build) and push it to [**Google Cloud Artifact Registry**](https://cloud.google.com/artifact-registry). # * Run a [**Vertex AI custom training job**](https://cloud.google.com/vertex-ai/docs/training/custom-training) with your custom model container. # * Use [**Vertex TensorBoard**](https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview) to visualize model performance. # * Deploy your trained model to a [**Vertex Online Prediction Endpoint**](https://cloud.google.com/vertex-ai/docs/predictions/getting-predictions) for serving predictions. # * Request an online prediction and explanation and see the response. # ## Introduction: customer lifetime value (CLV) prediction with BigQuery and TensorFlow on Vertex AI # In this lab, you will use [BigQuery](https://cloud.google.com/bigquery) for data processing and exploratory data analysis and the [Vertex AI](https://cloud.google.com/vertex-ai) platform to train and deploy a custom TensorFlow Regressor model to predict customer lifetime value (CLV). The goal of the lab is to introduce to Vertex AI through a high value real world use case - predictive CLV. You will start with a local BigQuery and TensorFlow workflow that you may already be familiar with and progress toward training and deploying your model in the cloud with Vertex AI. # # ![Vertex AI](./images/vertex-ai-overview.png "Vertex AI Overview") # # Vertex AI is Google Cloud's next generation, unified platform for machine learning development and the successor to AI Platform announced at Google I/O in May 2021. By developing machine learning solutions on Vertex AI, you can leverage the latest ML pre-built components and AutoML to significantly enhance development productivity, the ability to scale your workflow and decision making with your data, and accelerate time to value. # ### Predictive CLV: how much monetary value existing customers will bring to the business in the future # # Predictive CLV is a high impact ML business use case. CLV is a customer's past value plus their predicted future value. The goal of predictive CLV is to predict how much monetary value a user will bring to the business in a defined future time range based on historical transactions. # # By knowing CLV, you can develop positive ROI strategies and make decisions about how much money to invest in acquiring new customers and retaining existing ones to grow revenue and profit. # # Once your ML model is a success, you can use the results to identify customers more likely to spend money than the others, and make them respond to your offers and discounts with a greater frequency. These customers, with higher lifetime value, are your main marketing target to increase revenue. 
# # By using the machine learning approach to predict your customers' value you will use in this lab, you can prioritize your next actions, such as the following: # # * Decide which customers to target with advertising to increase revenue. # * Identify which customer segments are most profitable and plan how to move customers from one segment to another. # # Your task is to predict the future value for existing customers based on their known transaction history. # # ![CLV](./images/clv-rfm.svg "Customer Lifetime Value") # Source: [Cloud Architecture Center - Predicting Customer Lifetime Value with AI Platform: training the models](https://cloud.google.com/architecture/clv-prediction-with-offline-training-train) # # There is a strong positive correlation between the recency, frequency, and amount of money spent on each purchase each customer makes and their CLV. Consequently, you will leverage these features to in your ML model. For this lab, they are defined as: # # * **Recency**: The time between the last purchase and today, represented by the distance between the rightmost circle and the vertical dotted line that's labeled "Now". # * **Frequency**: The time between purchases, represented by the distance between the circles on a single line. # * **Monetary**: The amount of money spent on each purchase, represented by the size of the circle. This amount could be the average order value or the quantity of products that the customer ordered. # ## Setup # ### Define constants # Add installed library dependencies to Python PATH variable. # PATH=%env PATH # %env PATH={PATH}:/home/jupyter/.local/bin # Retrieve and set PROJECT_ID and REGION environment variables. PROJECT_ID = !(gcloud config get-value core/project) PROJECT_ID = PROJECT_ID[0] REGION = 'us-central1' # Create a globally unique Google Cloud Storage bucket for artifact storage. GCS_BUCKET = f"{PROJECT_ID}-bucket" # !gsutil mb -l $REGION gs://$GCS_BUCKET # ### Import libraries # + import os import datetime import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt from google.cloud import aiplatform # - # ### Initialize the Vertex Python SDK client # Import the Vertex SDK for Python into your Python environment and initialize it. aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=f"gs://{GCS_BUCKET}") # ## Download and process the lab data into BigQuery # ### Dataset # # In this lab, you will use the publicly available [Online Retail data set](https://archive.ics.uci.edu/ml/datasets/online+retail) from the UCI Machine Learning Repository. This dataset contains 541,909 transnational customer transactions occuring between (YYYY-MM-DD) 2010-12-01 and 2011-12-09 for a UK-based and registered non-store retailer. The company primarily sells unique all-occasion gifts. Many of the company's customers are wholesalers. # # **Citation** # <NAME>. and <NAME>. (2017). UCI Machine Learning Repository http://archive.ics.uci.edu/ml. Irvine, CA: University of California, School of Information and Computer Science. # # This lab is also inspired by the Google Cloud Architect Guide Series [Predicting Customer Lifetime Value with AI Platform: introduction](https://cloud.google.com/architecture/clv-prediction-with-offline-training-intro). # ### Data ingestion # Execute the command below to ingest the lab data from the UCI Machine Learning repository into `Cloud Storage` and then upload to `BigQuery` for data processing. 
The data ingestion and processing scripts are available under the `utils` folder in the lab directory. # BigQuery constants. Please leave these unchanged. BQ_DATASET_NAME="online_retail" BQ_RAW_TABLE_NAME="online_retail_clv_raw" BQ_CLEAN_TABLE_NAME="online_retail_clv_clean" BQ_ML_TABLE_NAME="online_retail_clv_ml" BQ_URI=f"bq://{PROJECT_ID}.{BQ_DATASET_NAME}.{BQ_ML_TABLE_NAME}" # **Note**: this Python script will take about 2-3 min to download and process the lab data file. Follow along with logging output in the cell below. # !python utils/data_download.py \ # --PROJECT_ID={PROJECT_ID} \ # --GCS_BUCKET={GCS_BUCKET} \ # --BQ_RAW_TABLE_NAME={BQ_RAW_TABLE_NAME} \ # --BQ_CLEAN_TABLE_NAME={BQ_CLEAN_TABLE_NAME} \ # --BQ_ML_TABLE_NAME={BQ_ML_TABLE_NAME} \ # --URL="https://archive.ics.uci.edu/ml/machine-learning-databases/00352/Online Retail.xlsx" # ### Data processing # As is the case with many real-world datasets, the lab dataset required some cleanup for you to utilize this historical customer transaction data for predictive CLV. # # The following changes were applied: # # * Keep only records that have a Customer ID. # * Aggregate transactions by day from Invoices. # * Keep only records that have positive order quantities and monetary values. # * Aggregate transactions by Customer ID and compute recency, frequency, monetary features as well as the prediction target. # # **Features**: # - `customer_country` (CATEGORICAL): customer purchase country. # - `n_purchases` (NUMERIC): number of purchases made in feature window. (frequency) # - `avg_purchase_size` (NUMERIC): average unit purchase count in feature window. (monetary) # - `avg_purchase_revenue` (NUMERIC): average GBP purchase amount in in feature window. (monetary) # - `customer_age` (NUMERIC): days from first purchase in feature window. # - `days_since_last_purchase` (NUMERIC): days from the most recent purchase in the feature window. (recency) # # **Target**: # - `target_monetary_value_3M` (NUMERIC): customer revenue from the entire study window including feature and prediction windows. # # Note: this lab demonstrates a simple way to use a DNN predict customer 3-month ahead CLV monetary value based solely on the available dataset historical transaction history. Additional factors to consider in practice when using CLV to inform interventions include customer acquisition costs, profit margins, and discount rates to arrive at the present value of future customer cash flows. One of a DNN's benefits over traditional probabilistic modeling approaches is their ability to incorporate additional categorical and unstructured features; this is a great feature engineering opportunity to explore beyond this lab which just explores the RFM numeric features. # ## Exploratory data analysis (EDA) in BigQuery # Below you will use BigQuery from this notebook to do exploratory data analysis to get to know this dataset and identify opportunities for data cleanup and feature engineering. # ### Recency: how recently have customers purchased? # + # %%bigquery recency SELECT days_since_last_purchase FROM `online_retail.online_retail_clv_ml` # - recency.describe() recency.hist(bins=100); # From the chart, there are clearly a few different customer groups here such as loyal customers that have made purchases in the last few days as well as inactive customers that have not purchased in 250+ days. Using CLV predictions and insights, you can strategize on marketing and promotional interventions to improve customer purchase recency and re-active dormant customers. 
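# To build intuition for the recency/frequency/monetary features described above, here is a
# minimal, illustrative pandas sketch of RFM-style aggregation on a toy transactions table.
# The column names (`customer_id`, `order_date`, `revenue`) and the cutoff date are
# hypothetical and are not taken from the lab's `utils/data_download.py` processing script.

# +
import pandas as pd

toy_tx = pd.DataFrame({
    "customer_id": [1, 1, 2, 2, 2],
    "order_date": pd.to_datetime(
        ["2011-01-05", "2011-03-01", "2011-02-10", "2011-04-02", "2011-08-20"]),
    "revenue": [20.0, 35.5, 12.0, 60.0, 44.0],
})
cutoff = pd.Timestamp("2011-09-01")   # end of an illustrative feature window

rfm = (toy_tx.groupby("customer_id")
       .agg(n_purchases=("order_date", "count"),          # frequency
            avg_purchase_revenue=("revenue", "mean"),      # monetary
            first_purchase=("order_date", "min"),
            last_purchase=("order_date", "max")))
rfm["customer_age"] = (cutoff - rfm["first_purchase"]).dt.days
rfm["days_since_last_purchase"] = (cutoff - rfm["last_purchase"]).dt.days   # recency
rfm = rfm.drop(columns=["first_purchase", "last_purchase"])
print(rfm)
# -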
# ### Frequency: how often are customers purchasing? # + # %%bigquery frequency SELECT n_purchases FROM `online_retail.online_retail_clv_ml` # - frequency.describe() frequency.hist(bins=100); # From the chart and quantiles, you can see that half of the customers have less than or equal to only 2 purchases. You can also tell from the average purchases > median purchases and max purchases of 81 that there are customers, likely wholesalers, who have made significantly more purchases. This should have you already thinking about feature engineering opportunities such as bucketizing purchases and removing or clipping outlier customers. You can also explore alternative modeling strategies for CLV on new customers who have only made 1 purchase as the approach demonstrated in this lab will perform better on customers with more relationship transactional history. # ### Monetary: how much are customers spending? # + # %%bigquery monetary SELECT target_monetary_value_3M FROM `online_retail.online_retail_clv_ml` # - monetary.describe() monetary['target_monetary_value_3M'].plot(kind='box', title="Target Monetary Value 3M: wide range, long right tail distribution", grid=True); # From the chart and summary statistics, you can see there is a wide range in customer monetary value ranging from 2.90 to 268,478 GBP. Looking at the quantiles, it is clear there are a few outlier customers whose monetary value is greater than 3 standard deviations from the mean. With this small dataset, it is recommended to remove these outlier customer values to treat separately, change your model's loss function to be more resistant to outliers, log the target feature, or clip their values to a maximum threshold. You should also be revisiting your CLV business requirements to see if binning customer monetary value and reframing this as a ML classification problem would suit your needs. # ### Establish a simple model performance baseline # In order to evaluate the performance of your custom TensorFlow DNN Regressor model you will build in the next steps, it is a ML best practice to establish a simple performance baseline. Below is a simple SQL baseline that multiplies a customer's average purchase spent compounded by their daily purchase rate and computes standard regression metrics. # + # %%bigquery WITH day_intervals AS ( SELECT customer_id, DATE_DIFF(DATE('2011-12-01'), DATE('2011-09-01'), DAY) AS target_days, DATE_DIFF(DATE('2011-09-01'), MIN(order_date), DAY) AS feature_days, FROM `online_retail.online_retail_clv_clean` GROUP BY customer_id ), predicted_clv AS ( SELECT customer_id, AVG(avg_purchase_revenue) * (COUNT(n_purchases) * (1 + SAFE_DIVIDE(COUNT(target_days),COUNT(feature_days)))) AS predicted_monetary_value_3M, SUM(target_monetary_value_3M) AS target_monetary_value_3M FROM `online_retail.online_retail_clv_ml` LEFT JOIN day_intervals USING(customer_id) GROUP BY customer_id ) # Calculate overall baseline regression metrics. SELECT ROUND(AVG(ABS(predicted_monetary_value_3M - target_monetary_value_3M)), 2) AS MAE, ROUND(AVG(POW(predicted_monetary_value_3M - target_monetary_value_3M, 2)), 2) AS MSE, ROUND(SQRT(AVG(POW(predicted_monetary_value_3M - target_monetary_value_3M, 2))), 2) AS RMSE FROM predicted_clv # - # These baseline results provide further support for the strong impact of outliers. The extremely high MSE comes from the exponential penalty applied to missed predictions and the magnitude of error on a few predictions. 
# # Next, you should look to plot the baseline results to get a sense of opportunity areas for you ML model. # + # %%bigquery baseline WITH day_intervals AS ( SELECT customer_id, DATE_DIFF(DATE('2011-12-01'), DATE('2011-09-01'), DAY) AS target_days, DATE_DIFF(DATE('2011-09-01'), MIN(order_date), DAY) AS feature_days, FROM `online_retail.online_retail_clv_clean` GROUP BY customer_id ), predicted_clv AS ( SELECT customer_id, AVG(avg_purchase_revenue) * (COUNT(n_purchases) * (1 + SAFE_DIVIDE(COUNT(target_days),COUNT(feature_days)))) AS predicted_monetary_value_3M, SUM(target_monetary_value_3M) AS target_monetary_value_3M FROM `online_retail.online_retail_clv_ml` INNER JOIN day_intervals USING(customer_id) GROUP BY customer_id ) SELECT * FROM predicted_clv # - baseline.head() # + ax = baseline.plot(kind='scatter', x='predicted_monetary_value_3M', y='target_monetary_value_3M', title='Actual vs. Predicted customer 3-month monetary value', figsize=(5,5), grid=True) lims = [ np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes ] # now plot both limits against eachother ax.plot(lims, lims, 'k-', alpha=0.5, zorder=0) ax.set_aspect('equal') ax.set_xlim(lims) ax.set_ylim(lims); # - # ## Train a TensorFlow model locally # Now that you have a simple baseline to benchmark your performance against, train a TensorFlow Regressor to predict CLV. # + # %%bigquery SELECT data_split, COUNT(*) FROM `online_retail.online_retail_clv_ml` GROUP BY data_split # + # %%bigquery clv SELECT * FROM `online_retail.online_retail_clv_ml` # - clv_train = clv.loc[clv.data_split == 'TRAIN', :] clv_dev = clv.loc[clv.data_split == 'VALIDATE', :] clv_test = clv.loc[clv.data_split == 'TEST', :] # + # Model training constants. # Virtual epochs design pattern: # https://medium.com/google-cloud/ml-design-pattern-3-virtual-epochs-f842296de730 N_TRAIN_EXAMPLES = 2638 STOP_POINT = 20.0 TOTAL_TRAIN_EXAMPLES = int(STOP_POINT * N_TRAIN_EXAMPLES) BATCH_SIZE = 32 N_CHECKPOINTS = 10 STEPS_PER_EPOCH = (TOTAL_TRAIN_EXAMPLES // (BATCH_SIZE*N_CHECKPOINTS)) NUMERIC_FEATURES = [ "n_purchases", "avg_purchase_size", "avg_purchase_revenue", "customer_age", "days_since_last_purchase", ] LABEL = "target_monetary_value_3M" # - def df_dataset(df): """Transform Pandas Dataframe to TensorFlow Dataset.""" return tf.data.Dataset.from_tensor_slices((df[NUMERIC_FEATURES].to_dict('list'), df[LABEL].values)) trainds = df_dataset(clv_train).prefetch(1).batch(BATCH_SIZE).repeat() devds = df_dataset(clv_dev).prefetch(1).batch(BATCH_SIZE) testds = df_dataset(clv_test).prefetch(1).batch(BATCH_SIZE) # + def rmse(y_true, y_pred): """Custom RMSE regression metric.""" return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true))) def build_model(): """Build and compile a TensorFlow Keras Regressor.""" # Define input feature tensors and input layers. feature_columns = [ tf.feature_column.numeric_column(key=feature) for feature in NUMERIC_FEATURES ] input_layers = { feature.key: tf.keras.layers.Input(name=feature.key, shape=(), dtype=tf.float32) for feature in feature_columns } # Keras Functional API: https://keras.io/guides/functional_api inputs = tf.keras.layers.DenseFeatures(feature_columns, name='inputs')(input_layers) d1 = tf.keras.layers.Dense(256, activation=tf.nn.relu, name='d1')(inputs) d2 = tf.keras.layers.Dropout(0.2, name='d2')(d1) # Note: the single neuron output for regression. 
output = tf.keras.layers.Dense(1, name='output')(d2) model = tf.keras.Model(input_layers, output, name='online-retail-clv') optimizer = tf.keras.optimizers.Adam(0.001) # Note: MAE loss is more resistant to outliers than MSE. model.compile(loss=tf.keras.losses.MAE, optimizer=optimizer, metrics=[['mae', 'mse', rmse]]) return model model = build_model() # - tf.keras.utils.plot_model(model, show_shapes=True, rankdir="LR") # + tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir='./local-training/tensorboard', histogram_freq=1) earlystopping_callback = tf.keras.callbacks.EarlyStopping(patience=1) checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( filepath='./local-training/checkpoints', save_weights_only=True, monitor='val_loss', mode='min') # - history = model.fit(trainds, validation_data=devds, steps_per_epoch=STEPS_PER_EPOCH, epochs=N_CHECKPOINTS, callbacks=[[tensorboard_callback, earlystopping_callback, checkpoint_callback]]) # + LOSS_COLS = ["loss", "val_loss"] pd.DataFrame(history.history)[LOSS_COLS].plot(); # - train_pred = model.predict(df_dataset(clv_train).prefetch(1).batch(BATCH_SIZE)) dev_pred = model.predict(devds) test_pred = model.predict(testds) train_results = pd.DataFrame({'actual': clv_train['target_monetary_value_3M'].to_numpy(), 'predicted': np.squeeze(train_pred)}, columns=['actual', 'predicted']) dev_results = pd.DataFrame({'actual': clv_dev['target_monetary_value_3M'].to_numpy(), 'predicted': np.squeeze(dev_pred)}, columns=['actual', 'predicted']) test_results = pd.DataFrame({'actual': clv_test['target_monetary_value_3M'].to_numpy(), 'predicted': np.squeeze(test_pred)}, columns=['actual', 'predicted']) # + # Model prediction calibration plots. fig, (train_ax, dev_ax, test_ax) = plt.subplots(1, 3, figsize=(15,15)) train_results.plot(kind='scatter', x='predicted', y='actual', title='Train: act vs. pred customer 3M monetary value', grid=True, ax=train_ax) train_lims = [ np.min([train_ax.get_xlim(), train_ax.get_ylim()]), # min of both axes np.max([train_ax.get_xlim(), train_ax.get_ylim()]), # max of both axes ] train_ax.plot(train_lims, train_lims, 'k-', alpha=0.5, zorder=0) train_ax.set_aspect('equal') train_ax.set_xlim(train_lims) train_ax.set_ylim(train_lims) dev_results.plot(kind='scatter', x='predicted', y='actual', title='Dev: act vs. pred customer 3M monetary value', grid=True, ax=dev_ax) dev_lims = [ np.min([dev_ax.get_xlim(), dev_ax.get_ylim()]), # min of both axes np.max([dev_ax.get_xlim(), dev_ax.get_ylim()]), # max of both axes ] dev_ax.plot(dev_lims, dev_lims, 'k-', alpha=0.5, zorder=0) dev_ax.set_aspect('equal') dev_ax.set_xlim(dev_lims) dev_ax.set_ylim(dev_lims) test_results.plot(kind='scatter', x='predicted', y='actual', title='Test: act vs. pred customer 3M monetary value', grid=True, ax=test_ax) test_lims = [ np.min([test_ax.get_xlim(), test_ax.get_ylim()]), # min of both axes np.max([test_ax.get_xlim(), test_ax.get_ylim()]), # max of both axes ] test_ax.plot(test_lims, test_lims, 'k-', alpha=0.5, zorder=0) test_ax.set_aspect('equal') test_ax.set_xlim(test_lims) test_ax.set_ylim(test_lims); # - # You have trained a model better than your baseline. As indicated in the charts above, there is still additional feature engineering and data cleaning opportunities to improve your model's performance on customers with CLV. Some options include handling these customers as a separate prediction task, applying a log transformation to your target, clipping their value or dropping these customers all together to improve model performance. 
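# As a quick, hedged sanity check (not part of the original lab flow), you can compute the
# same MAE/MSE/RMSE metrics used for the SQL baseline on the local model's test-set
# predictions, using the `test_results` dataframe built above:

# +
import numpy as np

errors = test_results["predicted"] - test_results["actual"]
mae = np.mean(np.abs(errors))
mse = np.mean(errors ** 2)
rmse = np.sqrt(mse)
print(f"Test MAE: {mae:,.2f}  MSE: {mse:,.2f}  RMSE: {rmse:,.2f} (GBP)")
# -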
# # Now you will work through taking this local TensorFlow workflow to the cloud with Vertex AI. # ## Create a managed Tabular dataset from your BigQuery data source # [**Vertex AI managed datasets**](https://cloud.google.com/vertex-ai/docs/datasets/prepare-tabular) can be used to train AutoML models or custom-trained models. # # You will create a [**Tabular regression dataset**](https://cloud.google.com/vertex-ai/docs/datasets/bp-tabular) for managing the sharing and metadata for this lab's dataset stored in BigQuery. Managed datasets enable you to create a clear link between your data and custom-trained models, and provide descriptive statistics and automatic or manual splitting into train, test, and validation sets. # # In this lab, the data processing step already created a manual `data_split` column in our BQ ML table using [BigQuery's hashing functions](https://towardsdatascience.com/ml-design-pattern-5-repeatable-sampling-c0ccb2889f39) for repeatable sampling. tabular_dataset = aiplatform.TabularDataset.create(display_name="online-retail-clv", bq_source=f"{BQ_URI}") # ## Vertex AI custom ML model training workflow # There are two ways you can train a custom model on Vertex AI: # # Before you submit a custom training job, hyperparameter tuning job, or a training pipeline to Vertex AI, you need to create a Python training application or a custom container to define the training code and dependencies you want to run on Vertex AI. # # **1. Use a Google Cloud prebuilt container**: if you use a Vertex AI prebuilt container, you will write a Python `task.py` script or Python package to install into the container image that defines your code for training a custom model. See [Creating a Python training application for a pre-built container](https://cloud.google.com/vertex-ai/docs/training/create-python-pre-built-container) for more details on how to structure you Python code. Choose this option if a prebuilt container already contains the model training libraries you need such as `tensorflow` or `xgboost` and you are just doing ML training and prediction quickly. You can also specific additional Python dependencies to install through the `CustomTrainingJob(requirements=...` argument. # # **2. Use your own custom container image**: If you want to use your own custom container, you will write your Python training scripts and a Dockerfile that contains instructions on your ML model code, dependencies, and execution instructions. You will build your custom container with Cloud Build, whose instructions are specified in `cloudbuild.yaml` and publish your container to your Artifact Registry. Choose this option if you want to package your ML model code with dependencies together in a container to build toward running as part of a portable and scalable [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines/introduction) workflow. # ### Containerize your model training code # In the next 5 steps, you will proceed with **2. Use your own custom container image**. # # You will build your custom model container on top of a [Google Cloud Deep Learning container](https://cloud.google.com/vertex-ai/docs/general/deep-learning) that contains tested and optimized versions of model code dependencies such as `tensorflow` and the `google-cloud-bigquery` SDK. This also gives you flexibility and enables to manage and share your model container image with others for reuse and reproducibility across environments while also enabling you to incorporate additional packages for your ML application. 
Lastly, by packaging your ML model code together with dependencies you also have a MLOps onboarding path to Vertex Pipelines. # # You will walk through creating the following project structure for your ML mode code: # # ``` # |--/online-retail-clv-3M # |--/trainer # |--__init__.py # |--model.py # |--task.py # |--Dockerfile # |--cloudbuild.yaml # |--requirements.txt # ``` # #### 1. Write a `model.py` training script # First, you will take tidy up your local TensorFlow model training code from above into a training script. # # The biggest change is you will utilize the [TensorFlow IO](https://www.tensorflow.org/io/tutorials/bigquery) library to performantly read from BigQuery directly into your TensorFlow model graph during training. This will improve your training performance rather than performing the intermediate step of reading from BigQuery into a Pandas Dataframe done for expediency above. # this is the name of your model subdirectory you will write your model code to. It is already created in your lab directory. MODEL_NAME="online-retail-clv-3M" # + # %%writefile {MODEL_NAME}/trainer/model.py import os import logging import tempfile import tensorflow as tf from explainable_ai_sdk.metadata.tf.v2 import SavedModelMetadataBuilder from tensorflow.python.framework import dtypes from tensorflow_io.bigquery import BigQueryClient from tensorflow_io.bigquery import BigQueryReadSession # Model feature constants. NUMERIC_FEATURES = [ "n_purchases", "avg_purchase_size", "avg_purchase_revenue", "customer_age", "days_since_last_purchase", ] CATEGORICAL_FEATURES = [ "customer_country" ] LABEL = "target_monetary_value_3M" def caip_uri_to_fields(uri): """Helper function to parse BQ URI.""" # Remove bq:// prefix. uri = uri[5:] project, dataset, table = uri.split('.') return project, dataset, table def features_and_labels(row_data): """Helper feature and label mapping function for tf.data.""" label = row_data.pop(LABEL) features = row_data return features, label def read_bigquery(project, dataset, table): """TensorFlow IO BigQuery Reader.""" tensorflow_io_bigquery_client = BigQueryClient() read_session = tensorflow_io_bigquery_client.read_session( parent="projects/" + project, project_id=project, dataset_id=dataset, table_id=table, # Pass list of features and label to be selected from BQ. selected_fields=NUMERIC_FEATURES + [LABEL], # Provide output TensorFlow data types for features and label. output_types=[dtypes.int64, dtypes.float64, dtypes.float64, dtypes.int64, dtypes.int64] + [dtypes.float64], requested_streams=2) dataset = read_session.parallel_read_rows() transformed_ds = dataset.map(features_and_labels) return transformed_ds def rmse(y_true, y_pred): """Custom RMSE regression metric.""" return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true))) def build_model(hparams): """Build and compile a TensorFlow Keras DNN Regressor.""" feature_columns = [ tf.feature_column.numeric_column(key=feature) for feature in NUMERIC_FEATURES ] input_layers = { feature.key: tf.keras.layers.Input(name=feature.key, shape=(), dtype=tf.float32) for feature in feature_columns } # Keras Functional API: https://keras.io/guides/functional_api inputs = tf.keras.layers.DenseFeatures(feature_columns, name='inputs')(input_layers) d1 = tf.keras.layers.Dense(256, activation=tf.nn.relu, name='d1')(inputs) d2 = tf.keras.layers.Dropout(hparams['dropout'], name='d2')(d1) # Note: a single neuron scalar output for regression. 
output = tf.keras.layers.Dense(1, name='output')(d2) model = tf.keras.Model(input_layers, output, name='online-retail-clv') optimizer = tf.keras.optimizers.Adam(hparams['learning-rate']) # Note: MAE loss is more resistant to outliers than MSE. model.compile(loss=tf.keras.losses.MAE, optimizer=optimizer, metrics=[['mae', 'mse', rmse]]) return model def train_evaluate_explain_model(hparams): """Train, evaluate, explain TensorFlow Keras DNN Regressor. Args: hparams(dict): A dictionary containing model training arguments. Returns: history(tf.keras.callbacks.History): Keras callback that records training event history. """ training_ds = read_bigquery(*caip_uri_to_fields(hparams['training-data-uri'])).prefetch(1).shuffle(hparams['batch-size']*10).batch(hparams['batch-size']).repeat() eval_ds = read_bigquery(*caip_uri_to_fields(hparams['validation-data-uri'])).prefetch(1).shuffle(hparams['batch-size']*10).batch(hparams['batch-size']) test_ds = read_bigquery(*caip_uri_to_fields(hparams['test-data-uri'])).prefetch(1).shuffle(hparams['batch-size']*10).batch(hparams['batch-size']) model = build_model(hparams) logging.info(model.summary()) tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=hparams['tensorboard-dir'], histogram_freq=1) # Reduce overfitting and shorten training times. earlystopping_callback = tf.keras.callbacks.EarlyStopping(patience=2) # Ensure your training job's resilience to VM restarts. checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( filepath= hparams['checkpoint-dir'], save_weights_only=True, monitor='val_loss', mode='min') # Virtual epochs design pattern: # https://medium.com/google-cloud/ml-design-pattern-3-virtual-epochs-f842296de730 TOTAL_TRAIN_EXAMPLES = int(hparams['stop-point'] * hparams['n-train-examples']) STEPS_PER_EPOCH = (TOTAL_TRAIN_EXAMPLES // (hparams['batch-size']*hparams['n-checkpoints'])) history = model.fit(training_ds, validation_data=eval_ds, steps_per_epoch=STEPS_PER_EPOCH, epochs=hparams['n-checkpoints'], callbacks=[[tensorboard_callback, earlystopping_callback, checkpoint_callback]]) logging.info(model.evaluate(test_ds)) # Create a temp directory to save intermediate TF SavedModel prior to Explainable metadata creation. tmpdir = tempfile.mkdtemp() # Export Keras model in TensorFlow SavedModel format. model.save(tmpdir) # Annotate and save TensorFlow SavedModel with Explainable metadata to GCS. builder = SavedModelMetadataBuilder(tmpdir) builder.save_model_with_metadata(hparams['model-dir']) return history # - # #### 2. Write a `task.py` file as an entrypoint to your custom ML model container # + # %%writefile {MODEL_NAME}/trainer/task.py import os import argparse from trainer import model if __name__ == '__main__': parser = argparse.ArgumentParser() # Vertex custom container training args. These are set by Vertex AI during training but can also be overwritten. parser.add_argument('--model-dir', dest='model-dir', default=os.environ['AIP_MODEL_DIR'], type=str, help='Model dir.') parser.add_argument('--checkpoint-dir', dest='checkpoint-dir', default=os.environ['AIP_CHECKPOINT_DIR'], type=str, help='Checkpoint dir set during Vertex AI training.') parser.add_argument('--tensorboard-dir', dest='tensorboard-dir', default=os.environ['AIP_TENSORBOARD_LOG_DIR'], type=str, help='Tensorboard dir set during Vertex AI training.') parser.add_argument('--data-format', dest='data-format', default=os.environ['AIP_DATA_FORMAT'], type=str, help="Tabular data format set during Vertex AI training. 
E.g.'csv', 'bigquery'") parser.add_argument('--training-data-uri', dest='training-data-uri', default=os.environ['AIP_TRAINING_DATA_URI'], type=str, help='Training data GCS or BQ URI set during Vertex AI training.') parser.add_argument('--validation-data-uri', dest='validation-data-uri', default=os.environ['AIP_VALIDATION_DATA_URI'], type=str, help='Validation data GCS or BQ URI set during Vertex AI training.') parser.add_argument('--test-data-uri', dest='test-data-uri', default=os.environ['AIP_TEST_DATA_URI'], type=str, help='Test data GCS or BQ URI set during Vertex AI training.') # Model training args. parser.add_argument('--learning-rate', dest='learning-rate', default=0.001, type=float, help='Learning rate for optimizer.') parser.add_argument('--dropout', dest='dropout', default=0.2, type=float, help='Float percentage of DNN nodes [0,1] to drop for regularization.') parser.add_argument('--batch-size', dest='batch-size', default=16, type=int, help='Number of examples during each training iteration.') parser.add_argument('--n-train-examples', dest='n-train-examples', default=2638, type=int, help='Number of examples to train on.') parser.add_argument('--stop-point', dest='stop-point', default=10, type=int, help='Number of passes through the dataset during training to achieve convergence.') parser.add_argument('--n-checkpoints', dest='n-checkpoints', default=10, type=int, help='Number of model checkpoints to save during training.') args = parser.parse_args() hparams = args.__dict__ model.train_evaluate_explain_model(hparams) # - # #### 3. Write a `Dockerfile` for your custom ML model container # Third, you will write a `Dockerfile` that contains your model code as well as specifies your model code's dependencies. # # Notice the base image below is a [Google Cloud Deep Learning container](https://cloud.google.com/vertex-ai/docs/general/deep-learning) that contains tested and optimized versions of model code dependencies such as `tensorflow` and the `google-cloud-bigquery` SDK. # + # %%writefile {MODEL_NAME}/Dockerfile # Specifies base image and tag. # https://cloud.google.com/vertex-ai/docs/general/deep-learning # https://cloud.google.com/deep-learning-containers/docs/choosing-container FROM gcr.io/deeplearning-platform-release/tf2-cpu.2-3 # Sets the container working directory. WORKDIR /root # Copies the requirements.txt into the container to reduce network calls. COPY requirements.txt . # Installs additional packages. RUN pip3 install -U -r requirements.txt # Copies the trainer code to the docker image. COPY . /trainer # Sets the container working directory. WORKDIR /trainer # Sets up the entry point to invoke the trainer. ENTRYPOINT ["python", "-m", "trainer.task"] # - # ### 4. Write a `requirements.txt` file to specify additional ML code dependencies # These are additional dependencies for your model code outside the deep learning containers needed for prediction explainability and the BigQuery TensorFlow IO reader. # %%writefile {MODEL_NAME}/requirements.txt explainable-ai-sdk==1.3.0 tensorflow-io==0.15.0 pyarrow # #### 5. Use Cloud Build to build and submit your container to Google Cloud Artifact Registry # Next, you will use [Cloud Build](https://cloud.google.com/build) to build and upload your custom TensorFlow model container to [Google Cloud Artifact Registry](https://cloud.google.com/artifact-registry). # # Cloud Build brings reusability and automation to your ML experimentation by enabling you to reliably build, test, and deploy your ML model code as part of a CI/CD workflow. 
Artifact Registry provides a centralized repository for you to store, manage, and secure your ML container images. This will allow you to securely share your ML work with others and reproduce experiment results. # # **Note**: the initial build and submit step will take about 20 minutes but Cloud Build is able to take advantage of caching for subsequent builds. # #### Create Artifact Repository for custom container images ARTIFACT_REPOSITORY="online-retail-clv" # Create an Artifact Repository using the gcloud CLI. # !gcloud artifacts repositories create $ARTIFACT_REPOSITORY \ # --repository-format=docker \ # --location=$REGION \ # --description="Artifact registry for ML custom training images for predictive CLV" # #### Create `cloudbuild.yaml` instructions IMAGE_NAME="dnn-regressor" IMAGE_TAG="latest" IMAGE_URI=f"{REGION}-docker.pkg.dev/{PROJECT_ID}/{ARTIFACT_REPOSITORY}/{IMAGE_NAME}:{IMAGE_TAG}" # + cloudbuild_yaml = f"""steps: - name: 'gcr.io/cloud-builders/docker' args: [ 'build', '-t', '{IMAGE_URI}', '.' ] images: - '{IMAGE_URI}'""" with open(f"{MODEL_NAME}/cloudbuild.yaml", "w") as fp: fp.write(cloudbuild_yaml) # - # #### Build and submit your container image to your Artifact Repository # !gcloud builds submit --timeout=20m --config {MODEL_NAME}/cloudbuild.yaml {MODEL_NAME} # Now that your custom container is built and stored in your Artifact Registry, its time to train our model in the cloud with Vertex AI. # ## Run a custom training job on Vertex AI # ### 1. Create a Vertex Tensorboard instance for tracking your model experiments # [**Vertex TensorBoard**](https://cloud.google.com/vertex-ai/docs/experiments) is Google Cloud's managed version of open-source [**TensorBoard**](https://www.tensorflow.org/tensorboard) for ML experimental visualization. With Vertex TensorBoard you can track, visualize, and compare ML experiments and share them with your team. In addition to the powerful visualizations from open source TensorBoard, Vertex TensorBoard provides: # # * A persistent, shareable link to your experiment's dashboard. # * A searchable list of all experiments in a project. # * Integrations with Vertex AI services for model training evaluation. # !gcloud beta ai tensorboards create \ # --display-name=$MODEL_NAME --region=$REGION TENSORBOARD_RESOURCE_NAME= !(gcloud beta ai tensorboards list --region=$REGION --format="value(name)") TENSORBOARD_RESOURCE_NAME= TENSORBOARD_RESOURCE_NAME[1] TENSORBOARD_RESOURCE_NAME # ### 2. Run your custom container training job # Use the `CustomTrainingJob` class to define the job, which takes the following parameters specific to custom container training: # # * `display_name`: You user-defined name of this training pipeline. # * `container_uri`: The URI of your custom training container image. # * `model_serving_container_image_uri`: The URI of a container that can serve predictions for your model. You will use a Vertex prebuilt container. # # Use the `run()` function to start training, which takes the following parameters: # # * `replica_count`: The number of worker replicas. # * `model_display_name`: The display name of the Model if the script produces a managed Model. # * `machine_type`: The type of machine to use for training. # * `bigquery_destination`: The BigQuery URI where your created Tabular dataset gets written to. # * `predefined_split_column_name`: Since this lab leveraged BigQuery for data processing and splitting, this column is specified to indicate data splits. 
# # The run function creates a training pipeline that trains and creates a Vertex `Model` object. After the training pipeline completes, the `run()` function returns the `Model` object. # # Note: this `CustomContainerTrainingJob` will take about 20 minutes to provision resources and train your model. # command line args for trainer.task defined above. Review the 'help' argument for a description. # You will set the model training args below. Vertex AI will set the environment variables for training URIs. CMD_ARGS= [ "--learning-rate=" + str(0.001), "--batch-size=" + str(16), "--n-train-examples=" + str(2638), "--stop-point=" + str(10), "--n-checkpoints=" + str(10), "--dropout=" + str(0.2), ] # By setting BASE_OUTPUT_DIR, Vertex AI will set the environment variables AIP_MODEL_DIR, AIP_CHECKPOINT_DIR, AIP_TENSORBOARD_LOG_DIR # during training for your ML training code to write to. TIMESTAMP=datetime.datetime.now().strftime('%Y%m%d%H%M%S') BASE_OUTPUT_DIR= f"gs://{GCS_BUCKET}/vertex-custom-training-{MODEL_NAME}-{TIMESTAMP}" # + job = aiplatform.CustomContainerTrainingJob( display_name="online-retail-clv-3M-dnn-regressor", container_uri=IMAGE_URI, # https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers # gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest model_serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-3:latest", ) model = job.run( dataset=tabular_dataset, model_display_name=MODEL_NAME, # GCS custom job output dir. base_output_dir=BASE_OUTPUT_DIR, # the BQ Tabular dataset splits will be written out to their own BQ dataset for reproducibility. bigquery_destination=f"bq://{PROJECT_ID}", # this corresponds to the BigQuery data split column. predefined_split_column_name="data_split", # the model training command line arguments defined in trainer.task. args=CMD_ARGS, # Custom job WorkerPool arguments. replica_count=1, machine_type="c2-standard-4", # Provide your Tensorboard resource name to write Tensorboard logs during training. tensorboard=TENSORBOARD_RESOURCE_NAME, # Provide your Vertex custom training service account created during lab setup. service_account=f"<EMAIL>" ) # - # ### 3. Inspect model training performance with Vertex TensorBoard # You can view your model's logs on the Vertex AI [**Experiments tab**](https://console.cloud.google.com/vertex-ai/experiments) in the Cloud Console. Click the **Open Tensorboard** link. You will be asked to authenticate with your Qwiklabs Google account before a Vertex Tensorboard page opens in a browser tab. Once your model begins training, you will see your training evaluation metrics written to this dashboard that you can inspect during the training run as well as after the job completes. # # Note: Tensorboard provides a valuable debugging tool for inspecting your model's performance both during and after model training. This lab's model trains in less than a minute and sometimes completes before the logs finish appearing in Tensorboard. If that's the case, refresh the window when the training job completes to see your model's performance evaluation. # ## Serve your model with Vertex AI Prediction: online model predictions and explanations # You have a trained model in GCS now, lets transition to serving our model with Vertex AI Prediction for online model predictions and explanations. # ### 1. Build the Explanation Metadata and Parameters # [**Vertex Explainable AI**](https://cloud.google.com/vertex-ai/docs/explainable-ai) integrates feature attributions into Vertex AI. 
Vertex Explainable AI helps you understand your model's outputs for classification and regression tasks. Vertex AI tells you how much each feature in the data contributed to the predicted result. You can then use this information to verify that the model is behaving as expected, identify and mitigate biases in your models, and get ideas for ways to improve your model and your training data. # # You will retrieve these feature attributions to gain insight into your model's CLV predictions. # **IMPORTANT: (07/21/2021)** unfortunately there is a bug (b/193143812) with Explainable AI and Vertex AI Endpoints. Sigh, these tend to crawl out during demos and launches :) A fix is on its way 7/26/2021! The code and output is below commented out for your reading. We appreciate your patience and hope you enjoy the rest of the lab in the meantime. # + # DEPLOYED_MODEL_DIR = os.path.join(BASE_OUTPUT_DIR, 'model') # + # loaded = tf.keras.models.load_model(DEPLOYED_MODEL_DIR) # + # serving_input = list( # loaded.signatures["serving_default"].structured_input_signature[1].keys())[0] # serving_output = list(loaded.signatures["serving_default"].structured_outputs.keys())[0] # feature_names = [ # "n_purchases", # "avg_purchase_size", # "avg_purchase_revenue", # "customer_age", # "days_since_last_purchase" # ] # + # Specify sampled Shapley feature attribution method with path_count parameter # controlling the number of feature permutations to consider when approximating the Shapley values. # explain_params = aiplatform.explain.ExplanationParameters( # {"sampled_shapley_attribution": {"path_count": 10}} # ) # + # https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/ExplanationSpec # input_metadata = { # "input_tensor_name": serving_input, # "encoding": "BAG_OF_FEATURES", # "modality": "numeric", # "index_feature_mapping": feature_names, # } # output_metadata = {"output_tensor_name": serving_output} # input_metadata = aiplatform.explain.ExplanationMetadata.InputMetadata(input_metadata) # output_metadata = aiplatform.explain.ExplanationMetadata.OutputMetadata(output_metadata) # explain_metadata = aiplatform.explain.ExplanationMetadata( # inputs={"features": input_metadata}, outputs={"medv": output_metadata} # ) # - # ## Deploy a Vertex `Endpoint` for online predictions # Before you use your model to make predictions, you need to deploy it to an `Endpoint` object. When you deploy a model to an `Endpoint`, you associate physical (machine) resources with that model to enable it to serve online predictions. Online predictions have low latency requirements; providing resources to the model in advance reduces latency. You can do this by calling the deploy function on the `Model` resource. This will do two things: # # 1. Create an `Endpoint` resource for deploying the `Model` resource to. # 2. Deploy the `Model` resource to the `Endpoint` resource. # # The `deploy()` function takes the following parameters: # # * `deployed_model_display_name`: A human readable name for the deployed model. # * `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs. If only one model, then specify as { "0": 100 }, where "0" refers to this model being uploaded and 100 means 100% of the traffic. # * `machine_type`: The type of machine to use for training. # * `accelerator_type`: The hardware accelerator type. # * `accelerator_count`: The number of accelerators to attach to a worker replica. 
# * `starting_replica_count`: The number of compute instances to initially provision. # * `max_replica_count`: The maximum number of compute instances to scale to. In this lab, only one instance is provisioned. # * `explanation_parameters`: Metadata to configure the Explainable AI learning method. # * `explanation_metadata`: Metadata that describes your TensorFlow model for Explainable AI such as features, input and output tensors. # # Note: this can take about 5 minutes to provision prediction resources for your model. endpoint = model.deploy( traffic_split={"0": 100}, machine_type="n1-standard-2", # explanation_parameters=explain_params, # explanation_metadata=explain_metadata ) # ## Get an online prediction and explanation from deployed model # Finally, you will use your `Endpoint` to retrieve predictions and feature attributions. This is a customer instance retrieved from the test set. # actual: 3181.04 test_instance_dict = { "n_purchases": 2, "avg_purchase_size": 536.5, "avg_purchase_revenue": 1132.7, "customer_age": 123, "days_since_last_purchase": 32, } # To request predictions, you call the `predict()` method. endpoint.predict([test_instance_dict]) # To retrieve explanations (predictions + feature attributions), call the `explain()` method. # + # explanations = endpoint.explain([test_instance_dict]) # + # pd.DataFrame.from_dict(explanations.explanations[0].attributions[0].feature_attributions, orient='index').plot(kind='barh'); # - # Based on the feature attributions for this prediction, your model has learned that average purchase revenue and customer age had the largest marginal contribution in predicting this customer's monetary value over the 3-month test period. It also identified the relatively lengthy days since last purchase as negatively impacting the prediction. Using these insights, you can plan for an experiment to evaluate targeted marketing interventions for this repeat customer, such as volume discounts, to encourage this customer to purchase more frequently in order to drive additional revenue. # ## Next steps # Congratulations! In this lab, you walked through a machine learning experimentation workflow using Google Cloud's BigQuery for data storage and analysis and Vertex AI machine learning services to train and deploy a TensorFlow model to predict customer lifetime value. You progressed from training a TensorFlow model locally to training on the cloud with Vertex AI and leveraged several new unified platform capabilities such as Vertex TensorBoard and Explainable AI prediction feature attributions. # ## License # + # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
self-paced-labs/vertex-ai/vertex-ai-qwikstart/lab_exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 05 - Continuous Training # # After testing, compiling, and uploading the pipeline definition to Cloud Storage, the pipeline is executed with respect to a trigger. We use [Cloud Functions](https://cloud.google.com/functions) and [Cloud Pub/Sub](https://cloud.google.com/pubsub) as a triggering mechanism. The triggering can be scheduled using [Cloud Schedular](https://cloud.google.com/scheduler). The trigger source sends a message to a Cloud Pub/Sub topic that the Cloud Function listens to, and then it submits the pipeline to AI Platform Managed Pipelines to be executed. # # This notebook covers the following steps: # 1. Create the Cloud Pub/Sub topic. # 2. Deploy the Cloud Function # 3. Test triggering a pipeline. # 4. Extracting pipeline run metadata. # ## Setup # ### Import libraries # + import json import os import logging import tensorflow as tf import tfx import IPython logging.getLogger().setLevel(logging.INFO) print("Tensorflow Version:", tfx.__version__) # - # ### Setup Google Cloud project # + PROJECT = '[your-project-id]' # Change to your project id. REGION = 'us-central1' # Change to your region. BUCKET = 'ksalama-cloudml-us'#'[your-bucket-name]' # Change to your bucket name. if PROJECT == "" or PROJECT is None or PROJECT == "[your-project-id]": # Get your GCP project id from gcloud # shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT = shell_output[0] if BUCKET == "" or BUCKET is None or BUCKET == "[your-bucket-name]": # Get your bucket name to GCP projet id BUCKET = PROJECT print("Project ID:", PROJECT) print("Region:", REGION) print("Bucket name:", BUCKET) # - # ### Set configurations # + VERSION = 'v01' DATASET_DISPLAY_NAME = 'chicago-taxi-tips' MODEL_DISPLAY_NAME = f'{DATASET_DISPLAY_NAME}-classifier-{VERSION}' PIPELINE_NAME = f'{MODEL_DISPLAY_NAME}-train-pipeline' PIPELINES_STORE = f'gs://{BUCKET}/{DATASET_DISPLAY_NAME}/compiled_pipelines/' GCS_PIPELINE_FILE_LOCATION = os.path.join(PIPELINES_STORE, f'{PIPELINE_NAME}.json') PUBSUB_TOPIC = f'trigger-{PIPELINE_NAME}' CLOUD_FUNCTION_NAME = f'trigger-{PIPELINE_NAME}-fn' # - # !gsutil ls {GCS_PIPELINE_FILE_LOCATION} # ## 1. Create a Pub/Sub topic # !gcloud pubsub topics create {PUBSUB_TOPIC} # ## 2. Deploy the Cloud Function # + ENV_VARS=f"""\ PROJECT={PROJECT},\ REGION={REGION},\ GCS_PIPELINE_FILE_LOCATION={GCS_PIPELINE_FILE_LOCATION} """ # !echo {ENV_VARS} # - # !rm -r src/pipeline_triggering/.ipynb_checkpoints # !gcloud functions deploy {CLOUD_FUNCTION_NAME} \ # --region={REGION} \ # --trigger-topic={PUBSUB_TOPIC} \ # --runtime=python37 \ # --source=src/pipeline_triggering\ # --entry-point=trigger_pipeline\ # --stage-bucket={BUCKET}\ # --update-env-vars={ENV_VARS} cloud_fn_url = f"https://console.cloud.google.com/functions/details/{REGION}/{CLOUD_FUNCTION_NAME}" html = f'See the Cloud Function details <a href="{cloud_fn_url}" target="_blank">here</a>.' IPython.display.display(IPython.display.HTML(html)) # ## 3. 
Trigger the pipeline # + from google.cloud import pubsub publish_client = pubsub.PublisherClient() topic = f'projects/{PROJECT}/topics/{PUBSUB_TOPIC}' data = { 'num_epochs': 7, 'learning_rate': 0.0015, 'batch_size': 512, 'hidden_units': '256,126' } message = json.dumps(data) _ = publish_client.publish(topic, message.encode()) # - # Wait for a few seconds for the pipeline run to be submitted, then you can see the run in the Cloud Console # + from kfp.v2.google.client import AIPlatformClient pipeline_client = AIPlatformClient( project_id=PROJECT, region=REGION) job_display_name = pipeline_client.list_jobs()['pipelineJobs'][0]['displayName'] job_url = f"https://console.cloud.google.com/vertex-ai/locations/{REGION}/pipelines/runs/{job_display_name}" html = f'See the Pipeline job <a href="{job_url}" target="_blank">here</a>.' IPython.display.display(IPython.display.HTML(html)) # - # ## 4. Extracting pipeline runs metadata # + from google.cloud import aiplatform as vertex_ai pipeline_df = vertex_ai.get_pipeline_df(PIPELINE_NAME) pipeline_df = pipeline_df[pipeline_df.pipeline_name == PIPELINE_NAME] pipeline_df.T # -
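# The Cloud Function deployed in step 2 takes its source from `src/pipeline_triggering` with entry point `trigger_pipeline`; that file is not shown in this notebook. As a rough sketch only, assuming the same `AIPlatformClient` used above, the standard Pub/Sub-triggered function signature, and that the message fields map directly onto pipeline parameter values, such an entry point could look like the cell below (names and structure are assumptions, not the repository's actual code):

# +
import base64

def trigger_pipeline(event, context):
    """Pub/Sub-triggered entry point: submit the compiled pipeline with the received parameters."""
    # Environment variables set at deployment time via --update-env-vars.
    project = os.environ['PROJECT']
    region = os.environ['REGION']
    pipeline_spec = os.environ['GCS_PIPELINE_FILE_LOCATION']

    # The message published in step 3 carries the hyperparameter values as JSON.
    parameter_values = json.loads(base64.b64decode(event['data']).decode('utf-8'))

    client = AIPlatformClient(project_id=project, region=region)
    client.create_run_from_job_spec(
        job_spec_path=pipeline_spec,
        parameter_values=parameter_values)
# -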
05-continuous-training.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Deep $Q$-learning # # In this notebook, we'll build a neural network that can learn to play games through reinforcement learning. More specifically, we'll use $Q$-learning to train an agent to play a game called [Cart-Pole](https://gym.openai.com/envs/CartPole-v0). In this game, a freely swinging pole is attached to a cart. The cart can move to the left and right, and the goal is to keep the pole upright as long as possible. # # ![Cart-Pole](assets/cart-pole.jpg) # # We can simulate this game using [OpenAI Gym](https://github.com/openai/gym). First, let's check out how OpenAI Gym works. Then, we'll get into training an agent to play the Cart-Pole game. # + import gym import numpy as np # Create the Cart-Pole game environment env = gym.make('CartPole-v1') # Number of possible actions print('Number of possible actions:', env.action_space.n) # - # We interact with the simulation through `env`. You can see how many actions are possible from `env.action_space.n`, and to get a random action you can use `env.action_space.sample()`. Passing in an action as an integer to `env.step` will generate the next step in the simulation. This is general to all Gym games. # # In the Cart-Pole game, there are two possible actions, moving the cart left or right. So there are two actions we can take, encoded as 0 and 1. # # Run the code below to interact with the environment. # + actions = [] # actions that the agent selects rewards = [] # obtained rewards states = [] state = env.reset() while True: action = env.action_space.sample() # choose a random action state, reward, done, _ = env.step(action) rewards.append(reward) actions.append(action) states.append(state) if done: break # - # We can look at the actions and rewards: print('Actions:', actions) print('Rewards:', rewards) from pprint import pprint pprint(states) # The game resets after the pole has fallen past a certain angle. For each step while the game is running, it returns a reward of 1.0. The longer the game runs, the more reward we get. Then, our network's goal is to maximize the reward by keeping the pole vertical. It will do this by moving the cart to the left and the right. # # ## $Q$-Network # # To keep track of the action values, we'll use a neural network that accepts a state $s$ as input. The output will be $Q$-values for each available action $a$ (i.e., the output is **all** action values $Q(s,a)$ _corresponding to the input state $s$_). # # <img src="assets/q-network.png" width=550px> # # For this Cart-Pole game, the state has four values: the position and velocity of the cart, and the position and velocity of the pole. Thus, the neural network has **four inputs**, one for each value in the state, and **two outputs**, one for each possible action. # # As explored in the lesson, to get the training target, we'll first use the context provided by the state $s$ to choose an action $a$, then simulate the game using that action. This will get us the next state, $s'$, and the reward $r$. With that, we can calculate $\hat{Q}(s,a) = r + \gamma \max_{a'}{Q(s', a')}$. Then we update the weights by minimizing $(\hat{Q}(s,a) - Q(s,a))^2$. # # Below is one implementation of the $Q$-network. It uses two fully connected layers with ReLU activations. Two seems to be good enough, three might be better. Feel free to try it out. 
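# As a quick numeric check of that target formula, independent of the network code that follows, here is the same calculation on a tiny made-up batch with plain numpy. The numbers are purely illustrative; the training loop later in this notebook performs exactly this computation with values sampled from memory.

# +
gamma = 0.99                                 # future reward discount (same value used later)
rewards_batch = np.array([1.0, 1.0, 1.0])    # r for three transitions
next_Qs = np.array([[0.5, 0.7],              # Q(s', a') for both actions
                    [0.2, 0.1],
                    [0.0, 0.0]])             # terminal next state -> all-zero values, so target = r
targets = rewards_batch + gamma * np.max(next_Qs, axis=1)
print(targets)                               # approximately [1.693, 1.198, 1.0]
# -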
# + import tensorflow as tf class QNetwork: def __init__(self, learning_rate=0.01, state_size=4, action_size=2, hidden_size=10, name='QNetwork'): # state inputs to the Q-network with tf.variable_scope(name): self.inputs_ = tf.placeholder(tf.float32, [None, state_size], name='inputs') # One hot encode the actions to later choose the Q-value for the action self.actions_ = tf.placeholder(tf.int32, [None], name='actions') one_hot_actions = tf.one_hot(self.actions_, action_size) # Target Q values for training self.targetQs_ = tf.placeholder(tf.float32, [None], name='target') # ReLU hidden layers self.fc1 = tf.contrib.layers.fully_connected(self.inputs_, hidden_size) self.fc2 = tf.contrib.layers.fully_connected(self.fc1, hidden_size) # Linear output layer self.output = tf.contrib.layers.fully_connected(self.fc2, action_size, activation_fn=None) ### Train with loss (targetQ - Q)^2 # output has length 2, for two actions. This next line chooses # one value from output (per row) according to the one-hot encoded actions. self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1) self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q)) self.opt = tf.train.AdamOptimizer(learning_rate).minimize(self.loss) # - # ## Experience replay # # Reinforcement learning algorithms can have stability issues due to correlations between states. To reduce correlations when training, we can store the agent's experiences and later draw a random mini-batch of those experiences to train on. # # Here, we'll create a `Memory` object that will store our experiences, our transitions $<s, a, r, s'>$. This memory will have a maximum capacity, so we can keep newer experiences in memory while getting rid of older experiences. Then, we'll sample a random mini-batch of transitions $<s, a, r, s'>$ and train on those. # # Below, I've implemented a `Memory` object. If you're unfamiliar with `deque`, this is a double-ended queue. You can think of it like a tube open on both sides. You can put objects in either side of the tube. But if it's full, adding anything more will push an object out the other side. This is a great data structure to use for the memory buffer. # + from collections import deque class Memory(): def __init__(self, max_size=1000): self.buffer = deque(maxlen=max_size) def add(self, experience): self.buffer.append(experience) def sample(self, batch_size): idx = np.random.choice(np.arange(len(self.buffer)), size=batch_size, replace=False) return [self.buffer[ii] for ii in idx] # - # ## $Q$-Learning training algorithm # # We will use the below algorithm to train the network. For this game, the goal is to keep the pole upright for 195 frames. So we can start a new episode once meeting that goal. The game ends if the pole tilts over too far, or if the cart moves too far the left or right. When a game ends, we'll start a new episode. 
Now, to train the agent: # # * Initialize the memory $D$ # * Initialize the action-value network $Q$ with random weights # * **For** episode $\leftarrow 1$ **to** $M$ **do** # * Observe $s_0$ # * **For** $t \leftarrow 0$ **to** $T-1$ **do** # * With probability $\epsilon$ select a random action $a_t$, otherwise select $a_t = \mathrm{argmax}_a Q(s_t,a)$ # * Execute action $a_t$ in simulator and observe reward $r_{t+1}$ and new state $s_{t+1}$ # * Store transition $<s_t, a_t, r_{t+1}, s_{t+1}>$ in memory $D$ # * Sample random mini-batch from $D$: $<s_j, a_j, r_j, s'_j>$ # * Set $\hat{Q}_j = r_j$ if the episode ends at $j+1$, otherwise set $\hat{Q}_j = r_j + \gamma \max_{a'}{Q(s'_j, a')}$ # * Make a gradient descent step with loss $(\hat{Q}_j - Q(s_j, a_j))^2$ # * **endfor** # * **endfor** # # You are welcome (and encouraged!) to take the time to extend this code to implement some of the improvements that we discussed in the lesson, to include fixed $Q$ targets, double DQNs, prioritized replay, and/or dueling networks. # # ## Hyperparameters # # One of the more difficult aspects of reinforcement learning is the large number of hyperparameters. Not only are we tuning the network, but we're tuning the simulation. # + train_episodes = 1000 # max number of episodes to learn from max_steps = 200 # max steps in an episode gamma = 0.99 # future reward discount # Exploration parameters explore_start = 1.0 # exploration probability at start explore_stop = 0.01 # minimum exploration probability decay_rate = 0.0001 # exponential decay rate for exploration prob # Network parameters hidden_size = 64 # number of units in each Q-network hidden layer learning_rate = 0.0001 # Q-network learning rate # Memory parameters memory_size = 10000 # memory capacity batch_size = 20 # experience mini-batch size pretrain_length = batch_size # number experiences to pretrain the memory # - tf.reset_default_graph() mainQN = QNetwork(name='main', hidden_size=hidden_size, learning_rate=learning_rate) # ## Populate the experience memory # # Here we re-initialize the simulation and pre-populate the memory. The agent is taking random actions and storing the transitions in memory. This will help the agent with exploring the game. # + # Initialize the simulation env.reset() # Take one random step to get the pole and cart moving state, reward, done, _ = env.step(env.action_space.sample()) memory = Memory(max_size=memory_size) # Make a bunch of random actions and store the experiences for ii in range(pretrain_length): # Make a random action action = env.action_space.sample() next_state, reward, done, _ = env.step(action) if done: # The simulation fails so no next state next_state = np.zeros(state.shape) # Add experience to memory memory.add((state, action, reward, next_state)) # Start new episode env.reset() # Take one random step to get the pole and cart moving state, reward, done, _ = env.step(env.action_space.sample()) else: # Add experience to memory memory.add((state, action, reward, next_state)) state = next_state # - # ## Training # # Below we'll train our agent. 
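# Before launching the training loop below, it can help to sanity-check the exploration schedule implied by `explore_start`, `explore_stop`, and `decay_rate`. This short plot is purely illustrative and uses the exact decay formula from the loop:

# +
# %matplotlib inline
import matplotlib.pyplot as plt

steps = np.arange(0, 50000)
explore_p = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * steps)
plt.plot(steps, explore_p)
plt.xlabel('Step')
plt.ylabel('Exploration probability')
plt.title('Epsilon decay schedule');
# -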
# Now train with experiences saver = tf.train.Saver() rewards_list = [] with tf.Session() as sess: # Initialize variables sess.run(tf.global_variables_initializer()) step = 0 for ep in range(1, train_episodes): total_reward = 0 t = 0 while t < max_steps: step += 1 # Uncomment this next line to watch the training # env.render() # Explore or Exploit explore_p = explore_stop + (explore_start - explore_stop)*np.exp(-decay_rate*step) if explore_p > np.random.rand(): # Make a random action action = env.action_space.sample() else: # Get action from Q-network feed = {mainQN.inputs_: state.reshape((1, *state.shape))} Qs = sess.run(mainQN.output, feed_dict=feed) action = np.argmax(Qs) # Take action, get new state and reward next_state, reward, done, _ = env.step(action) total_reward += reward if done: # the episode ends so no next state next_state = np.zeros(state.shape) t = max_steps print('Episode: {}'.format(ep), 'Total reward: {}'.format(total_reward), 'Training loss: {:.4f}'.format(loss), 'Explore P: {:.4f}'.format(explore_p)) rewards_list.append((ep, total_reward)) # Add experience to memory memory.add((state, action, reward, next_state)) # Start new episode env.reset() # Take one random step to get the pole and cart moving state, reward, done, _ = env.step(env.action_space.sample()) else: # Add experience to memory memory.add((state, action, reward, next_state)) state = next_state t += 1 # Sample mini-batch from memory batch = memory.sample(batch_size) states = np.array([each[0] for each in batch]) actions = np.array([each[1] for each in batch]) rewards = np.array([each[2] for each in batch]) next_states = np.array([each[3] for each in batch]) # Train network target_Qs = sess.run(mainQN.output, feed_dict={mainQN.inputs_: next_states}) # Set target_Qs to 0 for states where episode ends episode_ends = (next_states == np.zeros(states[0].shape)).all(axis=1) target_Qs[episode_ends] = (0, 0) targets = rewards + gamma * np.max(target_Qs, axis=1) loss, _ = sess.run([mainQN.loss, mainQN.opt], feed_dict={mainQN.inputs_: states, mainQN.targetQs_: targets, mainQN.actions_: actions}) saver.save(sess, "checkpoints/cartpole.ckpt") # ## Visualizing training # # Below we plot the total rewards for each episode. The rolling average is plotted in blue. # + # %matplotlib inline import matplotlib.pyplot as plt def running_mean(x, N): cumsum = np.cumsum(np.insert(x, 0, 0)) return (cumsum[N:] - cumsum[:-N]) / N # - eps, rews = np.array(rewards_list).T smoothed_rews = running_mean(rews, 10) plt.plot(eps[-len(smoothed_rews):], smoothed_rews) plt.plot(eps, rews, color='grey', alpha=0.3) plt.xlabel('Episode') plt.ylabel('Total Reward') # ## Playing Atari Games # # So, Cart-Pole is a pretty simple game. However, the same model can be used to train an agent to play something much more complicated like Pong or Space Invaders. Instead of a state like we're using here though, you'd want to use convolutional layers to get the state from the screen images. # # ![Deep Q-Learning Atari](assets/atari-network.png) # # I'll leave it as a challenge for you to use deep Q-learning to train an agent to play Atari games. Here's the original paper which will get you started: http://www.davidqiu.com:8888/research/nature14236.pdf.
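# As a starting point for that challenge, the main structural change to `QNetwork` is the input: instead of a 4-value state vector, the network would take a stack of preprocessed screen frames and pass them through convolutional layers before the fully connected head. Below is a rough sketch in the same TF1 style used above; the layer sizes follow the Mnih et al. (2015) DQN paper, the frame preprocessing (grayscale, resizing, frame stacking) is omitted, and the one-hot action selection, loss, and optimizer would mirror `QNetwork`. Treat it as an outline under those assumptions, not a tested implementation.

# +
class ConvQNetwork:
    def __init__(self, learning_rate=0.0001, frame_shape=(84, 84, 4),
                 action_size=4, name='ConvQNetwork'):
        with tf.variable_scope(name):
            # A stack of 4 preprocessed 84x84 grayscale frames acts as the state
            self.inputs_ = tf.placeholder(tf.float32, [None, *frame_shape], name='inputs')

            # Convolutional feature extractor
            conv1 = tf.contrib.layers.conv2d(self.inputs_, 32, 8, stride=4)
            conv2 = tf.contrib.layers.conv2d(conv1, 64, 4, stride=2)
            conv3 = tf.contrib.layers.conv2d(conv2, 64, 3, stride=1)

            # Fully connected head producing one Q-value per action
            flat = tf.contrib.layers.flatten(conv3)
            fc1 = tf.contrib.layers.fully_connected(flat, 512)
            self.output = tf.contrib.layers.fully_connected(fc1, action_size,
                                                            activation_fn=None)
# -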
reinforcement/deepQ/Q-learning-cart.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/1_getting_started_roadmap/4_quick_prototyping_mode/2)%20Intro%20to%20quick%20prototyping%20using%20Pytorch%20backend.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Goals # # # ### You will learn about the elements of quick prototyping # # # ### You will study tranfer learning in least lines of code # # # ### You will learn how to use your own data for transfer learning # # Why quick prototyping # # - Every image classification projects starts with the basic step of trying out transfer learning # # - In transfer learning, you take a deep learning model trained on a very large dataset, # - then train it further on your small dataset # # - While doing this you need to select a lot of hyper-params # - First the model itself, like resnet or densenet, you can never be sure what to use # - Then dataset parameters such as batch size, input shape, etc # - Then model parameters such as freezing layers, not using pretrained models, et # - Then setting up which optimizer, loss function, learning rate scheduler, etc to select # - And finally the number of epochs to train on # # - Not everything can be done at the very first step, thus the quick prototyping mode. # # - Which allows you to # - set the model # - whether to use pretrained network or train from scratch # - number of epochs # # - All the other parameters are set to default as per their originl research papers # - then can be changed in Monk's intermediate and expert modes (will be dealt later) # # # # ## Elements of deep learning # - Training # - Validation # - Inferencing # # Table of Contents # # # ## [Install](#0) # # # ## [Importing pytorch backend](#1) # # # ## [Creating and Managing experiments](#2) # # # ## [Set dataset and select the model](#3) # # # ## [See what other models Monk's mxnet backend supports](#4) # # # ## [Train the classifier](#5) # # # ## [Run the classifier through validation dataset](#6) # # # ## [Running inference on test images](#7) # <a id='0'></a> # # Install Monk # ## Using pip (Recommended) # # - colab (gpu) # - All bakcends: `pip install -U monk-colab` # # # - kaggle (gpu) # - All backends: `pip install -U monk-kaggle` # # # - cuda 10.2 # - All backends: `pip install -U monk-cuda102` # - Gluon bakcned: `pip install -U monk-gluon-cuda102` # - Pytorch backend: `pip install -U monk-pytorch-cuda102` # - Keras backend: `pip install -U monk-keras-cuda102` # # # - cuda 10.1 # - All backend: `pip install -U monk-cuda101` # - Gluon bakcned: `pip install -U monk-gluon-cuda101` # - Pytorch backend: `pip install -U monk-pytorch-cuda101` # - Keras backend: `pip install -U monk-keras-cuda101` # # # - cuda 10.0 # - All backend: `pip install -U monk-cuda100` # - Gluon bakcned: `pip install -U monk-gluon-cuda100` # - Pytorch backend: `pip install -U monk-pytorch-cuda100` # - Keras backend: `pip install -U monk-keras-cuda100` # # # - cuda 9.2 # - All backend: `pip install -U monk-cuda92` # - Gluon bakcned: `pip install -U monk-gluon-cuda92` # - Pytorch backend: `pip install -U monk-pytorch-cuda92` # - Keras backend: `pip install -U monk-keras-cuda92` # # # - cuda 9.0 # - All 
backend: `pip install -U monk-cuda90` # - Gluon bakcned: `pip install -U monk-gluon-cuda90` # - Pytorch backend: `pip install -U monk-pytorch-cuda90` # - Keras backend: `pip install -U monk-keras-cuda90` # # # - cpu # - All backend: `pip install -U monk-cpu` # - Gluon bakcned: `pip install -U monk-gluon-cpu` # - Pytorch backend: `pip install -U monk-pytorch-cpu` # - Keras backend: `pip install -U monk-keras-cpu` # ## Install Monk Manually (Not recommended) # # ### Step 1: Clone the library # - git clone https://github.com/Tessellate-Imaging/monk_v1.git # # # # # ### Step 2: Install requirements # - Linux # - Cuda 9.0 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` # - Cuda 9.2 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` # - Cuda 10.0 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt` # - Cuda 10.1 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` # - Cuda 10.2 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` # - CPU (Non gpu system) # - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` # # # - Windows # - Cuda 9.0 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` # - Cuda 9.2 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` # - Cuda 10.0 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt` # - Cuda 10.1 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` # - Cuda 10.2 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` # - CPU (Non gpu system) # - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` # # # - Mac # - CPU (Non gpu system) # - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt` # # # - Misc # - Colab (GPU) # - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt` # - Kaggle (GPU) # - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` # # # # ### Step 3: Add to system path (Required for every terminal or kernel run) # - `import sys` # - `sys.path.append("monk_v1/");` # <a id='1'></a> # # Imports # + #Using pytorch backend # When installed using pip from monk.pytorch_prototype import prototype # When installed manually (Uncommen the following) #import os #import sys #sys.path.append("monk_v1/"); #sys.path.append("monk_v1/monk/"); #from monk.pytorch_prototype import prototype # - # <a id='2'></a> # # Creating and managing experiments # - Provide project name # - Provide experiment name # - For a specific data create a single project # - Inside each project multiple experiments can be created # - Every experiment can be have diferent hyper-parameters attached to it gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1"); # ### This creates files and directories as per the following structure # # # workspace # | # |--------sample-project-1 (Project name can be different) # | # | # |-----sample-experiment-1 (Experiment name can be different) # | # |-----experiment-state.json # | # |-----output # | # |------logs (All training logs and graphs saved here) # | # |------models (all trained models saved here) # # <a id='3'></a> # # Set dataset and select the model # ## Quick mode training # # - Using Default Function # - dataset_path # - model_name # - num_epochs # # # ## 
Dataset folder structure # # parent_directory # | # | # |------cats # | # |------img1.jpg # |------img2.jpg # |------.... (and so on) # |------dogs # | # |------img1.jpg # |------img2.jpg # |------.... (and so on) # ## Modifyable params # - dataset_path: path to data # - model_name: which pretrained model to use # - freeze_base_network: Retrain already trained network or not # - num_epochs: Number of epochs to train for # Download dataset import os if not os.path.isfile("datasets.zip"): os.system("! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt") if not os.path.isdir("datasets"): os.system("! unzip -qq datasets.zip") # + gtf.Default(dataset_path="datasets/dataset_cats_dogs_train", model_name="resnet18", freeze_base_network=True, num_epochs=5); #Read the summary generated once you run this cell. # - # ## From the summary above # # - Model Params # Model name: resnet18 # Use Gpu: True # Use pretrained: True # Freeze base network: True # <a id='4'></a> # # See what other models Monk's pytorch backend supports gtf.List_Models(); # <a id='5'></a> # # Train the classifier # + #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed # - # <a id='6'></a> # # Validating the trained classifier # ## Load the experiment in validation mode # - Set flag eval_infer as True gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1", eval_infer=True); # ## Load the validation dataset gtf.Dataset_Params(dataset_path="datasets/dataset_cats_dogs_eval"); gtf.Dataset(); # ## Run validation accuracy, class_based_accuracy = gtf.Evaluate(); # <a id='7'></a> # # Running inference on test images # ## Load the experiment in inference mode # - Set flag eval_infer as True gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1", eval_infer=True); # ## Select image and Run inference # + img_name = "datasets/dataset_cats_dogs_test/0.jpg"; predictions = gtf.Infer(img_name=img_name); #Display from IPython.display import Image Image(filename=img_name) # + img_name = "datasets/dataset_cats_dogs_test/90.jpg"; predictions = gtf.Infer(img_name=img_name); #Display from IPython.display import Image Image(filename=img_name) # - # # Goals Completed # # # ### Learn about the elements of quick prototyping # # # ### Study tranfer learning in least lines of code # # # ### Learn how to use your own data for transfer learning
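# To use your own data, all Monk needs is the folder layout shown in the "Dataset folder structure" section above: one sub-folder per class, each holding that class's images, which you then pass to `gtf.Default(dataset_path=...)`. A minimal stdlib sketch for arranging a flat folder of images into that layout is shown below; the paths and the filename-based labelling rule are purely illustrative assumptions.

# +
import os
import shutil

source_dir = "my_raw_images"          # flat folder of images (illustrative path)
target_dir = "datasets/my_dataset"    # becomes the parent_directory passed to gtf.Default()

for fname in os.listdir(source_dir):
    if not fname.lower().endswith((".jpg", ".jpeg", ".png")):
        continue
    # Assumption: the class name is encoded in the filename, e.g. "cat.123.jpg"
    label = fname.split(".")[0]
    os.makedirs(os.path.join(target_dir, label), exist_ok=True)
    shutil.copy(os.path.join(source_dir, fname),
                os.path.join(target_dir, label, fname))
# -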
study_roadmaps/1_getting_started_roadmap/4_quick_prototyping_mode/2) Intro to quick prototyping using Pytorch backend.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 1.7 &ndash; Steady State, Unsteady State/Transience
#
# ---
# ## 1.7.0 &ndash; Learning Objectives
#
# By the end of this section you should be able to:
#
# 1. Understand the difference between the steady-state and unsteady-state classifications.
# 2. Characterize the two classifications.
# 3. Understand the math in the classifications.
#
# ---
# ## 1.7.1 &ndash; Introduction
#
# Steady-state and unsteady-state descriptions characterize how a process behaves over a time interval. Steady-state refers to periods where the __variable of interest doesn't change__. Unsteady-state is when the variable of interest __changes over time__. While all processes have a steady-state and a __transient__ phase, certain processes are better evaluated at steady state and others in transience.
#
# __Note:__ Steady-state is often abbreviated __S.S__; you may see that written in problems. Transience is used __interchangeably__ with unsteady-state, and the two terms are used interchangeably in this section. You may also see problems using __U.S.S__.
#
# ---
# ## 1.7.2 &ndash; When to Use Steady State and Unsteady State
#
# Except for minor fluctuations, steady state indicates that there is no change in the variable of interest. A can of pop that has been sitting outside is at steady state: its temperature is no longer changing.
#
# As such, continuous flow reactors are best analyzed using steady-state analysis.
#
# Unsteady-state processes show a definite change over time. Boiling water takes room-temperature water and raises it to 100 &deg;C over a set time. Because of this, batch and fed-batch reactors are often analyzed using unsteady-state methods.
#
# The following section will cover how the analysis is carried out.
#
# ---
# ## 1.7.3 &ndash; Transient or Steady-State?
#
# For the following problems, determine whether a steady-state model or a transient model is the appropriate way to analyze the situation. You do not need to solve the problem.
#
# 1. A balloon deflating.
#
# 2. Putting a can of iced tea in an insulated cooler half filled with ice and water.
#
# 3. The direct impact of floodwater at the Site C dam.
#
#
# ### Answers
#
# 1. The unsteady-state model is the better way to model either the shrinking volume of the balloon or its pressure. At steady state, the balloon is fully deflated and that model carries no useful information.
#
# 2. The U.S.S (unsteady-state) model is appropriate for measuring the **rate** of heat being transferred from the can to the ice/water bath. The steady-state model would show the **total** heat that the iced tea contains.
#
# 3. Both models have merit in this situation. In transience, the rate at which the water floods the surrounding land is important, since it determines whether the force of the flooding is too strong. At steady state, the important quantity is the height of the floodwater, which shows the amount of land displaced.
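#
# ---
#
# As a closing note on learning objective 3, one common way to express the math behind these classifications (written here as a general illustration rather than course-specific notation) is a balance on the variable of interest $M$ in the system:
#
# $$\frac{dM}{dt} = \dot{M}_{in} - \dot{M}_{out} + \dot{M}_{gen} - \dot{M}_{cons}$$
#
# At steady state the accumulation term is zero, $\frac{dM}{dt} = 0$, so the equation becomes algebraic: what flows in, plus what is generated, equals what flows out plus what is consumed. In unsteady state $\frac{dM}{dt} \neq 0$, and the equation must be integrated over time, which is why batch processes such as the boiling-water example are analyzed with transient methods.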
Modules/Module-1-Proces-Basics-Water/1.7 - Steady-state-and-Unsteady-State-Transience.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # orphan: true # --- # + tags=["remove-input", "remove-output", "active-ipynb"] # try: # from openmdao.utils.notebook_utils import notebook_mode # except ImportError: # !python -m pip install openmdao[notebooks] # + tags=["remove-input", "remove-output"] from openmdao.utils.assert_utils import assert_near_equal import os if os.path.exists('cases.sql'): os.remove('cases.sql') # - # # Solver Recording # Solver recording is useful when you want to record the iterations within a solver. # The recorder can capture the values of states, errors, and residuals as the solver converges. # + tags=["remove-input"] import openmdao.api as om om.show_options_table("openmdao.solvers.solver.Solver", recording_options=True) # - # ```{note} # Note that the `excludes` option takes precedence over the `includes` option. # ``` # # ```{note} # The paths given in the `includes` and `excludes` options are relative to the `Group` that the solver is attached to. # ``` # # ```{note} # It is currently not possible to record linear solvers. # ```` # ## Solver Recording Example # + tags=["remove-input", "remove-output"] from openmdao.utils.notebook_utils import get_code from myst_nb import glue glue("code_src91", get_code("openmdao.test_suite.components.sellar.SellarDerivatives"), display=False) # - # :::{Admonition} `SellarDerivatives` class definition # :class: dropdown # # {glue:}`code_src91` # ::: # + import openmdao.api as om from openmdao.test_suite.components.sellar import SellarDerivatives prob = om.Problem(model=SellarDerivatives()) prob.setup() recorder = om.SqliteRecorder("cases.sql") solver = prob.model.nonlinear_solver solver.add_recorder(recorder) solver.recording_options['record_abs_error'] = True solver.options['use_apply_nonlinear'] = True prob.run_model() # - prob.cleanup() cr = om.CaseReader("cases.sql") solver_cases = cr.list_cases('root.nonlinear_solver') # + tags=["remove-input", "remove-output"] assert len(solver_cases) == 7 # - for case_id in solver_cases: print(cr.get_case(case_id).abs_err) # + tags=["remove-input", "remove-output"] assert_near_equal(cr.get_case(0).abs_err, 2.2545141, tolerance=1e-8)
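
# The iterations above only print the absolute error, but with the default `record_outputs` option the recorded cases also hold the state values at each solver iteration. A short sketch of pulling the Sellar states out of the final recorded case is below; variable access by promoted name (`case['y1']`) is the usual `Case` indexing, and the printed values depend on the run:

case = cr.get_case(solver_cases[-1])
print(case['y1'], case['y2'])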
openmdao/docs/openmdao_book/features/recording/solver_recording.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tile Coding # --- # # Tile coding is an innovative way of discretizing a continuous space that enables better generalization compared to a single grid-based approach. The fundamental idea is to create several overlapping grids or _tilings_; then for any given sample value, you need only check which tiles it lies in. You can then encode the original continuous value by a vector of integer indices or bits that identifies each activated tile. # # ### 1. Import the Necessary Packages # + # Import common libraries import sys import random import gym import numpy as np import matplotlib.pyplot as plt import pandas as pd # Set plotting options # %matplotlib inline plt.style.use('ggplot') np.set_printoptions(precision=3, linewidth=120) # - # ### 2. Specify the Environment, and Explore the State and Action Spaces # # We'll use [OpenAI Gym](https://gym.openai.com/) environments to test and develop our algorithms. These simulate a variety of classic as well as contemporary reinforcement learning tasks. Let's begin with an environment that has a continuous state space, but a discrete action space. # + # Create an environment env = gym.make('Acrobot-v1') env.seed(505); # Explore state (observation) space print("State space:", env.observation_space) print("- low:", env.observation_space.low) print("- high:", env.observation_space.high) # Explore action space print("Action space:", env.action_space) # - # Note that the state space is multi-dimensional, with most dimensions ranging from -1 to 1 (positions of the two joints), while the final two dimensions have a larger range. How do we discretize such a space using tiles? # # ### 3. Tiling # # Let's first design a way to create a single tiling for a given state space. This is very similar to a uniform grid! The only difference is that you should include an offset for each dimension that shifts the split points. # # For instance, if `low = [-1.0, -5.0]`, `high = [1.0, 5.0]`, `bins = (10, 10)`, and `offsets = (-0.1, 0.5)`, then return a list of 2 NumPy arrays (2 dimensions) each containing the following split points (9 split points per dimension): # # ``` # [array([-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7]), # array([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5])] # ``` # # Notice how the split points for the first dimension are offset by `-0.1`, and for the second dimension are offset by `+0.5`. This might mean that some of our tiles, especially along the perimeter, are partially outside the valid state space, but that is unavoidable and harmless. # + def create_tiling_grid(low, high, bins=(10, 10), offsets=(0.0, 0.0)): """Define a uniformly-spaced grid that can be used for tile-coding a space. Parameters ---------- low : array_like Lower bounds for each dimension of the continuous space. high : array_like Upper bounds for each dimension of the continuous space. bins : tuple Number of bins or tiles along each corresponding dimension. offsets : tuple Split points for each dimension should be offset by these values. Returns ------- grid : list of array_like A list of arrays containing split points for each dimension. 
""" # TODO: Implement this split_points = [] for start, end, num, offset in zip(low, high, bins, offsets): splits = np.linspace(start, end, num, endpoint=False) split_points.append(splits[1:] + offset) return split_points low = [-1.0, -5.0] high = [1.0, 5.0] create_tiling_grid(low, high, bins=(10, 10), offsets=(-0.1, 0.5)) # [test] # - # You can now use this function to define a set of tilings that are a little offset from each other. # + def create_tilings(low, high, tiling_specs): """Define multiple tilings using the provided specifications. Parameters ---------- low : array_like Lower bounds for each dimension of the continuous space. high : array_like Upper bounds for each dimension of the continuous space. tiling_specs : list of tuples A sequence of (bins, offsets) to be passed to create_tiling_grid(). Returns ------- tilings : list A list of tilings (grids), each produced by create_tiling_grid(). """ # TODO: Implement this tilings = [] for bins, offsets in tiling_specs: tilings.append(create_tiling_grid(low, high, bins, offsets)) return tilings # Tiling specs: [(<bins>, <offsets>), ...] tiling_specs = [((10, 10), (-0.066, -0.33)), ((10, 10), (0.0, 0.0)), ((10, 10), (0.066, 0.33))] tilings = create_tilings(low, high, tiling_specs) tilings # - # It may be hard to gauge whether you are getting desired results or not. So let's try to visualize these tilings. # + from matplotlib.lines import Line2D def visualize_tilings(tilings): """Plot each tiling as a grid.""" prop_cycle = plt.rcParams['axes.prop_cycle'] colors = prop_cycle.by_key()['color'] linestyles = ['-', '--', ':'] legend_lines = [] fig, ax = plt.subplots(figsize=(10, 10)) for i, grid in enumerate(tilings): for x in grid[0]: l = ax.axvline(x=x, color=colors[i % len(colors)], linestyle=linestyles[i % len(linestyles)], label=i) for y in grid[1]: l = ax.axhline(y=y, color=colors[i % len(colors)], linestyle=linestyles[i % len(linestyles)]) legend_lines.append(l) ax.grid('off') ax.legend(legend_lines, ["Tiling #{}".format(t) for t in range(len(legend_lines))], facecolor='white', framealpha=0.9) ax.set_title("Tilings") return ax # return Axis object to draw on later, if needed visualize_tilings(tilings); # - # Great! Now that we have a way to generate these tilings, we can next write our encoding function that will convert any given continuous state value to a discrete vector. # # ### 4. Tile Encoding # # Implement the following to produce a vector that contains the indices for each tile that the input state value belongs to. The shape of the vector can be the same as the arrangment of tiles you have, or it can be ultimately flattened for convenience. # # You can use the same `discretize()` function here from grid-based discretization, and simply call it for each tiling. # + def discretize(sample, grid): """Discretize a sample as per given grid. Parameters ---------- sample : array_like A single sample from the (original) continuous space. grid : list of array_like A list of arrays containing split points for each dimension. Returns ------- discretized_sample : array_like A sequence of integers with the same number of dimensions as sample. """ # TODO: Implement this return np.array([np.digitize(value, bins) for value, bins in zip(sample, grid)]) def tile_encode(sample, tilings, flatten=False): """Encode given sample using tile-coding. Parameters ---------- sample : array_like A single sample from the (original) continuous space. tilings : list A list of tilings (grids), each produced by create_tiling_grid(). 
flatten : bool If true, flatten the resulting binary arrays into a single long vector. Returns ------- encoded_sample : list or array_like A list of binary vectors, one for each tiling, or flattened into one. """ # TODO: Implement this encoding = np.array([discretize(sample, grid) for grid in tilings]) if flatten: encoding = encodings.flatten() return encoding # Test with some sample values samples = [(-1.2 , -5.1 ), (-0.75, 3.25), (-0.5 , 0.0 ), ( 0.25, -1.9 ), ( 0.15, -1.75), ( 0.75, 2.5 ), ( 0.7 , -3.7 ), ( 1.0 , 5.0 )] encoded_samples = [tile_encode(sample, tilings) for sample in samples] print("\nSamples:", repr(samples), sep="\n") print("\nEncoded samples:", repr(encoded_samples), sep="\n") # - # Note that we did not flatten the encoding above, which is why each sample's representation is a pair of indices for each tiling. This makes it easy to visualize it using the tilings. # + from matplotlib.patches import Rectangle def visualize_encoded_samples(samples, encoded_samples, tilings, low=None, high=None): """Visualize samples by activating the respective tiles.""" samples = np.array(samples) # for ease of indexing # Show tiling grids ax = visualize_tilings(tilings) # If bounds (low, high) are specified, use them to set axis limits if low is not None and high is not None: ax.set_xlim(low[0], high[0]) ax.set_ylim(low[1], high[1]) else: # Pre-render (invisible) samples to automatically set reasonable axis limits, and use them as (low, high) ax.plot(samples[:, 0], samples[:, 1], 'o', alpha=0.0) low = [ax.get_xlim()[0], ax.get_ylim()[0]] high = [ax.get_xlim()[1], ax.get_ylim()[1]] # Map each encoded sample (which is really a list of indices) to the corresponding tiles it belongs to tilings_extended = [np.hstack((np.array([low]).T, grid, np.array([high]).T)) for grid in tilings] # add low and high ends tile_centers = [(grid_extended[:, 1:] + grid_extended[:, :-1]) / 2 for grid_extended in tilings_extended] # compute center of each tile tile_toplefts = [grid_extended[:, :-1] for grid_extended in tilings_extended] # compute topleft of each tile tile_bottomrights = [grid_extended[:, 1:] for grid_extended in tilings_extended] # compute bottomright of each tile prop_cycle = plt.rcParams['axes.prop_cycle'] colors = prop_cycle.by_key()['color'] for sample, encoded_sample in zip(samples, encoded_samples): for i, tile in enumerate(encoded_sample): # Shade the entire tile with a rectangle topleft = tile_toplefts[i][0][tile[0]], tile_toplefts[i][1][tile[1]] bottomright = tile_bottomrights[i][0][tile[0]], tile_bottomrights[i][1][tile[1]] ax.add_patch(Rectangle(topleft, bottomright[0] - topleft[0], bottomright[1] - topleft[1], color=colors[i], alpha=0.33)) # In case sample is outside tile bounds, it may not have been highlighted properly if any(sample < topleft) or any(sample > bottomright): # So plot a point in the center of the tile and draw a connecting line cx, cy = tile_centers[i][0][tile[0]], tile_centers[i][1][tile[1]] ax.add_line(Line2D([sample[0], cx], [sample[1], cy], color=colors[i])) ax.plot(cx, cy, 's', color=colors[i]) # Finally, plot original samples ax.plot(samples[:, 0], samples[:, 1], 'o', color='r') ax.margins(x=0, y=0) # remove unnecessary margins ax.set_title("Tile-encoded samples") return ax visualize_encoded_samples(samples, encoded_samples, tilings); # - # Inspect the results and make sure you understand how the corresponding tiles are being chosen. Note that some samples may have one or more tiles in common. # # ### 5. 
#
# The next step is to design a special Q-table that is able to utilize this tile coding scheme. It should have the same kind of interface as a regular table, i.e. given a `<state, action>` pair, it should return a `<value>`. Similarly, it should also allow you to update the `<value>` for a given `<state, action>` pair (note that this should update all the tiles that `<state>` belongs to).
#
# The `<state>` supplied here is assumed to be from the original continuous state space, and `<action>` is discrete (an integer index). The Q-table should internally convert the `<state>` to its tile-coded representation when required.

# +
class QTable:
    """Simple Q-table."""

    def __init__(self, state_size, action_size):
        """Initialize Q-table.

        Parameters
        ----------
        state_size : tuple
            Number of discrete values along each dimension of state space.
        action_size : int
            Number of discrete actions in action space.
        """
        self.state_size = state_size
        self.action_size = action_size

        # TODO: Create Q-table, initialize all Q-values to zero
        # Note: If state_size = (9, 9), action_size = 2, q_table.shape should be (9, 9, 2)
        self.q_table = np.zeros(state_size + (action_size, ))
        print("QTable(): size =", self.q_table.shape)


class TiledQTable:
    """Composite Q-table with an internal tile coding scheme."""

    def __init__(self, low, high, tiling_specs, action_size):
        """Create tilings and initialize internal Q-table(s).

        Parameters
        ----------
        low : array_like
            Lower bounds for each dimension of state space.
        high : array_like
            Upper bounds for each dimension of state space.
        tiling_specs : list of tuples
            A sequence of (bins, offsets) to be passed to create_tilings() along with low, high.
        action_size : int
            Number of discrete actions in action space.
        """
        self.tilings = create_tilings(low, high, tiling_specs)
        self.state_sizes = [tuple(len(splits)+1 for splits in tiling_grid) for tiling_grid in self.tilings]
        self.action_size = action_size
        self.q_tables = [QTable(state_size, self.action_size) for state_size in self.state_sizes]
        print("TiledQTable(): no. of internal tables = ", len(self.q_tables))

    def get(self, state, action):
        """Get Q-value for given <state, action> pair.

        Parameters
        ----------
        state : array_like
            Vector representing the state in the original continuous space.
        action : int
            Index of desired action.

        Returns
        -------
        value : float
            Q-value of given <state, action> pair, averaged from all internal Q-tables.
        """
        # TODO: Encode state to get tile indices
        encoding = tile_encode(state, self.tilings)

        # TODO: Retrieve q-value for each tiling, and return their average
        q_values = []
        for idx, q_table in zip(encoding, self.q_tables):
            # Address a single table entry with a tuple of indices
            # (indexing with an integer array would trigger fancy indexing along axis 0)
            q_values.append(q_table.q_table[tuple(idx) + (action,)])
        return np.mean(q_values)

    def update(self, state, action, value, alpha=0.1):
        """Soft-update Q-value for given <state, action> pair to value.

        Instead of overwriting Q(state, action) with value, perform soft-update:
            Q(state, action) = alpha * value + (1.0 - alpha) * Q(state, action)

        Parameters
        ----------
        state : array_like
            Vector representing the state in the original continuous space.
        action : int
            Index of desired action.
        value : float
            Desired Q-value for <state, action> pair.
        alpha : float
            Update factor to perform soft-update, in [0.0, 1.0] range.
        """
        # TODO: Encode state to get tile indices
        encoding = tile_encode(state, self.tilings)

        # TODO: Update q-value for each tiling by update factor alpha
        for idx, q_table in zip(encoding, self.q_tables):
            current = q_table.q_table[tuple(idx) + (action,)]
            new = alpha * value + (1.0 - alpha) * current
            q_table.q_table[tuple(idx) + (action,)] = new


# Test with a sample Q-table
tq = TiledQTable(low, high, tiling_specs, 2)
s1, s2 = samples[3], samples[4]
a = 0; q = 1.0
print("[GET] Q({}, {}) = {}".format(s1, a, tq.get(s1, a)))  # check value at sample = s1, action = a
print("[UPDATE] Q({}, {}) = {}".format(s2, a, q)); tq.update(s2, a, q)  # update value for sample with some common tile(s)
print("[GET] Q({}, {}) = {}".format(s1, a, tq.get(s1, a)))  # check value again, should be slightly updated
# -

# If you update the q-value for a particular state (say, `(0.25, -1.91)`) and action (say, `0`), then you should notice the q-value of a nearby state (e.g. `(0.15, -1.75)` and same action) has changed as well! This is how tile-coding is able to generalize values across the state space better than a single uniform grid.

# ### 6. Implement a Q-Learning Agent using Tile-Coding
#
# Now it's your turn to apply this discretization technique to design and test a complete learning agent!

# #### 6.1 Agent with random action

# +
def random_agent(env):
    state = env.reset()
    score = 0
    for _ in range(100):
        action = env.action_space.sample()
        env.render()
        next_state, reward, done, info = env.step(action)
        score += reward
        state = next_state
        if done:
            break
    env.close()


random_agent(env)
# -

# #### 6.2 QLearningAgent

class QLearningAgent():

    def __init__(
        self, env, tiling_specs, gamma=1.0,
        alpha=0.01, epsilon_init=1.0, epsilon_decay=0.999, epsilon_min=0.001
    ):
        # environments
        self.env = env
        self.gamma = gamma
        self.alpha = alpha
        self.epsilon = None
        self.epsilon_init = epsilon_init
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min

        # Tiled Q table
        low = self.env.observation_space.low
        high = self.env.observation_space.high
        self.nA = self.env.action_space.n
        assert len(low) == len(tiling_specs[0][0])  # tiling has same dimension as state_space
        self.tiled_q_table = TiledQTable(low, high, tiling_specs, self.nA)

    def reset_episode(self):
        # update epsilon
        if self.epsilon is None:
            self.epsilon = self.epsilon_init
        else:
            self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)

        # reset the environment
        state = self.env.reset()
        return state

    def pick_action(self, state):
        actions = np.arange(self.nA)
        action_values = [self.tiled_q_table.get(state, a) for a in actions]
        if random.random() > self.epsilon:
            return np.argmax(action_values)
        else:
            return random.choice(actions)

    def update_q(self, state, action, reward, next_state=None):
        max_q = 0
        if next_state is not None:
            action_values = [self.tiled_q_table.get(next_state, a) for a in np.arange(self.nA)]
            max_q = max(action_values)
        approximate_return = reward + self.gamma * max_q
        self.tiled_q_table.update(state, action, approximate_return, self.alpha)


# Let's define the tiling specs and initialize the agent

# +
# Tiling specs: [(<bins>, <offsets>), ...]
observation_space_dimension = len(env.observation_space.low)

# set tiling specs, this part is learned from the solution
n_bins = 5
offset = (env.observation_space.high - env.observation_space.low) / (3 * n_bins)

new_tiling_specs = [
    ((n_bins, ) * observation_space_dimension, -offset),
    ((n_bins, ) * observation_space_dimension, np.zeros(observation_space_dimension)),
    ((n_bins, ) * observation_space_dimension, offset)
]

agent = QLearningAgent(env, new_tiling_specs)
# -

# Now let's train the agent

# %pdb

# +
def train_q_learning_agent(agent, episodes=10000):
    scores = []
    max_ave_score = -np.inf
    for i in range(1, episodes+1):
        if len(scores) > 100:
            ave_score = np.mean(scores[-100:])
            max_ave_score = max(ave_score, max_ave_score)
        if i % 100 == 0:
            print(f'\rEpisode: {i}/{episodes} | Max Average Score: {max_ave_score}', end='')
            sys.stdout.flush()
        score = 0
        state = agent.reset_episode()
        while True:
            action = agent.pick_action(state)
            next_state, reward, done, info = agent.env.step(action)
            score += reward
            agent.update_q(state, action, reward, next_state)
            state = next_state  # advance to the next state before picking the next action
            if done:
                break
        scores.append(score)
    return scores


scores = train_q_learning_agent(agent)


# +
def plot_scores(scores, rolling_window=100):
    """Plot scores and optional rolling mean using specified window."""
    plt.plot(scores); plt.title("Scores");
    rolling_mean = pd.Series(scores).rolling(rolling_window).mean()
    plt.plot(rolling_mean);
    return rolling_mean


rolling_mean = plot_scores(scores)
# -

# Now let's test the agent

# +
def test_q_learning_agent(agent):
    score = 0
    state = agent.reset_episode()
    while True:
        action = agent.pick_action(state)
        env.render()
        next_state, reward, done, info = agent.env.step(action)
        score += reward
        state = next_state
        if done:
            break
    env.close()
    print(f'Final Score: {score}')


test_q_learning_agent(agent)
# -
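
# As a quick sanity check on memory usage, here is a small sketch (not part of the original exercise; the helper name `count_q_entries` is ours) that counts how many Q-values the tiled table stores in total across its internal per-tiling tables.

# +
def count_q_entries(tiled_q_table):
    """Total number of stored Q-values across all internal per-tiling tables."""
    return int(sum(np.prod(q.q_table.shape) for q in tiled_q_table.q_tables))


print("Total Q-entries:", count_q_entries(agent.tiled_q_table))
# -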
tile-coding/Tile_Coding.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 1D TDEM inversion using the cylindrical mesh # # In this example, we perform a 1D time domain electromagnetic inverion using a cylindrical mesh. # + from SimPEG import Mesh, Maps, Utils, DataMisfit, Regularization, Optimization, Inversion, InvProblem, Directives import numpy as np from SimPEG.EM import FDEM, TDEM, mu_0 import matplotlib.pyplot as plt import matplotlib try: from pymatsolver import Pardiso as Solver except ImportError: from SimPEG import SolverLU as Solver # %matplotlib inline # - from matplotlib import rcParams rcParams['font.size'] = 14 # # Set up cylindrically symmeric mesh cs, ncx, ncz, npad = 20., 8, 15, 10 # padded cyl mesh hx = [(cs, ncx), (cs, npad, 1.3)] hz = [(cs, npad, -1.3), (cs, ncz), (cs, npad, 1.3)] mesh = Mesh.CylMesh([hx, 1, hz], '00C') mesh.x0 = np.r_[0., 0., -mesh.hz[:npad+ncz].sum()] mesh.plotGrid() # # Conductivity model sig_half = 1e-2 # Half-space conductivity sig_air = 1e-8 # Air conductivity sig_layer = 5e-2 # Layer conductivity # + layerz = np.r_[-200., -100.] layer = (mesh.vectorCCz >= layerz[0]) & (mesh.vectorCCz <= layerz[1]) active = mesh.vectorCCz < 0. sigma = np.ones(mesh.nCz)*sig_air sigma[active] = sig_half sigma[layer] = sig_layer # - fig, ax = plt.subplots(1,1, figsize = (4, 6)) ax.semilogx(sigma, mesh.vectorCCz) ax.grid(which = 'major', linestyle = '-', linewidth=0.2) ax.set_title('conductivity') ax.set_xlabel('conductivity ($\sigma$)') ax.set_ylabel('z (m)') # # Mapping actMap = Maps.InjectActiveCells(mesh, active, np.log(1e-8), nC=mesh.nCz) mapping = Maps.ExpMap(mesh) * Maps.SurjectVertical1D(mesh) * actMap mtrue = np.log(sigma[active]) fig, ax = plt.subplots(1,1, figsize = (4, 6)) ax.plot(mtrue, mesh.vectorCCz[active]) ax.grid(which = 'major', linestyle = '-', linewidth=0.2) ax.set_title('model') ax.set_xlabel('log($\sigma$)') ax.set_ylabel('z (m)') # # Survey # + # TDEM survey rxlocs = np.atleast_2d(np.r_[0., 0., 0.]) srcLoc = np.r_[0., 0., 0.] times = np.logspace(-4, np.log10(2e-3), 15) # print('min diffusion distance ', 1.28*np.sqrt(times.min()/(sig_half*mu_0)), # 'max diffusion distance ', 1.28*np.sqrt(times.max()/(sig_half*mu_0))) rx = TDEM.Rx.Point_dbdt(rxlocs, times, 'z') src = TDEM.Src.MagDipole( [rx], waveform=TDEM.Src.StepOffWaveform(), loc=srcLoc # same src location as FDEM problem ) # - # # Problem # + surveyTD = TDEM.Survey([src]) prbTD = TDEM.Problem3D_e(mesh, sigmaMap=mapping, Solver=Solver) prbTD.timeSteps = [(5e-6, 10), (1e-5, 20), (5e-5, 20), (1e-4, 8)] prbTD.pair(surveyTD) # - # %%time std = 0.03 surveyTD.makeSyntheticData(mtrue, std) surveyTD.std = std surveyTD.eps = np.linalg.norm(surveyTD.dtrue)*1e-5 # + fig, ax = plt.subplots(1, 2, figsize=(10, 4)) ax[0].semilogy(times, np.abs(surveyTD.dobs), 'o') ax[0].grid(which = 'both', alpha=0.5) ax[0].set_xlabel('time (s)') ax[0].set_ylabel('db / dt (V)') ax[1].loglog(times, np.abs(surveyTD.dobs), 'o') ax[1].grid(which = 'both', alpha=0.5) ax[1].set_xlabel('time (s)') ax[1].set_ylabel('db / dt (V)') plt.tight_layout() # - # # set up the inversion # Inversion Directives beta = Directives.BetaSchedule(coolingFactor=4, coolingRate=3) betaest = Directives.BetaEstimate_ByEig(beta0_ratio=2.) 
target = Directives.TargetMisfit() directiveList = [beta, betaest, target] # + dmisfit = DataMisfit.l2_DataMisfit(surveyTD) regMesh = Mesh.TensorMesh([mesh.hz[mapping.maps[-1].indActive]]) reg = Regularization.Simple(regMesh) reg.alpha_s = 1e-1 reg.alpha_x = 1. # - opt = Optimization.InexactGaussNewton(maxIterCG=10) invProb = InvProblem.BaseInvProblem(dmisfit, reg, opt) # # Run the inversion # + inv = Inversion.BaseInversion(invProb, directiveList=directiveList) m0 = np.log(np.ones(mtrue.size)*sig_half) prbTD.counter = opt.counter = Utils.Counter() opt.remember('xc') # - # %%time moptTD = inv.run(m0) # # Plot the results # + plt.figure(figsize=(10, 8), dpi=250) ax0 = plt.subplot2grid((2, 2), (0, 0), rowspan=2) ax1 = plt.subplot2grid((2, 2), (0, 1)) ax2 = plt.subplot2grid((2, 2), (1, 1)) fs = 13 # fontsize matplotlib.rcParams['font.size'] = fs # Plot the model ax0.semilogx(sigma[active], mesh.vectorCCz[active], 'k-', lw=2) ax0.semilogx(np.exp(moptTD), mesh.vectorCCz[active], 'r*', ms=10) ax0.set_ylim(-700, 0) ax0.set_xlim(5e-3, 1e-1) ax0.set_xlabel('Conductivity (S/m)', fontsize=fs) ax0.set_ylabel('Depth (m)', fontsize=fs) ax0.grid( which='both', color='k', alpha=0.5, linestyle='-', linewidth=0.2 ) ax0.legend(['True', 'TDEM'], fontsize=fs, loc=4) # plot the data misfits - negative b/c we choose positive to be in the # direction of primary dpred = surveyTD.dpred(moptTD) ax1.loglog(times, np.absolute(surveyTD.dobs), 'k-', lw=2) ax1.loglog(times, np.absolute(dpred), 'r*', ms=10) ax1.set_xlim(times.min(), times.max()) # plot the difference ax2.loglog(times, np.abs(dpred-surveyTD.dobs), 'bo') ax2.set_xlim(times.min(), times.max()) ax2.grid(which='both', alpha=0.5, linestyle='-', linewidth=0.2) ax2.set_xlabel('Time (s)', fontsize=fs) ax2.set_title('(c) |dobs - dpred|', fontsize=fs) # Labels, gridlines, etc ax1.grid(which='both', alpha=0.5, linestyle='-', linewidth=0.2) ax1.set_xlabel('Time (s)', fontsize=fs) ax1.set_ylabel('Vertical magnetic field (T)', fontsize=fs) ax1.legend(("Obs", "Pred"), fontsize=fs) ax0.set_title("(a) Recovered Models", fontsize=fs) ax1.set_title("(b) TDEM observed vs. predicted", fontsize=fs) plt.tight_layout(pad=1.5) # -
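
# For reference, the objective being minimized above has the standard Tikhonov form (a sketch of the setup as we read it; the precise weighting matrices are handled internally by SimPEG):
#
# $$\phi(m) = \phi_d(m) + \beta\, \phi_m(m), \qquad \phi_m(m) = \alpha_s \|\mathbf{W}_s\, m\|^2 + \alpha_x \|\mathbf{W}_x\, m\|^2,$$
#
# where $\phi_d$ is the data misfit defined by `dmisfit`, $\beta$ is initialized by `BetaEstimate_ByEig` and cooled by `BetaSchedule`, and $\alpha_s$, $\alpha_x$ are the smallness and smoothness weights set on `reg` above.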
demo_notebooks/TDEM_1D_inversion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #hide from web_journal.core import * # # Web Journal # # > A place to plan and journal. Inspired by coursera learning how to learn. # ## Overview # This project starts with the [Flask basic blog](https://flask.palletsprojects.com/en/1.1.x/tutorial/) and makes it work as a journal for me (o: # # At some point, I'll turn this into or create a [solid web app](https://solidproject.org/) but for now ... I need to learn a few things about python apps with RDBMS and Azure deployments. # # Developers # # ``` # git config --global core.autocrlf input # ``` # # ``` # conda create -n web_journal python==3.8 -y # conda activate web_journal # pip install fastscript==1.0.0 fastcore==1.3.10 nbdev==1.1.5 jupyter # pip install Flask Markdown # # cd github *** nav to where you want this project to live on your filesystem # git clone https://github.com/pete88b/web_journal.git # nbdev_install_git_hooks # jupyter notebook # ``` # # TODO: see settings.ini dev_requirements # ## Running the app locally # # ``` # SET FLASK_APP=web_journal.web.app # SET FLASK_ENV=development # flask run # ``` # ## Type checking with mypy # # ``` # # !pip install mypy # ``` # # Then from the web_journal project folder # ``` # nbdev_build_lib # mypy web_journal/core.py # ```
index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # This file contains the necessary code to interactively explore the output of the model, and recreate the figures from the paper, using the best fit parameters obtained after running `parameter_estimation.py`. # # Uncomment the `plt.savefig()` lines to overwrite the existing figures. # # **Note:** this notebook was elaborated with Python 2.7 and the label generation requires LaTeX. # # --- # # Row-wise subplot titles using phantom background plot adapted from [here](https://stackoverflow.com/questions/27426668/row-titles-for-matplotlib-subplot). # + # We import the necessary packages import warnings warnings.filterwarnings('ignore') from functions_global import * import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy.integrate import odeint plt.style.use('seaborn') plt.rc('text', usetex=True) plt.rc('font', family='serif') # %matplotlib inline # + # We load the raw data dataRaw = pd.read_csv('../../data/VNA.csv') timesData = dataRaw['days'].tolist() # List of time points nMeasurements = 4 # We construct the arrays of data for each vaccine formulation PBS = [] # Non-adjuvanted vaccine MF59 = [] # Vaccine with MF59 AS03 = [] # Vaccine with AS03 Diluvac = [] #Vaccine with Diluvac X_data = [] # List of (repeated) time points for i in range(len(timesData)): for j in range(1,nMeasurements+1): X_data.append(timesData[i]) PBS.append(dataRaw.T.iloc[j][i]) for j in range(nMeasurements+1,2*nMeasurements+1): MF59.append(dataRaw.T.iloc[j][i]) for j in range(2*nMeasurements+1,3*nMeasurements+1): AS03.append(dataRaw.T.iloc[j][i]) for j in range(3*nMeasurements+1,4*nMeasurements+1): Diluvac.append(dataRaw.T.iloc[j][i]) X_data = np.array(X_data) PBS = np.array(PBS) MF59 = np.array(MF59) AS03 = np.array(AS03) Diluvac = np.array(Diluvac) y_data = [PBS, MF59, AS03, Diluvac] # + # We load the best fit parameters (base from model A) params_base = pd.Series.from_csv('../../params/best_fit_params_base_A.csv') params_adj = pd.read_csv('../../params/best_fit_params_adj_Astar.csv', index_col=0).set_index('adjuvant') gammaNA, gammaHA, mu, dmax = params_base['gammaNA'], params_base['gammaHA'], params_base['mu'], params_base['dmax'] adjuvants = params_adj.index.get_values() betaNA_list = [] betaHA_list = [] betaAb_list = [] for adj in adjuvants: betaNA_list.append(params_adj.loc[adj]['betaNA']) betaHA_list.append(params_adj.loc[adj]['betaHA']) betaAb_list.append(params_adj.loc[adj]['betaAb']) # + # We integrate the PDEs for each vaccine formulation X_model = np.arange(0, timeStop + dt, 1.0) model_output = [] total_Ab = [] # Base baseQ = vQ0(np.abs(grid), dmax) + vQ0(np.abs(1 - grid), dmax) H = Htilde*0.5*(np.sign(grid - 0.99*dmax) + np.sign(1.0 - 0.99*dmax - grid)) Q_PBS = gammaNA*vQ0(abs(grid), dmax) + gammaHA*vQ0(abs(1 - grid), dmax) y0 = np.zeros(2*Nx) y0[1 :: 2] = np.ones(Nx) sol_PBS = odeint(affinityMaturation, y0, t, args=(t_boost, H, baseQ, Q_PBS, ktilde, mu, dx), ml=2, mu=2) model_output.append(sol_PBS) total_Ab.append(np.array([np.sum(sol_PBS[np.argwhere(t == z)[0][0]][1 :: 2])*dx for z in X_model])) # Adjuvants for i in range(len(adjuvants)): Q_adj = (gammaNA*betaNA_list[i]*vQ0(np.abs(grid), dmax) + gammaHA*betaHA_list[i]*vQ0(np.abs(1 - grid), dmax)) y0 = np.zeros(2*Nx) y0[1 :: 2] = np.ones(Nx) sol_adj = odeint(affinityMaturation, y0, t, args=(t_boost, H, baseQ, 
Q_adj, ktilde*betaAb_list[i], mu, dx), ml=2, mu=2) model_output.append(sol_adj) total_Ab.append(np.array([np.sum(sol_adj[np.argwhere(t == z)[0][0]][1 :: 2])*dx for z in X_model])) # + # We plot the data and the model output for every case # These two lines create a list containing the default colour cycle in the style ncolours = len(plt.rcParams['axes.prop_cycle']) colours = [list(plt.rcParams['axes.prop_cycle'])[i]['color'] for i in range(ncolours)] f, axes = plt.subplots(4, sharex=True, sharey=True, figsize=(15,18)) config = np.append(['non-adjuvanted'],adjuvants) figlabels = [r'\bf{(A)} ', r'\bf{(B)} ', r'\bf{(C)} ', r'\bf{(D)} '] axes[3].set_xlabel('time post vaccination (days)', fontsize=30) for i in range(4): axes[i].tick_params(labelsize=16) axes[i].set_ylabel('Ab titer', fontsize=30) axes[i].set_yscale('log', basey=2) axes[i].set_title(figlabels[i]+config[i],fontsize=24) axes[i].scatter(X_data, y_data[i]) axes[i].plot(X_model, total_Ab[i],color=colours[1]) axes[i].axhline(y=10,color='grey',linestyle='--') # Uncomment line below to save a new version of the figure # plt.savefig('../../figures/Astar_Ab_vs_t.pdf',bbox_inches='tight') # - # Visualising best fit parameters params_base=params_base.rename('non-adjuvanted') params_all=params_adj.append(params_base) params_all.index.names = ['formulation'] params_all = params_all.reindex(config) params_all = params_all.fillna('-') params_all
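
# For clarity, the `total_Ab` curves compared with the titer data above are the discretized integral of the antibody component of the solution over the affinity grid (the odd-indexed entries of the interleaved state vector), i.e. approximately
#
# $$\mathrm{Ab}_{\mathrm{tot}}(t) = \int \mathrm{Ab}(x, t)\, dx \approx \sum_i \mathrm{Ab}_i(t)\, \Delta x,$$
#
# which is the `np.sum(...[1 :: 2])*dx` expression used when integrating the PDEs.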
code/model-A_star/results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.5 64-bit # name: python3 # --- from datasets import load_dataset, Dataset, DatasetDict import pandas as pd from tqdm import tqdm import re raw_datasets = load_dataset("lc_quad") # + rep_dict = { "ASK": "ask", "WHERE": "where", "SELECT": "select", "{": "[", "}": "]", "DISTINCT": "distinct", "ORDER": "order", "LIMIT": "limit", "FILTER": "filter", } rep_dict2 = {"{": "", "}": ""} def replace_all(text, dict): for i, j in dict.items(): text = text.replace(i, j) return text # - # ## Goal: Trim + Replace LowerCase + Remove weirdly long Question + replaceQ/P df_property = pd.read_csv('../../../data/kdwd/2021-10-11/property.csv') df_property2 = pd.read_csv('../../../data/kdwd/2021-10-11/property2.csv') df_item = pd.read_csv('../../../data/kdwd/2021-10-11/item.csv') df_item2 = pd.read_csv('../../../data/kdwd/2021-10-11/item2.csv') df_item3 = pd.read_csv('../../../data/kdwd/2021-10-11/item3.csv') df_property[df_property.id==1082].en.iloc[0] # id to str couldnotfind = [] def encode_ids(x, df, df2): try: return df[df.id==int(x[1:])].en.iloc[0] except: try: return df2[df2.id==int(x[1:])].en.iloc[0] except: try: return df_item3[df_item3.id==int(x[1:])].en.iloc[0] except: couldnotfind.append(x) return x encode_ids('Q488651', df_item, df_item2) def encode_props(qry): qry = replace_all(qry, rep_dict).strip() # Q for m in re.finditer(":Q\d+", qry): x = m.group(0)[1:] # newstring = encode_ids(x, df_item, df_item2) newstring = encode_ids(x, df_item, df_item2).replace(" ", "_") qry = qry.replace(x, newstring) # P for m in re.finditer(":P\d+", qry): x = m.group(0)[1:] newstring = encode_ids(x, df_property, df_property2).replace(" ", "_") qry = qry.replace(x, newstring) return qry # Test encode_props('SELECT ?obj WHERE { wd:Q1045 p:P1082 ?s . ?s ps:P1082 ?obj . ?s pq:P585 ?x filter(contains(YEAR(?x),\'2009\')) }') # "select ?obj where [ wd:somalia p:population ?s . ?s ps:population ?obj . ?s pq:point_in_time ?x filter(contains(YEAR(?x),'2009')) ]" len(encode_props("SELECT DISTINCT ?sbj ?sbj_label WHERE { ?sbj wdt:P31 wd:Q58863414 . ?sbj wdt:P2541 wd:Q62900839 . ?sbj rdfs:label ?sbj_label . FILTER(CONTAINS(lcase(?sbj_label), 'model')) . 
FILTER (lang(?sbj_label) = 'en') } LIMIT 25")) def prepare(ds): col = 'translation' df = pd.DataFrame(columns=[col]) for d in tqdm(ds): try: qry = encode_props(d['sparql_wikidata']).replace('"','') if d['question'] is not None and d['question']!=[] and len(d['question'])<250: df = df.append({col: {'en':replace_all(d['question'], rep_dict2).strip(), 'sparql': qry}}, ignore_index=True) if d['paraphrased_question'] is not None and d['paraphrased_question']!=[] and len(d['paraphrased_question'])<250: df = df.append({col: {'en':replace_all(d['paraphrased_question'], rep_dict2).strip(), 'sparql': qry}}, ignore_index=True) except Exception as e: print(e) return df # + tags=[] df_test = prepare(raw_datasets["test"]) # - df_train = prepare(raw_datasets["train"]) couldnotfind = list(dict.fromkeys(couldnotfind)) len(couldnotfind) print(df_test.shape) print(df_train.shape) df_all = pd.concat([df_train, df_test]) print(df_all.shape) # shuffling all df_all = df_all.sample(frac = 1).reset_index(drop=True) print(df_all.shape) from sklearn.model_selection import train_test_split df_train2, df_test2 = train_test_split(df_all, test_size=0.2) print(df_test2.shape) print(df_train2.shape) ds_train = Dataset.from_pandas(df_train2) ds_test = Dataset.from_pandas(df_test2) pd.options.display.max_colwidth = 100 df_test.head() df_test.iloc[0].translation mother_ds = DatasetDict({'train': ds_train, 'test':ds_test}) ds_path='../../../data/dataset/lc-quad-wikidata-2021-10-17' mother_ds.save_to_disk(ds_path) # !mkdir {ds_path} df_train2.to_csv(f'{ds_path}/train.csv') df_test2.to_csv(f'{ds_path}/test.csv')
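
# A minimal sketch of loading the saved dataset back for a later training run (assumes the `ds_path` directory written above):

# +
from datasets import load_from_disk

reloaded_ds = load_from_disk(ds_path)
print(reloaded_ds)
print(reloaded_ds['train'][0]['translation'])
# -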
main/lc-quad/t5-2021-10-17/dataset-create.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Scala // language: scala // name: scala // --- // We attempt to decide identity for natural numbers. import $ivy.`io.github.siddhartha-gadgil::provingground-core-jvm:0.1.1-SNAPSHOT` import provingground._, induction._, scalahott._ import NatRing._ val n = "n" :: NatTyp val m = "m" :: NatTyp val k = "k" :: NatTyp import spire.implicits._ import HoTT._ val recNatType = rec(Type) repl.pprinter.bind(translation.FansiShow.fansiPrint) recNatType.typ val recNatNatType = rec(NatTyp ->: Type) recNatNatType.typ val A = Type.sym recNatType(One).typ val step01 = n :-> (A :-> (Zero: Typ[Term])) step01.typ val base0 = recNatType(One)(step01) recNatNatType(base0).typ base0(0: Nat) base0(1: Nat) base0(n + 1) val Q = (NatTyp ->: Type).sym recNatType(Zero).typ (n :-> A :-> Q(n)).typ val step1arg = recNatType(Zero)(k :-> (A :-> Q(k))) step1arg.typ val step0 = n :-> (Q :-> step1arg) step0.typ val AreEqual = recNatNatType(base0)(step0) AreEqual.typ AreEqual(0: Nat)(0: Nat) AreEqual(0: Nat)(1: Nat) AreEqual(0)(n) AreEqual(n + 1)(0) AreEqual(n + 1)(n + 1) AreEqual(m + 1)(m + 1) == AreEqual(m)(m) val diagInd = induc(m :-> AreEqual(m)(m)) diagInd.typ AreEqual(2)(3) diagInd(Star).dom diagInd(Star).dom == m ~>: (AreEqual(m)(m) ->: AreEqual(m)(m)) val p = AreEqual(m)(m).sym val diag = diagInd(Star)(m :~> (p :-> p)) diag.typ == n ~>: (AreEqual(n)(n)) val target = n :~> (m :~> (("_" :: (n =:= m)) :-> AreEqual(n)(m))) val indEq = IdentityTyp.induc(NatTyp, target) indEq.typ indEq.dom == m ~>: AreEqual(m)(m) indEq.dom == diag.typ indEq(diag).typ indEq(diag)(m)(n) indEq(diag)(m)(n).typ == (m =:= n) ->: AreEqual(m)(n) indEq(diag).typ == m ~>: (n ~>: ((m =:= n) ->: AreEqual(m)(n))) indEq(diag)(0: Nat)(1: Nat).typ
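
// To summarize the construction (our informal reading of the code above): `AreEqual` is defined by double recursion on the naturals,
//
// $$\mathrm{AreEqual}(0, 0) = \mathbf{1}, \quad \mathrm{AreEqual}(0, m+1) = \mathbf{0}, \quad \mathrm{AreEqual}(n+1, 0) = \mathbf{0}, \quad \mathrm{AreEqual}(n+1, m+1) = \mathrm{AreEqual}(n, m),$$
//
// so `diag` inhabits $\prod_n \mathrm{AreEqual}(n, n)$ and `indEq(diag)` transports it along equality proofs, yielding a term of type $\prod_{m, n} (m = n) \to \mathrm{AreEqual}(m, n)$, which is the criterion checked in the final cells.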
notes/NatId.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="VTHU63hYgwzT" # summary: In this tutorial, we will learn how to setup our own tutorial repositories using codelab and git actions. # id: how-to-create-your-own-codelab # categories: Tutorial # tags: # status: Published # authors: <NAME>. # Feedback Link: https://form.jotform.com/211377288388469 # + [markdown] id="uC6AkZdChapT" # # How to create your own codelab (automated) # + [markdown] id="-67Oh2k3uCIW" # <!-- ------------------------ --> # ## Prepare your Tutorial # Duration: 5 # - # ### Setup Jupyter # + [markdown] id="BfEICnBOqpR4" # We chose jupyter notebooks as an ideal choice for preparing tutorials because it integrates markdown with code, images and million other things. If you do not have a jupyter environment setup locally, you can choose [Colab](https://colab.research.google.com/) or [Binder](https://mybinder.org/v2/gh/jupyterlab/jupyterlab-demo/master?urlpath=lab/tree/demo) to prepare your notebook on cloud for free. # + [markdown] id="SBa3rJ7SsMfU" # ### Create Tutorial # Write down your desired tutorial in colab as per [these](https://github.com/googlecodelabs/tools/blob/master/FORMAT-GUIDE.md) markdown instructions. # + [markdown] id="2BNE6As_wsUt" # <!-- ------------------------ --> # ## Setup Git Repository # Duration: 10 # - # This step is a one-time setup process. We will use GitHub to maintain and host our codelab site for free. # # ### Fork # Fork [this](https://github.com/sparsh-ai/codelabs) repo. # # ### Rename (optional) # Rename this forked repo if you want. You can choose any name you like. # # ### Update # Update the ```config.txt``` file, which is present in the root folder of this forked repo. Replace the two variables: # 1. Put your ```<git username>```.github.io in the TGTBASE variable. # 2. Put your ```repo name``` in the TGTSITE variable. # # e.g. for a git user ```sparsh-ai``` and repo name ```codelab```, the updated file would look like this: # ```bash # export TGTBASE="sparsh-ai.github.io" # export TGTSITE="codelabs" # ``` # # ### Customize (optional) # You can customize both landing page and codelabs. # # <aside class="positive"> # Codelab customization is mainly done via tags that we provide during the creation of jupyter notebook based tutorials. # </aside> # # We can customize the following items in landing page by simple modifications. # 1. Change Header and Logo # 2. Add Category # 3. Add View # 4. Change Footer # <!-- ------------------------ --> # ## Add new Codelabs # Duration: 5 # To add a new codelab to the codelab site, pull the repo, add our notebook and push it back to the master. # # ### Pull the repo # Pull the repo using ```git pull origin master``` where origin is pointed to your repo where codelabs site is hosted. If repo is already there, you can opt for ```git pull --rebase origin master``` instead. # # ### Add tutorial notebook # Add your tutorial notebook in the ```_notebook``` folder. Make sure the notebook format is following the codelab markdown guidelines and the extension is ```.ipynb```. # # ### Push the repo # Push the updated repo changes to master branch using the standard set of add -> commit -> push chain. e.g. you can use ```git add . && git commit -m 'new build' && git push origin master``` to push the changes. 
# + [markdown] id="PM341vSUGcOq" # <!-- ------------------------ --> # ## Conclusion # Duration: 1 # + [markdown] id="6vZ-Cmw6Gknw" # Git actions workflow named ```CI``` would automatically start deploying latest changes of the master branch. You can check the status in ```Actions``` tab of your git repo. # # To access the codelabs site, go to your github pages URL. The typical URL format is ```https://<user_name>.github.io/<repo_name>```. # # Verify the functionality and modify/enhance the process as per requirements. # # #### Have a Question? # - https://form.jotform.com/211377288388469 # # #### Github Issues # - https://github.com/recohut/reco-step/issues
_notebooks/how-to-create-your-own-codelab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1 align=center>Media Topic Tracking</h1> # <h2 align=center>Using Natural Language Processing</h2> # <h2 align=center>and</h2> # <h2 align=center>Machine Learning</h2> # # ### Text Prep # Relevant news articles are identified and scraped using the script news_scrape.py script. Once they are scraped they are stored in their raw from in a MongoDB collection. The contents of this notebook does the initial cleaning of the data and stores the cleaned text back in the same MongoDB collection along with the other document identifiers and raw text. # + # Load the modules we will need import sys import re import os.path import requests import time import pandas as pd from os import path import re import string from pymongo import MongoClient from textblob import TextBlob # - import spacy from spacy import displacy from pprint import pprint from collections import Counter import en_core_web_sm nlp = en_core_web_sm.load() # # Generate some quick summary statistics # # + # Open MongoDB db_client = MongoClient() # Get set up access to the collections we will need here db_news = db_client['news_search'] db_news_content = db_news['news_content'] # - # Get a list of all the documents and print the count doc_count = db_news_content.count_documents({}) print(doc_count) # + # # Define a couple of useful functions for cleaning text # alphanumeric = lambda x: re.sub('\w*\d\w*', ' ', x) punc_lower = lambda x: re.sub('[%s]' % re.escape(string.punctuation+'\\'), ' ', x.lower()) # + # Count the total number of words in the clean corpus. cursor = db_news_content.find({}, {'_id':0, 'name': 1, 'text': 1, 'url' : 1, 'base_url' : 1, 'pub_date': 1}) article_count = 0 total_words = 0 for article in list(cursor): word_list = [] article_count += 1 clean_text = '' clean_text = punc_lower(alphanumeric(article['text'])) clean_text = re.sub('\w*\d\w*', ' ', clean_text) clean_text = article['text'] word_list = clean_text.split() # print(article['name'], len(word_list)) total_words += len(word_list) print(article_count, total_words) # - # ## Clean the text # # The first steps in cleaning are removing punctuation, stripping number and making everything lower case # First let's clean the text cursor = db_news_content.find({}, {'_id':1, 'name': 1, 'text': 1, 'url' : 1, 'base_url' : 1, 'pub_date': 1}) db_news_sentences = db_news['sentences'] for article in list(cursor) : # make alphanumeric and lower case clean_text = punc_lower(alphanumeric(article['text'])) # remove numbers clean_text = re.sub('\w*\d\w*', ' ', clean_text) # remove punctuation clean_text = re.sub('[%s]' % re.escape(string.punctuation), ' ', clean_text) clean_text = re.sub('\\xa0', ' ', clean_text) # word_list = clean_text.split() word_list = [ x for x in word_list if (len(x) >= 2) ] db_news_content.update_one({ '_id': article['_id']}, { '$set' : { 'clean_text' : clean_text }}) db_news_content.update_one({ '_id': article['_id']}, { '$set' : { 'word_list' : word_list }}) # ## Part Of Speech Filtering # Once the basic cleaning is done we do some more advanced filtering based on parts of speech and Named Entity Recognition. # Basic stop words are removed and words are converted to their lemma form. After that we apply some simple word filters to remove # words that are effectively stop words for the domain in which we are working. 
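
# Before running the full filtering cell, here is a minimal illustration (not part of the original pipeline; the sentence is made up) of the token attributes it relies on: `pos_`, `lemma_`, and `ent_type_`.

# +
demo_doc = nlp("Senator Warren campaigned in Des Moines on Monday.")
for token in demo_doc:
    print(token.text, token.pos_, token.lemma_, token.ent_type_)
# -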
# + from spacy.lang.en import English nlp = spacy.load("en_core_web_sm") # Create a Tokenizer with the default settings for English # including punctuation rules and exceptions sentencizer = nlp.create_pipe("sentencizer") # + cursor = db_news_content.find({}, {'_id':1, 'text': 1, 'clean_text' : 1 }) articles = list(cursor) ''' ADJ: adjective, e.g. big, old, green, incomprehensible, first ADP: adposition, e.g. in, to, during ADV: adverb, e.g. very, tomorrow, down, where, there AUX: auxiliary, e.g. is, has (done), will (do), should (do) CONJ: conjunction, e.g. and, or, but CCONJ: coordinating conjunction, e.g. and, or, but DET: determiner, e.g. a, an, the INTJ: interjection, e.g. psst, ouch, bravo, hello NOUN: noun, e.g. girl, cat, tree, air, beauty NUM: numeral, e.g. 1, 2017, one, seventy-seven, IV, MMXIV PART: particle, e.g. ’s, not, PRON: pronoun, e.g I, you, he, she, myself, themselves, somebody PROPN: proper noun, e.g. Mary, John, London, NATO, HBO PUNCT: punctuation, e.g. ., (, ), ? SCONJ: subordinating conjunction, e.g. if, while, that SYM: symbol, e.g. $, %, §, ©, +, −, ×, ÷, =, :), 😝 VERB: verb, e.g. run, runs, running, eat, ate, eating X: other, e.g. sfpksdpsxmsa SPACE: space, e.g. ''' ''' PERSON People, including fictional. NORP Nationalities or religious or political groups. FAC Buildings, airports, highways, bridges, etc. ORG Companies, agencies, institutions, etc. GPE Countries, cities, states. LOC Non-GPE locations, mountain ranges, bodies of water. PRODUCT Objects, vehicles, foods, etc. (Not services.) EVENT Named hurricanes, battles, wars, sports events, etc. WORK_OF_ART Titles of books, songs, etc. LAW Named documents made into laws. LANGUAGE Any named language. DATE Absolute or relative dates or periods. TIME Times smaller than a day. PERCENT Percentage, including ”%“. MONEY Monetary values, including unit. QUANTITY Measurements, as of weight or distance. ORDINAL “first”, “second”, etc. CARDINAL Numerals that do not fall under another type. 
''' #pos_keep_list = ['ADJ', 'ADV', 'NOUN', 'PROPN', 'VERB'] pos_keep_list = ['ADJ','ADV', 'NOUN', 'VERB'] # ent_keep_list = ['PERSON', 'ORG', 'NORP', 'GPE', ''] ent_keep_list = ['PERSON', 'NORP', 'EVENT', 'PRODUCT', 'GPE', 'MONEY', 'QUANTITY', ''] ''' people_to_drop = ['tom', 'steyer', 'bernie','sanders', 'sander', 'donald', 'trump', 'elizabeth', 'warren', \ 'joe','biden', 'mike', 'bloomberg', \ 'pete','buttigieg', 'amy', 'klobuchar'] ''' people_to_drop = ['tom', 'bernie', 'donald', 'elizabeth', 'joe', 'michael', 'pete', 'amy'] words_to_drop = ['president', 'hall', 'senator', 'vice', 'mayor', 'moines', 'democrat', 'democratic', \ 'president', 'presidential', 'south', 'bend', 'candidate', 'state', 'this', 'look', 'make',\ 'think', 'that', 'what', 'like', 'campaign', 'know' ] for article in articles : word_list = [] # Tag the parts of speech doc = nlp(article['clean_text']) # tokenize the text ''' word_list = [token.lemma_ \ if ((token.pos_ in pos_keep_list) and \ (token.ent_type_ in ent_keep_list) and (token.text in nlp.vocab) \ and not (token.text in people_to_drop) and not (token.text in words_to_drop)) \ else 'location' if (token.ent_type_ == 'GPE') else '' for token in doc ] ''' prop_noun_list = [token.text for token in doc if (token.pos_ == 'PROPN') ] word_list = [token.lemma_ \ if ((token.pos_ in pos_keep_list) or \ (token.ent_type_ in ent_keep_list)) and (token.text in nlp.vocab) else ' ' for token in doc ] word_list = [word for word in word_list if word.isalpha() and (len(word) > 3)] word_list = [word for word in word_list if not word in people_to_drop] word_list = [word for word in word_list if not word in words_to_drop] # make word substitutions for names ''' for i, word in enumerate(word_list) : new_word = word if word == 'donald' : new_word = 'trump' elif word == 'michael' : new_word = 'bloomberg' elif word == 'elizabeth' : new_word = 'warren' elif word == 'pete' : new_word = 'buttigieg' elif word == 'joe': new_word = 'biden' elif word == 'bernie' : new_word = 'sanders' elif word == 'amy' : new_word = 'klobuchar' elif word == 'tom' : new_word = 'steyer' word_list[i] = new_word ''' short_text = ' '.join(word_list) prop_noun_text = ' '.join(prop_noun_list) db_news_content.update_one({'_id': article['_id']}, { '$set' : {'short_text' : short_text}}) db_news_content.update_one({'_id': article['_id']}, { '$set' : {'prop_nouns' : prop_noun_text}}) # - # ## Publication Date Conversion # Finally we're going to convert the publication date to a datetime object for later filtering. # + # Fix datetime import datetime as dt cursor = db_news_content.find({}, {'_id':1, 'text': 1, 'pub_date' : 1 }) articles = list(cursor) for article in articles : dt_pub_date = dt.datetime.strptime(article['pub_date'].split('T')[0], '%Y-%m-%d') db_news_content.update_one({'_id': article['_id']}, { '$set' : {'dt_pub_date' : dt_pub_date}}) # -
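
# As an example of the filtering this enables (a sketch only; the date range below is arbitrary and not part of the prep pipeline), articles can now be selected by publication window directly in MongoDB:

# +
window_start = dt.datetime(2020, 2, 1)
window_end = dt.datetime(2020, 2, 8)
n_in_window = db_news_content.count_documents({'dt_pub_date': {'$gte': window_start, '$lt': window_end}})
print(n_in_window, 'articles published in that window')
# -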
Notebooks/TextPrep.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (Ubuntu)
#     language: python
#     name: c003-python_3
# ---

# ### Integrate progress bar
# Qarpo is a library for building a Jupyter notebook user interface that submits jobs to a job scheduler, displays the outputs of completed jobs, and plots their results.
#
# This notebook provides a recipe for integrating a progress bar into the output interface; the progress bar tracks and displays job progress while the job is running.
#
# To start using qarpo, run the following import:

import qarpo

# The progress indicator's main function is to track the progress of a process; in our case, it tracks the processing of frames in a video.
#
# The progress indicator in qarpo has two main parts. The first part is the backend, which should be integrated into the Python script whose progress needs to be tracked.
#
# ```python
# import qarpo
#
# progress_file_path = os.path.join(args.output_dir, job_id, 'i_progress.txt')
# t0 = time.time()
# for frame_id in range(1, number_iter):
#     res = exec_net.infer(inputs={input_blob: images})
#     qarpo.progressUpdate(progress_file_path, time.time()-t0, frame_id, number_iter)
# ```
#
# The second part is defined in the UI configuration, which is the input to the `Interface` class constructor.
#
# To add the progress indicator configuration to the UI configuration, use the following format:
#
#     {
#       "job":                              # Define how to launch the job and interpret results
#       {
#           "output_type": ".txt",          # The type of input (text/video)
#           "results_path": "app/results/", # Path to job result files
#           "progress_indicators":          # List of dictionaries; each entry states the required information for one progress indicator
#           [
#               {
#                   "file_name": < path to file with info for progress indicator 1 >,
#                   "title": < String, name displayed in the notebook >
#               },
#               {
#                   "file_name": < path to file with info for progress indicator 2 >,
#                   "title": < String, name displayed in the notebook >
#               }
#           ]
#       }
#     }

job_interface = qarpo.Interface(
    {
        "job":                              # Define how to launch the job and interpret results
        {
            "output_type": ".txt",          # The type of input (text/video)
            "results_path": "app/results/", # Path to job result files
            "progress_indicators":          # List of job progress indicators
            [
                {
                    "file_name": "i_progress.txt",  # File with progress info
                    "title": "Progress"             # Display name in the notebook
                }
            ]
        }
    }
)

job_interface.displayUI()

job_interface.submitJob("qsub app/ocr_job.sh -l nodes=1:idc001skl:i5-6500te -F 'app/results/ GPU FP32'")

job_interface.submitJob("qsub app/ocr_job.sh -l nodes=1:idc001skl:i5-6500te -F 'app/results/ CPU FP32'")
Examples/Integrate_Progress_Bar.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os from glob import glob import pandas as pd from barplots import barplots data = [] for path in glob("cache/*/*/*/*.csv"): _, dataset, holdout_number, model, _ = path.split(os.sep) df = pd.read_csv(path, index_col=0) df["dataset"] = dataset df["holdout_number"] = holdout_number df["model"] = model data.append(df) df = pd.concat(data).reset_index(drop=True) df barplots( df, groupby=["run_type", "dataset"], orientation="horizontal", height=5, show_legend=False )
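
# A quick tabular companion to the plots (a sketch; it assumes the per-holdout CSVs contain numeric metric columns alongside the labels added above):

summary = df.groupby(["model", "dataset", "run_type"]).mean(numeric_only=True)
summary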
Retrieve results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="sMYQvJuBi7MS" # # Classifying Structured Data using Keras Preprocessing Layers # + [markdown] id="Nna1tOKxyEqe" # # ## Learning Objectives # # * Load a CSV file using [Pandas](https://pandas.pydata.org/). # * Build an input pipeline to batch and shuffle the rows using [tf.data](https://www.tensorflow.org/guide/datasets). # * Map from columns in the CSV to features used to train the model using Keras Preprocessing layers. # * Build, train, and evaluate a model using Keras. # # ## Introduction # # In this notebook, you learn how to classify structured data (e.g. tabular data in a CSV). You will use [Keras](https://www.tensorflow.org/guide/keras) to define the model, and [preprocessing layers](https://www.tensorflow.org/guide/keras/preprocessing_layers) as a bridge to map from columns in a CSV to features used to train the model. # # Each learning objective will correspond to a **#TODO** in the [student lab notebook](../labs/preprocessing_layers.ipynb) -- try to complete that notebook first before reviewing this solution notebook. # + [markdown] id="h5xkXCicjFQD" # Note: This tutorial is similar to [Classify structured data with feature columns](https://www.tensorflow.org/tutorials/structured_data/feature_columns). This version uses new experimental Keras [Preprocessing Layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing) instead of `tf.feature_column`. Keras Preprocessing Layers are more intuitive, and can be easily included inside your model to simplify deployment. # + [markdown] id="ZHxU1FMNpomc" # ## The Dataset # # You will use a simplified version of the PetFinder [dataset](https://www.kaggle.com/c/petfinder-adoption-prediction). There are several thousand rows in the CSV. Each row describes a pet, and each column describes an attribute. You will use this information to predict if the pet will be adopted. # # Following is a description of this dataset. Notice there are both numeric and categorical columns. There is a free text column which you will not use in this tutorial. # # Column | Description| Feature Type | Data Type # ------------|--------------------|----------------------|----------------- # Type | Type of animal (Dog, Cat) | Categorical | string # Age | Age of the pet | Numerical | integer # Breed1 | Primary breed of the pet | Categorical | string # Color1 | Color 1 of pet | Categorical | string # Color2 | Color 2 of pet | Categorical | string # MaturitySize | Size at maturity | Categorical | string # FurLength | Fur length | Categorical | string # Vaccinated | Pet has been vaccinated | Categorical | string # Sterilized | Pet has been sterilized | Categorical | string # Health | Health Condition | Categorical | string # Fee | Adoption Fee | Numerical | integer # Description | Profile write-up for this pet | Text | string # PhotoAmt | Total uploaded photos for this pet | Numerical | integer # AdoptionSpeed | Speed of adoption | Classification | integer # + [markdown] id="vjFbdBldyEqf" # ## Import TensorFlow and other libraries # # + id="S_BdyQlPjfDW" # !pip install -q sklearn # - # **Restart** the kernel before proceeding further (On the Notebook menu, select Kernel > Restart Kernel > Restart). 
# # + id="LklnLlt6yEqf" # import necessary libraries import numpy as np import pandas as pd import tensorflow as tf from sklearn.model_selection import train_test_split from tensorflow.keras import layers from tensorflow.keras.layers.experimental import preprocessing # + id="TKU7RyoQGVKB" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e857c1fe-b68d-4746-bed7-eb28642f9bfb" # print the tensorflow version tf.__version__ # + [markdown] id="UXvBvobayEqi" # ## Use Pandas to create a dataframe # # [Pandas](https://pandas.pydata.org/) is a Python library with many helpful utilities for loading and working with structured data. You will use Pandas to download the dataset from a URL, and load it into a dataframe. # + id="qJ4Ajn-YyEqj" colab={"base_uri": "https://localhost:8080/"} outputId="4d0de1f2-f9f2-412c-e526-a838a4ba670a" import pathlib dataset_url = 'http://storage.googleapis.com/download.tensorflow.org/data/petfinder-mini.zip' csv_file = 'gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/petfinder-mini_toy.csv' tf.keras.utils.get_file('petfinder_mini.zip', dataset_url, extract=True, cache_dir='.') # TODO # read a comma-separated values (csv) file into DataFrame dataframe = pd.read_csv(csv_file) # + id="3uiq4hoIGyXI" colab={"base_uri": "https://localhost:8080/", "height": 200} outputId="52379c9e-3c20-479e-bf65-8c6ebeccdcd8" # get the first n rows dataframe.head() # + [markdown] id="C3zDbrozyEqq" # ## Create target variable # # The task in the Kaggle competition is to predict the speed at which a pet will be adopted (e.g., in the first week, the first month, the first three months, and so on). Let's simplify this for our tutorial. Here, you will transform this into a binary classification problem, and simply predict whether the pet was adopted, or not. # # After modifying the label column, 0 will indicate the pet was not adopted, and 1 will indicate it was. # + id="wmMDc46-yEqq" # In the original dataset "4" indicates the pet was not adopted. dataframe['target'] = np.where(dataframe['AdoptionSpeed']==4, 0, 1) # Drop un-used columns. dataframe = dataframe.drop(columns=['AdoptionSpeed', 'Description']) # + [markdown] id="sp0NCbswyEqs" # ## Split the dataframe into train, validation, and test # # The dataset you downloaded was a single CSV file. You will split this into train, validation, and test sets. # + id="qT6HdyEwyEqt" colab={"base_uri": "https://localhost:8080/"} outputId="c3008199-bf30-48cc-d55a-8ae2a35ab699" train, test = train_test_split(dataframe, test_size=0.2) train, val = train_test_split(train, test_size=0.2) print(len(train), 'train examples') print(len(val), 'validation examples') print(len(test), 'test examples') # + [markdown] id="C_7uVu-xyEqv" # ## Create an input pipeline using tf.data # # Next, you will wrap the dataframes with [tf.data](https://www.tensorflow.org/guide/datasets), in order to shuffle and batch the data. If you were working with a very large CSV file (so large that it does not fit into memory), you would use tf.data to read it from disk directly. That is not covered in this tutorial. 
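
# + [markdown]
# As an aside, if the CSV really were too large for memory, a sketch of streaming it from disk with `tf.data` could look like the following (not used in the rest of this notebook; it reads the raw file, so the label is still the original `AdoptionSpeed` column):

# +
raw_csv_ds = tf.data.experimental.make_csv_dataset(
    csv_file,                    # same CSV path used above
    batch_size=32,
    label_name='AdoptionSpeed',  # raw label column, before the 'target' transformation
    num_epochs=1,
    shuffle=True)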
# + id="7r4j-1lRyEqw" # A utility method to create a tf.data dataset from a Pandas Dataframe def df_to_dataset(dataframe, shuffle=True, batch_size=32): dataframe = dataframe.copy() labels = dataframe.pop('target') ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels)) if shuffle: ds = ds.shuffle(buffer_size=len(dataframe)) ds = ds.batch(batch_size) ds = ds.prefetch(batch_size) return ds # + [markdown] id="PYxIXH579uS9" # Now that you have created the input pipeline, let's call it to see the format of the data it returns. You have used a small batch size to keep the output readable. # + id="tYiNH-QI96Jo" batch_size = 5 # TODO # call the necessary function with required parameters train_ds = df_to_dataset(train, batch_size=batch_size) # + id="nFYir6S8HgIJ" colab={"base_uri": "https://localhost:8080/"} outputId="a74940a9-7585-4973-e1f3-8164daa84f36" [(train_features, label_batch)] = train_ds.take(1) print('Every feature:', list(train_features.keys())) print('A batch of ages:', train_features['Age']) print('A batch of targets:', label_batch ) # + [markdown] id="geqHWW54Hmte" # You can see that the dataset returns a dictionary of column names (from the dataframe) that map to column values from rows in the dataframe. # + [markdown] id="-v50jBIuj4gb" # ## Demonstrate the use of preprocessing layers. # # The Keras preprocessing layers API allows you to build Keras-native input processing pipelines. You will use 3 preprocessing layers to demonstrate the feature preprocessing code. # # * [`Normalization`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/Normalization) - Feature-wise normalization of the data. # * [`CategoryEncoding`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/CategoryEncoding) - Category encoding layer. # * [`StringLookup`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/StringLookup) - Maps strings from a vocabulary to integer indices. # * [`IntegerLookup`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/IntegerLookup) - Maps integers from a vocabulary to integer indices. # # You can find a list of available preprocessing layers [here](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing). # + [markdown] id="twXBSxnT66o8" # ### Numeric columns # For each of the Numeric feature, you will use a Normalization() layer to make sure the mean of each feature is 0 and its standard deviation is 1. # + [markdown] id="OosUh4kTsK_q" # `get_normalization_layer` function returns a layer which applies featurewise normalization to numerical features. # + id="D6OuEKMMyEq1" def get_normalization_layer(name, dataset): # Create a Normalization layer for our feature. normalizer = preprocessing.Normalization(axis=None) # TODO # Prepare a Dataset that only yields our feature. feature_ds = dataset.map(lambda x, y: x[name]) # Learn the statistics of the data. 
normalizer.adapt(feature_ds) return normalizer # + id="MpKgUDyk69bM" colab={"base_uri": "https://localhost:8080/"} outputId="fced3061-aded-4438-fdb5-cdcc70472fa8" photo_count_col = train_features['PhotoAmt'] layer = get_normalization_layer('PhotoAmt', train_ds) layer(photo_count_col) # + [markdown] id="foWY00YBUx9N" # Note: If you many numeric features (hundreds, or more), it is more efficient to concatenate them first and use a single [normalization](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/Normalization) layer. # + [markdown] id="yVD--2WZ7vmh" # ### Categorical columns # In this dataset, Type is represented as a string (e.g. 'Dog', or 'Cat'). You cannot feed strings directly to a model. The preprocessing layer takes care of representing strings as a one-hot vector. # + [markdown] id="LWlkOPwMsxdv" # `get_category_encoding_layer` function returns a layer which maps values from a vocabulary to integer indices and one-hot encodes the features. # + id="GmgaeRjlDoUO" def get_category_encoding_layer(name, dataset, dtype, max_tokens=None): # Create a StringLookup layer which will turn strings into integer indices if dtype == 'string': index = preprocessing.StringLookup(max_tokens=max_tokens) else: index = preprocessing.IntegerLookup(max_tokens=max_tokens) # TODO # Prepare a Dataset that only yields our feature feature_ds = dataset.map(lambda x, y: x[name]) # Learn the set of possible values and assign them a fixed integer index. index.adapt(feature_ds) # Create a Discretization for our integer indices. encoder = preprocessing.CategoryEncoding(num_tokens=index.vocabulary_size()) # Apply one-hot encoding to our indices. The lambda function captures the # layer so we can use them, or include them in the functional model later. return lambda feature: encoder(index(feature)) # + id="X2t2ff9K8PcT" colab={"base_uri": "https://localhost:8080/"} outputId="f4294791-555a-40ee-a731-da55f8258729" type_col = train_features['Type'] layer = get_category_encoding_layer('Type', train_ds, 'string') layer(type_col) # + [markdown] id="j6eDongw8knz" # Often, you don't want to feed a number directly into the model, but instead use a one-hot encoding of those inputs. Consider raw data that represents a pet's age. # + id="7FjBioQ38oNE" colab={"base_uri": "https://localhost:8080/"} outputId="6cc6f424-ddcb-487e-de32-d94a255f7ecb" type_col = train_features['Age'] category_encoding_layer = get_category_encoding_layer('Age', train_ds, 'int64', 5) category_encoding_layer(type_col) # + [markdown] id="SiE0glOPkMyh" # ## Choose which columns to use # You have seen how to use several types of preprocessing layers. Now you will use them to train a model. You will be using [Keras-functional API](https://www.tensorflow.org/guide/keras/functional) to build the model. The Keras functional API is a way to create models that are more flexible than the [tf.keras.Sequential](https://www.tensorflow.org/api_docs/python/tf/keras/Sequential) API. # # The goal of this tutorial is to show you the complete code (e.g. mechanics) needed to work with preprocessing layers. A few columns have been selected arbitrarily to train our model. # # Key point: If your aim is to build an accurate model, try a larger dataset of your own, and think carefully about which features are the most meaningful to include, and how they should be represented. # + [markdown] id="Uj1GoHSZ9R3H" # Earlier, you used a small batch size to demonstrate the input pipeline. 
Let's now create a new input pipeline with a larger batch size. # # + id="Rcv2kQTTo23h" batch_size = 256 train_ds = df_to_dataset(train, batch_size=batch_size) val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size) test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size) # + id="Q3RBa51VkaAn" all_inputs = [] encoded_features = [] # Numeric features. for header in ['PhotoAmt', 'Fee']: numeric_col = tf.keras.Input(shape=(1,), name=header) normalization_layer = get_normalization_layer(header, train_ds) encoded_numeric_col = normalization_layer(numeric_col) all_inputs.append(numeric_col) encoded_features.append(encoded_numeric_col) # + id="1FOMGfZflhoA" # Categorical features encoded as integers. age_col = tf.keras.Input(shape=(1,), name='Age', dtype='int64') encoding_layer = get_category_encoding_layer('Age', train_ds, dtype='int64', max_tokens=5) encoded_age_col = encoding_layer(age_col) all_inputs.append(age_col) encoded_features.append(encoded_age_col) # + id="K8C8xyiXm-Ie" # Categorical features encoded as string. categorical_cols = ['Type', 'Color1', 'Color2', 'Gender', 'MaturitySize', 'FurLength', 'Vaccinated', 'Sterilized', 'Health', 'Breed1'] for header in categorical_cols: categorical_col = tf.keras.Input(shape=(1,), name=header, dtype='string') encoding_layer = get_category_encoding_layer(header, train_ds, dtype='string', max_tokens=5) encoded_categorical_col = encoding_layer(categorical_col) all_inputs.append(categorical_col) encoded_features.append(encoded_categorical_col) # + [markdown] id="YHSnhz2fyEq3" # ## Create, compile, and train the model # # + [markdown] id="IDGyN_wpo0XS" # Now you can create our end-to-end model. # + id="6Yrj-_pr6jyL" all_features = tf.keras.layers.concatenate(encoded_features) x = tf.keras.layers.Dense(32, activation="relu")(all_features) x = tf.keras.layers.Dropout(0.5)(x) output = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(all_inputs, output) # TODO # compile the model model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=["accuracy"]) # + [markdown] id="f6mNMfG6yEq5" # Let's visualize our connectivity graph: # # + id="Y7Bkx4c7yEq5" colab={"base_uri": "https://localhost:8080/", "height": 861} outputId="09a28e09-bea7-47ee-f4eb-909db2bd0837" # rankdir='LR' is used to make the graph horizontal. tf.keras.utils.plot_model(model, show_shapes=True, rankdir="LR") # + [markdown] id="CED6OStLyEq7" # ### Train the model # # + id="OQfE3PC6yEq8" colab={"base_uri": "https://localhost:8080/"} outputId="38dc5150-a913-48df-ca86-4b260487afed" # TODO # train the model model.fit(train_ds, epochs=10, validation_data=val_ds) # + id="T8N2uAdU2Cni" colab={"base_uri": "https://localhost:8080/"} outputId="bd3d4ec9-32e5-4903-8b91-d5acf0cc5c4e" loss, accuracy = model.evaluate(test_ds) print("Accuracy", accuracy) # + [markdown] id="LmZMnTKaCZda" # ## Inference on new data # # Key point: The model you have developed can now classify a row from a CSV file directly, because the preprocessing code is included inside the model itself. # # + [markdown] id="4xkOlK8Zweeh" # You can now save and reload the Keras model. Follow the tutorial [here](https://www.tensorflow.org/tutorials/keras/save_and_load) for more information on TensorFlow models. 
# + id="QH9Zy1sBvwOH" colab={"base_uri": "https://localhost:8080/"} outputId="966ca761-a0f1-498d-97ed-f9e4b8b29785" model.save('my_pet_classifier') reloaded_model = tf.keras.models.load_model('my_pet_classifier') # + [markdown] id="D973plJrdwQ9" # To get a prediction for a new sample, you can simply call `model.predict()`. There are just two things you need to do: # # 1. Wrap scalars into a list so as to have a batch dimension (models only process batches of data, not single samples) # 2. Call `convert_to_tensor` on each feature # + id="rKq4pxtdDa7i" colab={"base_uri": "https://localhost:8080/"} outputId="068f6499-3894-404b-a26a-ce48486c8fe6" sample = { 'Type': 'Cat', 'Age': 3, 'Breed1': 'Tabby', 'Gender': 'Male', 'Color1': 'Black', 'Color2': 'White', 'MaturitySize': 'Small', 'FurLength': 'Short', 'Vaccinated': 'No', 'Sterilized': 'No', 'Health': 'Healthy', 'Fee': 100, 'PhotoAmt': 2, } input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()} predictions = reloaded_model.predict(input_dict) prob = tf.nn.sigmoid(predictions[0]) print( "This particular pet had a %.1f percent probability " "of getting adopted." % (100 * prob) ) # + [markdown] id="XJQQZEiH2FaB" # Key point: You will typically see best results with deep learning with larger and more complex datasets. When working with a small dataset like this one, we recommend using a decision tree or random forest as a strong baseline. The goal of this tutorial is to demonstrate the mechanics of working with structured data, so you have code to use as a starting point when working with your own datasets in the future. # + [markdown] id="k0QAY2Tb2HYG" # ## Next steps # The best way to learn more about classifying structured data is to try it yourself. You may want to find another dataset to work with, and training a model to classify it using code similar to the above. To improve accuracy, think carefully about which features to include in your model, and how they should be represented.
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/preprocessing_layers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# ## Learning Objectives
#
# We learn about KNN, our first real ML algorithm today and we show how it fits into the parts of an ML algorithm that we described earlier.

# + slideshow={"slide_type": "slide"}
from sklearn.datasets import load_iris
iris_data = load_iris()

# + slideshow={"slide_type": "subslide"}
print iris_data['DESCR']

# + slideshow={"slide_type": "subslide"}
print iris_data['data'].shape
print iris_data['target'].shape
print iris_data['feature_names']
print iris_data['target_names']

# + [markdown] slideshow={"slide_type": "subslide"}
# ## KNN
#
# K-Nearest Neighbors is the first fully fledged machine learning algorithm that we will be studying in class.
#
# KNN is a non-parametric model: it has no parameters and can be described simply as a strategy:
#
# <blockquote>
# For each point X we will find its K nearest neighbors in the training set, and return their average y value as our prediction.
# </blockquote>
#
# Thus KNN.
#
# In order to fully understand KNN, or any ML algorithm, we can go back to the drawing board. Let's return to our assumptions and check to see what a machine learning algorithm entails:

# + slideshow={"slide_type": "skip"}
import networkx as nx
from nxpd import draw
from nxpd import nxpdParams
nxpdParams['show'] = 'ipynb'

c0 = 'Unknown target function, f(X) = y'
c1 = 'Inputs, (x_1, y_1), ..., (x_n, y_n)'
c2 = 'Learning Algorithm'
c3 = 'Hypothesis set, H'
c4 = 'Final Hypothesis, g(X) = y'
c5 = 'IID samples from a r.v. X'

G = nx.DiGraph()
G.add_node(c0)
G.add_node(c1)
G.add_edge(c0, c1)
G.add_node(c2)
G.add_edge(c1, c2)
G.add_node(c3)
G.add_edge(c3, c2)
G.add_node(c4)
G.add_edge(c2, c4)
G.add_node(c5)
G.add_edge(c5, c1)

# + slideshow={"slide_type": "subslide"}
draw(G)

# + [markdown] slideshow={"slide_type": "subslide"}
# To do that we can refer back to our assumptions. The machine learning algorithm is composed of these parts of the above diagram:
#
# 0. Inputs
# 1. Hypothesis Set
# 2. Learning Algorithm
# 3. Final Hypothesis
#
# Of course the unknown target function is instrumental in the success of the algorithm in general. Specifically, we would like the unknown target function to be in the hypothesis set, so that there is a chance we can find the real target function, but we would be happy if there is one 'close enough' in the hypothesis set.

# + [markdown] slideshow={"slide_type": "slide"}
# #### Inputs and Outputs
#
# It might seem odd that we include the inputs in the description of a machine learning algorithm, but this is very important. The first point to mention is that each machine learning algorithm is for a specific type of output: quantitative or qualitative. Certain machine learning algorithms are specifically for qualitative outputs and some for quantitative (and some for both). KNN can be both a classifier and a regressor; we will be focusing on classification today.
#
# The second concern is about the inputs. Some models need very specific inputs. So for example, our KNN can only take quantitative features, so we need to make sure to convert all the values to quantitative ones before fitting. The second point that needs to be made is whether the data needs to be standardized. KNN is very particular about standardization.
# Remember the first part of the algorithm in KNN: we select the closest K points to our new datapoint. But that is very dependent on the dimensions of your data.
#
# So for example you could have two factors: Wealth and Age. If wealth is always in the 1000s and age is always less than a hundred, the nearest other data points will be nearest in terms of wealth and KNN will pretty much ignore age. What is generally better is to standardize them both: subtract the mean and divide by the standard deviation. This will put all points on a similar scale.
#
# That being said, subject matter experts might have a better way to standardize the data points (especially if certain factors are more important than others).
# -

# In summary, two points:
#
# 1. KNN can be both a classifier and a regressor
# 2. KNN inputs need to be standardized

# + [markdown] slideshow={"slide_type": "slide"}
# #### Hypothesis Set
#
# This is the first time that we have seen a real machine learning algorithm, so the hypothesis set is harder to decipher. The previous algorithms we have seen have had just a set of hypotheses (you could count and point at them). This is a bit harder to understand.
#
# What is hard to understand about this is that in this algorithm we rely on the data. Once we have the data we will in fact only have one hypothesis, so you may be tempted to say that our set of hypotheses is one: just the KNN algorithm. But in fact we need to consider the set of hypotheses before we see the data. How many different hypotheses could we have?
#
# Well let's consider the simplest case, where k is 1 and we have a single data point. In this case we would classify every new point as our previous point. So how many possible hypotheses could we have? Well, the point could be anything, so there are infinitely many hypotheses. Wow!
#
# This infinity grows as the number of data points increases, and we get a power set of infinities. Ultimately what you need to know is that the KNN hypothesis set is very, very big.

# + [markdown] slideshow={"slide_type": "slide"}
# #### Learning Algorithm
#
# The learning algorithm for KNN, the way that KNN selects its final hypothesis, is that it looks at and memorizes the data. It is very quick but not so memory efficient. The best hypothesis is embedded in the algorithm itself.

# + [markdown] slideshow={"slide_type": "slide"}
# #### Final Hypothesis
#
# The final hypothesis is obtained by memorizing the dataset and then, for each new point, looking for the nearest points in the data set. Unlike many other algorithms, it takes longer to use the KNN final hypothesis than it does to find it. This is a defining part of KNN and non-parametric models in general, and is one of the points to remember.

# + [markdown] slideshow={"slide_type": "slide"}
# #### Conclusion
#
# There are three important things to know about KNN:
#
# 1. You will need to standardize the data
# 2. KNN has a very large hypothesis set, which as we will see later on leads to overfitting
# 3. KNN is quick to train and slow to evaluate

# + [markdown] slideshow={"slide_type": "slide"}
# #### Bonus fact
#
# Now this will generally encapsulate all of the relevant information for an algorithm, but sometimes there is an extra bit of material that you should know before you use it. The extra bit of information is that KNN does not perform well as we increase the dimensionality of the data.
#
# This is called the curse of dimensionality.
#
# The intuitive explanation of this is that as the dimensionality increases it becomes harder to find a truly similar data point.
Think about it, if you needed to find a match for every gene that you have, you would need to look through a lot of other people, but if we are just matching blood type, it becomes easier. # # The non-intuitive explanation is that as we increase K the hypotheses set increases dramatically leading to more intense overfitting and worse out of sample performance. # + [markdown] slideshow={"slide_type": "slide"} # ## Sklearn # # Okay so with no further ado let's use this bad boy in practice. Let's start off with a sklearn KNN classification: # + slideshow={"slide_type": "subslide"} from sklearn.neighbors import KNeighborsClassifier # and then we can do nearly the same thing # we make our classifier here cls = KNeighborsClassifier(n_neighbors=3, weights='uniform') # + slideshow={"slide_type": "subslide"} cls.fit(iris_data['data'], iris_data['target']) # + slideshow={"slide_type": "fragment"} cls.predict(iris_data['data'][4, None]) # + slideshow={"slide_type": "fragment"} cls.score(iris_data['data'], iris_data['target']) # + [markdown] slideshow={"slide_type": "subslide"} # ## Conclusion # # We now know what are the parts of a typical machine learning algorithm, we explored KNN, and we showed how a typical machine learning algorithm is implemented in sklearn. This should leave you with a lot of questions, first off should be: what did we do? # + [markdown] slideshow={"slide_type": "slide"} # ## Learning Objectives # # We learn about KNN, our first real ML algorithm today and we show how it fits into the parts of an ML algorithm that we described earlier. # + [markdown] slideshow={"slide_type": "slide"} # ## Comprehension Questions # # 1. What are the characteristics of a good demo dataset? # 2. Is KNN really ‘learning’ anything or is it just memorizing? # 3. Is there anything missing in our description of an ML algorithm? # 4. Does the hypothesis set depend on the data? # 5. What is the training time for KNN in terms of the number of data points? What is the memory storage? What is the prediction time? # 6. How would you explain the curse of dimensionality to a non-technical person? # 7. How would you implement KNN without sklearn? # 8. What do the parameters: n_neighbors and weights represent? # 9. What do the three functions: fit, predict and score do? #
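# + [markdown] slideshow={"slide_type": "slide"}
# As a companion to the sklearn example above, and one possible starting point for comprehension question 7, here is a minimal numpy sketch of KNN, assuming Euclidean distance, a simple majority vote, and the standardization (subtract the mean, divide by the standard deviation) discussed earlier. The helper names are illustrative only.

# + slideshow={"slide_type": "subslide"}
import numpy as np
from collections import Counter

def standardize(X):
    # subtract the column means and divide by the column standard deviations
    mean, std = X.mean(axis=0), X.std(axis=0)
    return (X - mean) / std

def knn_predict(X_train, y_train, X_query, k=3):
    predictions = []
    for q in X_query:
        distances = np.sqrt(((X_train - q) ** 2).sum(axis=1))  # Euclidean distances
        nearest = np.argsort(distances)[:k]                     # indices of the k closest points
        vote = Counter(y_train[nearest]).most_common(1)[0][0]   # majority label among them
        predictions.append(vote)
    return np.array(predictions)

X_std = standardize(iris_data['data'])
preds = knn_predict(X_std, iris_data['target'], X_std, k=3)
accuracy = (preds == iris_data['target']).mean()
print accuracy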
practicals/10.KNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import warnings warnings.filterwarnings('ignore') # %load_ext rpy2.ipython # %run ../notebook-init.py # load R libraries # %R invisible(library(ggplot2)) # %R invisible(library(fastcluster)) # %R invisible(library(reshape)) # %R invisible(library(reshape2)) # %R invisible(library(gplots)) # %R invisible(library(RSQLite)) #set up ggplot2 defaults # %R theme_set(theme_gray(base_size=18)) # %pylab inline pylab.rcParams['figure.figsize'] = (20, 20) # + # Count proportion of reads classified by AC-Diamond Fast and Kraken import gzip import sys import subprocess as sp import numpy as np import glob def countAligned(cinfile): containingDir = '/'.join(cinfile.split('/')[:-1]) base = cinfile.split('/')[-1] species = base.split('.')[1].split('-')[0].split('_')[0] if '.acdmndfast.' in base: root = base[15:] root = root[: root.index('.acdmndfast.tsv.gz')] elif '.dmndfast.' in base: root = base[15:] root = root[: root.index('.dmndfast.tsv.gz')] elif '.classified.' in base: root = base[5:] root = root[: root.index('.classified.tsv.gz')] linecountfile = containingDir + '/lines_in.' + root + '.fastq.gz' try: with open(cinfile) as c: C = int(c.read().strip()) with open(linecountfile) as l: L = int(l.read().strip()) P = C / (L / 4.0) return (species, P) except: return False def mergeAligned(outputs): merged = {} for species, out in outputs: if species not in merged: merged[species] = [] merged[species].append(out) return merged def crunchAligned(merged): crunched = {} for sp, vals in merged.items(): u = np.mean(vals) sd = np.std(vals) crunched[sp] = (u,sd) return crunched def main(args): if len(args) == 0: print("Usage: unique-seq-id-files") return 1 outputs = [] for f in args: a = countAligned(f) if not a: continue outputs.append(a) merged = mergeAligned(outputs) crunched = crunchAligned(merged) for k,v in crunched.items(): print("{} : {}".format(k,v)) return crunched print('AC-DIAMOND FAST') files = glob.glob('../line-counts/unique-seq-ids*') main(files) print('KRAKEN') files = glob.glob('../line-counts/c_in*') foo = main(files) # + # Count the proportion of classified reads which were assigned to different kingdoms for AC-Diamond import gzip import sys import subprocess as sp import numpy as np import pandas as pd with open('../phylum2kingdom.txt') as p2k: phylumtokingdom = p2k.read().strip() def countAligned(alignfile,p2k): base = alignfile.split('/')[-1] #species = base.split('.')[1].split('-')[0].split('_')[0] species = alignfile.split('/')[-1].split('.')[0].split('-')[0].split('_')[0] if 'sheep' in alignfile: species = 'sheep' if 'cow' in alignfile: species='cow' sind = alignfile.index('phylum') sfile = alignfile[:sind] + 'species' + alignfile[sind+6:] #print(sfile) out = { 'Bacteria':0, 'Viruses':0, 'Archaea':0, } with gzip.open(alignfile) as af: for line in af: #print(line) line = line.split() if len(line) != 2 or 'taxa' in line[0]: continue k = p2k[line[0].strip()] # print("{} -> {} ({})".format(line[0].strip(),k,int(line[1]))) out[k] = out[k] + int(line[1]) try: cmd = "zcat {} | grep -i 'virus'".format(sfile) data = sp.check_output(cmd,shell=True) for line in data.split('\n'): line = line.split() if len(line) != 2 or 'Sample' in line[0]: continue k = 'Viruses' out[k] = out[k] + int(line[1]) except: pass out['Bacteria'] = out['Bacteria'] - out['Viruses'] N = sum([v for v in 
out.values()]) for k,v in out.items(): out[k] = 1000*1000*float(v)/N return (species, out) def mergeAligned(outputs): merged = {} for species, out in outputs: for kind, val in out.items(): if kind not in merged: merged[kind] = {} if species not in merged[kind]: merged[kind][species] = [] merged[kind][species].append(val) return merged def buffer(outputs): buff = {} for species, out in outputs: if species not in buff: buff[species] = [] buff[species].append(out) for species,outs in buff.items(): top = 0 if len(outs) > len(top): top = len(outs) for out in outs: for _ in range(top - len(out)): out.append(0) def crunchAligned(merged): crunched = {kind:{} for kind in merged.keys()} for kind, val in merged.items(): for species, vals in val.items(): u = np.mean(vals) sd = np.std(vals) crunched[kind][species] = (u,sd) return crunched def prettyprint(crunched): print("\n--------------------------------------------------------\n") outline = "{:.1f} ({:.1f}) \t& {:.1f} ({:.1f}) \t& {:.1f} ({:.1f}) \t& {:.1f} ({:.1f}) \t\\\\" for row in crunched.itertuples(): print(row) ar = [] for v in row[1:]: # print(v) ar.append(float(v[0])) ar.append(float(v[1])) # print(ar) print(outline.format(*ar)) def main(args): if len(args) == 0: print("Usage: metaphlan-profile-files") return 1 p2k = {} a = phylumtokingdom.split('\n') a = [v for v in a if len(v) > 0] for i in range(0,len(a),2): p2k[a[i].strip()] = a[i+1].strip() # for k,v in p2k.items(): # print("{} : {}".format(k,v)) outputs = [] for f in args: a = countAligned(f,p2k) if not a: continue outputs.append(a) merged = mergeAligned(outputs) crunched = crunchAligned(merged) crunched = pd.DataFrame(crunched) print(crunched) #prettyprint(crunched) files = glob.glob('../taxonomic-profiles/ac-diamond-fast/individual-profiles/*phylum*') main(files) # + # Count the proportion of classified reads which were assigned to different kingdoms for AC-Diamond import gzip import sys import subprocess as sp import numpy as np import pandas as pd def countAligned(alignfile): base = alignfile.split('/')[-1] tool = 'kraken' species = alignfile.split('/')[-1].split('.')[1].split('-')[0].split('_')[0] out = { 'Bacteria':0, 'Viruses':0, 'Archaea':0, } with open(alignfile) as af: for line in af: line = line.split() if len(line) != 2 or 'taxa' in line[0]: continue k = line[0].strip().split('__')[1] out[k] = out[k] + int(line[1]) N = sum([v for v in out.values()]) if N == 0 : print(alignfile) return False for k,v in out.items(): out[k] = 1000*1000*float(v)/N return (species, out) def mergeAligned(outputs): merged = {} for species, out in outputs: for kind, val in out.items(): if kind not in merged: merged[kind] = {} if species not in merged[kind]: merged[kind][species] = [] merged[kind][species].append(val) return merged def crunchAligned(merged): crunched = {kind:{} for kind in merged.keys()} for kind, val in merged.items(): for species, vals in val.items(): u = np.mean(vals) sd = np.std(vals) crunched[kind][species] = (u,sd) return crunched def main(args): outputs = [] for f in args: a = countAligned(f) if not a: continue outputs.append(a) merged = mergeAligned(outputs) crunched = crunchAligned(merged) crunched = pd.DataFrame(crunched) print(crunched) files = glob.glob('../line-counts/kraken_phyla*') main(files) # -
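# + [markdown]
# The `mergeAligned`/`crunchAligned` helpers above compute a per-species mean and standard deviation by hand; the same summary can be expressed with a pandas groupby. A minimal sketch, assuming a list of `(species, proportion)` tuples like the one built from the first `countAligned` above:

# +
import pandas as pd

def crunch_with_pandas(outputs):
    # outputs: list of (species, proportion) tuples
    df = pd.DataFrame(outputs, columns=['species', 'proportion'])
    # note: pandas .std() uses ddof=1, while np.std in crunchAligned uses ddof=0
    return df.groupby('species')['proportion'].agg(['mean', 'std'])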
notebooks/comparison_of_taxonomic_profilers/notebooks/Classification Proportions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Table of Contents # # 1. <a href="#item1"><em>k</em>-means on a Randomly Generated Dataset</a> # 2. <a href="#item2">Using <em>k</em> for Customer Segmentation</a> # + # import libraries import random # library for random number generation import numpy as np # library for vectorized computation import pandas as pd # library to process data as dataframes import matplotlib.pyplot as plt # plotting library # backend for rendering plots within the browser # %matplotlib inline from sklearn.cluster import KMeans from sklearn.datasets.samples_generator import make_blobs print('imported.') # - # ## 1. _k_-means on a Randomly Generated Dataset # + # data x1 = [-4.9, -3.5, 0, -4.5, -3, -1, -1.2, -4.5, -1.5, -4.5, -1, -2, -2.5, -2, -1.5, 4, 1.8, 2, 2.5, 3, 4, 2.25, 1, 0, 1, 2.5, 5, 2.8, 2, 2] x2 = [-3.5, -4, -3.5, -3, -2.9, -3, -2.6, -2.1, 0, -0.5, -0.8, -0.8, -1.5, -1.75, -1.75, 0, 0.8, 0.9, 1, 1, 1, 1.75, 2, 2.5, 2.5, 2.5, 2.5, 3, 6, 6.5] print('Datapoints defined!') # + # Define a function to update the centroid of each cluster colors_map = np.array(['b', 'r']) def assign_members(x1, x2, centers): compare_to_first_center = np.sqrt(np.square(np.array(x1) - centers[0][0]) + np.square(np.array(x2) - centers[0][1])) compare_to_second_center = np.sqrt(np.square(np.array(x1) - centers[1][0]) + np.square(np.array(x2) - centers[1][1])) class_of_points = compare_to_first_center > compare_to_second_center colors = colors_map[class_of_points + 1 - 1] return colors, class_of_points print('assign_members function defined!') # + # Define a function to update the centroids means of each cluster def update_centers(x1, x2, class_of_points): center1 = [np.mean(np.array(x1)[~class_of_points]), np.mean(np.array(x2)[~class_of_points])] center2 = [np.mean(np.array(x1)[class_of_points]), np.mean(np.array(x2)[class_of_points])] return [center1, center2] print('assign_members function defined!') # + # Define a function that plots the data points along with the clustor centroids def plot_points(centroids=None, colors='g', figure_title=None): # plot the figure fig = plt.figure(figsize=(7, 5)) # create a figure object ax = fig.add_subplot(1, 1, 1) centroid_colors = ['bx', 'rx'] if centroids: for (i, centroid) in enumerate(centroids): ax.plot(centroid[0], centroid[1], centroid_colors[i], markeredgewidth=5, markersize=20) plt.scatter(x1, x2, s=500, c=colors) # define the ticks xticks = np.linspace(-6, 8, 15, endpoint=True) yticks = np.linspace(-6, 6, 13, endpoint=True) # fix the horizontal axis ax.set_xticks(xticks) ax.set_yticks(yticks) # add tick labels xlabels = xticks ax.set_xticklabels(xlabels) ylabels = yticks ax.set_yticklabels(ylabels) # style the ticks ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.tick_params('both', length=2, width=1, which='major', labelsize=15) # add labels to axes ax.set_xlabel('x1', fontsize=20) ax.set_ylabel('x2', fontsize=20) # add title to figure ax.set_title(figure_title, fontsize=24) plt.show() print('plot_points function defined!') # - # Initialize k-means plot data points plot_points(figure_title='Scatter Plot of x2 vs x1') # initialize k-means : Randomly and add them to plot centers = [[-2, 2], [2, -2]] plot_points(centers, figure_title='k-means Initialization') # + # Run k-means n-times number_of_iterations = 4 for i in 
range(number_of_iterations): input('Iteration {} - Press Enter to update the members of each cluster'.format(i + 1)) colors, class_of_points = assign_members(x1, x2, centers) title = 'Iteration {} - Cluster Assignment'.format(i + 1) plot_points(centers, colors, figure_title=title) input('Iteration {} - Press Enter to update the centers'.format(i + 1)) centers = update_centers(x1, x2, class_of_points) title = 'Iteration {} - Centroid Update'.format(i + 1) plot_points(centers, colors, figure_title=title) # - # # Now I will randomly generate n-datapoints using random.seed() , set the seed to 0 np.random.seed(0) # + # building make_blobs class which can take in specific inputs # - # <b> <u> Input </u> </b> # # <ul> # <li> <b>n_samples</b>: The total number of points equally divided among clusters. </li> # <ul> <li> Value will be: 5000 </li> </ul> # <li> <b>centers</b>: The number of centers to generate, or the fixed center locations. </li> # <ul> <li> Value will be: [[4, 4], [-2, -1], [2, -3],[1,1]] </li> </ul> # <li> <b>cluster_std</b>: The standard deviation of the clusters. </li> # <ul> <li> Value will be: 0.9 </li> </ul> # </ul> # # <b> <u> Output </u> </b> # # <ul> # <li> <b>X</b>: Array of shape [n_samples, n_features]. (Feature Matrix)</li> # <ul> <li> The generated samples. </li> </ul> # <li> <b>y</b>: Array of shape [n_samples]. (Response Vector)</li> # <ul> <li> The integer labels for cluster membership of each sample. </li> </ul> # </ul> # # + X, y = make_blobs(n_samples=500, centers=[[4,4],[-2,-1],[2,-3],[1,1]],cluster_std=0.9) # Display the scatter plot of the randomly generated data plt.figure(figsize=(15, 10)) plt.scatter(X[:, 0], X[:, 1], marker='.') # - # ### Setting up _k_-means # # The KMeans class has many parameters that can be used, but we will use these three: # # <ul> # <li> <strong>init</strong>: Initialization method of the centroids. </li> # <ul> # <li> Value will be: "k-means++". k-means++ selects initial cluster centers for <em>k</em>-means clustering in a smart way to speed up convergence.</li> # </ul> # <li> <strong>n_clusters</strong>: The number of clusters to form as well as the number of centroids to generate. </li> # <ul> <li> Value will be: 4 (since we have 4 centers)</li> </ul> # <li> <strong>n_init</strong>: Number of times the <em>k</em>-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. </li> # <ul> <li> Value will be: 12 </li> </ul> # </ul> # # Initialize KMeans with these parameters, where the output parameter is called **k_means**. k_means = KMeans(init="k-means++", n_clusters=4, n_init=12).fit(X) # We will also get the coordinates of the cluster centers using KMeans **.cluster_centers\_** and save it as **k_means_cluster_centers**. k_means_cluster_centers = k_means.cluster_centers_ k_means_cluster_centers # Now let's grab the labels for each point in the model using KMeans **.labels\_** attribute and save it as **k_means_labels**. k_means_labels = k_means.labels_ k_means_labels # ### Visualization # + # initialize the plot with the specified dimensions. fig = plt.figure(figsize=(15, 10)) # colors uses a color map, which will produce an array of colors based on # the number of labels. We use set(k_means_labels) to get the # unique labels. colors = plt.cm.Spectral(np.linspace(0, 1, len(set(k_means_labels)))) # create a plot ax = fig.add_subplot(1, 1, 1) # loop through the data and plot the datapoints and centroids. 
# k will range from 0-3, which will match the number of clusters in the dataset. for k, col in zip(range(len([[4,4], [-2, -1], [2, -3], [1, 1]])), colors): # create a list of all datapoints, where the datapoitns that are # in the cluster (ex. cluster 0) are labeled as true, else they are # labeled as false. my_members = (k_means_labels == k) # define the centroid, or cluster center. cluster_center = k_means_cluster_centers[k] # plot the datapoints with color col. ax.plot(X[my_members, 0], X[my_members, 1], 'w', markerfacecolor=col, marker='.') # plot the centroids with specified color, but with a darker outline ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6) # title of the plot ax.set_title('KMeans') # remove x-axis ticks ax.set_xticks(()) # remove y-axis ticks ax.set_yticks(()) # show the plot plt.show() # - # ## 2. Using _k_-means for Customer Segmentation # # We have a customer dataset, and we are interested in exploring the behavior of our customers using their historical data. # # Customer segmentation is the practice of partitioning a customer base into groups of individuals that have similar characteristics. It is a significant strategy as a business can target these specific groups of customers and effectively allocate marketing resources. For example, one group might contain customers who are high-profit and low-risk, that is, more likely to purchase products, or subscribe to a service. A business task is to retain those customers. Another group might include customers from non-profit organizations, and so on. # Download Data filename = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DS0701EN-SkillsNetwork/labs/customer_segmentation.csv" print('Data downloaded!') # read csv file customers_df = pd.read_csv(filename) customers_df.head() # ### Pre-processing # Address in this dataset is a categorical variable. k-means algorithm isn't directly applicable to categorical variables because Euclidean distance function isn't really meaningful for discrete variables. So,I will drop this feature and run clustering. # drop the attribute 'Address' df = customers_df.drop('Address', axis=1) df.head() # ### Normalize # + # Normalize dataframe using StandardScale() from sklearn.preprocessing import StandardScaler X = df.values[:,1:] X = np.nan_to_num(X) cluster_dataset = StandardScaler().fit_transform(X) cluster_dataset # - # ### Modeling # + # model into n clusters. num_clusters = 3 k_means = KMeans(init="k-means++", n_clusters=num_clusters, n_init=12) k_means.fit(cluster_dataset) labels = k_means.labels_ print(labels) # - # ### Insights # Note that each row in our dataset represents a customer, and therefore, each row is assigned a label. df["Labels"] = labels df.head(5) # check the centroid values by averaging the features df.groupby('Labels').mean() # <em>k</em>-means will partition into three groups since we specified the algorithm to generate 3 clusters. The customers in each cluster are similar to each other in terms of the features included in the dataset. # # Now we can create a profile for each group, considering the common characteristics of each cluster. # For example, the 3 clusters can be: # # - OLDER, HIGH INCOME, AND INDEBTED # - MIDDLE AGED, MIDDLE INCOME, AND FINANCIALLY RESPONSIBLE # - YOUNG, LOW INCOME, AND INDEBTED # # **However, I can come up with diferent labels that I think best describe each cluster. 
# Candidate descriptive names for the three clusters
ages = pd.DataFrame(['Older', 'Middle Age', 'Young'], columns=['Age'])
ages
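# + [markdown]
# The `ages` frame above lists candidate segment names but is not yet attached to the clusters. A minimal sketch of one way to do that, assuming a hypothetical label-to-name mapping that should only be chosen after inspecting `df.groupby('Labels').mean()`:

# +
# Hypothetical label -> name mapping for illustration; pick the names that
# actually match the group means in your run.
segment_names = {
    0: 'Older, high income, indebted',
    1: 'Middle aged, middle income, financially responsible',
    2: 'Young, low income, indebted',
}
df['Segment'] = df['Labels'].map(segment_names)
df.head()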
k-means Clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="zVtw6n7bT110" # # TensorFlow Tutorial # # Welcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow: # # - Initialize variables # - Start your own session # - Train algorithms # - Implement a Neural Network # # Programing frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code. # - # ## <font color='darkblue'>Updates</font> # # #### If you were working on the notebook before this update... # * The current notebook is version "v3b". # * You can find your original work saved in the notebook with the previous version name (it may be either TensorFlow Tutorial version 3" or "TensorFlow Tutorial version 3a.) # * To view the file directory, click on the "Coursera" icon in the top left of this notebook. # # #### List of updates # * forward_propagation instruction now says 'A1' instead of 'a1' in the formula for Z2; # and are updated to say 'A2' instead of 'Z2' in the formula for Z3. # * create_placeholders instruction refer to the data type "tf.float32" instead of float. # * in the model function, the x axis of the plot now says "iterations (per fives)" instead of iterations(per tens) # * In the linear_function, comments remind students to create the variables in the order suggested by the starter code. The comments are updated to reflect this order. # * The test of the cost function now creates the logits without passing them through a sigmoid function (since the cost function will include the sigmoid in the built-in tensorflow function). # * In the 'model' function, the minibatch_cost is now divided by minibatch_size (instead of num_minibatches). # * Updated print statements and 'expected output that are used to check functions, for easier visual comparison. # # ## 1 - Exploring the Tensorflow Library # # To start, you will import the library: # + colab={} colab_type="code" id="rhZ0RUw8T111" import math import numpy as np import h5py import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.python.framework import ops from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict # %matplotlib inline np.random.seed(1) # + [markdown] colab_type="text" id="A1vVKBCQT114" # Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example. # $$loss = \mathcal{L}(\hat{y}, y) = (\hat y^{(i)} - y^{(i)})^2 \tag{1}$$ # + colab={} colab_type="code" id="JKAjoAbjT115" y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36. y = tf.constant(39, name='y') # Define y. 
Set to 39 loss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss init = tf.global_variables_initializer() # When init is run later (session.run(init)), # the loss variable will be initialized and ready to be computed with tf.Session() as session: # Create a session and print the output session.run(init) # Initializes the variables print(session.run(loss)) # Prints the loss # + [markdown] colab_type="text" id="iz5l0YacT117" # Writing and running programs in TensorFlow has the following steps: # # 1. Create Tensors (variables) that are not yet executed/evaluated. # 2. Write operations between those Tensors. # 3. Initialize your Tensors. # 4. Create a Session. # 5. Run the Session. This will run the operations you'd written above. # # Therefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run `init=tf.global_variables_initializer()`. That initialized the loss variable, and in the last line we were finally able to evaluate the value of `loss` and print its value. # # Now let us look at an easy example. Run the cell below: # + colab={} colab_type="code" id="Ni74wj7IT117" a = tf.constant(2) b = tf.constant(10) c = tf.multiply(a,b) print(c) # + [markdown] colab_type="text" id="dKAqwc2VT119" # As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it. # + colab={} colab_type="code" id="txF_DuCkT11-" sess = tf.Session() print(sess.run(c)) # + [markdown] colab_type="text" id="xADCVaq4T12A" # Great! To summarize, **remember to initialize your variables, create a session and run the operations inside the session**. # # Next, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later. # To specify values for a placeholder, you can pass in values by using a "feed dictionary" (`feed_dict` variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session. # + colab={} colab_type="code" id="Pn_-PPqvT12A" # Change the value of x in the feed_dict x = tf.placeholder(tf.int64, name = 'x') print(sess.run(2 * x, feed_dict = {x: 4})) sess.close() # + [markdown] colab_type="text" id="QrVJFYCpT12C" # When you first defined `x` you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you **feed data** to these placeholders when running the session. # # Here's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph. # + [markdown] colab_type="text" id="X15wlMDUT12D" # ### 1.1 - Linear function # # Lets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector. # # **Exercise**: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). 
As an example, here is how you would define a constant X that has shape (3,1): # ```python # X = tf.constant(np.random.randn(3,1), name = "X") # # ``` # You might find the following functions helpful: # - tf.matmul(..., ...) to do a matrix multiplication # - tf.add(..., ...) to do an addition # - np.random.randn(...) to initialize randomly # # + colab={} colab_type="code" id="ww5sBoFbT12D" # GRADED FUNCTION: linear_function def linear_function(): """ Implements a linear function: Initializes X to be a random tensor of shape (3,1) Initializes W to be a random tensor of shape (4,3) Initializes b to be a random tensor of shape (4,1) Returns: result -- runs the session for Y = WX + b """ np.random.seed(1) """ Note, to ensure that the "random" numbers generated match the expected results, please create the variables in the order given in the starting code below. (Do not re-arrange the order). """ ### START CODE HERE ### (4 lines of code) X = tf.constant(np.random.randn(3,1), name = "X") W = tf.constant(np.random.randn(4,3), name = "W") b = tf.constant(np.random.randn(4,1), name = "b") Y = tf.add(tf.matmul(W, X), b) ### END CODE HERE ### # Create the session using tf.Session() and run it with sess.run(...) on the variable you want to calculate ### START CODE HERE ### sess = tf.Session() result = sess.run(Y) ### END CODE HERE ### # close the session sess.close() return result # + colab={} colab_type="code" id="P3gOryVQT12G" print( "result = \n" + str(linear_function())) # + [markdown] colab_type="text" id="R5netQ9IT12J" # *** Expected Output ***: # # ``` # result = # [[-2.15657382] # [ 2.95891446] # [-1.08926781] # [-0.84538042]] # ``` # + [markdown] colab_type="text" id="DUBum-E4T12K" # ### 1.2 - Computing the sigmoid # Great! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`. For this exercise lets compute the sigmoid function of an input. # # You will do this exercise using a placeholder variable `x`. When running the session, you should use the feed dictionary to pass in the input `z`. In this exercise, you will have to (i) create a placeholder `x`, (ii) define the operations needed to compute the sigmoid using `tf.sigmoid`, and then (iii) run the session. # # ** Exercise **: Implement the sigmoid function below. You should use the following: # # - `tf.placeholder(tf.float32, name = "...")` # - `tf.sigmoid(...)` # - `sess.run(..., feed_dict = {x: z})` # # # Note that there are two typical ways to create and use sessions in tensorflow: # # **Method 1:** # ```python # sess = tf.Session() # # Run the variables initialization (if needed), run the operations # result = sess.run(..., feed_dict = {...}) # sess.close() # Close the session # ``` # **Method 2:** # ```python # with tf.Session() as sess: # # run the variables initialization (if needed), run the operations # result = sess.run(..., feed_dict = {...}) # # This takes care of closing the session for you :) # ``` # # + colab={} colab_type="code" id="APv9bW9rT12K" # GRADED FUNCTION: sigmoid def sigmoid(z): """ Computes the sigmoid of z Arguments: z -- input value, scalar or vector Returns: results -- the sigmoid of z """ ### START CODE HERE ### ( approx. 4 lines of code) # Create a placeholder for x. Name it 'x'. x = tf.placeholder(tf.float32, name = "x") # compute sigmoid(x) sigmoid = tf.sigmoid(x) # Create a session, and run it. Please use the method 2 explained above. # You should use a feed_dict to pass z's value to x. 
with tf.Session() as sess: # Run session and call the output "result" result = sess.run(sigmoid, feed_dict = {x: z}) ### END CODE HERE ### return result # + colab={} colab_type="code" id="nLHdJxKVT12M" print ("sigmoid(0) = " + str(sigmoid(0))) print ("sigmoid(12) = " + str(sigmoid(12))) # + [markdown] colab_type="text" id="4cl8Wgg9T12O" # *** Expected Output ***: # # <table> # <tr> # <td> # **sigmoid(0)** # </td> # <td> # 0.5 # </td> # </tr> # <tr> # <td> # **sigmoid(12)** # </td> # <td> # 0.999994 # </td> # </tr> # # </table> # + [markdown] colab_type="text" id="v-okwynUT12O" # <font color='blue'> # **To summarize, you how know how to**: # 1. Create placeholders # 2. Specify the computation graph corresponding to operations you want to compute # 3. Create the session # 4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values. # + [markdown] colab_type="text" id="ytSt0fgTT12P" # ### 1.3 - Computing the Cost # # You can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{[2](i)}$ and $y^{(i)}$ for i=1...m: # $$ J = - \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log a^{ [2] (i)} + (1-y^{(i)})\log (1-a^{ [2] (i)} )\large )\small\tag{2}$$ # # you can do it in one line of code in tensorflow! # # **Exercise**: Implement the cross entropy loss. The function you will use is: # # # - `tf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...)` # # Your code should input `z`, compute the sigmoid (to get `a`) and then compute the cross entropy cost $J$. All this can be done using one call to `tf.nn.sigmoid_cross_entropy_with_logits`, which computes # # $$- \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log \sigma(z^{[2](i)}) + (1-y^{(i)})\log (1-\sigma(z^{[2](i)})\large )\small\tag{2}$$ # # # + colab={} colab_type="code" id="oIRdDYOLT12P" # GRADED FUNCTION: cost def cost(logits, labels): """     Computes the cost using the sigmoid cross entropy          Arguments:     logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)     labels -- vector of labels y (1 or 0) Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels" in the TensorFlow documentation. So logits will feed into z, and labels into y.          Returns:     cost -- runs the session of the cost (formula (2)) """ ### START CODE HERE ### # Create the placeholders for "logits" (z) and "labels" (y) (approx. 2 lines) z = tf.placeholder(tf.float32, name = "z") y = tf.placeholder(tf.float32, name = "y") # Use the loss function (approx. 1 line) cost = tf.nn.sigmoid_cross_entropy_with_logits(logits = z, labels = y) # Create a session (approx. 1 line). See method 1 above. sess = tf.Session() # Run the session (approx. 1 line). cost = sess.run(cost, feed_dict = {z: logits, y: labels}) # Close the session (approx. 1 line). See method 1 above. sess.close() ### END CODE HERE ### return cost # + colab={} colab_type="code" id="0nPB-lOYT12R" logits = np.array([0.2,0.4,0.7,0.9]) cost = cost(logits, np.array([0,0,1,1])) print ("cost = " + str(cost)) # + [markdown] colab_type="text" id="X8sMySzyT12T" # ** Expected Output** : # # ``` # cost = [ 0.79813886 0.91301525 0.40318605 0.34115386] # ``` # + [markdown] colab_type="text" id="_sK1Rqm6T12U" # ### 1.4 - Using One Hot encodings # # Many times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. 
If C is for example 4, then you might have the following y vector which you will need to convert as follows: # # # <img src="images/onehot.png" style="width:600px;height:150px;"> # # This is called a "one hot" encoding, because in the converted representation exactly one element of each column is "hot" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code: # # - tf.one_hot(labels, depth, axis) # # **Exercise:** Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use `tf.one_hot()` to do this. # + colab={} colab_type="code" id="dlamXLu_T12U" # GRADED FUNCTION: one_hot_matrix def one_hot_matrix(labels, C): """ Creates a matrix where the i-th row corresponds to the ith class number and the jth column corresponds to the jth training example. So if example j had a label i. Then entry (i,j) will be 1. Arguments: labels -- vector containing the labels C -- number of classes, the depth of the one hot dimension Returns: one_hot -- one hot matrix """ ### START CODE HERE ### # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line) C = tf.constant(C, name = "C") # Use tf.one_hot, be careful with the axis (approx. 1 line) one_hot_matrix = tf.one_hot(indices = labels, depth = C, axis = 0) # Create the session (approx. 1 line) sess = tf.Session() # Run the session (approx. 1 line) one_hot = sess.run(one_hot_matrix) # Close the session (approx. 1 line). See method 1 above. sess.close() ### END CODE HERE ### return one_hot # + colab={} colab_type="code" id="8Bi0je2yT12W" labels = np.array([1,2,3,0,2,1]) one_hot = one_hot_matrix(labels, C = 4) print ("one_hot = \n" + str(one_hot)) # + [markdown] colab_type="text" id="HlT0UczrT12Y" # **Expected Output**: # # ``` # one_hot = # [[ 0. 0. 0. 1. 0. 0.] # [ 1. 0. 0. 0. 0. 1.] # [ 0. 1. 0. 0. 1. 0.] # [ 0. 0. 1. 0. 0. 0.]] # ``` # + [markdown] colab_type="text" id="qsu1xyqFT12Z" # ### 1.5 - Initialize with zeros and ones # # Now you will learn how to initialize a vector of zeros and ones. The function you will be calling is `tf.ones()`. To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively. # # **Exercise:** Implement the function below to take in a shape and to return an array (of the shape's dimension of ones). # # - tf.ones(shape) # # + colab={} colab_type="code" id="eOVWrcR2T12Z" # GRADED FUNCTION: ones def ones(shape): """ Creates an array of ones of dimension shape Arguments: shape -- shape of the array you want to create Returns: ones -- array containing only ones """ ### START CODE HERE ### # Create "ones" tensor using tf.ones(...). (approx. 1 line) ones = tf.ones(shape) # Create the session (approx. 1 line) sess = tf.Session() # Run the session to compute 'ones' (approx. 1 line) ones = sess.run(ones) # Close the session (approx. 1 line). See method 1 above. sess.close() ### END CODE HERE ### return ones # + colab={} colab_type="code" id="WwHEVDv6T12b" print ("ones = " + str(ones([3]))) # + [markdown] colab_type="text" id="hGgM2hSFT12g" # **Expected Output:** # # <table> # <tr> # <td> # **ones** # </td> # <td> # [ 1. 1. 1.] # </td> # </tr> # # </table> # + [markdown] colab_type="text" id="LW8S6sVzT12h" # # 2 - Building your first neural network in tensorflow # # In this part of the assignment you will build a neural network using tensorflow. 
Remember that there are two parts to implement a tensorflow model: # # - Create the computation graph # - Run the graph # # Let's delve into the problem you'd like to solve! # # ### 2.0 - Problem statement: SIGNS Dataset # # One afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language. # # - **Training set**: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number). # - **Test set**: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number). # # Note that this is a subset of the SIGNS dataset. The complete dataset contains many more signs. # # Here are examples for each number, and how an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolutoion to 64 by 64 pixels. # <img src="images/hands.png" style="width:800px;height:350px;"><caption><center> <u><font color='purple'> **Figure 1**</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center> # # # Run the following code to load the dataset. # + colab={} colab_type="code" id="wCgjv84yT12i" # Loading the dataset X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset() # + [markdown] colab_type="text" id="JYimgnMbT12k" # Change the index below and run the cell to visualize some examples in the dataset. # + colab={} colab_type="code" id="wG0QwVtJT12k" # Example of a picture index = 0 plt.imshow(X_train_orig[index]) print ("y = " + str(np.squeeze(Y_train_orig[:, index]))) # + [markdown] colab_type="text" id="2WP4-S2CT12m" # As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so. # + colab={} colab_type="code" id="tn3gF5xLT12m" # Flatten the training and test images X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T # Normalize image vectors X_train = X_train_flatten/255. X_test = X_test_flatten/255. # Convert training and test labels to one hot matrices Y_train = convert_to_one_hot(Y_train_orig, 6) Y_test = convert_to_one_hot(Y_test_orig, 6) print ("number of training examples = " + str(X_train.shape[1])) print ("number of test examples = " + str(X_test.shape[1])) print ("X_train shape: " + str(X_train.shape)) print ("Y_train shape: " + str(Y_train.shape)) print ("X_test shape: " + str(X_test.shape)) print ("Y_test shape: " + str(Y_test.shape)) # + [markdown] colab_type="text" id="iN_KPZ0FT12o" # **Note** that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing. # + [markdown] colab_type="text" id="_GQMSJTtT12p" # **Your goal** is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one. # # **The model** is *LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX*. 
The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes. # + [markdown] colab_type="text" id="JSNd_DRWT12p" # ### 2.1 - Create placeholders # # Your first task is to create placeholders for `X` and `Y`. This will allow you to later pass your training data in when you run your session. # # **Exercise:** Implement the function below to create the placeholders in tensorflow. # + colab={} colab_type="code" id="fcAcBRAAT12q" # GRADED FUNCTION: create_placeholders def create_placeholders(n_x, n_y): """ Creates the placeholders for the tensorflow session. Arguments: n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288) n_y -- scalar, number of classes (from 0 to 5, so -> 6) Returns: X -- placeholder for the data input, of shape [n_x, None] and dtype "tf.float32" Y -- placeholder for the input labels, of shape [n_y, None] and dtype "tf.float32" Tips: - You will use None because it let's us be flexible on the number of examples you will for the placeholders. In fact, the number of examples during test/train is different. """ ### START CODE HERE ### (approx. 2 lines) X = tf.placeholder(tf.float32, [n_x, None], name = "X") Y = tf.placeholder(tf.float32, [n_y, None], name = "Y") ### END CODE HERE ### return X, Y # + colab={} colab_type="code" id="Ve9WOa1LT12r" X, Y = create_placeholders(12288, 6) print ("X = " + str(X)) print ("Y = " + str(Y)) # + [markdown] colab_type="text" id="-G_UV4xpT12t" # **Expected Output**: # # <table> # <tr> # <td> # **X** # </td> # <td> # Tensor("Placeholder_1:0", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1) # </td> # </tr> # <tr> # <td> # **Y** # </td> # <td> # Tensor("Placeholder_2:0", shape=(6, ?), dtype=float32) (not necessarily Placeholder_2) # </td> # </tr> # # </table> # + [markdown] colab_type="text" id="eyYz9y1XT12u" # ### 2.2 - Initializing the parameters # # Your second task is to initialize the parameters in tensorflow. # # **Exercise:** Implement the function below to initialize the parameters in tensorflow. You are going use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use: # # ```python # W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1)) # b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer()) # ``` # Please use `seed = 1` to make sure your results match ours. # + colab={} colab_type="code" id="gPi-SeuWT12u" # GRADED FUNCTION: initialize_parameters def initialize_parameters(): """ Initializes parameters to build a neural network with tensorflow. The shapes are: W1 : [25, 12288] b1 : [25, 1] W2 : [12, 25] b2 : [12, 1] W3 : [6, 12] b3 : [6, 1] Returns: parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3 """ tf.set_random_seed(1) # so that your "random" numbers match ours ### START CODE HERE ### (approx. 
6 lines of code) W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1)) b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer()) W2 = tf.get_variable("W2", [12,25], initializer = tf.contrib.layers.xavier_initializer(seed = 1)) b2 = tf.get_variable("b2", [12,1], initializer = tf.zeros_initializer()) W3 = tf.get_variable("W3", [6,12], initializer = tf.contrib.layers.xavier_initializer(seed = 1)) b3 = tf.get_variable("b3", [6,1], initializer = tf.zeros_initializer()) ### END CODE HERE ### parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3} return parameters # + colab={} colab_type="code" id="CcuKNYinT12x" tf.reset_default_graph() with tf.Session() as sess: parameters = initialize_parameters() print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) # + [markdown] colab_type="text" id="kzAVM5y8T12z" # **Expected Output**: # # <table> # <tr> # <td> # **W1** # </td> # <td> # < tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref > # </td> # </tr> # <tr> # <td> # **b1** # </td> # <td> # < tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref > # </td> # </tr> # <tr> # <td> # **W2** # </td> # <td> # < tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref > # </td> # </tr> # <tr> # <td> # **b2** # </td> # <td> # < tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref > # </td> # </tr> # # </table> # + [markdown] colab_type="text" id="IR5UvbGxT12z" # As expected, the parameters haven't been evaluated yet. # + [markdown] colab_type="text" id="cnuAGFn2T120" # ### 2.3 - Forward propagation in tensorflow # # You will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are: # # - `tf.add(...,...)` to do an addition # - `tf.matmul(...,...)` to do a matrix multiplication # - `tf.nn.relu(...)` to apply the ReLU activation # # **Question:** Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at `z3`. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need `a3`! # # # + colab={} colab_type="code" id="nC7CYNk0T120" # GRADED FUNCTION: forward_propagation def forward_propagation(X, parameters): """ Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX Arguments: X -- input dataset placeholder, of shape (input size, number of examples) parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3" the shapes are given in initialize_parameters Returns: Z3 -- the output of the last LINEAR unit """ # Retrieve the parameters from the dictionary "parameters" W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] ### START CODE HERE ### (approx. 
5 lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1 A1 = tf.nn.relu(Z1) # A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, A1) + b2 A2 = tf.nn.relu(Z2) # A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3, A2) + b3 ### END CODE HERE ### return Z3 # + colab={} colab_type="code" id="hioQQqyxT122" tf.reset_default_graph() with tf.Session() as sess: X, Y = create_placeholders(12288, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) print("Z3 = " + str(Z3)) # + [markdown] colab_type="text" id="PRrS7RzpT124" # **Expected Output**: # # <table> # <tr> # <td> # **Z3** # </td> # <td> # Tensor("Add_2:0", shape=(6, ?), dtype=float32) # </td> # </tr> # # </table> # + [markdown] colab_type="text" id="FDjgAHp6T125" # You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to brackpropagation. # + [markdown] colab_type="text" id="RXqHnAEnT125" # ### 2.4 Compute cost # # As seen before, it is very easy to compute the cost using: # ```python # tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...)) # ``` # **Question**: Implement the cost function below. # - It is important to know that the "`logits`" and "`labels`" inputs of `tf.nn.softmax_cross_entropy_with_logits` are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you. # - Besides, `tf.reduce_mean` basically does the summation over the examples. # + colab={} colab_type="code" id="1_bzQXSJT125" # GRADED FUNCTION: compute_cost def compute_cost(Z3, Y): """ Computes the cost Arguments: Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples) Y -- "true" labels vector placeholder, same shape as Z3 Returns: cost - Tensor of the cost function """ # to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...) logits = tf.transpose(Z3) labels = tf.transpose(Y) ### START CODE HERE ### (1 line of code) cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = labels)) ### END CODE HERE ### return cost # + colab={} colab_type="code" id="4HahBCJVT127" tf.reset_default_graph() with tf.Session() as sess: X, Y = create_placeholders(12288, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) cost = compute_cost(Z3, Y) print("cost = " + str(cost)) # + [markdown] colab_type="text" id="GT7MzPxET12-" # **Expected Output**: # # <table> # <tr> # <td> # **cost** # </td> # <td> # Tensor("Mean:0", shape=(), dtype=float32) # </td> # </tr> # # </table> # + [markdown] colab_type="text" id="9O9sNnHQT12-" # ### 2.5 - Backward propagation & parameter updates # # This is where you become grateful to programming frameworks. All the backpropagation and the parameters update is taken care of in 1 line of code. It is very easy to incorporate this line in the model. # # After you compute the cost function. You will create an "`optimizer`" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate. 
# # For instance, for gradient descent the optimizer would be: # ```python # optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost) # ``` # # To make the optimization you would do: # ```python # _ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) # ``` # # This computes the backpropagation by passing through the tensorflow graph in the reverse order, from cost to inputs. # # **Note** When coding, we often use `_` as a "throwaway" variable to store values that we won't need to use later. Here, `_` takes on the evaluated value of `optimizer`, which we don't need (and `c` takes the value of the `cost` variable). # + [markdown] colab_type="text" id="SKxhuoN2T12_" # ### 2.6 - Building the model # # Now, you will bring it all together! # # **Exercise:** Implement the model. You will be calling the functions you had previously implemented. # + colab={} colab_type="code" id="siFLpYfkT12_" def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001, num_epochs = 1500, minibatch_size = 32, print_cost = True): """ Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape (input size = 12288, number of training examples = 1080) Y_train -- training set labels, of shape (output size = 6, number of training examples = 1080) X_test -- test set, of shape (input size = 12288, number of test examples = 120) Y_test -- test set labels, of shape (output size = 6, number of test examples = 120) learning_rate -- learning rate of the optimization num_epochs -- number of epochs of the optimization loop minibatch_size -- size of a minibatch print_cost -- True to print the cost every 100 epochs Returns: parameters -- parameters learnt by the model. They can then be used to predict. """ ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables tf.set_random_seed(1) # to keep consistent results seed = 3 # to keep consistent results (n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set) n_y = Y_train.shape[0] # n_y : output size costs = [] # To keep track of the cost # Create Placeholders of shape (n_x, n_y) ### START CODE HERE ### (1 line) X, Y = create_placeholders(n_x, n_y) ### END CODE HERE ### # Initialize parameters ### START CODE HERE ### (1 line) parameters = initialize_parameters() ### END CODE HERE ### # Forward propagation: Build the forward propagation in the tensorflow graph ### START CODE HERE ### (1 line) Z3 = forward_propagation(X, parameters) ### END CODE HERE ### # Cost function: Add cost function to tensorflow graph ### START CODE HERE ### (1 line) cost = compute_cost(Z3, Y) ### END CODE HERE ### # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer. ### START CODE HERE ### (1 line) optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost) ### END CODE HERE ### # Initialize all the variables init = tf.global_variables_initializer() # Start the session to compute the tensorflow graph with tf.Session() as sess: # Run the initialization sess.run(init) # Do the training loop for epoch in range(num_epochs): epoch_cost = 0.
# Defines a cost related to an epoch num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set seed = seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line that runs the graph on a minibatch. # Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch for (X,Y). ### START CODE HERE ### (1 line) _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE HERE ### epoch_cost += minibatch_cost / minibatch_size # Print the cost every epoch if print_cost == True and epoch % 100 == 0: print ("Cost after epoch %i: %f" % (epoch, epoch_cost)) if print_cost == True and epoch % 5 == 0: costs.append(epoch_cost) # plot the cost plt.plot(np.squeeze(costs)) plt.ylabel('cost') plt.xlabel('iterations (per fives)') plt.title("Learning rate =" + str(learning_rate)) plt.show() # lets save the parameters in a variable parameters = sess.run(parameters) print ("Parameters have been trained!") # Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train})) print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test})) return parameters # + [markdown] colab_type="text" id="sQ1doxmHT13B" # Run the following cell to train your model! On our machine it takes about 5 minutes. Your "Cost after epoch 100" should be 1.048222. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes! # + colab={} colab_type="code" id="AISfljZVT13B" parameters = model(X_train, Y_train, X_test, Y_test) # + [markdown] colab_type="text" id="ogOoTX2CT13E" # **Expected Output**: # # <table> # <tr> # <td> # **Train Accuracy** # </td> # <td> # 0.999074 # </td> # </tr> # <tr> # <td> # **Test Accuracy** # </td> # <td> # 0.716667 # </td> # </tr> # # </table> # # Amazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy. # # **Insights**: # - Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting. # - Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters. # + [markdown] colab_type="text" id="cka8pF8BT13E" # ### 2.7 - Test with your own image (optional / ungraded exercise) # # Congratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that: # 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. # 2. Add your image to this Jupyter Notebook's directory, in the "images" folder # 3. Write your image's name in the following code # 4. Run the code and check if the algorithm is right! 
# + colab={} colab_type="code" id="EJ8Aft1CT13F" import scipy from PIL import Image from scipy import ndimage ## START CODE HERE ## (PUT YOUR IMAGE NAME) my_image = "own_image.jpg" ## END CODE HERE ## # We preprocess your image to fit your algorithm. fname = "images/" + my_image image = np.array(ndimage.imread(fname, flatten=False)) image = image/255. my_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T my_image_prediction = predict(my_image, parameters) plt.imshow(image) print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction))) # + [markdown] colab_type="text" id="6Q5jJuAqT13G" # You indeed deserved a "thumbs-up" although as you can see the algorithm seems to classify it incorrectly. The reason is that the training set doesn't contain any "thumbs-up", so the model doesn't know how to deal with it! We call that a "mismatched data distribution" and it is one of the various subjects of the next course on "Structuring Machine Learning Projects". # + [markdown] colab_type="text" id="DMY1FYvOT13H" # <font color='blue'> # **What you should remember**: # - Tensorflow is a programming framework used in deep learning # - The two main object classes in tensorflow are Tensors and Operators. # - When you code in tensorflow you have to take the following steps: # - Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...) # - Create a session # - Initialize the session # - Run the session to execute the graph # - You can execute the graph multiple times as you've seen in model() # - The backpropagation and optimization are automatically done when running the session on the "optimizer" object.
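# + [markdown]
# To make those steps concrete, here is a minimal, self-contained sketch (a toy graph with made-up values, separate from the graded functions above) that walks through create graph → create session → initialize → run using the same TensorFlow 1.x API as this notebook:

# +
import tensorflow as tf

# 1. Create the graph: a placeholder, a variable and a couple of operations
x = tf.placeholder(tf.float32, name="x")    # value fed in at run time
w = tf.Variable(3.0, name="w")              # a toy parameter
y = tf.add(tf.multiply(w, x), 1.0)          # y = w*x + 1

init = tf.global_variables_initializer()

# 2-4. Create a session, initialize the variables, then run the graph
# (the graph can be run as many times as needed, e.g. once per minibatch)
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(y, feed_dict={x: 2.0}))  # prints 7.0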
Improving Deep Neural Networks: Hyperparameter Tuning, Regularization and Optimization/Week 7/TensorFlow_Tutorial_v3b.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # <img src="https://upload.wikimedia.org/wikipedia/commons/4/47/Logo_UTFSM.png" width="200" alt="utfsm-logo" align="left"/> # # # MAT281 # ### Aplicaciones de la Matemática en la Ingeniería # + [markdown] slideshow={"slide_type": "slide"} # ## Module 02 # ## Lab Class 02: Data Manipulation # - # ### Instructions # # # * Fill in your personal information (name and USM roll number) in the next cell. # * The grading scale goes from 0 to 4, integer values only. # * You must _push_ your changes to your personal course repository. # * As a backup, you must send a .zip file named with the format `mXX_cYY_lab_apellido_nombre.zip` to <EMAIL>. # * The following will be evaluated: # - Solutions # - Code # - That Binder is properly configured. # - That pressing `Kernel -> Restart Kernel and Run All Cells` runs every cell without errors. # * __The submission is due at the end of this class.__ # __Name__: <NAME> # # __Rol__: 201510008-K # ## Exercise #1 (1 pt) # ### Load the data and filter out players without a defined position # # The same NBA players dataset will be used for the evaluation; it is located in the `data` folder and the file name is `player_data.csv` import os import pandas as pd player_data = pd.read_csv(os.path.join('data', 'player_data.csv'), index_col='name') player_data.head() player_data = player_data.loc[lambda x: x["position"].notnull()] player_data # ## Exercise #2 (1 pt) # ### Which _college_ has the largest number of records? player_data['college'].value_counts() # __Answer:__ University of Kentucky # ## Exercise #3 (2 pts) # ### For each position, what is the longest time a player has spent in it? # # One _approach_ to answer the question above has the following steps: # # 1. Determine each player's time at their position. # 2. Determine all the positions. # 3. Iterate over each position and find the largest value. # 1. Determine each player's time at their position. player_data['duration'] = player_data['year_end'] - player_data['year_start'] player_data.head() # 2. Determine all the positions. positions = list(set(player_data['position'])) # .unique() also works; since it is a method call, parentheses are used positions # 3. Iterate over each position and find the largest value. nba_position_duration = pd.Series() # Create an empty series for position in positions: df_aux = player_data.loc[lambda x: x['position']==position] # Filter by position max_duration = df_aux['duration'].max() # Get the maximum nba_position_duration.loc[position] = max_duration # Populate the series nba_position_duration
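# + [markdown]
# For reference, the loop above can also be written as a single `groupby` expression (an optional alternative, reusing the same `position` and `duration` columns created earlier):

# +
# Group the players by position and take the maximum duration within each group
nba_position_duration_alt = player_data.groupby('position')['duration'].max()
nba_position_duration_alt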
m02_data_analysis/m02_c02_data_manipulation/m02_c02_lab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:py3] # language: python # name: conda-env-py3-py # --- # # Introduction to Programming in Python: Logic & Loops # # <font color=red>MAKE SURE TO EXECUTE THIS CELL BEFORE YOU START YOUR WORK! import numpy # ## <font color=blue>EXERCISE 1</font><br> # # <font color=blue>As usual, let's see what kind of errors we get when we make a mistake in the syntax. Try out each of the examples below without changing anything so you see the error message, then fix the error.</font> # <font color = blue>**i. Original**:</font> for i in range(2,5,1) print( i ) # <font color=blue>**i. Fixed**:</font> # <font color = blue>**ii. Original**:</font> for i in range(0,7,1): print( i ) # <font color=blue>**ii. Fixed**:</font> # <font color = blue>**iii. Original**:</font> for i in (0,10,1): print( i ) # <font color=blue>**iii. Fixed**:</font> # ## <font color=blue>EXERCISE 2a</font><br> # # <font color=blue>i. Write a `for` loop to print every number from 1 to 11 and its square (`i`$^2$).</font> # <font color=blue>ii. Write a `for` loop to print every even number from 2 to 20 (`i`) and calculate the product `i*(i-1)`.<br> # For example, when `i=2` your program should calculate `i*(i-1)=2*(2-1)=2*1=2` and so on for other values of `i`.</font> # ## <font color=blue>EXERCISE 2b</font><br> # # <font color=blue>Recall that we can convert from temperature $F$ in degrees Fahrenheit to temperature $C$ in degrees Celsius using: # $$ C = \left({5\over9}\right)\times(F - 32) $$ # # Write a program that:<br> # 1. Makes an `array` of Fahrenheit values called `F`, with `F` = 37, 44, 58, 62, 73<br> # 2. Uses this `F` array and the equation above to calculate an array of Celsius values called `C`<br> # 3. Uses a **```for```** loop to print out a table with values of F_values in the first column and `C` in the second column.<br><br> # # **Hint 1**: If you don't remember how to make an `array`, take a look back at the Week 3 practical.<br> # **Hint 2**: Your loop variable should not have the same name as any of your other variables (otherwise, you will overwrite them!). You want to calculate ```F``` and ```C``` **before** the loop, then use the loop to print the values one at a time. # **Hint 3**: Each line should only show **one** value of `F` and **one** value of `C`.</font> # ## <font color=blue>EXERCISE 3</font><br> # # <font color=blue>In the practical workbook, we wrote a program to use a `for` loop to calculate then plot the temperature of the atmosphere as a function of altitude for the lowest part of the atmosphere, from 0-11 km:</font> # + active="" # import numpy # import matplotlib.pyplot as pyplot # %matplotlib inline # # # altitude array in km, steps of 0.1km = 100m # # For the lowest part of the atmosphere, we will start at 0 and end at 11. # altitude=numpy.arange(0,11,.1) # # # set up an array for temperature # Temperature=numpy.zeros(len(altitude)) # # for i in range(0,len(Temperature),1): # Temperature[i] = 15.0 - 6.5*altitude[i] # # # Set up the plot - we put altitude on the y-axis to represent it going "up" # pyplot.plot(Temperature,altitude) # pyplot.xlabel('Temperature (C)') # pyplot.ylabel('Altitude (km)') # pyplot.title('Change in temperature with altitude in the atmosphere') # pyplot.show() # - # <font color=blue>Adapt the code above to make TWO plots. 
The first one should show the temperature for altitudes between 11 and 25 km, and the second should show the temperature for altitudes between 25 and 32 km. The temperatures can be described by the following equations:<br><br> # # **`Temperature = 15.0 - 6.5 * altitude`** (when altitude is less than 11 km)<br> # **`Temperature = -56.5`** (when altitude is between 11 and 20 km)<br> # **`Temperature = -76.5 + 1.0 * altitude`** (when altitude is between 20 and 32 km)<br> # # **Make sure the TITLE of each plot tells us which plot is for which altitude range!**<br><br> # # **Hint**: You will either need to give each altitude and Temperature array a different name (e.g. `altitude1` and `altitude2`, or you will need to calculate and plot the first one, then calculate a plot the second one.<br> # **Note**: A `for` loop isn't strictly necessary here - we could do this with array calculations. But please use one so that you get the practice~ # </font> # ## <font color=blue>EXERCISE 4: BRINGING IT ALL TOGETHER</font> # # <font color=blue>This final exercise is designed to give you a chance to practice everything you have learned so far today (and in the last few weeks). Make sure to spend time thinking about what your answers **mean**. # # This week, we will continue developing an understanding of Daisyworld (look back to the Week 3 practical and/or the textbook if you don't remember!). Last week, we made a plot that showed us how the temperature of the Daisyworld planet changed as the number of daisies increased.<br><br> # # This time we are going to determine how the fraction of the planet covered by daisies changes as temperature increases. We will start by thinking about how fast the flowers grow at each temperature (their "growth rate"). Based on what we know about real plants, we can expect that the daisies on Daisyworld can only grow within a certain temperature range.<br><br> # # On Daisyworld, flowers only grow when temperatures are between 281K (8°C) and 310K (37°C). In other words, assuming temperature is `T` (in units of Kelvin):<br> # - when `T < 282 K` flowers do not grow<br> # - when `282 K <= T < 310 K` flowers grow<br> # - when `T >= 310 K` flowers do not grow<br><br> # # For now, we will treat each of these cases separately. # # ### EXERCISE 4a<br> # First we will define arrays that represent the possible temperature of the planet, in Kelvin, for each of these ranges. We will assume the planet can get as cold as 265 K (-8°C) and as hot as 315 K (42°C). # # **For this exercise:** # 1. Define three arrays, called `T1`, `T2`, and `T3` that represent the temperature of the planet (in K) in each regime. For `T1`, use values from 265-281K, for `T2` use values from 282-309, and for `T3` use values from 310-315. For all three, use steps of 1K.<br> # 2. For each temperature array, define an array full of zeros that will eventually represent the growth rate of flowers. So you should have **three** growth rate arrays (given them each unique and useful name). Each array should have the **same length** as the equivalent temperature array from step 1. (**Hint**: what does the function `len` do?)<br> # 3. Add three **`for` loops**, one for each temperature array. Each `for` loop should print the values in the temperature array, with one value on each line. 
(**Hint:** your counter variable should be `i` and you can use `len` to figure out how many values are in your temperature array)<br> # # ***Do not delete the `%reset` line in the cell below*** (this makes sure your program isn't just "remembering" something you did before). # + # %reset -s -f import numpy import matplotlib.pyplot as pyplot # %matplotlib inline # - # ### <font color=blue>EXERCISE 4b</font><br> # # <font color=blue>Let's make this quantitative by adding some numbers to the growth rate. Based on the situation on earth, we expect that there is a temperature somewhere at which flowers grow best (their optimum). We can represent this situation as a parabola. # # The growth rate of the population of flowers (in units of *fraction of the planet covered by flowers / year*) as a function of the temperature T (in units of Kelvin) is defined as:<br> # 0 when `T < 282 K` <br> # $1-0.005 \times (295.5-T)^2$ when `282K <= T < 310K` <br> # 0 when `T >= 310 K` <br><br> # # **For this exercise:** # 1. Copy the code from Exercise 4a # 2. Modify the code in each `for` loop so that after printing the temperature, it prints the growth rate at that temperature (just the number, no extra text). # # In other words, you should see output that looks like: # ``` # 265 # 0 # 266 # 0 # ... # 282 # 0.08875 # 283 # 0.21875 # ... # 314 # 0 # 315 # 0``` # # # **Hint 1:** To print these on separate lines, you will need **two** action lines in each for loop (for now, both actions are to print something). For example:<br> # ``` # for i in range... # print( temperature value ) # print( growth rate value ) # ``` # **Hint 2:** If you just want to print a single number, you can put it directly in your print brackets. For example, `print( 25 )` will just print the value 25. So how might you print the value 0 when it is too cold or too hot for flowers to grow?<br> # **Hint 3:** Make sure you are only trying to use 1 value of your temperature array at a time in your calculation. How can we use `[i]` for this?<br> # **Hint 4:** None of the values should be negative - if they are, something has gone wrong! # ### <font color=blue>EXERCISE 4c</font><br> # # <font color=blue>Now we have printed the growth rate values, but we really want to plot them. To do that, we first need to store them in the growth rate arrays (these are the arrays we created in 5a that are currently full of zeros). # # **For this exercise:** # 1. Copy the code from Exercise 4b. # 2. Modify the code in the `for` loop so that instead of **printing** the growth rate, it puts the value of the growth rate into the growth rate array. # 3. **After** each `for` loop is finished, print the whole growth rate array to check that nothing has gone wrong. The values should be the same numbers you found in Exercise 5b. # # **Hint:** Remember that if we want to put a value into an array we can use its index. For example, `MyArray[0]=5` or `MyArray[1]=6`. How do we use `[i]` to do something similar in a `for` loop? Look back at Exercise 3 and the example in the workbook if you get stuck! # ### <font color=blue>EXERCISE 4d</font><br> # # <font color=blue>Make three plots, one for each temperature range. For each one, plot the growth rate (y-axis) as a function of temperature in Kelvin (x-axis).</font> # ### <font color=blue>EXERCISE 4e</font><br> # # <font color=blue>Think about the behaviour you see in the plot above and answer the following questions as comments in the next box: # 1. How does daisy growth respond to changes in temperature? # 2. 
If the temperature suddenly jumped from 10°C (283K) to 20°C (293K), would the flowers start to grow more rapidly or more slowly? Why? # 3. If the temperature increased again to 35°C (308K), would the flowers start to grow more rapidly or more slowly? Why? # ### <font color=purple>OPTIONAL EXERCISE 4f</font><br> # # <font color=purple>Feeling frustrated that we had to copy and paste a lot of our code 3 times? Don't worry, next week we are going to learn some new skills that will let us calculate the growth rates for different temperature ranges using just ONE for loop.<br><br> # # In the meantime, we can actually combine our three different temperature arrays and three different growth rate arrays to make ONE array for each using a special `numpy` function called `concatenate`. `concatenate` just lumps arrays together. For example, if we had two arrays `x` and `y`:<br> # ``` # x = numpy.array([27,36,19]) # y = numpy.array([77,88,99]) # ``` # we can combine them into a new array `x_and_y` using `numpy.concatenate`:</font> # + x = numpy.array([27,36,19]) y = numpy.array([77,88,99]) x_and_y = numpy.concatenate((x,y)) print(x_and_y) # - # <font color=purple>You can concatenate as many arrays as you like, as long as you include them all in the brackets (for example, `numpy.concatenate((x,y,z,zz))`).<br><br> # # To try this out now, **concatenate** (combine) the three temperature arrays into **one** temperature array. Do the same thing with the three growth rate arrays. Finally, make one plot with temperature on the x-axis and growth rate on the y-axis. It should show the full temperature range, from 265-315K.</font>
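# ### <font color=purple>OPTIONAL: one possible sketch for Exercise 4f</font><br>
#
# <font color=purple>If you want to check your own attempt, the cell below shows one possible way to put Exercise 4f together. It rebuilds the arrays from Exercises 4a-4b with example names (yours may differ), concatenates them, and plots the result:</font>

# +
import numpy
import matplotlib.pyplot as pyplot

# Temperature ranges from Exercise 4a (steps of 1 K)
T1 = numpy.arange(265, 282, 1)            # too cold for daisies
T2 = numpy.arange(282, 310, 1)            # daisies can grow
T3 = numpy.arange(310, 316, 1)            # too hot for daisies

# Growth rates from Exercise 4b
growth1 = numpy.zeros(len(T1))
growth2 = 1 - 0.005*(295.5 - T2)**2
growth3 = numpy.zeros(len(T3))

# Combine the pieces and plot growth rate against temperature
T_all = numpy.concatenate((T1, T2, T3))
growth_all = numpy.concatenate((growth1, growth2, growth3))

pyplot.plot(T_all, growth_all)
pyplot.xlabel('Temperature (K)')
pyplot.ylabel('Growth rate (fraction of planet / year)')
pyplot.title('Daisyworld growth rate, 265-315 K')
pyplot.show()
# -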
notebooks/Week4_Exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Watch Me Code 1: Modules and Help # # Let's import the math module from the Python standard library import math # what can we do with math? use the dir() function to investigate dir(math) # let's suppose I want to use factorial... how do I get help? help(math.factorial) # okay let's try it: math.factorial(5) # how about another one? help(math.gcd) # trying it out result = math.gcd(24,32) print (result)
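# one more optional example: the same dir()/help() pattern works for any standard-library module
import statistics

# what can we do with statistics?
dir(statistics)

# get help on one of its functions
help(statistics.mean)

# try it out
result = statistics.mean([2, 4, 6, 8])
print(result)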
content/lessons/06/Watch-Me-Code/WMC1-Modules-And-Help.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # 3.16 Hands-On Kaggle Competition: House Price Prediction # + # If pandas is not installed, uncomment the following line # # !pip install pandas # %matplotlib inline import torch import torch.nn as nn import numpy as np import pandas as pd import sys sys.path.append("..") import d2lzh_pytorch as d2l print(torch.__version__) torch.set_default_tensor_type(torch.FloatTensor) # - # ## 3.16.2 Getting and Reading the Dataset train_data = pd.read_csv('../../data/kaggle_house/train.csv') test_data = pd.read_csv('../../data/kaggle_house/test.csv') train_data.shape test_data.shape train_data.iloc[0:4, [0, 1, 2, 3, -3, -2, -1]] all_features = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:])) # ## 3.16.3 Preprocessing the Data numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index all_features[numeric_features] = all_features[numeric_features].apply( lambda x: (x - x.mean()) / (x.std())) # After standardization every feature has mean 0, so missing values can simply be replaced with 0 all_features = all_features.fillna(0) # dummy_na=True treats missing values as valid feature values and creates indicator features for them all_features = pd.get_dummies(all_features, dummy_na=True) all_features.shape n_train = train_data.shape[0] train_features = torch.tensor(all_features[:n_train].values, dtype=torch.float) test_features = torch.tensor(all_features[n_train:].values, dtype=torch.float) train_labels = torch.tensor(train_data.SalePrice.values, dtype=torch.float).view(-1, 1) # ## 3.16.4 Training the Model # + loss = torch.nn.MSELoss() def get_net(feature_num): net = nn.Linear(feature_num, 1) for param in net.parameters(): nn.init.normal_(param, mean=0, std=0.01) return net # - def log_rmse(net, features, labels): with torch.no_grad(): # Set values smaller than 1 to 1 to make the logarithm numerically more stable clipped_preds = torch.max(net(features), torch.tensor(1.0)) rmse = torch.sqrt(2 * loss(clipped_preds.log(), labels.log()).mean()) return rmse.item() def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] dataset = torch.utils.data.TensorDataset(train_features, train_labels) train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True) # The Adam optimization algorithm is used here optimizer = torch.optim.Adam(params=net.parameters(), lr=learning_rate, weight_decay=weight_decay) net = net.float() for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X.float()), y.float()) optimizer.zero_grad() l.backward() optimizer.step() train_ls.append(log_rmse(net, train_features, train_labels)) if test_labels is not None: test_ls.append(log_rmse(net, test_features, test_labels)) return train_ls, test_ls # ## 3.16.5 $K$-Fold Cross-Validation def get_k_fold_data(k, i, X, y): # Return the training and validation data needed for the i-th fold of cross-validation assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = torch.cat((X_train, X_part), dim=0) y_train = torch.cat((y_train, y_part), dim=0) return X_train, y_train, X_valid, y_valid def k_fold(k, X_train, y_train, num_epochs, learning_rate, weight_decay, batch_size): train_l_sum, valid_l_sum = 0, 0 for i in range(k): data = get_k_fold_data(k, i, X_train, y_train) net = get_net(X_train.shape[1]) train_ls, valid_ls = train(net, *data, num_epochs, learning_rate, weight_decay, batch_size) train_l_sum += train_ls[-1] valid_l_sum += valid_ls[-1] if i == 0:
d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'rmse', range(1, num_epochs + 1), valid_ls, ['train', 'valid']) print('fold %d, train rmse %f, valid rmse %f' % (i, train_ls[-1], valid_ls[-1])) return train_l_sum / k, valid_l_sum / k # ## 3.16.6 Model Selection k, num_epochs, lr, weight_decay, batch_size = 5, 100, 5, 0, 64 train_l, valid_l = k_fold(k, train_features, train_labels, num_epochs, lr, weight_decay, batch_size) print('%d-fold validation: avg train rmse %f, avg valid rmse %f' % (k, train_l, valid_l)) # ## 3.16.7 Making Predictions and Submitting Results on Kaggle def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size): net = get_net(train_features.shape[1]) train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size) d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'rmse') print('train rmse %f' % train_ls[-1]) preds = net(test_features).detach().numpy() test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0]) submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1) submission.to_csv('./submission.csv', index=False) train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size)
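# A small optional sanity check (toy numbers, unrelated to the house-price data) showing how `get_k_fold_data` slices the folds: 10 examples split into k=5 folds of 2 examples each.

# +
X_toy = torch.arange(20, dtype=torch.float).view(10, 2)  # 10 examples, 2 features
y_toy = torch.arange(10, dtype=torch.float).view(10, 1)
X_tr, y_tr, X_va, y_va = get_k_fold_data(5, 1, X_toy, y_toy)
print(X_va)        # with k=5 and i=1, rows 2 and 3 form the validation fold
print(X_tr.shape)  # the remaining 8 rows are concatenated as training data
# -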
code/chapter03_DL-basics/3.16_kaggle-house-price.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Vn7AS0fPm1wF" colab_type="text" # # Extracting Data # # Extracting data means reading tabular data from some source. We can use various customizations for this process such as providing a file format, table schema, limiting fields or rows amount, and much more. Let's see this with real files: # + id="AnOy1djZjvX_" colab_type="code" colab={} # ! pip install frictionless # + id="j4Jc6MOPnOc2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 118} executionInfo={"status": "ok", "timestamp": 1597308054475, "user_tz": -180, "elapsed": 20916, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="1cefa96c-3812-4b91-ba7c-16fb17f20642" # ! wget -q -O country-3.csv https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/country-3.csv # ! cat country-3.csv # + id="76QRo9tKeC-y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 118} executionInfo={"status": "ok", "timestamp": 1597308061316, "user_tz": -180, "elapsed": 27733, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="f87b92c7-e214-4844-bcd3-91e7a40aa0e0" # ! wget -q -O capital-3.csv https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/capital-3.csv # ! cat capital-3.csv # + [markdown] id="U0CapHz4nukB" colab_type="text" # For a starter, we will use the command-line interface: # + id="M7QGuvkMkanZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} executionInfo={"status": "ok", "timestamp": 1597308065943, "user_tz": -180, "elapsed": 32338, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="49dfe0b2-44a1-4b1c-b79e-07a67608641f" # ! frictionless extract country-3.csv # + [markdown] id="vJIyhVcVnyO8" colab_type="text" # The same can be done in Python: # + id="nrmLl5i4jhbT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} executionInfo={"status": "ok", "timestamp": 1597308066750, "user_tz": -180, "elapsed": 33122, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="39aba501-0519-44d2-d437-aa3353ddaac0" from pprint import pprint from frictionless import extract rows = extract('country-3.csv') pprint(rows) # + [markdown] id="DY92Xq3dn2A-" colab_type="text" # ## Extract Functions # # The high-level interface for extracting data provided by Frictionless is a set of `extract` functions: # - `extract`: it will detect the source type and extract data accordingly # - `extract_package`: it accepts a package descriptor and returns a map of the package's tables # - `extract_resource`: it accepts a resource descriptor and returns a table data # - `extract_table`: it accepts various tabular options and returns a table data # # In command-line, there is only 1 command but there is a flag to adjust the behavior: # # ```bash # $ frictionless extract # $ frictionless extract --source-type package # $ frictionless extract --source-type resource # $ frictionless extract --source-type table # ``` # # + [markdown] id="o9sYqrlSyeSc" colab_type="text" # The `extract` functions always read data in a form of rows (see the object description below) into memory. 
The lower-level interfaces will allow you to stream data and various output forms. # + [markdown] id="pVwTt5gPsmpH" colab_type="text" # # ### Extracting a Package # # Let's start by using the command line-interface. We're going to provide two files to the `extract` command which will be enough to detect that it's a dataset: # + id="DGnHZezdoZhX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 336} executionInfo={"status": "ok", "timestamp": 1597308069405, "user_tz": -180, "elapsed": 35760, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="f00827a4-d5d0-464b-aa93-a7e10fed5611" # ! frictionless extract *-3.csv # + [markdown] id="inaXNjNHoz5l" colab_type="text" # In Python we can do the same by providing a glob for the `extract` function, but instead we will use `extract_package` by providing a package descriptor: # + id="u7TDuJCLpEXL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 218} executionInfo={"status": "ok", "timestamp": 1597308069406, "user_tz": -180, "elapsed": 35743, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="36f315ac-a930-4e17-e591-acfee5b5664b" from frictionless import extract_package data = extract_package({'resources':[{'path': 'country-3.csv'}, {'path': 'capital-3.csv'}]}) for path, rows in data.items(): pprint(path) pprint(rows) # + [markdown] id="5uMguA8LqLP6" colab_type="text" # ### Extracting Resource # # A resource contains only one file and for extracting a resource we can use the same approach we used above but providing only one file. We will extract data using a metadata descriptor: # + id="RXW_rGCJrFbQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} executionInfo={"status": "ok", "timestamp": 1597308069407, "user_tz": -180, "elapsed": 35724, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="d691dc8e-e179-4324-958c-a95fddacb1e6" from frictionless import extract_resource rows = extract_resource({'path': 'capital-3.csv'}) pprint(rows) # + [markdown] id="bVcT5zobsMkK" colab_type="text" # Usually, the code above doesn't really make sense as we can just provide a path to the high-level `extract` function instead of a descriptor to the `extract_resource` function but the power of the descriptor is that it can contain different metadata and be stored on the disc. Let's extend our example: # + id="Sq2Roojdst1t" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597308069409, "user_tz": -180, "elapsed": 35706, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} from frictionless import Resource resource = Resource(path='capital-3.csv') resource.schema.missing_values.append('3') resource.to_yaml('capital.resource.yaml') # + id="7qnY2Fw3tBQv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} executionInfo={"status": "ok", "timestamp": 1597308071759, "user_tz": -180, "elapsed": 38042, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="73b704f5-71c5-4256-a1c7-609ad8270fab" # ! cat capital.resource.yaml # + id="mIzHdATFtH2O" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} executionInfo={"status": "ok", "timestamp": 1597308079284, "user_tz": -180, "elapsed": 45541, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="9721b899-bfe3-484d-90da-d09702285f13" # ! 
frictionless extract capital.resource.yaml # + [markdown] id="kxXASn4etmGn" colab_type="text" # So what's happened? We set textual representation of the number "3" to be a missing value. It was done only for the presentational purpose because it's definitely not a missing value. On the other hand, it demonstrated how metadata can be used. # + [markdown] id="cNAOxsMCucc9" colab_type="text" # ### Extracting a Table # # While the package and resource concepts contain both data and metadata, a table is solely data. Because of this fact we can provide many more options to the `extract_table` function. Most of these options are encapsulated into the resource descriptor as we saw with the `missingValues` example above. We will reproduce it: # + id="4kKW_TDXv-ib" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} executionInfo={"status": "ok", "timestamp": 1597308079285, "user_tz": -180, "elapsed": 45522, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="63c5c8bc-3bd2-4b8c-c886-20d3a5c26700" from frictionless import extract_table rows = extract_table('capital-3.csv', patch_schema={'missingValues': ['', '3']}) pprint(rows) # + [markdown] id="b06sYeJvw2Cv" colab_type="text" # We got an identical result but it's important to understand that on the table level we need to provide all the metadata options separately while a resource encapsulate all these metadata. Please check the `extract_table` API Reference as it has a lot of options. We're going to discuss some of them below. # + [markdown] id="Bd_93BSv9WDX" colab_type="text" # ## Extraction Options # # All the `extract` fuctions accept only one common argument: # - `process`: it's a function getting a row object and returning whatever is needed as an ouput of the data extraction e.g. `lambda row: row.to_dict()` # # + [markdown] id="oufvShST-Fpe" colab_type="text" # **Package/Resource** # # These `extract` functions doesn't accept any additional arguments. # + [markdown] id="AeTNiL-i99U6" colab_type="text" # **Table** # # We will take a look at all the `extract_table` options in the sections below. As an overview, it accepts: # - File Details # - File Control # - Table Dialect # - Table Query # - Header Options # - Schema Options # - Integrity Options # - Infer Options (see "Describing Data") # + [markdown] id="UjSzmWLWiWuo" colab_type="text" # ## Using Package # # The Package class is a metadata class which provides an ability to read its contents. First of all, let's create a package descriptor: # + id="xM7g5ZnWv_wx" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597308082087, "user_tz": -180, "elapsed": 48289, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} # ! 
frictionless describe *-3.csv --json > country.package.json # + [markdown] id="rsJlpjoNwJFy" colab_type="text" # Now, we can open the created descriptor and read the package's resources: # + id="DHwZCpmpwU38" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} executionInfo={"status": "ok", "timestamp": 1597308082088, "user_tz": -180, "elapsed": 48269, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="4be4b941-f508-4b66-8dbb-ece59e485a2d" from frictionless import Package package = Package('country.package.json') pprint(package.get_resource('country-3').read_rows()) pprint(package.get_resource('capital-3').read_rows()) # + [markdown] id="eRgHVzdbw6Aq" colab_type="text" # The package by itself doesn't provide any read functions directly as it's a role of its resources. So everything written below for the Resource class can be used within a package. # # + [markdown] id="nAbbbJh1cNYK" colab_type="text" # ## Using Resource # # The Resource class is also a metadata class which provides various read and stream functions. Let's create a resource descriptor: # + id="XXZzHGOWxcG9" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597308084362, "user_tz": -180, "elapsed": 50514, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} # ! frictionless describe country-3.csv --json > country.resource.json # + [markdown] id="CMgF0kibzbZ7" colab_type="text" # **Exploring Data** # # There are various functions to help explore your resource, such as checking a header or other attributes like stats: # + id="251ifJMAzf7G" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1597308084363, "user_tz": -180, "elapsed": 50502, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="da15420d-4e26-42a7-9ef8-b25183a7946d" from frictionless import Resource resource = Resource('country.resource.json') pprint(resource.read_header()) pprint(resource.read_sample()) pprint(resource.read_stats()) # + [markdown] id="kkyyIHQcx61q" colab_type="text" # **Reading Data** # # The `extract` functions always read rows into memory; Resource can do the same but it also gives a choice regarding ouput data. It can be `rows`, `data`, `text`, or `bytes`. Let's try reading all of them: # + id="YphJCTaGyaEp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 319} executionInfo={"status": "ok", "timestamp": 1597308084365, "user_tz": -180, "elapsed": 50480, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="caefd0d9-feb3-4dba-8e74-27272f699752" from frictionless import Resource resource = Resource('country.resource.json') pprint(resource.read_bytes()) pprint(resource.read_text()) pprint(resource.read_data()) pprint(resource.read_rows()) # + [markdown] id="nPjY4wjMz3vL" colab_type="text" # **Streaming Data** # # It's really handy to read all your data into memory but it's not always possible as a file can be really big. 
For such cases, Frictionless provides streaming functions: # + id="QKiqdD-P0J4G" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} executionInfo={"status": "ok", "timestamp": 1597308084366, "user_tz": -180, "elapsed": 50467, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="e641e6b9-3cbe-437d-e3b4-563cc5928713" from frictionless import Resource resource = Resource('country.resource.json') pprint(resource.read_byte_stream()) pprint(resource.read_text_stream()) pprint(resource.read_data_stream()) pprint(resource.read_row_stream()) for row in resource.read_row_stream(): print(row) # + [markdown] id="NdVxS1fziLXV" colab_type="text" # ## Using Table # # The Table class is at the heart of all the tabular capabilities of Frictionless. It's used by all the higher-level classes and provides a comprehensive user interface by itself. The main difference with, for example, Resource class is that Table has a state of a lower-level file descriptor and needs to be opened and closed. Usually we use a context manager (the `with` keyword) to work with Table. In-general, Table is a streaming interface that needs to be re-opened if data is already read. # # + [markdown] id="p_b9-ZNw3UEK" colab_type="text" # **Exploring Data** # # First of all, let's take a look at the file details information: # # + id="2ylXJwup3ZdF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1597308084367, "user_tz": -180, "elapsed": 50459, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="68f0c4e3-3268-47b1-a409-14e4991ca8dd" from frictionless import Table with Table('capital-3.csv') as table: print(f'Source: "{table.source}"') print(f'Scheme: "{table.scheme}"') print(f'Format: "{table.format}"') print(f'Hashing: "{table.hashing}"') print(f'Encoding: "{table.encoding}"') print(f'Compression: "{table.compression}"') print(f'Compression Path: "{table.compression_path}"') # + [markdown] id="QVxThwtI5DwG" colab_type="text" # There is much more information available; we will explain some of it later in the sections below: # + id="qDD4axww5PtV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1597308084367, "user_tz": -180, "elapsed": 50449, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="ff8bc2f0-9143-4e6e-e906-19dcf4b8c8e8" from frictionless import Table with Table('capital-3.csv') as table: print(f'Control: "{table.control}"') print(f'Dialect: "{table.dialect}"') print(f'Query: "{table.query}"') print(f'Header: "{table.header}"') print(f'Schema: "{table.schema}"') print(f'Sample: "{table.sample}"') print(f'Stats: "{table.stats}"') # + [markdown] id="ETBdJInd53W4" colab_type="text" # Many of the properties above not only can be read from the existent Table but also can be provided as an option to alter the Table behaviour, for example: # + id="VF8aaSeH6kP9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} executionInfo={"status": "ok", "timestamp": 1597308084368, "user_tz": -180, "elapsed": 50439, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="ec6a1fe2-c6d1-49f4-9884-baaa47e54a52" from frictionless import Table with Table('capital-3.csv', scheme='file', format='csv') as table: print(table.source) print(table.scheme) print(table.format) # + [markdown] 
id="xpuKx6F97H03" colab_type="text" # **Reading Data** # # There are 2 different types of ouput that Table can produce: # + id="KBQgYRGW7Tha" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} executionInfo={"status": "ok", "timestamp": 1597308084369, "user_tz": -180, "elapsed": 50431, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="fe491d77-6b46-455c-f5db-a9711ee89b14" from frictionless import Table with Table('capital-3.csv') as table: pprint(table.read_data()) with Table('capital-3.csv') as table: pprint(table.read_rows()) # + [markdown] id="jYaC5IUJ8Cal" colab_type="text" # The `data` format is just a raw array of arrays similiar to JSON while the `row` format is a rich object with all the cells normalized and converted to proper types. We will explore the Row class later. # + [markdown] id="RvvxArty8kC5" colab_type="text" # **Streaming Data** # # It was mentioned for Resource and it's the same for Table, we can stream our tabular data. The core difference is that Table is stateful so we use properties instead of the read functions: # + id="ymcBabLL81EW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 218} executionInfo={"status": "ok", "timestamp": 1597308084370, "user_tz": -180, "elapsed": 50422, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="60918495-bdad-4c75-920f-894fe7f4216f" from frictionless import Table with Table('capital-3.csv') as table: pprint(table.data_stream) for cells in table.data_stream: print(cells) with Table('capital-3.csv') as table: pprint(table.row_stream) for row in table.row_stream: print(row) # + [markdown] id="L4zjx-CT9ozP" colab_type="text" # **Table's Lifecycle** # # You might have noticed that we had to duplicate the `with Table(...)` statement in some examples. The reason is that Table is a streaming interface. Once it's read you need to open it again. Let's show it in an example: # + id="wKnVIGTy-FRr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} executionInfo={"status": "ok", "timestamp": 1597308084709, "user_tz": -180, "elapsed": 50730, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="3e6b556d-ee93-4546-b6e1-8cdf2279641f" from frictionless import Table table = Table('capital-3.csv') table.open() pprint(table.read_rows()) pprint(table.read_rows()) # We need to re-open: there is no data left table.open() pprint(table.read_rows()) # We need to close manually: not context manager is used table.close() # + [markdown] id="mGbXC3JtxuQ5" colab_type="text" # ## File Details # # Let's overview the details we can specify for a file. Usually you don't need to provide those details as Frictionless is capable to infer it on its own. Although, there are situation when you need to specify it manually. The following example will use the `Table` class but the same options can be used for the `extract` and `extract_table` functions. # + [markdown] id="Qj7jJQ5Gy6jY" colab_type="text" # **Scheme** # # The scheme also know as protocol indicates which loader Frictionless should use to read or write data. It can be `file` (default), `text`, `http`, `https`, `s3`, and others. 
# + id="A3ur9w8wcn1v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1597308084711, "user_tz": -180, "elapsed": 50716, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="3ba31870-02af-40a8-9d71-e537769b6d92" from frictionless import Table with Table('header1,header2\nvalue1,value2.csv', scheme='text') as table: print(table.scheme) print(table.read_rows()) # + [markdown] id="lidjg3L1y929" colab_type="text" # **Format** # # The format or as it's also called extension helps Frictionless to choose a proper parser to handle the file. Popular formats are `csv`, `xlsx`, `json` and others # + id="U6AEOfeKhZCT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1597308084714, "user_tz": -180, "elapsed": 50705, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="c2a242a5-813a-49d7-a20b-03390af29360" from frictionless import Table with Table('text://header1,header2\nvalue1,value2.csv', format='csv') as table: print(table.format) print(table.read_rows()) # + [markdown] id="neCsZLAOhe1H" colab_type="text" # **Hashing** # # The hashing option controls which hashing algorithm should be used for generating the `hash` property. It doesn't affect the `extract` function but can be used with the `Table` class: # + id="IakiUl26nMCD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1597308084715, "user_tz": -180, "elapsed": 50689, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="ac2a5603-9748-4074-dd56-a899455e2661" from frictionless import Table with Table('country-3.csv', hashing='sha256') as table: table.read_rows() print(table.hashing) print(table.stats['hash']) # + [markdown] id="X2hyKZlqhjGe" colab_type="text" # **Encoding** # # Frictionless automatically detects encoding of files but sometimes it can be innacurate. It's possible to provide an encoding manually: # + id="xJ6NYWuvn5qB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1597308084716, "user_tz": -180, "elapsed": 50670, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="331bd787-8abd-4158-c3c6-f16526b5ccc6" from frictionless import Table with Table('country-3.csv', encoding='utf-8') as table: print(table.encoding) print(table.source) # + [markdown] id="r5RveymthnqR" colab_type="text" # **Compression** # # It's possible to adjust compression detection by providing the algorithm explicetely. For the example below it's not required as it would be detected anyway: # + id="26sXEcZUtJmY" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597308086604, "user_tz": -180, "elapsed": 52538, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} # ! 
wget -q -O table.csv.zip https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/table.csv.zip # + id="_A9531eytqN2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1597308086606, "user_tz": -180, "elapsed": 52511, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="0ab88465-85ea-4027-ab01-dec3d9ea3da3" from frictionless import Table with Table('table.csv.zip', compression='zip') as table: print(table.compression) print(table.read_rows()) # + [markdown] id="ayew0qWyhr0U" colab_type="text" # **Compression Path** # # By default, Frictionless uses the first file found in a zip archive. It's possible to adjust this behaviour: # + id="N5XGmItEucg0" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597308089555, "user_tz": -180, "elapsed": 55444, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} # ! wget -q -O table-multiple-files.zip https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/table-multiple-files.zip # + id="qb3ni07JuqDw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} executionInfo={"status": "ok", "timestamp": 1597308089557, "user_tz": -180, "elapsed": 55427, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="e7ca1a6b-5cf8-40b3-fc24-04620267f4ac" from frictionless import Table with Table('table-multiple-files.zip', compression_path='table-reverse.csv') as table: print(table.compression) print(table.compression_path) print(table.read_rows()) # + [markdown] id="kXHvm52whIWc" colab_type="text" # Further reading: # - Schemes Reference # - Formats Reference # + [markdown] id="UwW0UWAbh_Do" colab_type="text" # ## File Control # # The Control object allows you to manage the loader used by the Table class. In most cases, you don't need to provide any Control settings but sometimes it can be useful: # + id="L5lWFBDDBgZY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1597308089558, "user_tz": -180, "elapsed": 55408, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="c0c666c8-cbfd-4c31-c8ba-fe251bab2be5" from frictionless import Table, controls source = 'https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/table.csv' control = controls.RemoteControl(http_timeout=1) with Table(source, control=control) as table: print(table.control) print(table.read_rows()) # + [markdown] id="lku_yTyTCsIh" colab_type="text" # Exact parameters depend on schemes and can be found in the "Schemes Reference". For example, the Remote Control provides `http_timeout`, `http_session`, and others but there is only one option available for all controls: # + [markdown] id="NK9MkYKqDIEG" colab_type="text" # **Detect Encoding** # # It's a function that can be provided to adjust the encoding detection. 
This function accept a data sample and return a detected encoding: # + id="6DJgECx3DP1O" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1597308089559, "user_tz": -180, "elapsed": 55384, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="d8c7ba94-b614-427b-c59d-f2a4fdd96abf" from frictionless import Table, controls control = controls.Control(detect_encoding=lambda sample: "utf-8") with Table("capital-3.csv", control=control) as table: print(table.source) print(table.encoding) # + [markdown] id="4eJBrRhICobk" colab_type="text" # Further reading: # - Schemes Reference # + [markdown] id="DQqHOx_qx6oK" colab_type="text" # ## Table Dialect # # The Dialect adjust the way tabular parsers work. The concept is similiar to the Control above. Let's use the CSV Dialect to adjust the delimiter configuration: # + id="d1ILQvxWNs0p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1597308089562, "user_tz": -180, "elapsed": 55359, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="2376c97d-acc7-46c8-c10e-a6d48229957f" from frictionless import Table, dialects source = 'header1;header2\nvalue1;value2' dialect = dialects.CsvDialect(delimiter=';') with Table(source, scheme='text', format='csv', dialect=dialect) as table: print(table.dialect) print(table.read_rows()) # + [markdown] id="JsdMITowOZsg" colab_type="text" # There is a great deal of options available for different dialect that can be found in "Formats Reference". We will list the properties that can be used with every dialect: # + [markdown] id="62cH_u53Oo19" colab_type="text" # **Header** # # It's a boolean flag wich deaults to `True` indicating whether the data has a header row or not. In the following example the header row will be treated as a data row: # + id="Vj5pBdCWCYtV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1597308089563, "user_tz": -180, "elapsed": 55324, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="0ecfc2d4-4e35-4e5c-b19e-dac88b683196" from frictionless import Table, dialects dialect = dialects.Dialect(header=False) with Table('capital-3.csv', dialect=dialect) as table: pprint(table.header) pprint(table.read_rows()) # + [markdown] id="3t9X4LlrOsIk" colab_type="text" # **Header Rows** # # If header is `True` which is default, this parameters indicates where to find the header row or header rows for a multiline header. Let's see on example how the first two data rows can be treated as a part of a header: # + id="Er594FiKDV7J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} executionInfo={"status": "ok", "timestamp": 1597308089564, "user_tz": -180, "elapsed": 55295, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="efc9bb6a-e05a-476c-b770-b1207f5ea307" from frictionless import Table, dialects dialect = dialects.Dialect(header_rows=[1, 2, 3]) with Table('capital-3.csv', dialect=dialect) as table: pprint(table.header) pprint(table.read_rows()) # + [markdown] id="ByXWRP5LOuc8" colab_type="text" # **Header Join** # # If there are multiple header rows which is managed by `header_rows` parameter, we can set a string to be a separator for a header's cell join operation. 
Usually it's very handy for some "fancy" Excel files. For the sake of simplicity, we will show on a CSV file: # + id="vwXNgV7PEGuK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} executionInfo={"status": "ok", "timestamp": 1597308089565, "user_tz": -180, "elapsed": 55271, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="e72018e2-651b-4ec4-bfd2-ee70a8485d98" from frictionless import Table, dialects dialect = dialects.Dialect(header_rows=[1, 2, 3], header_join='/') with Table('capital-3.csv', dialect=dialect) as table: pprint(table.header) pprint(table.read_rows()) # + [markdown] id="2wCyxOc9ORoe" colab_type="text" # Further reading: # - Formats Reference # + [markdown] id="mt6TqSTlyAxb" colab_type="text" # ## Table Query # # Using header management described in the "Table Dialect" section we can have a basic skipping rows ability e.g. if we set `dialect.header_rows=[2]` we will skip the first row but it's very limited. There is a much more powerful interface called Table Queries to indicate where exactly to get tabular data from a file. We will use a simple file looking like a matrix: # + id="FdImYWlTKBw8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} executionInfo={"status": "ok", "timestamp": 1597308096696, "user_tz": -180, "elapsed": 62385, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="7e589009-6e0e-4bf4-9db3-f239f4c52d06" # ! wget -q -O matrix.csv https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/matrix.csv # ! cat matrix.csv # + [markdown] id="RhiyAx6TL6bf" colab_type="text" # **Pick/Skip Fields** # # We can pick and skip arbitrary fields based on a header row. These options accept a list of field numbers, a list of strings or a regex to match. All the queries below do the same thing for this file: # + id="UpXUkg1aMK9a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 138} executionInfo={"status": "ok", "timestamp": 1597308096698, "user_tz": -180, "elapsed": 62370, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="a7822244-afd1-4f5d-f96a-eb19e0d2bd04" from frictionless import extract, Query print(extract('matrix.csv', query=Query(pick_fields=[2, 3]))) print(extract('matrix.csv', query=Query(skip_fields=[1, 4]))) print(extract('matrix.csv', query=Query(pick_fields=['f2', 'f3']))) print(extract('matrix.csv', query=Query(skip_fields=['f1', 'f4']))) print(extract('matrix.csv', query=Query(pick_fields=['<regex>f[23]']))) print(extract('matrix.csv', query=Query(skip_fields=['<regex>f[14]']))) # + [markdown] id="qy4i65WsOV37" colab_type="text" # **Limit/Offset Fields** # # There are two options that provide an ability to limit amount of fields similiar to SQL's directives: # + id="4_uK72xMOqdz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} executionInfo={"status": "ok", "timestamp": 1597308098196, "user_tz": -180, "elapsed": 63836, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="ac168237-1a89-43bf-cd57-e6a8d6a9dc77" from frictionless import extract, Query print(extract('matrix.csv', query=Query(limit_fields=2))) print(extract('matrix.csv', query=Query(offset_fields=2))) # + [markdown] id="88rNrdo6PAfN" colab_type="text" # **Pick/Skip Rows** # # It's alike the field counterparts but it will be compared to the first cell of a row. 
All the queries below do the same thing for this file but take into account that when picking we need to also pick a header row. In addition, there is special value `<blank>` that matches a row if it's competely blank: # + id="ygXQxZ6RPZWC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 154} executionInfo={"status": "ok", "timestamp": 1597308098198, "user_tz": -180, "elapsed": 63819, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="e5f62778-6ef7-4136-f394-2d65ef589f22" from frictionless import extract, Query print(extract('matrix.csv', query=Query(pick_rows=[1, 3, 4]))) print(extract('matrix.csv', query=Query(skip_rows=[2, 5]))) print(extract('matrix.csv', query=Query(pick_rows=['f1', '21', '31']))) print(extract('matrix.csv', query=Query(skip_rows=['11', '41']))) print(extract('matrix.csv', query=Query(pick_rows=['<regex>(f1|[23]1)']))) print(extract('matrix.csv', query=Query(skip_rows=['<regex>[14]1']))) print(extract('matrix.csv', query=Query(pick_rows=['<blank>']))) # + [markdown] id="zKvCthj-Q3lP" colab_type="text" # **Limit/Offset Rows** # # It's a quite popular option used to limit amount of rows to read: # + id="NfDqIlwFRE7t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} executionInfo={"status": "ok", "timestamp": 1597308098199, "user_tz": -180, "elapsed": 63800, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="85958333-4ddf-4b3d-e0db-83a7ead65a53" from frictionless import extract, Query print(extract('matrix.csv', query=Query(limit_rows=2))) print(extract('matrix.csv', query=Query(offset_rows=2))) # + [markdown] id="MHVoO9_oFCtl" colab_type="text" # ## Header Options # # Header management is a responsibility of "Table Dialect" which will be described below but Table accept a special `headers` argument that plays a role of a high-level helper in setting different header options. 
# # + [markdown] id="-FVfxJxCHdAI" colab_type="text" # It accepts a `False` values indicating that there is no header row: # + id="wDZjBYtKHnMP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1597308098201, "user_tz": -180, "elapsed": 63783, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="f83ff768-b9d5-4caf-d812-007e14d7601b" from frictionless import Table with Table('capital-3.csv', headers=False) as table: pprint(table.header) pprint(table.read_rows()) # + [markdown] id="ST0IP9OJHY-r" colab_type="text" # # It accepts an integer indicating the header row number: # + id="8IRI29I4FYcC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} executionInfo={"status": "ok", "timestamp": 1597308098203, "user_tz": -180, "elapsed": 63766, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="4915412e-9146-4949-c56b-c0fd47171683" from frictionless import Table with Table('capital-3.csv', headers=2) as table: pprint(table.header) pprint(table.read_rows()) # + [markdown] id="jxg2xriHGkeM" colab_type="text" # It accepts a list of integers indicating a multiline header row numbers: # + id="g8dmAHLLGYIH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} executionInfo={"status": "ok", "timestamp": 1597308098204, "user_tz": -180, "elapsed": 63744, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="b71db9ff-6f6c-4c35-bc84-b1d6b196de27" from frictionless import Table with Table('capital-3.csv', headers=[1,2,3]) as table: pprint(table.header) pprint(table.read_rows()) # + [markdown] id="PfHVFpOvG6Xt" colab_type="text" # It accepts a pair containing a list of integers indicating a multiline header row numbers and a string indicating a joiner for a concatenate operation: # # + id="qIMQslDuHIl0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} executionInfo={"status": "ok", "timestamp": 1597308098206, "user_tz": -180, "elapsed": 63729, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="2d595780-6f56-4984-fdfd-4d8b959541cc" from frictionless import Table with Table('capital-3.csv', headers=[[1,2,3], '/']) as table: pprint(table.header) pprint(table.read_rows()) # + [markdown] id="myOn0wYQcIHY" colab_type="text" # ## Schema Options # # By default, a schema for a table is inferred under the hood but we can also pass it explicetely. # + [markdown] id="jBKLJchXTtQn" colab_type="text" # **Schema** # # The most common way is providing a schema argument to the Table constructor. For example, let's make the `id` field be a string instead of an integer: # + id="N_ZwfvXtSB48" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1597308098207, "user_tz": -180, "elapsed": 63713, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="9a684f54-c109-4b68-ed5e-b804d7af5c67" from frictionless import Table, Schema, Field schema = Schema(fields=[Field(name='id', type='string'), Field(name='name', type='string')]) with Table('capital-3.csv', schema=schema) as table: pprint(table.schema) pprint(table.read_rows()) # + [markdown] id="cEdN0IlXUKuB" colab_type="text" # **Sync Schema** # # There is a way to sync provided schema based on a header row's field order. 
It's very useful when you have a schema that represents only a subset of the table's fields: # + id="YeZjrrrgUf5Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1597308098208, "user_tz": -180, "elapsed": 63694, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="9bbb4342-259a-420d-f0ac-fb162fb3d30c" from frictionless import Table, Schema, Field # Note the order of the fields schema = Schema(fields=[Field(name='name', type='string'), Field(name='id', type='string')]) with Table('capital-3.csv', schema=schema, sync_schema=True) as table: pprint(table.schema) pprint(table.read_rows()) # + [markdown] id="R6OxF_uWTfWQ" colab_type="text" # **Patch Schema** # # Sometimes we just want to update only a few fields or some schema's properties without providing a brand new schema. For example, the two examples above can be simplified as: # + id="1-3vCZJXVXRd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1597308098210, "user_tz": -180, "elapsed": 63675, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="2d5e82bc-5a19-40a6-b407-e40bb5cd8f24" from frictionless import Table with Table('capital-3.csv', patch_schema={'fields': {'id': {'type': 'string'}}}) as table: pprint(table.schema) pprint(table.read_rows()) # + [markdown] id="PudIdYrEDQ4c" colab_type="text" # ## Integrity Options # # Exctraction function and classes accepts only one integrity option: # # + [markdown] id="piN8F9QyDdZx" colab_type="text" # **Lookup** # # The lookup is a special object providing relational information in cases when it's not impossible to extract. For example, the Package is capable to get a lookup object from its resource while a table object needs it to be provided. Let's see on an example: # + id="9YohH9OgUfL_" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597308098211, "user_tz": -180, "elapsed": 63659, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} from frictionless import Table source = [["name"], [1], [2], [4]] lookup = {"other": {("name",): {(1,), (2,), (3,)}}} fk = {"fields": ["name"], "reference": {"fields": ["name"], "resource": "other"}} with Table(source, lookup=lookup, patch_schema={"foreignKeys": [fk]}) as table: for row in table: if row.row_number == 3: assert row.valid is False assert row.errors[0].code == "foreign-key-error" continue assert row.valid # + [markdown] id="Sfmf2bPVyX4C" colab_type="text" # ## Header Object # # After opening a table or calling `resource.read_header` you get an access to a `header` object. It's a list but providing some additional functionality. 
Let's take a look: # # # + id="Zg_iiTS6Va3_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1597308098212, "user_tz": -180, "elapsed": 63635, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="9b27b552-c2ff-490f-96ae-ed91db335024" from frictionless import Table with Table('capital-3.csv') as table: print(f'Header: {table.header}') print(f'Schema: {table.header.schema}') print(f'Field Positions: {table.header.field_positions}') print(f'Errors: {table.header.errors}') print(f'Valid: {table.header.valid}') print(f'As Dict: {table.header.to_dict()}') # field name: header cell print(f'As List: {table.header.to_list()}') # + [markdown] id="64mOXDzOWXPu" colab_type="text" # The example above covers the case when a header is valid. For a header with tabular errors this information can be much more useful revealing discrepancies, dublicates or missing cells information. Please read "API Reference" for more details. # + [markdown] id="y1maYkNeyDnW" colab_type="text" # ## Row Object # # The `extract`, `resource.read_rows()`, `table.read_rows()`, and many other functions retunrs or yeilds row objects. It's a `OrderedDict` providing additional API shown below: # # + id="a53NB2jDXac6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 222} executionInfo={"status": "ok", "timestamp": 1597308098214, "user_tz": -180, "elapsed": 63622, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12160649330696878788"}} outputId="405832ce-dc4e-4686-c7b5-6fae0345714c" from frictionless import Table with Table('capital-3.csv', patch_schema={'missingValues': ['1']}) as table: for row in table: print(f'Row: {row}') print(f'Schema: {row.schema}') print(f'Field Positions: {row.field_positions}') print(f'Row Position: {row.row_position}') # physical line number starting from 1 print(f'Row Number: {row.row_number}') # counted row number starting from 1 print(f'Blank Cells: {row.blank_cells}') print(f'Error Cells: {row.error_cells}') print(f'Errors: {row.errors}') print(f'Valid: {row.valid}') print(f'As Dict: {row.to_dict(json=False)}') print(f'As List: {row.to_list(json=True)}') # JSON compatible data types break # + [markdown] id="0Nb-kYmhYloS" colab_type="text" # As we can see, it provides a lot of information which is especially useful when a row is not valid. Our row is valid but we demostrated how it can preserve data about raw missing values. It also preserves data about all errored cells. Please read "API Reference" for more details.
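# + [markdown]
# As a small, hedged follow-up to the Row Object section above: the attributes demonstrated there (`row.valid`, `row.errors`, `row.row_position`) are enough to build a simple error report while streaming a file. The helper below is our own illustration, not part of the Frictionless API, and it only reuses calls already shown in this notebook.

# +
from frictionless import Table

def collect_row_errors(source, **options):
    # Return (row_position, error_code) pairs for every invalid row in the table
    problems = []
    with Table(source, **options) as table:
        for row in table:
            if not row.valid:
                for error in row.errors:
                    problems.append((row.row_position, error.code))
    return problems

# For a clean file this simply returns an empty list
print(collect_row_errors('capital-3.csv'))
# -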
docs/target/extracting-data/README.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ##Datasets # The data sets I am analyzing for my final project were downloaded from <NAME>'s Sunnyslope project website. I have downloaded one dataset from two different vineyards. The data was collected from a weather station located in the middle of the vine rows, and temperature data is collected every hour. Data includes April 1, 2017 to October 31, 2017. In this Jupyter notboke I hope to import the data along with any computing libraries I will need to create plots of the data. Plots have already be created on Dave's website, and I can use those to double check mine are accurate. # # Dataset 1: Polo Cove 'PC_GDD.csv" # Dataset 2: Hat Ranch 'HR_GDD.csv" # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as mdates filename_PC = "PC_GDD.csv" # - # Above, I imported all the libraries I think I will need to analyze the data. # Below, I designated a data frame to the .csv file I dubbed "PC" which is my Polo Cove 2017 data df = pd.read_csv(filename_PC) df[0:60] # I wanted to double check that the data was print correctly, and in a format I can read. The data is readable to me, but the backslashes are going to make anaylzing the data, and printing a neat plot very difficult. I need to convert the dates to something more readable. datearr = df['Date'].values datearr[0] # Here I asked it to print the value of the first date in my dataset. It is in a format that is not easy to work with. datearr_dt = pd.to_datetime(datearr) datearr_dt[0] # I convert the dates and time values to timestamps, and these are much easier to work with. df['NP_Datetime'] = datearr_dt # This allows me to generate another column within my dataset for the timestamps I created. Let's double check that it worked. # df[0:14] # It worked! Now all I need to do is create a plot of the data using pyplot and be sure to label my axes and give my plot a title. This is similar to an exercise we did in class for our "Visualizing Data" section. # + plt.plot(df['NP_Datetime'].values,df['Value'].values) plt.title('Polo Cove Vineyard: April 1 - October 31 2017') plt.xlabel('Date') plt.ylabel('Value [°F]') plt.show() # - # It worked! From this we can see that there were multiple days in September when temperature data was not recorded. This is likely due to a dead battery. Now, I need to import the Hat Ranch dataset and create a plot. filename_HR = 'HR_GDD.csv' df = pd.read_csv(filename_HR) df[0:4] datearr = df['Date'].values datearr[0] datearr_dt = pd.to_datetime(datearr) datearr_dt[0] df['NP_Datetime'] = datearr_dt # + plt.plot(df['NP_Datetime'].values,df['Value'].values) plt.title('Hat Ranch Vineyard: April 1 - October 31 2017') plt.xlabel('Date') plt.ylabel('Value [°F]') plt.show() # - # We can verify that this is the Hat Ranch data by the lack of missing temperatures in September. # ###### # # Both datasets that I am working with for my GDD investigation were succesfully imported into my Jupyter notebook. I was easily able to create readable plots with labeled x and y axes and a title. After checking Dave's website, the plots are accurate, and are ready to be analyzed to find GDD. I speculate that the number of GDD at Hat Ranch (which is at a lower elevation) will be less than the GDD at Polo Cove. 
This difference will demonstrate the variability in growing conditions within the Snake River Valley wine region, and show how our region compares to others.
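# As a hedged sketch of the next step, here is one way the GDD calculation could be set up from the hourly data: resample to daily minima and maxima, apply the standard formula GDD = max((Tmax + Tmin)/2 - Tbase, 0), and accumulate over the season. The base temperature of 50 °F is an assumption (a common choice for wine grapes), the column names come from the cells above, and `df` here is the Hat Ranch dataframe loaded last.

# +
daily = df.set_index('NP_Datetime')['Value'].resample('D').agg(['min', 'max'])
gdd_daily = ((daily['min'] + daily['max']) / 2 - 50.0).clip(lower=0)  # degree days per day, base 50 °F (assumed)
gdd_cumulative = gdd_daily.cumsum()

plt.plot(gdd_cumulative.index.values, gdd_cumulative.values)
plt.title('Hat Ranch: cumulative GDD, base 50 °F (sketch)')
plt.xlabel('Date')
plt.ylabel('Cumulative GDD [°F·days]')
plt.show()
# -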
GEOS_505_Project_Plots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas import lightfm from lightfm import LightFM from lightfm import cross_validation from scipy.sparse import coo_matrix from collections import OrderedDict from scipy.sparse import coo_matrix import numpy as np df = pandas.read_csv('write.csv') topNames = df.groupby("actionName").size().sort_values(ascending=False)[0:50].keys() df = df[df.actionName.isin(topNames)] actionByUsers = df.groupby(["userName","actionName"]).size() actionByUsers=actionByUsers.apply(lambda x:1) actionByUsers=actionByUsers.unstack() actionByUsers=actionByUsers.fillna(0.0) mat=coo_matrix(actionByUsers.values,dtype='float32') users=actionByUsers.index.tolist() items=list(actionByUsers.columns.values) tags = pandas.read_json(path_or_buf="C:\\Users\\Robert\\python\\tagiDlaWebnei.json", orient = 'records', dtype={"A":str, "B":list}) tags[tags.title!=items[14]] sum = [] for i in list(tags.tags.values): sum = sum + i tagsList=list(OrderedDict.fromkeys(sum)) tagsMatrix = coo_matrix((len(items),len(tagsList)),dtype='float32') tagsMatrix 1 for index, row in tags.iterrows(): for rowA in row[1]: if((row[2] in items)&(rowA in tagsList)): print(items.index(row[2]), tagsList.index(rowA)) rowM = [] colM = [] dataM = [] #np.array([]) for index, row in tags.iterrows(): for rowA in row[1]: if((row[2] in items)&(rowA in tagsList)): rowM.append(items.index(row[2])) colM.append(tagsList.index(rowA)) dataM.append(1.0) rowM = np.array(rowM, dtype='float32') colM = np.array(colM, dtype='float32') dataM = np.array(dataM, dtype='float32') tagsMatrix = coo_matrix((dataM,(rowM,colM)),shape=(len(items),len(tagsList))) tagsMatrix for a in tagsMatrix.todense(): print(a) items[15] tags[tags.title!=items[14]] tags # + train, test = cross_validation.random_train_test_split(mat) NUM_THREADS = 2 NUM_COMPONENTS = 30 NUM_EPOCHS = 3 ITEM_ALPHA = 1e-6 # Let's fit a WARP model: these generally have the best performance. model = LightFM(loss='warp', item_alpha=ITEM_ALPHA, no_components=NUM_COMPONENTS) # Run 3 epochs and time it. # %time model = model.fit(train,item_features = tagsMatrix,epochs=NUM_EPOCHS, num_threads=NUM_THREADS) from lightfm.evaluation import auc_score # Compute and print the AUC score train_auc = auc_score(model, train ,item_features = tagsMatrix, num_threads=NUM_THREADS).mean() print('Collaborative filtering train AUC: %s' % train_auc) # - test_auc = auc_score(model, test, train_interactions=train, item_features=tagsMatrix, num_threads=NUM_THREADS).mean() print('Hybrid test set AUC: %s' % test_auc) tagsMatrix.shape[1] if('RRM' in tagsList): print(tagsList.index('RRM')) tagsMatrix test train mat model
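# +
# A hedged follow-up sketch: turning the trained hybrid model into top-N recommendations for a
# user. It assumes LightFM's standard predict(user_ids, item_ids, item_features=...) interface;
# `model`, `items`, `users` and `tagsMatrix` are the objects built above, and the helper itself
# is only an illustration.
def recommend(model, user_index, n=5):
    # Score every item for this user with the same tag features used during training
    scores = model.predict(user_index, np.arange(len(items)), item_features=tagsMatrix)
    # Highest-scoring items first
    top = np.argsort(-scores)[:n]
    return [items[i] for i in top]

print(users[0], '->', recommend(model, 0))
# -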
lightFM/LightFmWithTagsA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ___ # # <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a> # ___ # # Sentence Segmentation # In **spaCy Basics** we saw briefly how Doc objects are divided into sentences. In this section we'll learn how sentence segmentation works, and how to set our own segmentation rules. # Perform standard imports import spacy nlp = spacy.load('en_core_web_sm') # + # From Spacy Basics: doc = nlp(u'This is the first sentence. This is another sentence. This is the last sentence.') for sent in doc.sents: print(sent) # - # ### `Doc.sents` is a generator # It is important to note that `doc.sents` is a *generator*. That is, a Doc is not segmented until `doc.sents` is called. This means that, where you could print the second Doc token with `print(doc[1])`, you can't call the "second Doc sentence" with `print(doc.sents[1])`: print(doc[1]) print(doc.sents[1]) # However, you *can* build a sentence collection by running `doc.sents` and saving the result to a list: doc_sents = [sent for sent in doc.sents] doc_sents # <font color=green>**NOTE**: `list(doc.sents)` also works. We show a list comprehension as it allows you to pass in conditionals.</font> # Now you can access individual sentences: print(doc_sents[1]) # ### `sents` are Spans # At first glance it looks like each `sent` contains text from the original Doc object. In fact they're just Spans with start and end token pointers. type(doc_sents[1]) print(doc_sents[1].start, doc_sents[1].end) # ## Adding Rules # spaCy's built-in `sentencizer` relies on the dependency parse and end-of-sentence punctuation to determine segmentation rules. We can add rules of our own, but they have to be added *before* the creation of the Doc object, as that is where the parsing of segment start tokens happens: # + # Parsing the segmentation start tokens happens during the nlp pipeline doc2 = nlp(u'This is a sentence. This is a sentence. This is a sentence.') for token in doc2: print(token.is_sent_start, ' '+token.text) # - # <font color=green>Notice we haven't run `doc2.sents`, and yet `token.is_sent_start` was set to True on two tokens in the Doc.</font> # Let's add a semicolon to our existing segmentation rules. That is, whenever the sentencizer encounters a semicolon, the next token should start a new segment. # + # SPACY'S DEFAULT BEHAVIOR doc3 = nlp(u'"Management is doing things right; leadership is doing the right things." -<NAME>') for sent in doc3.sents: print(sent) # + # ADD A NEW RULE TO THE PIPELINE def set_custom_boundaries(doc): for token in doc[:-1]: if token.text == ';': doc[token.i+1].is_sent_start = True return doc nlp.add_pipe(set_custom_boundaries, before='parser') nlp.pipe_names # - # <font color=green>The new rule has to run before the document is parsed. Here we can either pass the argument `before='parser'` or `first=True`. # + # Re-run the Doc object creation: doc4 = nlp(u'"Management is doing things right; leadership is doing the right things." -<NAME>') for sent in doc4.sents: print(sent) # - # And yet the new rule doesn't apply to the older Doc object: for sent in doc3.sents: print(sent) # ### Why not change the token directly? # Why not simply set the `.is_sent_start` value to True on existing tokens? 
# Find the token we want to change: doc3[7] # Try to change the .is_sent_start attribute: doc3[7].is_sent_start = True # <font color=green>spaCy refuses to change the tag after the document is parsed to prevent inconsistencies in the data.</font> # ## Changing the Rules # In some cases we want to *replace* spaCy's default sentencizer with our own set of rules. In this section we'll see how the default sentencizer breaks on periods. We'll then replace this behavior with a sentencizer that breaks on linebreaks. # + nlp = spacy.load('en_core_web_sm') # reset to the original mystring = u"This is a sentence. This is another.\n\nThis is a \nthird sentence." # SPACY DEFAULT BEHAVIOR: doc = nlp(mystring) for sent in doc.sents: print([token.text for token in sent]) # + # CHANGING THE RULES from spacy.pipeline import SentenceSegmenter def split_on_newlines(doc): start = 0 seen_newline = False for word in doc: if seen_newline: yield doc[start:word.i] start = word.i seen_newline = False elif word.text.startswith('\n'): # handles multiple occurrences seen_newline = True yield doc[start:] # handles the last group of tokens sbd = SentenceSegmenter(nlp.vocab, strategy=split_on_newlines) nlp.add_pipe(sbd) # - # <font color=green>While the function `split_on_newlines` can be named anything we want, it's important to use the name `sbd` for the SentenceSegmenter.</font> doc = nlp(mystring) for sent in doc.sents: print([token.text for token in sent]) # <font color=green>Here we see that periods no longer affect segmentation, only linebreaks do. This would be appropriate when working with a long list of tweets, for instance.</font> # ## Next Up: POS Assessment
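# As a short usage example (our own addition, reusing the pipeline built above): the newline-based segmenter is handy for text where each line is its own unit, such as a list of tweets. The sample string below is purely illustrative.

# +
tweets = u"Loving the new coffee place downtown\nGreat espresso\nBack to work now"
doc5 = nlp(tweets)
for sent in doc5.sents:
    print([token.text for token in sent])
# -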
UPDATED_NLP_COURSE/02-Parts-of-Speech-Tagging/04-Sentence-Segmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # ## Jupyter notebook - čo to je a ako s nim pracovať. # + [markdown] slideshow={"slide_type": "subslide"} # Tento webový zápisníček sa nazýva _Jupyter notebook_. Nie je to súvislý text, ako by sa zdalo. Skladá sa z __buniek__ (cells) ako táto. # # __Aktuálna bunka__ je orámovaná výraznejším rámčekom. Ak ju práve editujeme, rámček aj zvýraznená zvislá čiara vpravo je zelená, inak modrá. __Presvedčte sa kliknutím myšou tu.__ # + [markdown] slideshow={"slide_type": "slide"} # Do buniek môžeme písať __obyčajný text__ (bunka typu `Markdown`), to je prípad tejto bunky. __Markdown__ je jednoduchý značkovací jazyk. Keď stlačíte `Enter` v tejto bunke, uvidíte, ako to vyzerá v tom jazyku. Ľahko môžeme text formátovať (písmo tučné, kurzíva, nadpisy rôznych úrovní). __V textových bunkách môžeme používať aj jazyk HTML.__ # # __Editovať bunku__ môžeme, keď hocikde vnútri bunky __dvakrát klikneme (alebo stlačíme `Enter`).__ # # Bunky môžu tiež obsahovať __zdrojový kód__ v mnohých programovacích jazykoch (bunky typu `Code`). My budeme používať programovací jazyk _Python (verzia 3)_. Aký jazyk notebook používa, to zistíme vpravo hore. U nás je to Python 3.5. # Koliesko napravo od jazyka ukazuje, či je notebook práve zaneprádnený počítaním (plné čierne koliesko), alebo si voľká (koliesko je pusté, ako by to malo byť práve teraz). # # **Bunka sa vykoná, t.j. text sa správne zobrazí** pre bunku typu `Markdown`, alebo **programovací kód sa vykoná** pre bunku typu `Code` cez # # - __`Shift-Enter`__ (ak sa chceme posunúť na nasledujúcu bunku), # alebo # - __`Ctrl-Enter`__ (ak chceme ostať v tej istej bunke). # # Bunky sa dajú vykonávať aj cez položku menu `Cell`, no to je pucovskejšie. # + [markdown] slideshow={"slide_type": "slide"} # **Bunka sa vykoná, t.j. text sa správne zobrazí** pre bunku typu `Markdown`, alebo **programovací kód sa vykoná** pre bunku typu `Code` cez # # - __`Shift-Enter`__ (ak sa chceme posunúť na nasledujúcu bunku), # alebo # - __`Ctrl-Enter`__ (ak chceme ostať v tej istej bunke). # # Bunky sa dajú vykonávať aj cez položku menu `Cell`, no to je pucovskejšie. # + [markdown] slideshow={"slide_type": "slide"} # V notebooku sa dajú zobrazovať aj __obrázky, videá a prehrávať zvuky__. Napr. obrázok (urobili sme v Jupyteri tiež): # <img src="kacicka.jpg" width="420"> # + hide_input=false slideshow={"slide_type": "slide"} # Toto je príklad bunky typu Code, ako vidno hore v menu # a hashkou začína komentár, ktorý si Python nevšíma. Je to len pre nás, aby sme rozumeli, čo robíme. # Dole je príkaz Pythonu na vypísanie pozdravu :-) Vykonajte. print("Nazdar svet!") print("Koľko je 20 + 18? Asi", 20 + 18) # + [markdown] slideshow={"slide_type": "slide"} # __Bunky sa dajú pridávať nad aktuálnu bunku__ (above) cez `Ctrl-A` alebo __pod__ (below) cez `Ctrl-B` (nezáleží, či to je malé alebo veľké `A, B`). # # __Aktuálnu bunku zničíme__ cez `Esc-D-D` (stlačíme krátko `Esc` a potom dvakrát D malé či veľké). # # Keď sme zničili omylom, cez položku menu `Edit -> Undo Delete Cells` to odčiníme. Ale lepšie je dávať pozor :-) # # __Notebook uložiť__ môžeme cez menu `File -> Save and Checkpoint`. # # __Tu povedané si skúšajte.__ # + [markdown] slideshow={"slide_type": "slide"} # __POZOR, na poradí vykonávania buniek záleží. 
Ak bunka potrebuje niečo, čo malo byť vykonané skôr, tak to vykonané byť # musí. Nie je dôležité v akom poradí sú bunky zapísané, ale v akom poradí sú vykonané!__ # # Keď sa bunka typu `Code` vykoná, objaví sa v hranatých zátvorkách vľavo číslo vstupu. __Ak sa namiesto čísla objavuje hviezdička, kód sa stále vykonáva a musíme čakať.__ # # Ak to trvá dlho, výpočet prerušíme cez menu `Kernel -> Interrupt`, alebo ak to nepomáha, tak `Kernel Restart & Clear Output` (no vtedy stratíme všetky výsledky doterajších výpočtov). To spôsobí tiež, že sa vstupy zasa budú číslovať od 1.
Uvod/Jupyter_Notebook_zaklady.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Serialization # # ## Overview # # This sample shows how to serialize the pipeline to a string. # # ## Serialization # # In order to use C API or TensorFlow plugin (or just to save the pipeline with a model, so the training process is fully reproducible) we need to serialize the pipeline. # # Let us make a simple pipeline reading from MXNet recordIO format (for example of using other data formats please see other examples in [examples](.) directory. # + from nvidia.dali.pipeline import Pipeline import nvidia.dali.ops as ops import nvidia.dali.types as types import numpy as np import matplotlib.pyplot as plt import os.path test_data_root = os.environ['DALI_EXTRA_PATH'] base = os.path.join(test_data_root, 'db', 'recordio') idx_files = [base + "/train.idx"] rec_files = [base + "/train.rec"] class SerializedPipeline(Pipeline): def __init__(self, batch_size, num_threads, device_id, seed): super(SerializedPipeline, self).__init__(batch_size, num_threads, device_id, seed = seed) self.input = ops.MXNetReader(path = rec_files, index_path = idx_files) self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB) self.resize = ops.Resize(device = "gpu", interp_type = types.INTERP_LINEAR) self.cmnp = ops.CropMirrorNormalize(device = "gpu", dtype = types.FLOAT, crop = (224, 224), mean = [0., 0., 0.], std = [1., 1., 1.]) self.res_uniform = ops.Uniform(range = (256.,480.)) def define_graph(self): inputs, labels = self.input(name="Reader") images = self.decode(inputs) images = self.resize(images, resize_shorter = self.res_uniform()) output = self.cmnp(images) return (output, labels) # + batch_size = 16 pipe = SerializedPipeline(batch_size=batch_size, num_threads=2, device_id = 0, seed = 12) # - # We will now serialize this pipeline, using `serialize` function of the `Pipeline` class. s = pipe.serialize() # In order to deserialize our pipeline in Python, we need to create another pipeline, this time using the generic `Pipeline` class. We give the same seed to the new pipeline, in order to compare the results. pipe2 = Pipeline(batch_size = batch_size, num_threads = 2, device_id = 0, seed = 12) # Let us now use the serialized form of `pipe` object to make `pipe2` a copy of it. pipe2.deserialize_and_build(s) # Now we can compare the results of the 2 pipelines - original and deserialized. pipe.build() original_pipe_out = pipe.run() serialized_pipe_out = pipe2.run() def check_difference(batch_1, batch_2): return [np.sum(np.abs(batch_1.at(i) - batch_2.at(i))) for i in range(batch_size)] original_images, _ = original_pipe_out serialized_images, _ = serialized_pipe_out check_difference(original_images.as_cpu(), serialized_images.as_cpu()) # Both pipelines give exactly the same results.
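# A hedged follow-up: in practice the serialized pipeline is usually written to disk (for example next to a model checkpoint) and restored later. The sketch below assumes the value returned by `serialize` can be written out as a plain byte string; the file name is arbitrary.

# +
with open("serialized_pipeline.dali", "wb") as f:
    f.write(s)

with open("serialized_pipeline.dali", "rb") as f:
    s_loaded = f.read()

pipe3 = Pipeline(batch_size=batch_size, num_threads=2, device_id=0, seed=12)
pipe3.deserialize_and_build(s_loaded)
loaded_images, _ = pipe3.run()

# The pipeline restored from disk should match the original one as well
check_difference(original_images.as_cpu(), loaded_images.as_cpu())
# -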
docs/examples/advanced/serialization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Learning Pandas and Matplotlib # Pandas is pythons library that enables broad possibilities for data analysis. # By using Pandas it is very easy to upload, manage and analyse data from different tables by using SQL-like commands. Moreover, in connection with the libraries Matplotlib and Seaborn, Pandas gives broad opportunities to visualise the data. # + import pandas as pd import numpy as np from matplotlib import pyplot as plt from matplotlib.pyplot import rcParams rcParams['figure.figsize'] = 8, 5 import seaborn as sns # - # We study the main methods of packages Pandas and Matplotlib by working with the dataset that describes the churn rate of the customers of a telecom company. # <b>Exercise 1:</b> # # Read the data from the file 'telecom_churn.csv' and display the first 5 rows by using the method 'head'. <b> Hint :</b> method read_csv may be useful ### Write Your code here ### tel_churn = pd.read_csv('telecom_churn.csv') tel_churn.head() # <b>Exercise 2:</b> # # Display the size of the data array, information about it, and its main statistical characteristics. <b>Hint:</b> use methods shape, info, describe. ### Write Your code here ### tel_churn.info() tel_churn.shape tel_churn.describe() # <b>Exercise 3: # # </b>Convert column 'Churn' to the int64 type. <b> Hint:</b> use method astype. # Plot the distribution of the churn and loyal clients in the bar plot. The figure should look like this: # # <img src="dist_churn.png"> ### Write Your code here ### tel_churn.Churn =tel_churn.Churn.astype(int) sns.countplot(x='Churn',data=tel_churn) # ### Sorting # # <b>Exercise 4:</b> # # Sort the dataframe you have obtained in exercise 3 by the value in 'Total day charge' in descending/ascending order. # Also sort it by using the column 'Churn' as the primary key and 'Total eve calls' as the secondary key. Try different combinations of ordering. <b> Hint:</b> use the method sort_values ### Write Your code here ### tel_churn.sort_values(by=['Total day charge']) #, ascending=False tel_churn.sort_values(by=['Churn','Total eve calls']) # ### Indexing and extracting information from the dataframe # # By using Pandas dataframes we are able to index and extract information from the dataset. You can index the information stored in dataframe either by names or by indices. In the first case you use the command loc, in the second iloc. <b> Hint: </b> Use logical indexing for the columns and the groupby method to solve the tasks. # # <b>Exercise 5:</b> # * Display the mean churn rate of the clients. # * Now we want to analyse statistical information only for the clients, which are or aren't loyal to their telecom-company (field 'Churn' in dataframe). Extract the loyal and non-loyal clients from the table seperately and display the means of their charateristics in a single dataframe. # * How long do the non-loyal users talk during the day (on average)? # * What is the maximum length of the international calls for the loyal users that do not use the international plan? ### Display the mean churn rate of the clients. ### tel_churn.Churn.mean() tel_churn.groupby('Churn').groups # + #Now we want to analyse statistical information only for the clients, #which are or aren't loyal to their telecom-company (field 'Churn' in dataframe). 
# Extract the loyal and non-loyal clients from the table seperately and display the means of their charateristics in a single dataframe. tel_churn.groupby('Churn').mean() # - #How long do the non-loyal users talk during the day (on average)? #users[(users.sex == 'F') | (users.age < 30)].head(3) non_loyals =tel_churn[(tel_churn.Churn == 1)] #non_loyals.head() non_loyals['Total day minutes'].mean() #What is the maximum length of the international calls for the loyal users that do not use the international plan? tel_churn =tel_churn.rename(columns={"International plan": "int_plan"}) loyals =tel_churn[(tel_churn.Churn == 0) & ((tel_churn.int_plan) == 'NO')] loyals.head() # ### Distribution of the features # # <b>Exercise 6:</b> # # Plot the distribution of the features that have numerical values. <b> Hint: </b>use the method hist which can also be applied from the pandas dataframe. It should look like that: # <img src="hist_feat.png"> # # What do you observe? From which probability distribution could each feature be generated? tel_churn.hist() ### Write Your code here ### # ### Pivot tables and graphics # # We want to see how the instances are distributed between two categories: 'International plan' and 'Churn'. # # <b>Exercise 7:</b> # * Build the cross table between the features using the method <i>crosstab</i>. # * Visualize the distribution for the feature 'Churn', depending on the value of the features 'International plan', 'Voice mail plan', and 'Customer service calls'. # <b>Hint:</b> Use commands plt.sublot and sns.subplots. # # What do you see? What conclusions can be drawn? What feature (intuitively) can be more important for Churn prediction? ### Write Your code here ### pd.crosstab(tel_churn.int_plan, tel_churn.Churn, margins=True) sns.jointplot(x='Churn',y='Customer service calls',data=tel_churn) sns.barplot(x='Churn',y='Customer service calls',data=tel_churn) sns.pairplot(tel_churn, y_vars=['Churn'], x_vars=['Customer service calls', 'Voice mail plan','int_plan']) g = sns.FacetGrid(tel_churn, col="Churn") g.map(plt.scatter, "Customer service calls", "int_plan", alpha=.7) g.add_legend(); # <b>Exercise 8:</b> # # Add a new feature to the dataframe which will describe whether or not the user has done more than 3 service calls. Investigate how this feature describes the churn rate. ### Write Your code here ### data.date.apply(lambda x: datetime.strptime(x, "%Y-%m-%d")) #tel_churn = tel_churn.rename(columns={"Customer service calls":"cscalls"}) tel_churn['threeServCall'] =tel_churn.cscalls.apply(lambda x: 1 if (x > 3) else 0) tel_churn.head() pd.crosstab(tel_churn.threeServCall, tel_churn.Churn, margins=True)
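# A compact way to quantify the relationship explored above (our own addition): since the `Churn` column is 0/1, its mean within each group is the churn rate itself. The 3-call threshold and the column names follow the cells above.
tel_churn.groupby('threeServCall')['Churn'].mean()

# The same comparison shown as row-wise proportions rather than raw counts:
pd.crosstab(tel_churn.threeServCall, tel_churn.Churn, normalize='index')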
Lab1/.ipynb_checkpoints/LearningPandasandPylab-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: p38 # language: python # name: p38 # --- # # 05.01 - TIME SERIES PREDICTIONS # !wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1/main/content/init.py import init; init.init(force_download=False); init.get_weblink() import numpy as np import matplotlib.pyplot as plt import pandas as pd import local.lib.timeseries as ts # %matplotlib inline # ## The data d = pd.read_csv("local/data/eurcop.csv") d.index = pd.to_datetime(d.Date) del(d["Date"]) d.head() d.plot(figsize=(15,3)) d[["Rate"]].plot(figsize=(15,3)) d = d[["Rate"]] d.head(10) # ## A predictive model # # ### First create a time series dataset with look back dt = ts.timeseries_as_many2one(d, columns=["Rate"], nb_timesteps_in=4, timelag=0) dt.head() # ### Split dataset for trian and for test trds = dt[:"2008"] tsds = dt["2009":] print (dt.shape, trds.shape, tsds.shape) plt.figure(figsize=(15,3)) plt.plot(trds.index.values, trds.Rate.values, color="black", lw=2, label="train", alpha=.5) plt.plot(tsds.index.values, tsds.Rate.values, color="red", lw=2, label="test", alpha=.5) plt.grid(); plt.legend(); # ### Create `X` and `y` matrices for train and test Xtr, ytr = trds[[i for i in trds.columns if i!="Rate"]].values, trds.Rate.values Xts, yts = tsds[[i for i in tsds.columns if i!="Rate"]].values, tsds.Rate.values trds[:5] print (Xtr[:10]) print (ytr[:10]) tsds[:5] print (Xts[:10]) print (yts[:20]) # ### convert target into classification task for TREND PREDICTION (1 up, 0 down) yts = (yts>Xts[:,-1]).astype(int) ytr = (ytr>Xtr[:,-1]).astype(int) print (ytr[:20]) print (yts[:20]) # ### inspect target class distributions print ("1's in train %.2f%s"%(np.mean(ytr)*100, "%")) print ("1's in test %.2f%s"%(np.mean(yts)*100, "%")) # ### train a predictive model from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.decomposition import PCA from sklearn.pipeline import Pipeline estimator = RandomForestClassifier(n_estimators=5, max_depth=30) #estimator = DecisionTreeClassifier(max_depth=2) #estimator = LogisticRegression() #estimator = Pipeline((("pca", PCA(n_components=2)), ("estimator", estimator))) estimator.fit(Xtr,ytr); # ### get predictive accuracy in train and test print ("train accuracy %.2f"%estimator.score(Xtr,ytr)) print ("test accuracy %.2f"%estimator.score(Xts,yts)) # ### inspect confusion matrix # + from sklearn.metrics import confusion_matrix import seaborn as sns cm = confusion_matrix(yts, estimator.predict(Xts)) sns.heatmap(cm,annot=True,cbar=False, fmt="d") plt.ylabel('True Label') plt.xlabel('Predicted Label') plt.title('Confusion Matrix') # - # ## A strategy # # - if model predicts 1 (price up) we buy 10 EUR today and sell them tomorrow # - if model predicts 0 (price down) we sell 10 EUR today and buy them tomorrow def trade(d, date_close, op, qty): assert op in ["buy", "sell"] assert qty>=0 r = (d.loc[:date_close].iloc[-2].Rate-d.loc[date_close].Rate)*qty if op=="buy": r = -r return r # example: a **buy** operation on 2011-01-03 closed (with a sell operation) on 2011-01-04 trade(tsds, "2011-01-04", "buy", 100) trade(tsds, "2011-01-05", "buy", 100) tsds["2011-01-02":].iloc[:5] yts def compute_pnl(d, y, predictions, qty=10): pnl = [] for date,prediction 
in zip(d.index[1:], predictions[1:]): pnl.append(trade(d, date, "sell" if prediction==0 else "buy", qty)) pnl = pd.DataFrame(np.r_[[pnl]].T, index=d.index[1:], columns=["pnl"]) pnl["prediction"]=predictions[1:] pnl["y"]=y[1:] return pnl preds = estimator.predict(Xts) pnl = compute_pnl(tsds, yts, preds) pnl.pnl.plot() plt.title("TOTAL PNL %.2f COP"%pnl.pnl.sum()) plt.ylabel("PNL") plt.grid(); plt.ylim(-5000,5000); def plot_pnlhist(pnl_series, label=""): k = pnl_series.values total = np.sum(k); k = k[np.abs(k)<50000] plt.hist(k, bins=30); plt.title("PNL for %s, total %.2f COP"%(label, total)) plt.figure(figsize=(12,8)) plt.subplot(221); plot_pnlhist(pnl[pnl.y==1].pnl, "REAL = 1 (up)"); plt.grid(); plt.subplot(222); plot_pnlhist(pnl[pnl.y==0].pnl, "REAL = 0 (down)"); plt.grid(); plt.subplot(223); plot_pnlhist(pnl[preds[1:]==1].pnl, "PREDS = 1 (up)"); plt.grid(); plt.subplot(224); plot_pnlhist(pnl[preds[1:]==0].pnl, "PREDS = 0 (down)"); plt.grid();
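# ### compare against naive baselines
# To judge whether the model's PnL is meaningful, it helps to run the same `compute_pnl` helper with naive prediction rules (always up, always down, random). This is our own sketch, reusing only the objects defined above.

# +
baseline_preds = {
    "model": preds,
    "always_up": np.ones_like(yts),
    "always_down": np.zeros_like(yts),
    "random": np.random.randint(0, 2, size=len(yts)),
}
for name, p in baseline_preds.items():
    print("%-12s total PnL %10.2f COP" % (name, compute_pnl(tsds, yts, p).pnl.sum()))
# -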
content/NOTES 05.01 - A TIME SERIES PREDICTIVE STRATEGY.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + dc={"key": "3"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 1. The most Nobel of Prizes # <p><img style="float: right;margin:5px 20px 5px 1px; max-width:250px" src="https://s3.amazonaws.com/assets.datacamp.com/production/project_441/img/Nobel_Prize.png"></p> # <p>The Nobel Prize is perhaps the world's most well known scientific award. Except for the honor, prestige and substantial prize money the recipient also gets a gold medal showing <NAME> (1833 - 1896) who established the prize. Every year it's given to scientists and scholars in the categories chemistry, literature, physics, physiology or medicine, economics, and peace. The first Nobel Prize was handed out in 1901, and at that time the Prize was very Eurocentric and male-focused, but nowadays it's not biased in any way whatsoever. Surely. Right?</p> # <p>Well, we're going to find out! The Nobel Foundation has made a dataset available of all prize winners from the start of the prize, in 1901, to 2016. Let's load it in and take a look.</p> # + dc={"key": "3"} tags=["sample_code"] # Loading in required libraries import pandas as pd import seaborn as sns import numpy as np # Reading in the Nobel Prize data nobel = pd.read_csv('datasets/nobel.csv') # Taking a look at the first several winners nobel.head(n=6) # + dc={"key": "10"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 2. So, who gets the Nobel Prize? # <p>Just looking at the first couple of prize winners, or Nobel laureates as they are also called, we already see a celebrity: <NAME>, the guy who discovered X-rays. And actually, we see that all of the winners in 1901 were guys that came from Europe. But that was back in 1901, looking at all winners in the dataset, from 1901 to 2016, which sex and which country is the most commonly represented? </p> # <p>(For <em>country</em>, we will use the <code>birth_country</code> of the winner, as the <code>organization_country</code> is <code>NaN</code> for all shared Nobel Prizes.)</p> # + dc={"key": "10"} tags=["sample_code"] # Display the number of (possibly shared) Nobel Prizes handed # out between 1901 and 2016 nobel['prize_share'].value_counts() display() # Display the number of prizes won by male and female recipients. nobel['sex'].value_counts() display() # Display the number of prizes won by the top 10 nationalities. nobel['birth_country'].value_counts().head(10) # + dc={"key": "17"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 3. USA dominance # <p>Not so surprising perhaps: the most common Nobel laureate between 1901 and 2016 was a man born in the United States of America. But in 1901 all the winners were European. 
When did the USA start to dominate the Nobel Prize charts?</p> # + dc={"key": "17"} tags=["sample_code"] # Calculating the proportion of USA born winners per decade nobel['usa_born_winner'] = nobel['birth_country']=="United States of America" nobel['decade'] = (np.floor(nobel['year'] / 10) * 10).astype(int) prop_usa_winners = nobel.groupby('decade', as_index=False)['usa_born_winner'].mean() # Display the proportions of USA born winners per decade display() ######df.groupby('COLUMN 1', as_index=False)['COLUMN 2'].mean() # + dc={"key": "24"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 4. USA dominance, visualized # <p>A table is OK, but to <em>see</em> when the USA started to dominate the Nobel charts we need a plot!</p> # + dc={"key": "24"} tags=["sample_code"] # Setting the plotting theme sns.set() # and setting the size of all plots. import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = [11, 7] # Plotting USA born winners ax = sns.lineplot(x=prop_usa_winners['decade'], y=prop_usa_winners['usa_born_winner']) # Adding %-formatting to the y-axis from matplotlib.ticker import PercentFormatter ax.yaxis.set_major_formatter(PercentFormatter(1.0)) # + dc={"key": "31"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 5. What is the gender of a typical Nobel Prize winner? # <p>So the USA became the dominating winner of the Nobel Prize first in the 1930s and had kept the leading position ever since. But one group that was in the lead from the start, and never seems to let go, are <em>men</em>. Maybe it shouldn't come as a shock that there is some imbalance between how many male and female prize winners there are, but how significant is this imbalance? And is it better or worse within specific prize categories like physics, medicine, literature, etc.?</p> # + dc={"key": "31"} tags=["sample_code"] # Calculating the proportion of female laureates per decade nobel['female_winner'] = nobel['sex']=='Female' prop_female_winners = nobel.groupby(['decade','category'], as_index=False)['female_winner'].mean() # Plotting USA born winners with % winners on the y-axis ax = sns.lineplot(x='decade', y='female_winner', hue='category', data=prop_female_winners) ax.yaxis.set_major_formatter(PercentFormatter(1.0)) # + dc={"key": "38"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 6. The first woman to win the Nobel Prize # <p>The plot above is a bit messy as the lines are overplotting. But it does show some interesting trends and patterns. Overall the imbalance is pretty large with physics, economics, and chemistry having the largest imbalance. Medicine has a somewhat positive trend, and since the 1990s the literature prize is also now more balanced. The big outlier is the peace prize during the 2010s, but keep in mind that this just covers the years 2010 to 2016.</p> # <p>Given this imbalance, who was the first woman to receive a Nobel Prize? And in what category?</p> # + dc={"key": "38"} tags=["sample_code"] # Picking out the first woman to win a Nobel Prize ##To get the year from a datetime column you need to use access the dt.year value. #Here is an example: # a_data_frame['a_datatime_column'].dt.year # nobel['Female'] nobel.nsmallest(1, 'year') # + dc={"key": "45"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 7. Repeat laureates # <p>For most scientists/writers/activists a Nobel Prize would be the crowning achievement of a long career. 
But for some people, one is just not enough, and few have gotten it more than once. Who are these lucky few? (Having won no Nobel Prize myself, I'll assume it's just about luck.)</p> # + dc={"key": "45"} tags=["sample_code"] # Selecting the laureates that have received 2 or more prizes. nobel.groupby('full_name').filter(lambda group: len(group) >= 2) # + dc={"key": "52"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 8. How old are you when you get the prize? # <p>The list of repeat winners contains some illustrious names! We again meet <NAME>, who got the prize in physics for discovering radiation and in chemistry for isolating radium and polonium. <NAME> got it twice in physics for transistors and superconductivity, <NAME> got it twice in chemistry, and <NAME> got it first in chemistry and later in peace for his work in promoting nuclear disarmament. We also learn that organizations also get the prize as both the Red Cross and the UNHCR have gotten it twice.</p> # <p>But how old are you generally when you get the prize?</p> # + dc={"key": "52"} tags=["sample_code"] # Converting birth_date from String to datetime nobel['birth_date'] = pd.to_datetime(nobel['birth_date']) # Calculating the age of Nobel Prize winners nobel['age'] = nobel['year'] - nobel['birth_date'].dt.year # Plotting the age of Nobel Prize winners sns.lmplot(x='year', y='age', data=nobel, lowess=True, aspect=2, line_kws={'color' : 'black'}) # + dc={"key": "59"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 9. Age differences between prize categories # <p>The plot above shows us a lot! We see that people use to be around 55 when they received the price, but nowadays the average is closer to 65. But there is a large spread in the laureates' ages, and while most are 50+, some are very young.</p> # <p>We also see that the density of points is much high nowadays than in the early 1900s -- nowadays many more of the prizes are shared, and so there are many more winners. We also see that there was a disruption in awarded prizes around the Second World War (1939 - 1945). </p> # <p>Let's look at age trends within different prize categories.</p> # + dc={"key": "59"} tags=["sample_code"] # Same plot as above, but separate plots for each type of Nobel Prize sns.lmplot(x='year', y='age', data=nobel, row='category' ,lowess=True, aspect=2, line_kws={'color' : 'black'}) # + dc={"key": "66"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 10. Oldest and youngest winners # <p>More plots with lots of exciting stuff going on! We see that both winners of the chemistry, medicine, and physics prize have gotten older over time. The trend is strongest for physics: the average age used to be below 50, and now it's almost 70. Literature and economics are more stable. We also see that economics is a newer category. But peace shows an opposite trend where winners are getting younger! </p> # <p>In the peace category we also a winner around 2010 that seems exceptionally young. This begs the questions, who are the oldest and youngest people ever to have won a Nobel Prize?</p> # + dc={"key": "66"} tags=["sample_code"] # The oldest winner of a Nobel Prize as of 2016 display(nobel.nlargest(1, 'age')) # The youngest winner of a Nobel Prize as of 2016 display(nobel.nsmallest(1, 'age')) # + dc={"key": "73"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 11. You get a prize! 
# <p><img style="float: right;margin:20px 20px 20px 20px; max-width:200px" src="https://s3.amazonaws.com/assets.datacamp.com/production/project_441/img/paint_nobel_prize.png"></p> # <p>Hey! You get a prize for making it to the very end of this notebook! It might not be a Nobel Prize, but I made it myself in paint so it should count for something. But don't despair, <NAME> was 90 years old when he got his prize, so it might not be too late for you. Who knows.</p> # <p>Before you leave, what was again the name of the youngest winner ever who in 2014 got the prize for "[her] struggle against the suppression of children and young people and for the right of all children to education"?</p> # + dc={"key": "73"} tags=["sample_code"] # The name of the youngest winner of the Nobel Prize as of 2016 youngest_winner = '<NAME>'
A Visual History of Nobel Prize Winners/notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Welcome To Program Can Calculate Circumference Of The Rectangle # ### Welcome message to the user print('This is a Calculater of circumference of the Rectangle') #take the user name name=str(input('Enter Your Name : ')) print('Welcome '+ name) #Take from user the width and height Of the Rectangle width= float(input('Plese Enter The width Of the Rectangle : ')) height= float(input('Plese Enter The height Of the Rectangle : ')) # The Low of the Circumaferance of a Rectangle Z=2*(width+height) # # Output print('The Circumferance of this Rectangle is : ') print(Z) # # Great Work!
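# ### Bonus: the same formula as a reusable function
# Wrapping the calculation in a small function (our own addition) makes it easy to reuse and to check against a known case without retyping the inputs.

# +
def rectangle_circumference(width, height):
    # circumference = 2 * (width + height)
    return 2 * (width + height)

print(rectangle_circumference(3.0, 4.5))  # expected: 15.0
# -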
Circumference Of The Rectangle.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Import Libraries

library("tidyverse")
library(Metrics)
library(mctest)

# # Import Files

test = read_csv("data/test.csv")
train = read_csv("data/train.csv")
sample = read_csv("data/sample_submission.csv")

# # Preprocessing

head(train)

train$Date <- as.Date(train$Date, "%d-%b-%y")
train$Day <- format(train$Date, "%d")
train$Month <- format(train$Date, "%m")
train$Year <- format(train$Date, "%Y")
train$fStore <- as.numeric(factor(train$Store))

summary(train)

# # Trial 1

trial1 <- lm(Sales ~ ., data = train)
summary(trial1)

# +
## 75% of the sample size
smp_size <- floor(0.75 * nrow(train))

## set the seed to make your partition reproducible
set.seed(123)
train_ind <- sample(seq_len(nrow(train)), size = smp_size)

train1z <- train[train_ind, ]
test1 <- train[-train_ind, ]

forecast1 <- predict(trial1, test1)
smape(test1$Sales, forecast1)
# -

head(forecast1)

head(test1)

predict(trial1, slice(train, 1:10))

slice(test1[c(2,3,5,6,7)], 1:10)

# # Trial 2
# # remove some outliers

train2 <- train
boxplot(train2$Sales)

outliers <- boxplot.stats(train$Sales)$out
train2 <- train[-which(train$Sales %in% outliers),]
outliers <- boxplot.stats(train2$Sales)$out
train2 <- train2[-which(train2$Sales %in% outliers),]
boxplot(train2$Sales)

# +
## 75% of the sample size
smp_size <- floor(0.75 * nrow(train2))

## set the seed to make your partition reproducible
set.seed(123)
train_ind <- sample(seq_len(nrow(train2)), size = smp_size)

train2z <- train2[train_ind, ]
test2 <- train2[-train_ind, ]
# -

trial2 <- lm(Sales ~ Store + Item + Month + Year, data = train2z)
summary(trial2)

forecast2 <- predict(trial2, test2)
smape(test2$Sales, forecast2)

# # Trial 3
# # Normality

hist(log(train2$Sales)^3)

train3 <- train2
train3$Sales <- log(train3$Sales)

trial3 <- lm(Sales ~ Store + Item + Month + Year, data = train3)
summary(trial3)

# +
## 75% of the sample size
smp_size <- floor(0.75 * nrow(train3))

## set the seed to make your partition reproducible
set.seed(123)
train_ind <- sample(seq_len(nrow(train3)), size = smp_size)

train3z <- train3[train_ind, ]
test3 <- train3[-train_ind, ]

trial3 <- lm(Sales ~ Store + Item + Month + Year, data = train3z)
forecast3 <- predict(trial3, test3[c(2,3,6,7)])
smape(test3$Sales, forecast3)
# -

head(test3)

# # Trial 4
# # change all predictors from numeric to factor

train4 <- train2
train4$Year <- factor(train4$Year)
train4$Item <- factor(train4$Item)
train4$Day <- factor(train4$Day)
train4$Month <- factor(train4$Month)
train4$Store <- factor(train4$Store)
train4 <- train4[c(2,3,4,5,6,7)]
head(train4)

null <- lm(Sales ~ 1, data = train4)
trial4 <- lm(Sales ~ ., data = train4)
summary(trial4)
BIC(trial4)

## step() has no 'criterion' argument; k = log(n) makes the stepwise search use BIC
step(trial4, scope = list(lower = null, upper = trial4), direction = "both", k = log(nrow(train4)))

# +
## 75% of the sample size
smp_size <- floor(0.75 * nrow(train4))

## set the seed to make your partition reproducible
set.seed(123)
train_ind <- sample(seq_len(nrow(train4)), size = smp_size)

train4z <- train4[train_ind, ]
test4 <- train4[-train_ind, ]

trial4x <- lm(Sales ~ Store + Item + Day + Month + Year, data = train4z)
forecast4 <- predict(trial4x, test4)
smape(test4$Sales, forecast4)
# -

head(forecast4)

# # Trial 5
# # Try to log Sales again

train5 <- train4
train5$Sales <- log(train5$Sales)^3

# +
## 75% of the sample size
smp_size <- floor(0.75 * nrow(train5))

## set the seed to make your partition reproducible
set.seed(123)
train_ind <- sample(seq_len(nrow(train5)), size = smp_size)

train5z <- train5[train_ind, ]
test5 <- train5[-train_ind, ]
a_sale <- train4[-train_ind, ]

trial5x <- lm(Sales ~ Store + Item + Day + Month + Year, data = train5z)
forecast5 <- predict(trial5x, test5)
smape(test5$Sales, forecast5)
# -

# Clearly doesn't work

# # Trial 6
# # Trying steps from https://www.scribbr.com/statistics/linear-regression-in-r/

par(mfrow=c(2,2))
plot(trial4)
par(mfrow=c(1,1))

# Rounding the forecasts

# +
## 75% of the sample size
smp_size <- floor(0.75 * nrow(train4))

## set the seed to make your partition reproducible
set.seed(123)
train_ind <- sample(seq_len(nrow(train4)), size = smp_size)

train6z <- train4[train_ind, ]
test6 <- train4[-train_ind, ]

trial6x <- lm(Sales ~ Store + Item + Day + Month + Year, data = train6z)
forecast6 <- predict(trial6x, test6)
smape(test6$Sales, round(forecast6, 2))
# -
sales-forecast-r.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import tensorflow as tf

import warnings
warnings.filterwarnings('ignore')  # ignore DeprecationWarnings from tensorflow

import matplotlib.pyplot as plt
# %matplotlib inline

import gpflow
from gpflow.utilities import print_summary, set_trainable
from gpflow.ci_utils import ci_niter

from multiclass_classification import plot_posterior_predictions, colors

np.random.seed(0)  # reproducibility

# +
# Number of functions and number of data points
C = 3
N = 100

# RBF kernel lengthscale
lengthscale = 0.1

# Jitter
jitter_eye = np.eye(N) * 1e-6

# Input
X = np.random.rand(N, 1)

# +
# SquaredExponential kernel matrix
kernel_se = gpflow.kernels.SquaredExponential(lengthscale=lengthscale)
K = kernel_se(X) + jitter_eye

# Latents prior sample
f = np.random.multivariate_normal(mean=np.zeros(N), cov=K, size=(C)).T

# Hard max observation
Y = np.argmax(f, 1).reshape(-1,).astype(int)

# One-hot encoding
Y_hot = np.zeros((N, C), dtype=bool)
Y_hot[np.arange(N), Y] = 1

data = (X, Y)

# +
x = np.zeros((X.size))
for i, element in enumerate(X):
    x[i] = element
data = (x, Y)
print(data)

# +
plt.figure(figsize=(12, 6))
order = np.argsort(X.reshape(-1,))

for c in range(C):
    plt.plot(X[order], f[order, c], '.', color=colors[c], label=str(c))
    plt.plot(X[order], Y_hot[order, c], '-', color=colors[c])

plt.legend()
plt.xlabel('$X$')
plt.ylabel('Latent (dots) and one-hot labels (lines)')
plt.title('Sample from the joint $p(Y, \mathbf{f})$')
plt.grid()
plt.show()

# +
# sum kernel: Matern32 + White
kernel = gpflow.kernels.Matern32() + gpflow.kernels.White(variance=0.01)

# Robustmax Multiclass Likelihood
invlink = gpflow.likelihoods.RobustMax(C)  # Robustmax inverse link function
likelihood = gpflow.likelihoods.MultiClass(3, invlink=invlink)  # Multiclass likelihood
Z = X[::5].copy()  # inducing inputs

m = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood,
                       inducing_variable=Z, num_latent=C, whiten=True, q_diag=True)

# Only train the variational parameters
set_trainable(m.kernel.kernels[1].variance, False)
set_trainable(m.inducing_variable, False)
print_summary(m, fmt='notebook')

# +
opt = gpflow.optimizers.Scipy()

@tf.function(autograph=False)
def objective_closure():
    return - m.log_marginal_likelihood(data)

opt_logs = opt.minimize(objective_closure,
                        m.trainable_variables,
                        options=dict(maxiter=ci_niter(1000)))
print_summary(m, fmt='notebook')
# -
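# The notebook imports `plot_posterior_predictions` but never inspects the fitted model. As a
# rough follow-up sketch (not part of the source notebook), the predictive class probabilities
# can be queried on a dense grid with `predict_y`; the grid name `X_grid` and the plotting
# choices below are assumptions added here, not the author's code.

# +
# Hedged sketch: visualize the predicted class probabilities after training.
X_grid = np.linspace(0, 1, 200).reshape(-1, 1)   # dense test inputs on [0, 1]
p_mean, p_var = m.predict_y(X_grid)              # per-class probabilities and their variances

plt.figure(figsize=(12, 4))
for c in range(C):
    plt.plot(X_grid, p_mean[:, c], color=colors[c], label=f'class {c}')
plt.scatter(X, Y / (C - 1), c='k', s=8, alpha=0.3, label='data (labels scaled to [0, 1])')
plt.xlabel('$X$')
plt.ylabel('predicted class probability')
plt.legend()
plt.show()
# -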
pml_project1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.10 64-bit (''.venv'': poetry)' # name: python3 # --- # # Регрессия 2 - новые приключения # > 🚀 В этой практике нам понадобятся: `numpy==1.21.2, pandas==1.3.3, matplotlib==3.4.3, scikit-learn==0.24.2` # # > 🚀 Установить вы их можете с помощью команды: `!pip install numpy==1.21.2, pandas==1.3.3, matplotlib==3.4.3, scikit-learn==0.24.2` # # # Содержание <a name="content"></a> # # * [Полиномиальные признаки](#Polinomial_nye_priznaki) # * [Недообучение (high bias)](#Nedoobuchenie_(high_bias)) # * [Переобучение (high variance)](#Pereobuchenie_(high_variance)) # * [Из чего состоят данные?](#Iz_chego_sostojat_dannye?) # * [Обратно к проблемам...](#Obratno_k_problemam...) # * [Train и test выборки](#Train_i_test_vyborki) # * [А что, если дать больше данных?](#A_chto,_esli_dat__bol_she_dannyh?) # * [Отношение между сложностью модели и её ошибками (High bias и High Variance)](#Otnoshenie_mezhdu_slozhnost_ju_modeli_i_ee_oshibkami_(High_bias_i_High_Variance)) # * [Заключение](#Zakljuchenie) # * [Вопросы для закрепления](#Voprosy_dlja_zakreplenija) # * [Полезные ссылки](#Poleznye_ssylki) # # + _cell_id="KasaoSe1Nyez9gLg" # Настройки для визуализации # Если используется тёмная тема - лучше текст сделать белым import matplotlib import numpy as np import pandas as pd import matplotlib.pyplot as plt import random TEXT_COLOR = 'black' matplotlib.rcParams['figure.figsize'] = (15, 10) matplotlib.rcParams['text.color'] = TEXT_COLOR matplotlib.rcParams['font.size'] = 14 matplotlib.rcParams['lines.markersize'] = 15 matplotlib.rcParams['axes.labelcolor'] = TEXT_COLOR matplotlib.rcParams['xtick.color'] = TEXT_COLOR matplotlib.rcParams['ytick.color'] = TEXT_COLOR # Зафиксируем состояние случайных чисел RANDOM_SEED = 42 np.random.seed(RANDOM_SEED) random.seed(RANDOM_SEED) # - # Если вы думали, что на этом история про регрессию закончилась, то вы сильно ошибаетесь... # # В этом выпуске вы узнаете про магию, обман и другие интересные явления! # # И начнём мы с магии! Прошу на борт! # ## Полиномиальные признаки <a name="poly"></a> # Помните, что любая работа с моделями машинного обучения делается на основе данных? # # Так вот, всегда начинаем с анализа данных! # # Давайте посмотрим на те данные, которые мы здесь имеем. # + _cell_id="hPyAAX7lbEJywO6O" rng = np.random.default_rng(RANDOM_SEED) # Сгенерируем данные n_points = 100 _data_shift = 4 _x_data = 4*rng.random(n_points)+1 _x_render = (_x_data-5) _y_data = _data_shift + 1 *_x_render**2 + rng.random(n_points)*4 df_data = pd.DataFrame({'crime_level': _x_data, 'house_price': _y_data}) plt.scatter(_x_data, _y_data) plt.xlabel('Уровень преступности') plt.ylabel('Цена дома, млн руб') plt.grid() # + _cell_id="sCbvx4ancC9VifsH" # Также, посмотрим на данные в виде таблицы df_data.head(5) # + _cell_id="p88XBJv0FHz7BdKj" # И размер df_data.shape # - # > ⚠️ Обратите внимание, что даже 100 записей в данных уже усложняет анализ в виде таблицы, при этом визуальное представление всё ещё сохраняет возможность проанализировать данные. # # Перво-наперво, мы видим явную зависимость между уровнем преступности и ценами на дома. Это можно использовать и разработать систему предсказания цены дома на основе уровня преступности! # # Давайте посмотрим и обратим внимание, что зависимость цены дома от уровня преступности имеет явно нелинейный характер - такую зависимость будет невозможно описать линейной моделью. 
Точнее, описать можно, но точность будет невысокая. # # Что в таком случае делать? Давайте вспомним, какие ещё есть варианты аналитических описаний? # # <p align="center"><img src="https://raw.githubusercontent.com/kail4ek/ml_edu/master/assets/think_about_it_image.png" width=600></p> # Смотрите, да это же часть параболы! # # <p align="center"><img src="https://raw.githubusercontent.com/kail4ek/ml_edu/master/assets/parabola.png" width=600></p> # Общее уравнение параболы выглядит так: $$y = a*x^2+b*x+c$$ # # А теперь напишем уравнение линейной регрессии для двух переменных: $$y = w_1*x_1+w_2*x_2+w_0$$ # # Видите связь? По сути, мы можем взять и выразить $x$ как первый признак, $x^2$ как второй признак и таким образом с помощью линейной регрессии решить задачу полиномиальной регрессии! # # Если проще - в данных есть $y$ и $x$, но зависимость между ними явно напоминает полиномиальную. Значит немного модифицируем данные, чтобы решить эту сложнейшую задачу! # Так, а как нужно модифицировать данные? # # Всё, что нам нужно сделать - это сделать две переменные $x_1$, которая является самой переменной $x$, и $x_2$, которая является квадратом от оригинальной. # # Давайте сделаем это в виде функции предобработки данных! # + _cell_id="uVZfJLqCe30PExMN" # TODO - реализуйте функцию предобработки данных c генерацией полиномиальных признаков # NOTE - функция preprocess_data() должна принимать DataFrame с данными на вход # NOTE - предобработка должна генерировать колонку crime_level_poly в датафрейме # NOTE - на выход функция возвращает исходный DataFrame с добавленной новой колонкой def preprocess_data(df): # x1=df['crime_level'] x2=np.power(df['crime_level'],2) df['crime_level_poly']=x2 return df preprocess_data(df_data) # + _cell_id="rz5qw8w5luEHjej0" # TEST - проверимс ваше решение _test_df_preproc = preprocess_data(df_data.copy()) assert 'crime_level_poly' in _test_df_preproc np.testing.assert_equal(len(_test_df_preproc.columns), 3) np.testing.assert_array_almost_equal( _test_df_preproc['crime_level_poly'], _test_df_preproc['crime_level']**2 ) print("Well done!") # - # Отлично! А теперь задачка на расширение знаний и умений! # # Она не обязательна, но желательна! # # Напишите функцию для той же самой генерации фич, но уже с помощью класса из sklearn. Проанализируйте доки класса [PolynomialFeatures](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) и напишите с помощью него функцию генерации полиномиальных признаков: # # > ⚠️ Это класс трансформера, мы уже сталкивались с моделью, у неё были методы `fit()` и `predict()`. Трансформеры данных преобразуют данные. Они имеют методы `fit()` и `transform()`. # # > ⚠️ Обратите внимание, что на вход `transform()` метода должна подаваться матрица признаков. Несмотря на то, что у нас всего вектор признаков, его форму можно поменять с помощью `reshape()`. # + _cell_id="YMV7J4iaSX9cgYUf" # TODO - реализуйте аналогичную функцию, которая делает то же самое, но с использованием класса PolynomialFeatures # NOTE - название функции preprocess_data_sklearn # NOTE - проверка корректности на вашей совести - обязательно проверьте, правильно ли ваша функция предобрабатывает данные from sklearn.preprocessing import PolynomialFeatures def preprocess_data_sklearn(df): df=df['crime_level'].values df=df.reshape(-1,1) poly = PolynomialFeatures(2) df['crime_level_poly']=poly.fit_transform(df)[:,2] return df preprocess_data(df_data) # - # > 🔥 Думаю, вы заметили, что ручная реализация в данном случае была намного проще. 
Такие ситуации нередки, поэтому стоит задумываться о том, когда удобно применять готовый функционал, а когда он быстрее и проще реализуется с помощью базовых вызовов. # # Превосходно! Мы смогли сгенерировать полиномиальные признаки и, похоже, готовы к обучению модели! # # Давайте перейдём к самому вкусному! # + _cell_id="YRB1FKttfAfEjmvh" from sklearn.linear_model import LinearRegression df_train_data = df_data.copy() df_train_data = preprocess_data(df_train_data) reg_model = LinearRegression() # Достаем из данных целевую колонку - там остаются только переменные-признаки y_true = df_train_data.pop('house_price') # Если в DataFrame остались только признаки, то можно подавать весь фрейм на вход методам классов sklearn # Так как фрейм - это матрица, то ничего конвертировать не надо reg_model.fit(df_train_data, y_true) y_pred = reg_model.predict(df_train_data) # - # > 🔥 Зачем мы копируем данные в начале ячейки кода? Это сделано специально, чтобы исходные данные не были модифицированы. Другая полезная практика - никогда не вызывать ячейки ноутбука в нелинейном порядке. Как правило, второму нужно учиться и привыкать, когда первое помогает от случайных удалений данных из фреймов сразу. # # > 🔥 Есть другая особенность, ноутбуки не удаляют объекты после выполнения ячеек. Таким образом, слишком много копирований может быстро съесть оперативную память, если вы работаете с большими данными (больше 1 ГБ на датафрейм). Обращайте на это внимание! # # Обучение прошло, но не забываем, что анализ работы модели - намного более важный процесс, чем само обучение модели! # # Для начала визуализируем нашу модель на графике, чтобы посмотреть, как она пересекается с точками: # + _cell_id="OkmUjnHLqKoW17VW" plt.scatter(x='crime_level', y='house_price', data=df_data, s=15, label='Данные') plt.scatter(df_data['crime_level'], y_pred, c='r', s=15, label='Предсказания') plt.xlabel('Уровень преступности') plt.ylabel('Цена дома, млн руб') plt.grid() plt.legend() plt.show() # - # По графику видно, что модель проходит через точки, что делает первую проверку пройденной, модель действительно восстановила зависимость данных. # # Это означает, что мы научились применять специальную предобработку данных, когда в данных видна явная полиномиальная зависимость для реализации полиномиальной регресии. Шикарно! # ## Недообучение (high bias) <a name="underfit"></a> # В ходе работы с данными и обучением моделей вы столкнетесь с разными интересными эффектами. Сейчас наш арсенал состоит только из линейной и полиномиальной регресий (вторая - частный случай первой), но тем не менее мы уже можем рассмотреть интересный эффект, который возникает при обучении. # # Давайте вспомним, что линейная регрессия может описывать зависимости вида прямой линии под разными углами наклона. # # Полиномиальная второго порядка (степень второго порядка) может описывать параболические зависимости. # # Третий порядок сможет описать более сложные и гибкие (хитрые) зависимости в данных. # # То есть, степень полинома можно рассматривать как сложность модели - чем больше степень, тем более сложные зависимости можно описать. # # > ⚠️ То есть в задаче регрессии степень полиномиальных признаков = сложность модели. # # В ходе работы с одним признаком мы смогли нарисовать график, который визуализирует зависимость целевой переменной от признака (фичи), но в работе могут быть данные, в которых есть множество признаков! Визуализировать всё сразу - не получится. 
# # > ⚠️ Тем не менее, полезно визуализировать графики в осях "целевая переменная" - "признак" для оценки влияния признака на целевую переменную. # Давайте посмотрим на другие данных, в которых выражена сильная нелинейность: # + _cell_id="wm7TPGKE70LxbXKr" rng = np.random.default_rng(RANDOM_SEED) _x_data = 3*rng.random(100) _y_data = 7 + 4*_x_data + -2*_x_data**2 + 2*(rng.random(_x_data.shape[0])-0.5) df_data = pd.DataFrame({'hours': _x_data, 'productivity': _y_data}) plt.scatter(x='hours', y='productivity', data=df_data, s=20) plt.xlabel('Часы тренировки') plt.ylabel('Продуктивность спортсменов') plt.grid() # + _cell_id="Crh5q9Pa6K9vlA4W" df_data.head() # + _cell_id="fgYqP74PKjEczaxv" df_data.shape # - # Данные представляют собой зависимость продуктивности спортсменов в разрезе часов тренировки. # # Видна сильная нелинейная зависимость, но при этом давайте попробуем обучить явно "слабую" модель, которая постарается описать это линейной зависимость. # + _cell_id="eD1SD2QqSCaPKZE3" df_train_data = df_data.copy() reg_model = LinearRegression() y_true = df_train_data.pop('productivity') reg_model.fit(df_train_data, y_true) y_pred = reg_model.predict(df_train_data) plt.scatter(x='hours', y='productivity', data=df_data, s=15, label='Данные') plt.scatter(df_data['hours'], y_pred, c='r', s=15, label='Предсказания') plt.xlabel('Часы тренировки') plt.ylabel('Продуктивность спортсменов') plt.grid() plt.legend() plt.show() # - # На графике видно, что модель в виде прямой линии проходит через точки, то есть обучение прошло и модель даже описывает базовую тенденцию, но тип зависимости не тот, который есть в данных. Особенно выделяется часть с часами до 1.0, так как на том участке зависимость росла. # Тогда нам нужно подкрепить нашу визуальную оценку количественными показателями, но как это сделать? # # Мы познакомились с MAE, который хорош, если мы оцениваем несколько вариантов моделей, так как единичное измерение трудно интерпретировать. Мы можем явно взять полиномиальную модель и на ней видеть, что MAE возросло. Что же делать...? # # > ⚠️ Важно обратить внимание, что хоть мы сейчас и рисуем графики, при наличии множества признаков невозможно (без хитрого инструментария) визуализировать предсказания в плоскости для сравнения с данными. # # Вот бы был такой показатель, который отражает "хорошесть" обученой модели без необходимости сравнения... # В задаче регресии есть метрика под названием $R^2$ (r-squared) - коэффициент детерминации, который имеет две особенности: # - Имеет верхний предел 1.0, что позволяет судить об оценке без сравнений # - Хоть и не имеет нижнего предела, но значения менее 0 говорят о том, что модель описывает данные хуже, чем если просто взять среднее от данных и всегда его предсказывать. # # Вроде звучит вкусно! 
# # --- # # Если кому интересно, можете пройтись по математике и выполнить задачку # # Вычисляется оценка с помощью двух составляющих: # # - Сумма квадратов отклонений данных (total sum of squares) # $$ # SS_{tot}=\sum_{i}(y^{(i)}-\bar{y})^2 # $$ # # - Сумма отклонений предсказаний (sum of squares of residuals) # $$ # SS_{res}=\sum_{i}(y^{(i)}-h_W^{(i)}(x^{(i)}))^2 # $$ # # где $\bar{y}=\frac{1}{n}\sum_{i}y^{(i)}$ # # Сама оценка рассчитывается следующим образом: # $$ # R^2=1-\frac{SS_{res}}{SS_{tot}} # $$ # # Доки функции [r2_score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html) # + _cell_id="Vp5JJvueDQI9ph6u" # TODO - реализуйте код расчета показателя и сравните с функцией r2_score def r2_score_manual(y_true, y_pred): ss_tot=sum( (y_true - y_true.mean(0))**2) ss_res=sum( (y_true-y_pred)**2) r2_value_manual= 1-ss_res/ss_tot return r2_value_manual r2_score_manual(y_true,y_pred) # - # Если вы выполнили задачу, то вы уже увидели значение коэффициента детерминации. # # Если нет, то давайте посмотрим на него с другой стороны. # # Класс `LinearRegression` имеет метод `.score()`, которому можно передать данные для оценки и по-умолчанию он вернёт значение R2: # + _cell_id="eB8N51XyJ4Xmxbfl" r2_value = reg_model.score(df_train_data, y_true) r2_value # - # Ого, но что это значит? =) # # * Показатель R2 выше нуля говорит о том, что модель работает лучше, чем если бы мы взяли среднее от целевой переменной и его постоянно предсказывали (константная модель). # # * Значение близкое к 0.5 является достаточно низким # # * Показатели на уровне 0.8-0.9 уже являются более приемлимыми, но это не говорит о том, что на уровне 0.55 модель никуда не годится. # # > ⚠️ Даже у тех метрик, у которых определены пределы, никогда нельзя закладываться на конкретные ожидаемые числа. Чем больше/меньше (в зависимости от показателя), тем лучше, но многое зависит от данных. Часто бывает так, что даже самая сложная модель на очень плохих данных даёт низкие показатели. # # Так мы выявили численно без необходимости сравнения, что модель плохо описывает данные. График ранее подтверждает это. В этом и проявляется эффект **недообучения (high bias, underfit)**. # # > 🤓 Недообучение (underfit, high bias) - эффект, при котором модель не может восстановить зависимость. Как правило возникает из-за того, что модель недостаточно сложная или данные имеют неадекватно сложные зависимости внутри (низкое качество в данных, много ошибок в разметке). # # Таким образом, по показателю R2 и, если возможно, визуализации предсказаний можно понять, что модель имеет недостаточную сложность и нужно сменить тип модели или сделать её сложнее (в случае линейной регресии добавить полиномиальных признаков). # # Отличный результат! Вот ещё один инструмент анализа в копилку! # ## Переобучение (high variance) <a name="overfit"></a> # На этом наше приключение в лесу забавных и не очень эффектов обучения моделей не заканчивается! # # Если до этого **недообучение**, которое не так сложно поддается анализу, является цветочками :hibiscus:, то сейчас мы углубимся в один из наиболее неприятных и опасных эффектов в обучении - **переобучение**. # # А ещё мы поймём, почему до этого мы жутко читерили и так никогда делать больше не будем! 
# # Начнем, как всегда, с набора данных: # + _cell_id="52eTIilRIxuAYrI9" rng = np.random.default_rng(RANDOM_SEED) _x_data = 3*rng.random(20) _y_data = 7 + 4*_x_data + -2*_x_data**2 + 2*(rng.random(_x_data.shape[0])-0.5) df_data = pd.DataFrame({'hours': _x_data, 'productivity': _y_data}) plt.scatter(x='hours', y='productivity', data=df_data, s=20) plt.xlabel('Часы тренировки') plt.ylabel('Продуктивность спортсменов') plt.grid() # + _cell_id="tBQlUpP29laQrZw9" df_data.shape # + _cell_id="z5IIJGZGLBpcyJE3" df_data.head(5) # - # Воспользуемся теми же самыми данными, но уменьшим выборку до 20 примеров. # # Помимо этого, давайте представим, что мы поняли, что линейная модель здесь на работает и мы решили бахнуть 15-ю степень! # # Давайте напишем функцию предобработки: # + _cell_id="HpSITi8UZf4OnKLL" # TODO - реализуйте функцию предобработки данных c генерацией полиномиальных признаков # NOTE - функция preprocess_data_high_poly() должна принимать DataFrame с данными на вход и степень, до которой надо возвести # NOTE - предобработка должна генерировать колонку hours_poly_n в датафрейме, где n - степень полинома # NOTE - на выходе должен быть исходный DataFrame с новой заполненой колонкой hours_poly_n def preprocess_data_high_poly(df_data,pow): for i in range(pow): df_data[f'hours_poly_{i}']=df_data['hours']**i return df_data # + _cell_id="2YMtvjwCmtg3vxDy" # TEST - проверимс ваше решение _test_df_preproc = preprocess_data_high_poly(df_data.copy(), 10) assert 'hours_poly_5' in _test_df_preproc np.testing.assert_almost_equal( len(_test_df_preproc.columns), 12 ) np.testing.assert_almost_equal( np.sum(_test_df_preproc['hours_poly_2']), 73.11673581101243 ) np.testing.assert_almost_equal( np.mean(_test_df_preproc['hours_poly_3']), 8.409648291371747 ) print("Well done!") # - # А теперь обучим модель и посмотрим на график зависимости, которую мы попытались восстановить: # + _cell_id="7AYd3EZwkL2tOQ9s" df_train_data = df_data.copy() reg_model = LinearRegression() df_train_data = preprocess_data_high_poly(df_train_data, 15) y_true = df_train_data.pop('productivity') reg_model.fit(df_train_data, y_true) y_pred = reg_model.predict(df_train_data) x_render = np.linspace(0, 3, 1000) df_check_data = pd.DataFrame({'hours': x_render}) df_check_data = preprocess_data_high_poly(df_check_data, 15) y_pred_render = reg_model.predict(df_check_data) plt.scatter(x_render, y_pred_render, c='r', s=15, label='Предсказания') plt.scatter(x='hours', y='productivity', data=df_data, marker='x', s=45, label='Данные') plt.xlabel('Часы тренировки') plt.ylabel('Продуктивность спортсменов') plt.ylim([-10, 20]) plt.grid() plt.legend() plt.show() # - # Что же это за чудовище?? Что случилось? Где-то ошибка? # # Не пугаемся, просто случилось то, чего боятся все, кто работает в области машинного обучения - ОВЕРФИТ (Overfit)! # # Что это значит? Да именно то, что модель не выявила зависимость. Она в данных намного проще, чем модель построила! # # В данном случае модель перешла за границу построения общей зависимости и начала реагировать на **шум в данных** и подстраиваться под него! # # Шум? Какой шум? Что ещё за шум?? # # Давайте немного отклонимся и поговорим из чего состоят все данные! # ## Из чего состоят данные? <a name="data"></a> # Я вам открою маленьку тайну, эти данные сгенерированы, только тссс.. 
:bowtie: # # Так вот, давайте посмотрим на реальную зависимость, по которой эти данные построены: # + _cell_id="mUvCPYJTu5S0gzjf" rng = np.random.default_rng(RANDOM_SEED) _x_sample_data = 3*rng.random(20) _y_sample_data = 7 + 4*_x_sample_data + -2*_x_sample_data**2 + 2*(rng.random(_x_sample_data.shape[0])-0.5) _x_line_data = np.linspace(0, 3, 1000) _y_line_data = 7 + 4*_x_line_data + -2*_x_line_data**2 plt.scatter(_x_sample_data, _y_sample_data, s=20, label='Реальные данные') plt.scatter(_x_line_data, _y_line_data, s=15, label='Зависимость в данных') plt.xlabel('Часы тренировки') plt.ylabel('Продуктивность спортсменов') plt.legend() plt.grid() # - # Смотрите, на графике линией отражена зависимость без примесей, т.е. истинная зависимость. Это часть параболы, то есть полином второй степени. Но данные (точки) не лежат на самой линии. Почему? # # Да всё потому что, любые данные состоят из двух основных составляющих: $$данные = шум + зависимости$$ # # То есть, шум есть в любых данных! Он может варьироваться по величине, характеру, но главное, что это то, что отвлекает нас от главного в данных - зависимостей. которые мы пытаемся понять, достать и использовать! # # Откуда он берётся? # # Всё просто, если данные - это замеры времени круга бегуна, то это могут быть неточности измерения человеком на секундомере. Если это данные по скорости движения автомобиля на трассе, то датчик может иметь погрешность. Причин может быть много, но главное, что шум есть всегда! # # > ⚠️ Ещё раз, шум в данных присутсвует всегда. Если у вас данные лежат на прямой или ровно по линии - повод их перепроверить! # # Что с этим делать? Тщательнее проверять работу модели, к чему предлагаем и вернуться! # ## Обратно к проблемам... <a name="probs"></a> # Ещё раз посмотрим на график # + _cell_id="8kGimDgmvU9hTSsn" plt.scatter(x_render, y_pred_render, c='r', s=15, label='Предсказания') plt.scatter(x='hours', y='productivity', data=df_data, marker='x', s=45, label='Данные') plt.xlabel('Часы тренировки') plt.ylabel('Продуктивность спортсменов') plt.ylim([-10, 20]) plt.grid() plt.legend() plt.show() # - # Уфф, лучше не стало, но понадеяться стоило =) # # Так вот, что же с этим делать? Как помним, визуализация нам не всегда доступна, поэтому считаем, что нам сейчас повезло, но что делать, если её нет? # # Вот бы коэффициент, которые показывает, насколько модель хорошо отражает данные - R2, кажется. Давайте попробуем! # + _cell_id="hwjMEn0UMWEUqaG3" from sklearn.metrics import r2_score r2_value = r2_score(y_true, y_pred) r2_value # - # Огооо, 0.97, да эта модель знает данные лучше, чем кто-либо! Похоже, так не выйдет.. # # Ладно, давай проверим показатель ошибок, вдруг он будет заоблачным? # + _cell_id="EpEo0zwcgn8Brptc" from sklearn.metrics import mean_absolute_error mae_value = mean_absolute_error(y_true, y_pred) mae_value # - # Да что такое, 0.24 - это всего 2.4 % от максимума (10 единиц). Тоже ничего такого, как же быть? # # Есть один способ, но давайте для начала подумаем... # # <p align="center"><img src="https://raw.githubusercontent.com/kail4ek/ml_edu/master/assets/think_about_it_image.png" width=600/></p> # ## Train и test выборки <a name="train_test_split"></a> # Не хотел я вам признаваться, но всё это время мы делали одну очень серьезную ошибку в машинном обучении. # # В подходе машинного обучения есть понятие разделения данных на выборки. Самое простое разделение называется **train-test split**, что означает "разделение данных на выборки обучения и тестирования". # # Для чего это делается? # # Очень просто! 
Представьте, что вы учитесь играть в футбол. У вас есть команда, с которой вы тренируетесь, постоянно играете, отрабатываете удары, защиту, нападение и т.д. И все тренировки проходят +/- в одной и той же среде. И вот настаёт время соревнований (оценки ваших способностей). Соревнования серьёзные, поэтому они проходят в другом городе, собираются разные команды, со всего мира! # # И вот начинается оценка ваших способностей: если вы хорошо тренировались, учили комбинации, как надо постигали техники, то и даже в новой среде и игре против новой команды вы всё равно сможете выстоять против соперников! Это хороший результат обучения. # # А теперь представьте другой вариант, вы запомнили все привычки ваших друзей, с кем тренировались. Выучили их движения и с легкостью их обыгрываете. Но пусть на поле против вас выйдет другая команда - вы уже не сможете их обыграть =( # # Почему так происходит? Потому что как в футболе, так и в обучении модели, необходимо выявлять общие зависимости в данных, движении, поведении. Если просто запомнить то, что вам даётся для обучения (например, что у Васи больная левая коленка и его можно обыграть слева), то оценка на новых данных будет сильно смещена! # Аналогично при работе с данными мы делаем просто финт: разделяем данные на выборки так, чтобы были данные, на которых мы учимся играть, и данные для оценки способностей в игре. # # Как правило, тренировочных часов в футболе явно больше, чем соревновательных, поэтому и выборки часто делают в отношении 80 к 20 или 70 к 30. # # > ⚠️ Как выбрать соотношение? В статистике есть понятие репрезентативности. Если мы имеет набор данных из 1000 примеров, то лучше выделить на тест 300 примеров, чтобы тестирование модели было адекватным. А вот если мы имеет 1_000_000, то тут уже можно выделить и 20% от основного набора или даже 10%, так как выборка для тестирования будет достаточно репрезентативна. Правило простое, лучше не жалеть в выборке на тест, так как лучше тестирование будет более адекватным и покажет хуже результаты, чем мы не заметим в модели негативные эффекты! # Хватит слов - пора вернуться к нашей проблеме и решить её! # # Давайте воспользуемся готовой функцией sklearn - [train_test_split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html), чтобы изначально разделить наши данные: # + _cell_id="ehgY4ZEOBDqbCPRt" from sklearn.model_selection import train_test_split df_train_data, df_test_data = train_test_split(df_data, test_size=.3, random_state=RANDOM_SEED, shuffle=True) df_train_data.shape, df_test_data.shape # - # Отлично, в наших данные 14 примеров для обучения и 6 для тестирования! # # > 🔥 Всегда фиксируйте `random_state` в функциях, если это возможно. На этом строится понятие **воспроизводимости экспериментов**. То есть, если эксперимент, который вы проводили, нельзя воспроизвести, то это очень плохо, так как результаты могут сильно колебаться. Погуглите, на сегодняшний день этот вопрос очень актуален в кругах data science инженеров. # # Давайте посмотрим, как они разделены между собой: # + _cell_id="l0ZubdzPZYUfOo69" plt.scatter(x='hours', y='productivity', data=df_train_data, s=60, label='Обучение') plt.scatter(x='hours', y='productivity', data=df_test_data, s=60, label='Тестирование') plt.xlabel('Часы тренировки') plt.ylabel('Продуктивность спортсменов') plt.grid() plt.legend() plt.show() # - # Отлично, мы видим, что тестовые данные есть и в начале, и в середине диапазона и в конце. Почему это важно? 
# # Ответ также прост, если мы будем обучать на выборке до 1.5 часов тренировки, а тестирова на выборке более 1.5 часов, то нелогично ожидать от модели способности адекватно предсказывать на данных для тестирования. Она же подобные данные никогда не видела и не знает, какие там зависимости. # # > ⚠️ Всегда старайтесь логически подходить к процессу обучения модели. Модель не сделать больше, чем её возможности, поэтому и результаты как правило ожидаемы, если учесть большинство факторов. # # Именно по этой причине мы сделали `shuffle=True` в вызове функции разделения. Это для того, чтобы перемешать выборку и выбирать случайно примеры для обучения и теста. Тогда высока верятность, что в тест попадут примеры, пересекающиеся по области из выборки для обучения! # # > ⚠️ На этом основано понятие **домен данных**. Если домен данных для тестирования отличается от домена выборки для обучения, то это очень плохо. # # > ⚠️ Другой полезный термин - **дрифт данных**, аналогично, если модель обучалась на одних данных (до 2-х часов тренировки), а в ходе реальной работы модели стали поступать данные о 3-х и 4-х часах тренировки для предсказания, то это смещение и должно быть отслежено. Не то, чтобы это было легко сделать.. # Когда данные разделены - переходим к процессу обучения! # + _cell_id="pFCeLfXtm4Nxcoow" reg_model = LinearRegression() df_x = df_train_data.copy() df_x = preprocess_data_high_poly(df_x, 15) y_true = df_x.pop('productivity') reg_model.fit(df_x, y_true) y_pred = reg_model.predict(df_x) x_render = np.linspace(0, 3, 1000) x_render = np.concatenate([x_render, df_x['hours']]) df_line_data = pd.DataFrame({'hours': x_render}) df_line_data = preprocess_data_high_poly(df_line_data, 15) y_pred_render = reg_model.predict(df_line_data) plt.scatter(x_render, y_pred_render, c='r', s=45, label='Предсказания') plt.scatter(x='hours', y='productivity', data=df_train_data, marker='x', s=45, label='Данные обучения') plt.scatter(x='hours', y='productivity', data=df_test_data, c='g', marker='x', s=45, label='Данные тестирования') plt.xlabel('Часы тренировки') plt.ylabel('Продуктивность спортсменов') plt.ylim([-10, 20]) plt.grid() plt.legend() plt.show() # - # А теперь самое вкусное - сравним показатели ошибок на выборке обучения и тестирования: # + _cell_id="nqu2ms0rQPe6fYTn" df_test_x = df_test_data.copy() df_test_x = preprocess_data_high_poly(df_test_x, 15) y_test_true = df_test_x.pop('productivity') y_test_pred = reg_model.predict(df_test_x) mae_value_train = mean_absolute_error(y_true, y_pred) mae_value_test = mean_absolute_error(y_test_true, y_test_pred) print(f'Test vs train: {mae_value_test} | {mae_value_train}') print(f'Error ratio: {mae_value_test/mae_value_train}') # - # А вот и индикатор переобучения! # # Сами видите, ошибка теста почти в 20 миллионов раз выше, чем ошибка обучения. Так мы сразу понимаем, что мы перебрали со сложностью модели и произошло переобучение! # # > 🤓 Переобучение (overfit, high variance) - эффект, который возникает при сильном превышении сложности модели над сложностью зависимости в данных. Чаще всего появляется из-за малого количества данных или слишком сложной модели. # # > ⚠️ Отныне перед работой с данными важно разделять данные на обучение и тестирование (как минимум). В дальнейшем мы узнаем про другие типы выборок, но сейчас важно запомнить именно это. **Тестовая выборка** - это на новый год, поэтому ее откладывают до последнего момента оценки работы с моделью. 
Она считается эталонной, чтобы понять, как модель будет работать с данными, которые ещё не видела, но при этом имеет разметку (истинные значения), что позволяет сделать оценку. # # Таким образом, мы научились определять переобучение в данных и делать это очень неплохо! # # Отныне мы будем всегда разделять данные, чтобы у нас была возможность проверить адекватность модели, иначе вот так обрадуемся шикарным показателям на выборке для обучени и никак не проверим адекватность модели =( # # > 🔥 В применении sklearn есть базовое разделение на `fit()` и `transform/predict()`, так вот, `fit()` всегда делается только на выборке для обучения и никак иначе! `transform/predict()` уже можно делать на любых выборка после фита на обучении. # ## А что, если дать больше данных? <a name="more_data"></a> # Давайте для закрепления проверим, как будет вести себя модель, если сгенерировать побольше данных и использовать всё ту же модель 15-й степени: # + _cell_id="Y0h9t6AKUMT4ZtJF" rng = np.random.default_rng(RANDOM_SEED) _x_data = 3*rng.random(2000) _y_data = 7 + 4*_x_data + -2*_x_data**2 + 2*(rng.random(_x_data.shape[0])-0.5) df_data_large = pd.DataFrame({'hours': _x_data, 'productivity': _y_data}) df_data_large.head() # + _cell_id="jLFhTn8YvvdG9Ktb" # TODO - реализуйте обучение на новом наборе данных # NOTE - предсказания модели на тестовой выборке должны быть записаны в переменную y_test_pred # NOTE - истинные значения тестовой выборки должны быть записаны в переменную y_test_true reg_model = LinearRegression() train_data_large, test_data_large = train_test_split(df_data_large, test_size=.3, random_state=RANDOM_SEED, shuffle=True) train_data_copy=train_data_large.copy() train_data_copy = preprocess_data_high_poly(train_data_copy, 15) y_train_true = train_data_copy.pop('productivity') reg_model.fit(train_data_copy,y_train_true) y_train_pred = reg_model.predict(train_data_copy) test_data_copy = test_data_large.copy() test_data_copy = preprocess_data_high_poly(test_data_copy, 15) y_test_true = test_data_copy.pop('productivity') y_test_pred = reg_model.predict(test_data_copy) print(mean_absolute_error(y_test_true, y_test_pred)) # + _cell_id="NrVXplrMQF8BUgQ8" # TEST - проверимс ваше решение np.testing.assert_almost_equal(mean_absolute_error(y_test_true, y_test_pred), 0.5008875868218917) print("Well done!") # - # Да, как стало больше данных, сразу модели некуда деваться и она выдерживает зависимость, но нужно понимать, собрать больше данных, как правило, очень сложная задача, поэтому мы скоро посмотрим методы, как бороться с переобучением, а не только его отслеживать! # ## Отношение между сложностью модели и её ошибками (High bias и High Variance) <a name="ratio"></a> # Мы уже немало поговорили о bias и variance, но всё-таки, важно закрепить важную особенность. Взглянем на изображение: # # <p align="center"><img src="https://raw.githubusercontent.com/kail4ek/ml_edu/master/assets/11_bias_variation.png" width=600/></p> # Видите, как мы и говорили, слишком малая сложность (недостаточная) ведёт к низкой variance и высокому bias из-за того, что модель не справляется с восстановлением (описанием) зависимостей. Это ведёт к росту ошибки предсказания на новых данных. # # С другой стороны, слишком высокая сложность ведёт к высокой variance и низкому bias, что также ведёт к росту ошибки на новых данных! # # Что же делать? Искать эту точку оптимума по середине, чтобы модель была не слишком сложной, но и не слишком дубовой (простой). 
Конечно же, найти идеальную точку - цели не стоит, но мы можем совместить методы предотвращения переобучения и сделать модель чуть сложнее, и тогда мы сможем прийти к наболее подходящему варианту для конкретных данных. # # Вот так вот! # ## Заключение <a name="conclusion"></a> # Итого, сегодня мы узнали: # # * как восстанавливать нелинейные зависимости # * что такое недообучение и переобучение, а также как их ловить # * ещё поняли, зачем всё-таки делить данные на выборки и больше не будем совершать таких ошибок! # # Ура, молодцы! :sunglasses: # # > ⚠️ Заметили, что с недообучением не так много суеты, как с переобучением. Все потому, что второе сложнее отследить и оно несёт крайне более неприятные последствия, чем недообучение, которое видно почти сразу! Тем неменее, оба эффекта можно отследить и предотвратить! # ## Вопросы для закрепления # # А теперь пара вопросов, чтобы закрепить материал! # # 1. Почему для полиномиальной зависимости всё ещё используется объект класса `LinearRegression` - в названии ведь "линейный", а нам надо параболу? # 2. Зачем делить данные на выборки? # 3. Как разделить данные, если всего у вас есть 100_000_000 строк? Сколько пойдёт в train, а сколько в test? # 4. Что делать, если очевидно, что модель недообучилась? Как это понять? # 5. Как понять, что модель переобучилась? # # Полезные ссылки <a name='links'></a> # * [R2 explained от StatQuest](https://www.youtube.com/watch?v=2AQKmw14mHM) #
notebooks/11_Regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Fibonacci series up to n

def fibonacci(n):
    """Print the Fibonacci numbers that are strictly less than n."""
    result = []
    a, b = 0, 1
    while a < n:
        result.append(a)
        a, b = b, a + b
    print(result)

fibonacci(5)
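# An optional variant, not part of the original exercise: returning the list instead of
# printing it makes the function easy to check with asserts. The name `fibonacci_list` is
# made up for this sketch.

def fibonacci_list(n):
    """Return the Fibonacci numbers that are strictly less than n."""
    result = []
    a, b = 0, 1
    while a < n:
        result.append(a)
        a, b = b, a + b
    return result

assert fibonacci_list(5) == [0, 1, 1, 2, 3]
assert fibonacci_list(1) == [0]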
Week_02/exercise_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.6.12 64-bit (''mlflow-1a43b1fcadd2a32c00a1972c5464ff374bda3f6b'': conda)'
#     name: python3
# ---

import urllib.request
import requests
import pandas as pd
import json

for file_name in ['test_data_1000_dominicks.csv', 'test_data_1002_tropicana.csv']:
    sample_data = pd.read_csv(file_name)
    sample_data.drop(['Unnamed: 0'], axis=1, inplace=True)

    # Use the below version of URL and header in case you test with remote web service (AKS)
    url = 'https://many-model2.westus2.inference.ml.azure.com/score'
    api_key = '<KEY>'  # Replace this with the API key for the web service
    headers = {'Content-Type': 'application/json', 'Authorization': ('Bearer ' + api_key)}

    data = {"Inputs": sample_data.to_json()}
    body = str.encode(json.dumps(data))

    resp = requests.post(url, data=body, headers=headers)
    print(resp.text)
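# Added sketch, not from the source notebook: the shape of the scoring response depends on the
# deployment's scoring script, which is not shown here, so treat the parsing below as a
# placeholder to adapt. The helper name `show_response` is made up for this example.

def show_response(resp):
    """Print the scoring result as pretty JSON, or the status code if the call failed."""
    if not resp.ok:
        print(f"Request failed with status {resp.status_code}: {resp.text}")
        return
    try:
        payload = resp.json()            # many AML scoring scripts return JSON
    except ValueError:
        print(resp.text)                 # fall back to the raw body
        return
    print(json.dumps(payload, indent=2)[:2000])  # truncate long outputs for readability

show_response(resp)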
sa-dsml-many-models/code/deployment/test_deployment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sahilpocker/Sentiment-Analysis/blob/master/Sentiment_Analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="fnHGakZOwKVZ" colab_type="text"
# # **Sentiment Analysis of Amazon.com reviews**

# + [markdown] id="fSMHxkbFw3XG" colab_type="text"
# **Sentiment analysis** is the process of computationally identifying and categorizing the opinions expressed in a piece of text, especially in order to determine whether the writer's attitude towards a particular topic, product, etc. is positive or negative.
#
# Here I have taken Amazon 'Health and Personal Care' product reviews, sourced from https://nijianmo.github.io/amazon/index.html

# + id="0yeIsRBEdNvY" colab_type="code" colab={}
# Required imports
import numpy as np
import pandas as pd
import tensorflow as tf
import os
import json
import gzip
from urllib.request import urlopen

# + id="HyUqJbspoj0I" colab_type="code" colab={}
from tensorflow.keras import layers
from tensorflow.keras import losses
from tensorflow.keras import preprocessing
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization

# + id="zZ1IEbtUApR4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="92eae5fc-9b72-458d-c268-f24144e7d538"
# !wget http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Health_and_Personal_Care_5.json.gz  # download the data

# + [markdown] id="85InOMLw1BZb" colab_type="text"
# The downloaded file is a *gzip* archive containing data in *json* format. The data needs to be extracted from the archive and loaded.

# + id="wWKxusQ7BehH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="f762284e-b5a6-48e7-a99b-dc1bfb463850"
### load the review data
data = []
with gzip.open('reviews_Health_and_Personal_Care_5.json.gz') as f:
    for l in f:
        data.append(json.loads(l.strip()))

# total length of the list; this equals the total number of reviews
print(len(data))

# first row of the list
print(data[0])

# + id="1fAmSla1DTpy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bc52337a-9090-4b86-931e-b6b23144b653"
# convert the obtained list into a pandas dataframe
df = pd.DataFrame.from_dict(data)
print(len(df))

# + [markdown] id="33IRH83F14Pg" colab_type="text"
# The total length of the DataFrame is 346455. Let us take a look at five rows of data, from 25 to 30.

# + id="dZofiZJaD2DF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="d3f1b789-b5cb-46a1-fe27-74ffb427edd4"
df.iloc[25:30]

# + [markdown] id="frac7h4X2avD" colab_type="text"
# There are a lot of unnecessary columns such as *reviewerID, asin, reviewerName, etc*. Our interest lies mainly in the *reviewText* itself and the *overall* rating, which is the rating out of 5.
#
# So let us drop all the other columns from the DataFrame.
# + id="AJJaEw3jD7Gl" colab_type="code" colab={} df1 = df.drop(['reviewerID','asin','reviewerName','unixReviewTime','reviewTime','helpful','summary'],axis=1) # + [markdown] id="mFp9chI_3AYD" colab_type="text" # Since this is a binary classification into positive and negative reviews, we have to convert the overall rating (out of 5) into positive or negative. # 1 and 2 star reviews can be considered as negative and 3+ stars as positive. # + id="-yByvXcbITIV" colab_type="code" colab={} df1['target'] = (df1['overall'] > 2).astype(int) #create new column 'target' which gives 1 if 'overall' is greater than 2, 0 otherwise. # + id="rQ8uHlMSJBiq" colab_type="code" colab={} df1 = df1.drop('overall',axis = 1) #since we no longer need 'overall' # + [markdown] id="2Kf5sBQn3tDF" colab_type="text" # Since the total length of the dataframe is huge, let us take only the last 50,000 for simplicity. # + id="lOqsRdMmJtAU" colab_type="code" colab={} df2 = df1[:50000] target = df2.pop('target') #store target variable (0/1) in another array # + id="x7vl9oscRkTG" colab_type="code" colab={} dataset = ( tf.data.Dataset.from_tensor_slices( ( tf.cast(df2['reviewText'].values, tf.string), tf.cast(target.values, tf.int32) ) ) ) #convert the dataframe into Tensorflow dataset # + [markdown] id="ImyAFQwz4Jw3" colab_type="text" # Let us take a look at 5 entries in the dataset and its associated labels(target) # + id="0z-HHSVOVZJn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="a50535a3-58f9-475b-b694-42f38f1e4f44" for feat, targ in dataset.take(5): print ('Features: {}, Target: {}'.format(feat, targ)) # + [markdown] id="zQxyM3RM4ZTT" colab_type="text" # Next, let us split the data into train, test, and validation. Let the split be 70% train, 15% validation and 15%test. # + id="UgApux3we34Q" colab_type="code" colab={} DATASET_SIZE = len(dataset) train_size = int(0.7 * DATASET_SIZE) val_size = int(0.15 * DATASET_SIZE) test_size = int(0.15 * DATASET_SIZE) # + id="sh4GvcPfjMWF" colab_type="code" colab={} raw_train_dataset = dataset.take(train_size) raw_test_dataset = dataset.skip(train_size) raw_val_dataset = raw_test_dataset.skip(val_size) raw_test_dataset = raw_test_dataset.take(test_size) # + [markdown] id="eqvOVMGV44C-" colab_type="text" # Next step is to convert the reviews data and vectorise it (map each word into integers) for training. # + id="eGVhdkx7nebF" colab_type="code" colab={} max_features = 10000 #total number of words sequence_length = 250 vectorize_layer = TextVectorization( max_tokens=max_features, output_mode='int', output_sequence_length=sequence_length) # + id="LsoPnB0En-LS" colab_type="code" colab={} # Make a text-only dataset (without labels), then call adapt train_text = dataset.map(lambda x, y: x) vectorize_layer.adapt(train_text) # + id="MSAiVWY4D-Gd" colab_type="code" colab={} def vectorize_text(text, label): text = tf.expand_dims(text, -1) return vectorize_layer(text), label # + [markdown] id="R_9SkbMA5VN1" colab_type="text" # Take a look at the mapped integer to the corresponding word in the vocabulary. # + id="V__FpPv1tHbu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="b2784645-496f-4f1f-c2dd-45e98c3c5c46" print("1287 ---> ",vectorize_layer.get_vocabulary()[1287]) print(" 313 ---> ",vectorize_layer.get_vocabulary()[313]) print('Vocabulary size: {}'.format(len(vectorize_layer.get_vocabulary()))) # + [markdown] id="01ay8UV55eWm" colab_type="text" # Now vectorise all three sets seperately. 
# + id="yZkPegyGy1JO" colab_type="code" colab={} train_ds = raw_train_dataset.map(vectorize_text) val_ds = raw_val_dataset.map(vectorize_text) test_ds = raw_test_dataset.map(vectorize_text) # + id="ILZv1n4P7RtP" colab_type="code" colab={} train_ds = train_ds.map(lambda x_text, x_label: (x_text, tf.expand_dims(x_label, -1))) #to match dimensions of 'target' while fitting model. val_ds = val_ds.map(lambda x_text, x_label: (x_text, tf.expand_dims(x_label, -1))) test_ds = test_ds.map(lambda x_text, x_label: (x_text, tf.expand_dims(x_label, -1))) # + id="SZOzloGXzA-6" colab_type="code" colab={} AUTOTUNE = tf.data.experimental.AUTOTUNE train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE) val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE) test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE) # + id="9Kx5I7ANzOl9" colab_type="code" colab={} embedding_dim = 16 # + [markdown] id="U0GWVh6S57Ww" colab_type="text" # Design the model, it consists of an embedding layer, a dropout after it, a bidirection LSTM, A dense layer with 32 units, another dropout layer and finally an output Layer with 1 unit. # + id="0XbqNb2F10EA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="418ba34a-1b97-415c-de64-f69abd5fd3cf" model = tf.keras.Sequential([ layers.Embedding(max_features + 1, embedding_dim), layers.Dropout(0.2), layers.Bidirectional(tf.keras.layers.LSTM(16)), layers.Dense(32), layers.Dropout(0.2), layers.Dense(1)]) model.summary() #view model summary # + [markdown] id="CWFxLvzO6VKo" colab_type="text" # Compile the model # + id="G0CHDcRm2HCb" colab_type="code" colab={} model.compile(loss=losses.BinaryCrossentropy(from_logits=True), optimizer='adam', metrics=tf.metrics.BinaryAccuracy(threshold=0.0)) callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3) # + id="4h0gDSa15bAd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="ac4c4174-323c-47f7-91fa-4506c2748b37" epochs = 8 batch_size = 32 history = model.fit( train_ds, validation_data=val_ds, epochs=epochs, batch_size=batch_size, callbacks=[callback]) #train the model # + [markdown] id="k0ayHtDe6jK7" colab_type="text" # This should be around 95% accuracy on the train set and 90% on the validation set, which means it is overfitting and exhibits variance. Let us now test it on the test set. # + id="LuenhSd35fI9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="d141f28d-cbad-43bc-b7ed-2809d381f792" loss, accuracy = model.evaluate(test_ds) print("Loss: ", loss) print("Accuracy: ", accuracy) # + [markdown] id="Bjm2xqOw7JLK" colab_type="text" # On the test set it shows an accuracy of 91% which is decent. By removing the overfitting, improving and optimsing the model, the accuracy could be higher. # + [markdown] id="OP5xT6ST7Fp1" colab_type="text" # Finally, save the model. # + id="PV6ZOtZ9eO-i" colab_type="code" colab={} export_model = tf.keras.Sequential([ vectorize_layer, model, layers.Activation('sigmoid') ]) export_model.compile( loss=losses.BinaryCrossentropy(from_logits=False), optimizer="adam", metrics=['accuracy'] )
Sentiment_Analysis.ipynb